Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig6
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_lpss.c8
-rw-r--r--drivers/acpi/acpi_processor.c10
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/acutils.h10
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c10
-rw-r--r--drivers/acpi/acpica/dbmethod.c4
-rw-r--r--drivers/acpi/acpica/dbobject.c1
-rw-r--r--drivers/acpi/acpica/dbstats.c92
-rw-r--r--drivers/acpi/acpica/evxfgpe.c6
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c1
-rw-r--r--drivers/acpi/acpica/utdebug.c4
-rw-r--r--drivers/acpi/acpica/uterror.c6
-rw-r--r--drivers/acpi/acpica/utosi.c3
-rw-r--r--drivers/acpi/acpica/utownerid.c12
-rw-r--r--drivers/acpi/apei/ghes.c19
-rw-r--r--drivers/acpi/arm64/iort.c10
-rw-r--r--drivers/acpi/cppc_acpi.c6
-rw-r--r--drivers/acpi/custom_method.c5
-rw-r--r--drivers/acpi/device_pm.c11
-rw-r--r--drivers/acpi/ec.c57
-rw-r--r--drivers/acpi/hmat/hmat.c143
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/nfit/core.c28
-rw-r--r--drivers/acpi/nfit/nfit.h24
-rw-r--r--drivers/acpi/osl.c6
-rw-r--r--drivers/acpi/pci_irq.c4
-rw-r--r--drivers/acpi/pci_link.c4
-rw-r--r--drivers/acpi/pptt.c53
-rw-r--r--drivers/acpi/processor_driver.c39
-rw-r--r--drivers/acpi/processor_perflib.c100
-rw-r--r--drivers/acpi/processor_thermal.c84
-rw-r--r--drivers/acpi/property.c8
-rw-r--r--drivers/acpi/sbshc.h2
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/sleep.c165
-rw-r--r--drivers/acpi/thermal.c11
-rw-r--r--drivers/acpi/x86/utils.c4
-rw-r--r--drivers/amba/tegra-ahb.c11
-rw-r--r--drivers/android/binder.c105
-rw-r--r--drivers/android/binder_internal.h86
-rw-r--r--drivers/android/binderfs.c290
-rw-r--r--drivers/ata/acard-ahci.c38
-rw-r--r--drivers/ata/ahci.c149
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libahci_platform.c4
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/libata-sff.c14
-rw-r--r--drivers/ata/libata-zpodd.c2
-rw-r--r--drivers/ata/pata_atp867x.c7
-rw-r--r--drivers/ata/pata_buddha.c228
-rw-r--r--drivers/ata/pata_cs5520.c6
-rw-r--r--drivers/ata/pata_hpt3x3.c5
-rw-r--r--drivers/ata/pata_ninja32.c5
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_rb532_cf.c1
-rw-r--r--drivers/ata/pata_sil680.c5
-rw-r--r--drivers/ata/pdc_adma.c23
-rw-r--r--drivers/ata/sata_inic162x.c8
-rw-r--r--drivers/ata/sata_mv.c38
-rw-r--r--drivers/ata/sata_nv.c10
-rw-r--r--drivers/ata/sata_promise.c5
-rw-r--r--drivers/ata/sata_qstor.c34
-rw-r--r--drivers/ata/sata_sil.c5
-rw-r--r--drivers/ata/sata_sil24.c26
-rw-r--r--drivers/ata/sata_svw.c5
-rw-r--r--drivers/ata/sata_sx4.c5
-rw-r--r--drivers/ata/sata_via.c9
-rw-r--r--drivers/ata/sata_vsc.c5
-rw-r--r--drivers/atm/Kconfig2
-rw-r--r--drivers/atm/iphase.c8
-rw-r--r--drivers/auxdisplay/Kconfig7
-rw-r--r--drivers/auxdisplay/charlcd.c2
-rw-r--r--drivers/auxdisplay/charlcd.h44
-rw-r--r--drivers/auxdisplay/hd44780.c3
-rw-r--r--drivers/auxdisplay/ht16k33.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/arch_topology.c300
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c24
-rw-r--r--drivers/base/core.c314
-rw-r--r--drivers/base/dd.c14
-rw-r--r--drivers/base/devcon.c51
-rw-r--r--drivers/base/devcoredump.c13
-rw-r--r--drivers/base/firmware_loader/firmware.h4
-rw-r--r--drivers/base/platform.c125
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/domain.c25
-rw-r--r--drivers/base/power/main.c35
-rw-r--r--drivers/base/power/power.h18
-rw-r--r--drivers/base/power/runtime.c19
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/wakeup.c72
-rw-r--r--drivers/base/power/wakeup_stats.c214
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/base/regmap/regmap-debugfs.c12
-rw-r--r--drivers/base/regmap/regmap-irq.c7
-rw-r--r--drivers/base/soc.c9
-rw-r--r--drivers/base/swnode.c39
-rw-r--r--drivers/block/aoe/aoedev.c13
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c14
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c19
-rw-r--r--drivers/block/nbd.c129
-rw-r--r--drivers/block/null_blk.h18
-rw-r--r--drivers/block/null_blk_main.c183
-rw-r--r--drivers/block/null_blk_zoned.c59
-rw-r--r--drivers/block/paride/pcd.c12
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/rbd.c11
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/btqca.c29
-rw-r--r--drivers/bluetooth/btqca.h7
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/bluetooth/hci_ath.c3
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/hci_intel.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c13
-rw-r--r--drivers/bluetooth/hci_mrvl.c3
-rw-r--r--drivers/bluetooth/hci_qca.c22
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bus/Kconfig10
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c1
-rw-r--r--drivers/bus/fsl-mc/mc-io.c1
-rw-r--r--drivers/bus/hisi_lpc.c47
-rw-r--r--drivers/bus/imx-weim.c34
-rw-r--r--drivers/bus/moxtet.c885
-rw-r--r--drivers/bus/sunxi-rsb.c4
-rw-r--r--drivers/bus/ti-sysc.c24
-rw-r--r--drivers/bus/uniphier-system-bus.c4
-rw-r--r--drivers/char/Kconfig31
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/Kconfig11
-rw-r--r--drivers/char/agp/Makefile1
-rw-r--r--drivers/char/agp/sgi-agp.c338
-rw-r--r--drivers/char/hpet.c3
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c2
-rw-r--r--drivers/char/mbcs.c831
-rw-r--r--drivers/char/mbcs.h553
-rw-r--r--drivers/char/mem.c21
-rw-r--r--drivers/char/mspec.c155
-rw-r--r--drivers/char/ppdev.c2
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/char/snsc.c469
-rw-r--r--drivers/char/snsc.h92
-rw-r--r--drivers/char/snsc_event.c303
-rw-r--r--drivers/char/toshiba.c8
-rw-r--r--drivers/char/tpm/Kconfig5
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm-chip.c50
-rw-r--r--drivers/char/tpm/tpm-sysfs.c7
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm1-cmd.c36
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c350
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.h40
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/clk/at91/clk-generated.c2
-rw-r--r--drivers/clk/clk-scmi.c2
-rw-r--r--drivers/clk/clk.c55
-rw-r--r--drivers/clk/imx/Kconfig6
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-imx8mm.c17
-rw-r--r--drivers/clk/imx/clk-imx8mn.c636
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c5
-rw-r--r--drivers/clk/imx/clk.c8
-rw-r--r--drivers/clk/imx/clk.h18
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c46
-rw-r--r--drivers/clk/meson/Kconfig11
-rw-r--r--drivers/clk/meson/Makefile2
-rw-r--r--drivers/clk/meson/axg-aoclk.c63
-rw-r--r--drivers/clk/meson/axg-audio.c261
-rw-r--r--drivers/clk/meson/axg.c207
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c73
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.h20
-rw-r--r--drivers/clk/meson/clk-input.c49
-rw-r--r--drivers/clk/meson/clk-input.h19
-rw-r--r--drivers/clk/meson/clk-regmap.h12
-rw-r--r--drivers/clk/meson/g12a-aoclk.c81
-rw-r--r--drivers/clk/meson/g12a.c1626
-rw-r--r--drivers/clk/meson/g12a.h1
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c55
-rw-r--r--drivers/clk/meson/gxbb.c657
-rw-r--r--drivers/clk/meson/meson-aoclk.c37
-rw-r--r--drivers/clk/meson/meson-aoclk.h8
-rw-r--r--drivers/clk/meson/meson-eeclk.c10
-rw-r--r--drivers/clk/meson/meson-eeclk.h2
-rw-r--r--drivers/clk/meson/meson8b.c710
-rw-r--r--drivers/clk/qcom/clk-rpmh.c16
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.h2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c7
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c162
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c2
-rw-r--r--drivers/clk/sprd/Kconfig1
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/em_sti.c4
-rw-r--r--drivers/clocksource/hyperv_timer.c45
-rw-r--r--drivers/clocksource/renesas-ostm.c2
-rw-r--r--drivers/clocksource/sh_cmt.c19
-rw-r--r--drivers/clocksource/sh_tmu.c5
-rw-r--r--drivers/clocksource/timer-atmel-tcb.c18
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c5
-rw-r--r--drivers/clocksource/timer-npcm7xx.c9
-rw-r--r--drivers/clocksource/timer-of.c6
-rw-r--r--drivers/clocksource/timer-probe.c4
-rw-r--r--drivers/clocksource/timer-riscv.c23
-rw-r--r--drivers/clocksource/timer-sun4i.c4
-rw-r--r--drivers/connector/connector.c6
-rw-r--r--drivers/counter/ftm-quaddec.c30
-rw-r--r--drivers/cpufreq/Kconfig.arm16
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c5
-rw-r--r--drivers/cpufreq/cpufreq.c59
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c146
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c4
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c23
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c19
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h8
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c96
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c23
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c249
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c352
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c226
-rw-r--r--drivers/cpufreq/ti-cpufreq.c1
-rw-r--r--drivers/cpuidle/Kconfig20
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/Makefile2
-rw-r--r--drivers/cpuidle/cpuidle-arm.c13
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c134
-rw-r--r--drivers/cpuidle/cpuidle-psci.c236
-rw-r--r--drivers/cpuidle/cpuidle.c30
-rw-r--r--drivers/cpuidle/cpuidle.h2
-rw-r--r--drivers/cpuidle/driver.c25
-rw-r--r--drivers/cpuidle/governor.c7
-rw-r--r--drivers/cpuidle/governors/Makefile1
-rw-r--r--drivers/cpuidle/governors/haltpoll.c150
-rw-r--r--drivers/cpuidle/governors/ladder.c21
-rw-r--r--drivers/cpuidle/governors/menu.c21
-rw-r--r--drivers/cpuidle/governors/teo.c60
-rw-r--r--drivers/cpuidle/poll_state.c11
-rw-r--r--drivers/cpuidle/sysfs.c7
-rw-r--r--drivers/crypto/Kconfig20
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c14
-rw-r--r--drivers/crypto/ccp/ccp-dev.c8
-rw-r--r--drivers/crypto/ccp/ccp-ops.c33
-rw-r--r--drivers/devfreq/Kconfig19
-rw-r--r--drivers/devfreq/Makefile3
-rw-r--r--drivers/devfreq/devfreq.c12
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c104
-rw-r--r--drivers/devfreq/exynos-bus.c153
-rw-r--r--drivers/devfreq/governor_passive.c7
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/devfreq/tegra20-devfreq.c212
-rw-r--r--drivers/devfreq/tegra30-devfreq.c (renamed from drivers/devfreq/tegra-devfreq.c)315
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/dma/acpi-dma.c12
-rw-r--r--drivers/dma/bcm2835-dma.c42
-rw-r--r--drivers/dma/dma-jz4780.c19
-rw-r--r--drivers/dma/dmatest.c35
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h2
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c18
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c34
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c29
-rw-r--r--drivers/dma/dw/Makefile4
-rw-r--r--drivers/dma/dw/acpi.c53
-rw-r--r--drivers/dma/dw/internal.h51
-rw-r--r--drivers/dma/dw/of.c131
-rw-r--r--drivers/dma/dw/pci.c62
-rw-r--r--drivers/dma/dw/platform.c221
-rw-r--r--drivers/dma/fsl-edma-common.c20
-rw-r--r--drivers/dma/fsl-edma-common.h4
-rw-r--r--drivers/dma/fsl-edma.c81
-rw-r--r--drivers/dma/fsl-qdma.c9
-rw-r--r--drivers/dma/fsldma.c1
-rw-r--r--drivers/dma/imx-dma.c1
-rw-r--r--drivers/dma/imx-sdma.c4
-rw-r--r--drivers/dma/ioat/dca.c3
-rw-r--r--drivers/dma/iop-adma.c24
-rw-r--r--drivers/dma/iop-adma.h914
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c4
-rw-r--r--drivers/dma/mv_xor_v2.c11
-rw-r--r--drivers/dma/pl330.c9
-rw-r--r--drivers/dma/qcom/hidma_ll.c2
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c9
-rw-r--r--drivers/dma/s3c24xx-dma.c5
-rw-r--r--drivers/dma/sh/rcar-dmac.c32
-rw-r--r--drivers/dma/sh/usb-dmac.c4
-rw-r--r--drivers/dma/sprd-dma.c10
-rw-r--r--drivers/dma/st_fdma.c4
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/stm32-dma.c18
-rw-r--r--drivers/dma/stm32-dmamux.c3
-rw-r--r--drivers/dma/stm32-mdma.c9
-rw-r--r--drivers/dma/sun4i-dma.c4
-rw-r--r--drivers/dma/sun6i-dma.c4
-rw-r--r--drivers/dma/tegra20-apb-dma.c75
-rw-r--r--drivers/dma/tegra210-adma.c16
-rw-r--r--drivers/dma/ti/dma-crossbar.c4
-rw-r--r--drivers/dma/ti/edma.c228
-rw-r--r--drivers/dma/ti/omap-dma.c70
-rw-r--r--drivers/dma/uniphier-mdmac.c5
-rw-r--r--drivers/dma/xgene-dma.c8
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/altera_edac.c58
-rw-r--r--drivers/edac/altera_edac.h25
-rw-r--r--drivers/edac/amd64_edac.c371
-rw-r--r--drivers/edac/amd64_edac.h15
-rw-r--r--drivers/edac/bluefield_edac.c356
-rw-r--r--drivers/edac/edac_mc.c53
-rw-r--r--drivers/edac/edac_mc.h6
-rw-r--r--drivers/edac/edac_mc_sysfs.c92
-rw-r--r--drivers/edac/ghes_edac.c2
-rw-r--r--drivers/edac/i10nm_base.c4
-rw-r--r--drivers/edac/i5100_edac.c16
-rw-r--r--drivers/edac/pnd2_edac.c9
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/extcon/extcon-adc-jack.c4
-rw-r--r--drivers/extcon/extcon-arizona.c2
-rw-r--r--drivers/extcon/extcon-axp288.c16
-rw-r--r--drivers/extcon/extcon-fsa9480.c1
-rw-r--r--drivers/extcon/extcon-gpio.c29
-rw-r--r--drivers/extcon/extcon-max77843.c6
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-iso.c2
-rw-r--r--drivers/firewire/core-topology.c1
-rw-r--r--drivers/firmware/Kconfig37
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/arm_scmi/Makefile2
-rw-r--r--drivers/firmware/arm_scmi/base.c2
-rw-r--r--drivers/firmware/arm_scmi/clock.c33
-rw-r--r--drivers/firmware/arm_scmi/common.h18
-rw-r--r--drivers/firmware/arm_scmi/driver.c366
-rw-r--r--drivers/firmware/arm_scmi/perf.c264
-rw-r--r--drivers/firmware/arm_scmi/power.c6
-rw-r--r--drivers/firmware/arm_scmi/reset.c231
-rw-r--r--drivers/firmware/arm_scmi/sensors.c57
-rw-r--r--drivers/firmware/arm_scpi.c5
-rw-r--r--drivers/firmware/efi/Kconfig13
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/cper.c15
-rw-r--r--drivers/firmware/efi/efi.c39
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c38
-rw-r--r--drivers/firmware/efi/rci2-table.c147
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/google/vpd_decode.c55
-rw-r--r--drivers/firmware/google/vpd_decode.h6
-rw-r--r--drivers/firmware/imx/Kconfig11
-rw-r--r--drivers/firmware/imx/Makefile1
-rw-r--r--drivers/firmware/imx/imx-dsp.c155
-rw-r--r--drivers/firmware/imx/scu-pd.c4
-rw-r--r--drivers/firmware/iscsi_ibft.c4
-rw-r--r--drivers/firmware/psci/psci.c167
-rw-r--r--drivers/firmware/psci/psci_checker.c16
-rw-r--r--drivers/firmware/qcom_scm.c47
-rw-r--r--drivers/firmware/stratix10-rsu.c451
-rw-r--r--drivers/firmware/stratix10-svc.c76
-rw-r--r--drivers/firmware/ti_sci.c45
-rw-r--r--drivers/firmware/turris-mox-rwtm.c384
-rw-r--r--drivers/fpga/Kconfig7
-rw-r--r--drivers/fpga/Makefile3
-rw-r--r--drivers/fpga/altera-cvp.c342
-rw-r--r--drivers/fpga/altera-pr-ip-core-plat.c4
-rw-r--r--drivers/fpga/altera-pr-ip-core.c4
-rw-r--r--drivers/fpga/altera-ps-spi.c11
-rw-r--r--drivers/fpga/dfl-afu-error.c230
-rw-r--r--drivers/fpga/dfl-afu-main.c381
-rw-r--r--drivers/fpga/dfl-afu.h9
-rw-r--r--drivers/fpga/dfl-fme-error.c359
-rw-r--r--drivers/fpga/dfl-fme-main.c128
-rw-r--r--drivers/fpga/dfl-fme-pr.c7
-rw-r--r--drivers/fpga/dfl-fme.h6
-rw-r--r--drivers/fpga/dfl-pci.c36
-rw-r--r--drivers/fpga/dfl.c226
-rw-r--r--drivers/fpga/dfl.h52
-rw-r--r--drivers/fpga/fpga-bridge.c8
-rw-r--r--drivers/fpga/fpga-mgr.c8
-rw-r--r--drivers/fsi/fsi-scom.c8
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-arizona.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c117
-rw-r--r--drivers/gpio/gpio-bd70528.c4
-rw-r--r--drivers/gpio/gpio-brcmstb.c4
-rw-r--r--drivers/gpio/gpio-cadence.c36
-rw-r--r--drivers/gpio/gpio-creg-snps.c4
-rw-r--r--drivers/gpio/gpio-dwapb.c1
-rw-r--r--drivers/gpio/gpio-eic-sprd.c4
-rw-r--r--drivers/gpio/gpio-em.c37
-rw-r--r--drivers/gpio/gpio-ep93xx.c140
-rw-r--r--drivers/gpio/gpio-ftgpio010.c14
-rw-r--r--drivers/gpio/gpio-grgpio.c2
-rw-r--r--drivers/gpio/gpio-hlwd.c58
-rw-r--r--drivers/gpio/gpio-htc-egpio.c14
-rw-r--r--drivers/gpio/gpio-intel-mid.c35
-rw-r--r--drivers/gpio/gpio-ixp4xx.c279
-rw-r--r--drivers/gpio/gpio-ks8695.c284
-rw-r--r--drivers/gpio/gpio-lpc32xx.c118
-rw-r--r--drivers/gpio/gpio-lynxpoint.c35
-rw-r--r--drivers/gpio/gpio-madera.c12
-rw-r--r--drivers/gpio/gpio-max77620.c4
-rw-r--r--drivers/gpio/gpio-max77650.c1
-rw-r--r--drivers/gpio/gpio-mb86s7x.c1
-rw-r--r--drivers/gpio/gpio-merrifield.c28
-rw-r--r--drivers/gpio/gpio-mockup.c1
-rw-r--r--drivers/gpio/gpio-moxtet.c179
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c32
-rw-r--r--drivers/gpio/gpio-mt7621.c33
-rw-r--r--drivers/gpio/gpio-mxc.c9
-rw-r--r--drivers/gpio/gpio-pca953x.c85
-rw-r--r--drivers/gpio/gpio-pch.c6
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c4
-rw-r--r--drivers/gpio/gpio-sprd.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c36
-rw-r--r--drivers/gpio/gpio-tb10x.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-thunderx.c163
-rw-r--r--drivers/gpio/gpio-tqmx86.c50
-rw-r--r--drivers/gpio/gpio-vf610.c26
-rw-r--r--drivers/gpio/gpio-viperboard.c20
-rw-r--r--drivers/gpio/gpio-xgene-sb.c1
-rw-r--r--drivers/gpio/gpio-xlp.c29
-rw-r--r--drivers/gpio/gpio-zx.c35
-rw-r--r--drivers/gpio/gpio-zynq.c41
-rw-r--r--drivers/gpio/gpiolib-acpi.c69
-rw-r--r--drivers/gpio/gpiolib-acpi.h96
-rw-r--r--drivers/gpio/gpiolib-devres.c28
-rw-r--r--drivers/gpio/gpiolib-of.c185
-rw-r--r--drivers/gpio/gpiolib-of.h38
-rw-r--r--drivers/gpio/gpiolib.c743
-rw-r--r--drivers/gpio/gpiolib.h120
-rw-r--r--drivers/gpio/sgpio-aspeed.c533
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c125
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c211
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c62
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c65
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c11
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c19
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c30
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c5
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig4
-rw-r--r--drivers/gpu/drm/drm_client.c60
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c51
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c7
-rw-r--r--drivers/gpu/drm/drm_modes.c60
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c43
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c59
-rw-r--r--drivers/gpu/drm/i915/gvt/trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c67
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h76
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c10
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h15
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c5
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c55
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c47
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c24
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c6
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c3
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c4
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h7
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c130
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c1
-rw-r--r--drivers/gpu/drm/tegra/output.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h35
-rw-r--r--drivers/greybus/Kconfig32
-rw-r--r--drivers/greybus/Makefile26
-rw-r--r--drivers/greybus/arpc.h63
-rw-r--r--drivers/greybus/bundle.c (renamed from drivers/staging/greybus/bundle.c)2
-rw-r--r--drivers/greybus/connection.c (renamed from drivers/staging/greybus/connection.c)2
-rw-r--r--drivers/greybus/control.c (renamed from drivers/staging/greybus/control.c)2
-rw-r--r--drivers/greybus/core.c (renamed from drivers/staging/greybus/core.c)2
-rw-r--r--drivers/greybus/debugfs.c (renamed from drivers/staging/greybus/debugfs.c)3
-rw-r--r--drivers/greybus/es2.c (renamed from drivers/staging/greybus/es2.c)3
-rw-r--r--drivers/greybus/greybus_trace.h (renamed from drivers/staging/greybus/greybus_trace.h)2
-rw-r--r--drivers/greybus/hd.c (renamed from drivers/staging/greybus/hd.c)12
-rw-r--r--drivers/greybus/interface.c (renamed from drivers/staging/greybus/interface.c)2
-rw-r--r--drivers/greybus/manifest.c (renamed from drivers/staging/greybus/manifest.c)41
-rw-r--r--drivers/greybus/module.c (renamed from drivers/staging/greybus/module.c)2
-rw-r--r--drivers/greybus/operation.c (renamed from drivers/staging/greybus/operation.c)2
-rw-r--r--drivers/greybus/svc.c (renamed from drivers/staging/greybus/svc.c)3
-rw-r--r--drivers/greybus/svc_watchdog.c (renamed from drivers/staging/greybus/svc_watchdog.c)2
-rw-r--r--drivers/hid/hid-a4tech.c30
-rw-r--r--drivers/hid/hid-cp2112.c8
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-logitech-hidpp.c14
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-sony.c15
-rw-r--r--drivers/hid/hid-tmff.c12
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c12
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hv/Kconfig3
-rw-r--r--drivers/hv/channel.c2
-rw-r--r--drivers/hv/hv_trace.h2
-rw-r--r--drivers/hv/hyperv_vmbus.h10
-rw-r--r--drivers/hwmon/Kconfig31
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/ads1015.c324
-rw-r--r--drivers/hwmon/adt7475.c146
-rw-r--r--drivers/hwmon/as370-hwmon.c145
-rw-r--r--drivers/hwmon/asb100.c12
-rw-r--r--drivers/hwmon/atxp1.c2
-rw-r--r--drivers/hwmon/coretemp.c3
-rw-r--r--drivers/hwmon/iio_hwmon.c18
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/k8temp.c233
-rw-r--r--drivers/hwmon/lm75.c462
-rw-r--r--drivers/hwmon/ltc2990.c10
-rw-r--r--drivers/hwmon/nct6775.c183
-rw-r--r--drivers/hwmon/nct7802.c6
-rw-r--r--drivers/hwmon/nct7904.c476
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c4
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c110
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c228
-rw-r--r--drivers/hwmon/pmbus/max31785.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c1
-rw-r--r--drivers/hwmon/pwm-fan.c2
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c2
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwmon/shtc1.c57
-rw-r--r--drivers/hwmon/smm665.c8
-rw-r--r--drivers/hwmon/w83781d.c6
-rw-r--r--drivers/hwmon/w83791d.c32
-rw-r--r--drivers/hwmon/w83792d.c32
-rw-r--r--drivers/hwmon/w83793.c30
-rw-r--r--drivers/hwtracing/coresight/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c38
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h12
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c40
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h11
-rw-r--r--drivers/hwtracing/coresight/coresight.c4
-rw-r--r--drivers/hwtracing/intel_th/Makefile3
-rw-r--r--drivers/hwtracing/intel_th/core.c10
-rw-r--r--drivers/hwtracing/intel_th/msu-sink.c116
-rw-r--r--drivers/hwtracing/intel_th/msu.c537
-rw-r--r--drivers/hwtracing/intel_th/msu.h22
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/intel_th/pti.h2
-rw-r--r--drivers/hwtracing/stm/core.c10
-rw-r--r--drivers/i2c/Kconfig4
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c8
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c2
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c9
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c15
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c1
-rw-r--r--drivers/i2c/busses/i2c-emev2.c16
-rw-r--r--drivers/i2c/busses/i2c-i801.c17
-rw-r--r--drivers/i2c/busses/i2c-imx.c18
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c11
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c12
-rw-r--r--drivers/i2c/busses/i2c-rcar.c11
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-stm32.h2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c9
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core-of.c7
-rw-r--r--drivers/i3c/device.c53
-rw-r--r--drivers/i3c/master.c67
-rw-r--r--drivers/i3c/master/dw-i3c-master.c4
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c30
-rw-r--r--drivers/ide/Kconfig9
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/sgiioc4.c630
-rw-r--r--drivers/idle/intel_idle.c28
-rw-r--r--drivers/iio/accel/Kconfig4
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c353
-rw-r--r--drivers/iio/accel/kxcjk-1013.c4
-rw-r--r--drivers/iio/accel/mxc4005.c40
-rw-r--r--drivers/iio/accel/sca3000.c2
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_buffer.c44
-rw-r--r--drivers/iio/accel/st_accel_core.c32
-rw-r--r--drivers/iio/accel/st_accel_i2c.c23
-rw-r--r--drivers/iio/accel/st_accel_spi.c20
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7606.c100
-rw-r--r--drivers/iio/adc/ad7606.h61
-rw-r--r--drivers/iio/adc/ad7606_par.c4
-rw-r--r--drivers/iio/adc/ad7606_spi.c282
-rw-r--r--drivers/iio/adc/at91_adc.c4
-rw-r--r--drivers/iio/adc/axp288_adc.c4
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c7
-rw-r--r--drivers/iio/adc/da9150-gpadc.c4
-rw-r--r--drivers/iio/adc/envelope-detector.c5
-rw-r--r--drivers/iio/adc/exynos_adc.c4
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c1
-rw-r--r--drivers/iio/adc/hi8435.c34
-rw-r--r--drivers/iio/adc/imx7d_adc.c4
-rw-r--r--drivers/iio/adc/ingenic-adc.c54
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c4
-rw-r--r--drivers/iio/adc/max1027.c38
-rw-r--r--drivers/iio/adc/max9611.c4
-rw-r--r--drivers/iio/adc/npcm_adc.c1
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c4
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/sc27xx_adc.c122
-rw-r--r--drivers/iio/adc/spear_adc.c1
-rw-r--r--drivers/iio/adc/stm32-adc-core.c194
-rw-r--r--drivers/iio/adc/stm32-adc.c4
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c5
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c4
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c4
-rw-r--r--drivers/iio/adc/vf610_adc.c4
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c54
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c153
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c69
-rw-r--r--drivers/iio/common/st_sensors/Kconfig2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c10
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c118
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c82
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c148
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c31
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c4
-rw-r--r--drivers/iio/frequency/adf4371.c8
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c48
-rw-r--r--drivers/iio/gyro/st_gyro_core.c32
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c22
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c20
-rw-r--r--drivers/iio/humidity/am2315.c24
-rw-r--r--drivers/iio/humidity/hdc100x.c19
-rw-r--r--drivers/iio/imu/Kconfig12
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c12
-rw-r--r--drivers/iio/imu/adis16460.c489
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c46
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig11
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Makefile1
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h29
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c36
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c622
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c15
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c57
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c15
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/cm3323.c33
-rw-r--r--drivers/iio/light/cm36651.c12
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c51
-rw-r--r--drivers/iio/light/noa1305.c313
-rw-r--r--drivers/iio/light/si1145.c42
-rw-r--r--drivers/iio/light/stk3310.c9
-rw-r--r--drivers/iio/light/tsl2772.c124
-rw-r--r--drivers/iio/light/veml6070.c6
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c31
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c32
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c22
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c20
-rw-r--r--drivers/iio/potentiometer/Kconfig11
-rw-r--r--drivers/iio/potentiometer/Makefile1
-rw-r--r--drivers/iio/potentiometer/max5432.c135
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c19
-rw-r--r--drivers/iio/pressure/hp03.c6
-rw-r--r--drivers/iio/pressure/st_pressure.h1
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c39
-rw-r--r--drivers/iio/pressure/st_pressure_core.c32
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c31
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c22
-rw-r--r--drivers/iio/proximity/Kconfig2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c25
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c84
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/core_priv.h5
-rw-r--r--drivers/infiniband/core/counters.c27
-rw-r--r--drivers/infiniband/core/device.c102
-rw-r--r--drivers/infiniband/core/mad.c20
-rw-r--r--drivers/infiniband/core/nldev.c11
-rw-r--r--drivers/infiniband/core/restrack.c15
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/core/user_mad.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c14
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c11
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c12
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c119
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/Kconfig6
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c12
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c11
-rw-r--r--drivers/infiniband/hw/mlx5/main.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h15
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c51
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c48
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c37
-rw-r--r--drivers/infiniband/hw/qedr/main.c10
-rw-r--r--drivers/infiniband/sw/siw/Kconfig2
-rw-r--r--drivers/infiniband/sw/siw/siw.h10
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c116
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c14
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c22
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c26
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c80
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c56
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c5
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/applespi.c29
-rw-r--r--drivers/input/mouse/elantech.c55
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/vmmouse.c6
-rw-r--r--drivers/input/serio/hyperv-keyboard.c35
-rw-r--r--drivers/input/tablet/kbtab.c6
-rw-r--r--drivers/input/touchscreen/sur40.c4
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c2
-rw-r--r--drivers/interconnect/core.c27
-rw-r--r--drivers/interconnect/qcom/Kconfig12
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/qcs404.c539
-rw-r--r--drivers/interconnect/qcom/sdm845.c160
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c77
-rw-r--r--drivers/interconnect/qcom/smd-rpm.h15
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/Makefile5
-rw-r--r--drivers/iommu/amd_iommu.c146
-rw-r--r--drivers/iommu/amd_iommu.h14
-rw-r--r--drivers/iommu/amd_iommu_init.c95
-rw-r--r--drivers/iommu/amd_iommu_quirks.c92
-rw-r--r--drivers/iommu/amd_iommu_types.h18
-rw-r--r--drivers/iommu/arm-smmu-impl.c174
-rw-r--r--drivers/iommu/arm-smmu-regs.h210
-rw-r--r--drivers/iommu/arm-smmu-v3.c993
-rw-r--r--drivers/iommu/arm-smmu.c671
-rw-r--r--drivers/iommu/arm-smmu.h402
-rw-r--r--drivers/iommu/dma-iommu.c41
-rw-r--r--drivers/iommu/dmar.c77
-rw-r--r--drivers/iommu/exynos-iommu.c9
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c6
-rw-r--r--drivers/iommu/intel-iommu.c504
-rw-r--r--drivers/iommu/intel-svm.c36
-rw-r--r--drivers/iommu/intel-trace.c14
-rw-r--r--drivers/iommu/intel_irq_remapping.c6
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c145
-rw-r--r--drivers/iommu/io-pgtable-arm.c48
-rw-r--r--drivers/iommu/iommu.c217
-rw-r--r--drivers/iommu/iova.c27
-rw-r--r--drivers/iommu/ipmmu-vmsa.c106
-rw-r--r--drivers/iommu/msm_iommu.c43
-rw-r--r--drivers/iommu/mtk_iommu.c213
-rw-r--r--drivers/iommu/mtk_iommu.h21
-rw-r--r--drivers/iommu/mtk_iommu_v1.c9
-rw-r--r--drivers/iommu/omap-iommu.c324
-rw-r--r--drivers/iommu/omap-iommu.h9
-rw-r--r--drivers/iommu/qcom_iommu.c72
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/iommu/s390-iommu.c3
-rw-r--r--drivers/iommu/tegra-gart.c12
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c45
-rw-r--r--drivers/irqchip/irq-gic-common.c35
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c15
-rw-r--r--drivers/irqchip/irq-gic-v3.c388
-rw-r--r--drivers/irqchip/irq-gic.c14
-rw-r--r--drivers/irqchip/irq-hip04.c9
-rw-r--r--drivers/irqchip/irq-imgpdc.c8
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-ixp4xx.c2
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-mbigen.c9
-rw-r--r--drivers/irqchip/irq-meson-gpio.c52
-rw-r--r--drivers/irqchip/irq-mmp.c86
-rw-r--r--drivers/irqchip/irq-sifive-plic.c12
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c4
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c4
-rw-r--r--drivers/isdn/capi/capi.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c13
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/led-class-flash.c9
-rw-r--r--drivers/leds/led-class.c62
-rw-r--r--drivers/leds/led-core.c136
-rw-r--r--drivers/leds/led-triggers.c5
-rw-r--r--drivers/leds/leds-aat1290.c16
-rw-r--r--drivers/leds/leds-an30259a.c29
-rw-r--r--drivers/leds/leds-apu.c157
-rw-r--r--drivers/leds/leds-as3645a.c74
-rw-r--r--drivers/leds/leds-cr0014114.c33
-rw-r--r--drivers/leds/leds-gpio.c26
-rw-r--r--drivers/leds/leds-is31fl319x.c3
-rw-r--r--drivers/leds/leds-is31fl32xx.c8
-rw-r--r--drivers/leds/leds-ktd2692.c4
-rw-r--r--drivers/leds/leds-lm3532.c138
-rw-r--r--drivers/leds/leds-lm3601x.c38
-rw-r--r--drivers/leds/leds-lm36274.c4
-rw-r--r--drivers/leds/leds-lm3692x.c22
-rw-r--r--drivers/leds/leds-lm3697.c5
-rw-r--r--drivers/leds/leds-lp5562.c6
-rw-r--r--drivers/leds/leds-lp8860.c35
-rw-r--r--drivers/leds/leds-lt3593.c20
-rw-r--r--drivers/leds/leds-max77650.c44
-rw-r--r--drivers/leds/leds-netxbig.c76
-rw-r--r--drivers/leds/leds-ns2.c21
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pwm.c11
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c22
-rw-r--r--drivers/leds/leds-syscon.c2
-rw-r--r--drivers/leds/leds-ti-lmu-common.c15
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c12
-rw-r--r--drivers/lightnvm/core.c97
-rw-r--r--drivers/lightnvm/pblk-core.c116
-rw-r--r--drivers/lightnvm/pblk-gc.c19
-rw-r--r--drivers/lightnvm/pblk-init.c38
-rw-r--r--drivers/lightnvm/pblk-read.c26
-rw-r--r--drivers/lightnvm/pblk-recovery.c42
-rw-r--r--drivers/lightnvm/pblk-write.c20
-rw-r--r--drivers/lightnvm/pblk.h31
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c77
-rw-r--r--drivers/md/bcache/closure.c10
-rw-r--r--drivers/md/bcache/debug.c5
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/bcache/sysfs.c21
-rw-r--r--drivers/md/dm-bufio.c4
-rw-r--r--drivers/md/dm-dust.c11
-rw-r--r--drivers/md/dm-integrity.c15
-rw-r--r--drivers/md/dm-kcopyd.c5
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c3
-rw-r--r--drivers/md/dm-table.c21
-rw-r--r--drivers/md/dm-zoned-metadata.c68
-rw-r--r--drivers/md/dm-zoned-reclaim.c47
-rw-r--r--drivers/md/dm-zoned-target.c68
-rw-r--r--drivers/md/dm-zoned.h11
-rw-r--r--drivers/md/md-linear.c5
-rw-r--r--drivers/md/md.c96
-rw-r--r--drivers/md/md.h20
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c2
-rw-r--r--drivers/md/raid0.c41
-rw-r--r--drivers/md/raid0.h14
-rw-r--r--drivers/md/raid1.c89
-rw-r--r--drivers/md/raid10.c32
-rw-r--r--drivers/md/raid5.c27
-rw-r--r--drivers/md/raid5.h5
-rw-r--r--drivers/media/Kconfig7
-rw-r--r--drivers/media/cec/cec-adap.c3
-rw-r--r--drivers/media/cec/cec-notifier.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c16
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c107
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c23
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c8
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c4
-rw-r--r--drivers/media/dvb-core/dvbdev.c7
-rw-r--r--drivers/media/dvb-frontends/Kconfig5
-rw-r--r--drivers/media/dvb-frontends/cx24117.c4
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c9
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c5
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c40
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c6
-rw-r--r--drivers/media/dvb-frontends/mn88472.c18
-rw-r--r--drivers/media/dvb-frontends/mn88473.c18
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c3
-rw-r--r--drivers/media/dvb-frontends/si2168.c164
-rw-r--r--drivers/media/dvb-frontends/si2168.h1
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c3
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/i2c/Kconfig18
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/ad9389b.c6
-rw-r--r--drivers/media/i2c/adv7180.c12
-rw-r--r--drivers/media/i2c/adv7343.c5
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c13
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c28
-rw-r--r--drivers/media/i2c/adv7842.c42
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c5
-rw-r--r--drivers/media/i2c/imx274.c5
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c14
-rw-r--r--drivers/media/i2c/max2175.c5
-rw-r--r--drivers/media/i2c/mt9m001.c5
-rw-r--r--drivers/media/i2c/mt9m111.c7
-rw-r--r--drivers/media/i2c/ov2640.c7
-rw-r--r--drivers/media/i2c/ov2659.c9
-rw-r--r--drivers/media/i2c/ov2680.c9
-rw-r--r--drivers/media/i2c/ov5640.c14
-rw-r--r--drivers/media/i2c/ov5645.c151
-rw-r--r--drivers/media/i2c/ov5647.c5
-rw-r--r--drivers/media/i2c/ov5675.c1183
-rw-r--r--drivers/media/i2c/ov5695.c5
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov772x.c5
-rw-r--r--drivers/media/i2c/ov7740.c13
-rw-r--r--drivers/media/i2c/ov8856.c5
-rw-r--r--drivers/media/i2c/ov9650.c10
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c5
-rw-r--r--drivers/media/i2c/s5k5baf.c5
-rw-r--r--drivers/media/i2c/s5k6a3.c5
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c5
-rw-r--r--drivers/media/i2c/tc358743.c5
-rw-r--r--drivers/media/i2c/tda1997x.c9
-rw-r--r--drivers/media/i2c/ths8200.c5
-rw-r--r--drivers/media/i2c/tvp5150.c9
-rw-r--r--drivers/media/i2c/tvp7002.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c19
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c8
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h1
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c40
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h63
-rw-r--r--drivers/media/pci/cobalt/cobalt-flash.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c6
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c7
-rw-r--r--drivers/media/pci/cx25821/cx25821.h1
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c17
-rw-r--r--drivers/media/pci/cx88/cx88.h1
-rw-r--r--drivers/media/pci/dt3155/dt3155.c1
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c100
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h28
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/mantis/mantis_reg.h152
-rw-r--r--drivers/media/pci/meye/meye.c3
-rw-r--r--drivers/media/pci/ngene/ngene-core.c4
-rw-r--r--drivers/media/pci/pt1/pt1.c6
-rw-r--r--drivers/media/pci/pt3/pt3.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c14
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c28
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c3
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c6
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-regs.h286
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c9
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c4
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.c1
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c14
-rw-r--r--drivers/media/pci/tw68/tw68-video.c14
-rw-r--r--drivers/media/pci/tw68/tw68.h1
-rw-r--r--drivers/media/platform/Kconfig6
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c30
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h6
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h26
-rw-r--r--drivers/media/platform/aspeed-video.c6
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c2
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c7
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c155
-rw-r--r--drivers/media/platform/coda/coda-common.c4
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c69
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc_regs.h20
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c21
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c5
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c5
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c54
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c33
-rw-r--r--drivers/media/platform/davinci/vpif_display.c7
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c22
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c20
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite-reg.h80
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.h138
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c10
-rw-r--r--drivers/media/platform/fsl-viu.c11
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/m2m-deinterlace.c140
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c15
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c1
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c84
-rw-r--r--drivers/media/platform/meson/ao-cec.c44
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c16
-rw-r--r--drivers/media/platform/mx2_emmaprp.c141
-rw-r--r--drivers/media/platform/omap/Kconfig3
-rw-r--r--drivers/media/platform/omap/omap_vout.c1045
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c29
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.h4
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h45
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c6
-rw-r--r--drivers/media/platform/omap3isp/isp.c340
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c1
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c1
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c1
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h584
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c1
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c15
-rw-r--r--drivers/media/platform/pxa_camera.c7
-rw-r--r--drivers/media/platform/qcom/camss/camss.c2
-rw-r--r--drivers/media/platform/qcom/venus/core.c2
-rw-r--r--drivers/media/platform/qcom/venus/core.h33
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c199
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h11
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c11
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h2
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c548
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c7
-rw-r--r--drivers/media/platform/qcom/venus/venc.c13
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c55
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c56
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c68
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h9
-rw-r--r--drivers/media/platform/rcar_fdp1.c3
-rw-r--r--drivers/media/platform/renesas-ceu.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c1
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c16
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c10
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h1
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h118
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c23
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c13
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c31
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h10
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c19
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c2
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c55
-rw-r--r--drivers/media/platform/sh_veu.c28
-rw-r--r--drivers/media/platform/sh_vou.c17
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c6
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c8
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c32
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c8
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c323
-rw-r--r--drivers/media/platform/sunxi/Kconfig2
-rw-r--r--drivers/media/platform/sunxi/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Kconfig11
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/Makefile5
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c314
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h160
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c454
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c385
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c7
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c35
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.h82
-rw-r--r--drivers/media/platform/ti-vpe/cal.c6
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c10
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c12
-rw-r--r--drivers/media/platform/ti-vpe/vpe_regs.h94
-rw-r--r--drivers/media/platform/via-camera.c579
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c30
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c14
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c76
-rw-r--r--drivers/media/platform/vimc/vimc-common.c309
-rw-r--r--drivers/media/platform/vimc/vimc-common.h58
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c88
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c68
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c57
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c40
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.h14
-rw-r--r--drivers/media/platform/vivid/vivid-core.c12
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c26
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c9
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c132
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h224
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c10
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c16
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h31
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-trust.c1
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c5
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c5
-rw-r--r--drivers/media/radio/si4713/si4713.c5
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h88
-rw-r--r--drivers/media/rc/iguanair.c15
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c4
-rw-r--r--drivers/media/rc/imon.c7
-rw-r--r--drivers/media/rc/imon_raw.c43
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile7
-rw-r--r--drivers/media/rc/keymaps/rc-imon-rsc.c7
-rw-r--r--drivers/media/rc/keymaps/rc-khadas.c54
-rw-r--r--drivers/media/rc/keymaps/rc-odroid.c54
-rw-r--r--drivers/media/rc/keymaps/rc-tanix-tx3mini.c77
-rw-r--r--drivers/media/rc/keymaps/rc-tanix-tx5max.c68
-rw-r--r--drivers/media/rc/keymaps/rc-wetek-hub.c53
-rw-r--r--drivers/media/rc/keymaps/rc-wetek-play2.c93
-rw-r--r--drivers/media/rc/keymaps/rc-x96max.c83
-rw-r--r--drivers/media/rc/mceusb.c401
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c14
-rw-r--r--drivers/media/rc/sunxi-cir.c89
-rw-r--r--drivers/media/spi/Kconfig5
-rw-r--r--drivers/media/tuners/Kconfig6
-rw-r--r--drivers/media/tuners/tuner-xc2028.c3
-rw-r--r--drivers/media/tuners/xc4000.c3
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/au0828/au0828-video.c6
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c25
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c26
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c161
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c8
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c8
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c22
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c11
-rw-r--r--drivers/media/usb/em28xx/em28xx.h4
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c11
-rw-r--r--drivers/media/usb/go7007/s2250-board.c6
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c37
-rw-r--r--drivers/media/usb/gspca/konica.c5
-rw-r--r--drivers/media/usb/gspca/nw80x.c5
-rw-r--r--drivers/media/usb/gspca/ov519.c10
-rw-r--r--drivers/media/usb/gspca/ov534.c5
-rw-r--r--drivers/media/usb/gspca/ov534_9.c1
-rw-r--r--drivers/media/usb/gspca/se401.c5
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c12
-rw-r--r--drivers/media/usb/gspca/sonixb.c5
-rw-r--r--drivers/media/usb/gspca/sonixj.c5
-rw-r--r--drivers/media/usb/gspca/spca1528.c5
-rw-r--r--drivers/media/usb/gspca/sq930x.c5
-rw-r--r--drivers/media/usb/gspca/sunplus.c5
-rw-r--r--drivers/media/usb/gspca/vc032x.c5
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c5
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c13
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c3
-rw-r--r--drivers/media/usb/msi2500/msi2500.c8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c19
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c5
-rw-r--r--drivers/media/usb/s2255/s2255drv.c10
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160.h1
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c3
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000.h1
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c28
-rw-r--r--drivers/media/usb/usbvision/usbvision.h1
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c11
-rw-r--r--drivers/media/v4l2-core/Kconfig5
-rw-r--r--drivers/media/v4l2-core/Makefile15
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c28
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c218
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c305
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c39
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c31
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c184
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c74
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c47
-rw-r--r--drivers/media/v4l2-core/v4l2-spi.c78
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c8
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c5
-rw-r--r--drivers/memory/mtk-smi.c268
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/host/r592.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/aat2870-core.c13
-rw-r--r--drivers/mfd/ab3100-core.c45
-rw-r--r--drivers/mfd/ab3100-otp.c21
-rw-r--r--drivers/mfd/ab8500-debugfs.c324
-rw-r--r--drivers/mfd/altera-sysmgr.c14
-rw-r--r--drivers/mfd/db8500-prcmu.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/rk808.c6
-rw-r--r--drivers/misc/Kconfig27
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/cardreader/alcor_pci.c6
-rw-r--r--drivers/misc/eeprom/Kconfig8
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/eeprom/ee1004.c6
-rw-r--r--drivers/misc/eeprom/max6875.c6
-rw-r--r--drivers/misc/fastrpc.c79
-rw-r--r--drivers/misc/habanalabs/asid.c2
-rw-r--r--drivers/misc/habanalabs/command_buffer.c3
-rw-r--r--drivers/misc/habanalabs/command_submission.c29
-rw-r--r--drivers/misc/habanalabs/context.c40
-rw-r--r--drivers/misc/habanalabs/debugfs.c16
-rw-r--r--drivers/misc/habanalabs/device.c493
-rw-r--r--drivers/misc/habanalabs/firmware_if.c22
-rw-r--r--drivers/misc/habanalabs/goya/goya.c178
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h21
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c89
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c109
-rw-r--r--drivers/misc/habanalabs/habanalabs.h150
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c171
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c180
-rw-r--r--drivers/misc/habanalabs/hw_queue.c32
-rw-r--r--drivers/misc/habanalabs/hwmon.c24
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h85
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_packets.h13
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h34
-rw-r--r--drivers/misc/habanalabs/irq.c31
-rw-r--r--drivers/misc/habanalabs/memory.c2
-rw-r--r--drivers/misc/habanalabs/sysfs.c126
-rw-r--r--drivers/misc/ioc4.c498
-rw-r--r--drivers/misc/lkdtm/Makefile1
-rw-r--r--drivers/misc/lkdtm/bugs.c11
-rw-r--r--drivers/misc/lkdtm/cfi.c42
-rw-r--r--drivers/misc/lkdtm/core.c2
-rw-r--r--drivers/misc/lkdtm/lkdtm.h4
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/main.c9
-rw-r--r--drivers/misc/mei/pci-me.c24
-rw-r--r--drivers/misc/mei/pci-txe.c19
-rw-r--r--drivers/misc/mic/card/mic_x100.c28
-rw-r--r--drivers/misc/mic/scif/scif_epd.h5
-rw-r--r--drivers/misc/sgi-xp/Makefile13
-rw-r--r--drivers/misc/sgi-xp/xp.h19
-rw-r--r--drivers/misc/sgi-xp/xp_main.c8
-rw-r--r--drivers/misc/sgi-xp/xp_nofault.S35
-rw-r--r--drivers/misc/sgi-xp/xp_sn2.c190
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c9
-rw-r--r--drivers/misc/sgi-xp/xpc.h273
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c31
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c5
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c2459
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c20
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c797
-rw-r--r--drivers/misc/vmw_balloon.c10
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c6
-rw-r--r--drivers/misc/xilinx_sdfec.c1214
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/queue.c5
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c4
-rw-r--r--drivers/mmc/core/sdio_irq.c57
-rw-r--r--drivers/mmc/host/Kconfig12
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c4
-rw-r--r--drivers/mmc/host/bcm2835.c5
-rw-r--r--drivers/mmc/host/cavium.c4
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/jz4740_mmc.c19
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c1
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c69
-rw-r--r--drivers/mmc/host/mtk-sd.c29
-rw-r--r--drivers/mmc/host/mxcmmc.c4
-rw-r--r--drivers/mmc/host/mxs-mmc.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c19
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c36
-rw-r--r--drivers/mmc/host/s3cmci.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-cadence.c8
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c36
-rw-r--r--drivers/mmc/host/sdhci-iproc.c9
-rw-r--r--drivers/mmc/host/sdhci-msm.c2
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c15
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c342
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c17
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c313
-rw-r--r--drivers/mmc/host/sdhci-pci.h1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c5
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c7
-rw-r--r--drivers/mmc/host/sdhci-sprd.c33
-rw-r--r--drivers/mmc/host/sdhci-tegra.c14
-rw-r--r--drivers/mmc/host/sdhci-xenon.c2
-rw-r--r--drivers/mmc/host/sdhci.c80
-rw-r--r--drivers/mmc/host/sdhci.h14
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c5
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c27
-rw-r--r--drivers/mmc/host/uniphier-sd.c7
-rw-r--r--drivers/mtd/hyperbus/Kconfig4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c14
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c5
-rw-r--r--drivers/mux/core.c7
-rw-r--r--drivers/net/arcnet/arc-rimi.c3
-rw-r--r--drivers/net/arcnet/com20020-isa.c6
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bonding/bond_main.c11
-rw-r--r--drivers/net/can/at91_can.c6
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c39
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c9
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c52
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c10
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c29
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c14
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c147
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c7
-rw-r--r--drivers/net/ethernet/8390/Kconfig4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c10
-rw-r--r--drivers/net/ethernet/apple/Kconfig4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/my3126.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c31
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c87
-rw-r--r--drivers/net/ethernet/marvell/sky2.c14
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c155
-rw-r--r--drivers/net/ethernet/micrel/Kconfig11
-rw-r--r--drivers/net/ethernet/micrel/Makefile1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1632
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.h108
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ace.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c23
-rw-r--r--drivers/net/ethernet/mscc/ocelot_tc.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/ni/Kconfig2
-rw-r--r--drivers/net/ethernet/nuvoton/Kconfig29
-rw-r--r--drivers/net/ethernet/nuvoton/Makefile6
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1082
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c143
-rw-r--r--drivers/net/ethernet/nxp/Kconfig2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c45
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/packetengines/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c13
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c19
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c8
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c7
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c5
-rw-r--r--drivers/net/ethernet/xscale/Kconfig2
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c10
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c8
-rw-r--r--drivers/net/netdevsim/dev.c63
-rw-r--r--drivers/net/netdevsim/fib.c102
-rw-r--r--drivers/net/netdevsim/netdev.c9
-rw-r--r--drivers/net/netdevsim/netdevsim.h10
-rw-r--r--drivers/net/phy/at803x.c32
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/phy/mdio_bus.c9
-rw-r--r--drivers/net/phy/mscc.c16
-rw-r--r--drivers/net/phy/phy-c45.c40
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c18
-rw-r--r--drivers/net/phy/phy_led_triggers.c3
-rw-r--r--drivers/net/phy/phylink.c16
-rw-r--r--drivers/net/phy/sfp.c2
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/ppp/pppox.c13
-rw-r--r--drivers/net/ppp/pptp.c3
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c25
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/cx82310_eth.c3
-rw-r--r--drivers/net/usb/kalmia.c6
-rw-r--r--drivers/net/usb/lan78xx.c8
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c22
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vrf.c58
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/sdla.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c539
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c46
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c13
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/nfc/nfcmrvl/main.c4
-rw-r--r--drivers/nfc/nfcmrvl/uart.c4
-rw-r--r--drivers/nfc/nfcmrvl/usb.c1
-rw-r--r--drivers/nfc/st-nci/se.c2
-rw-r--r--drivers/nfc/st21nfca/se.c2
-rw-r--r--drivers/nfc/st95hf/core.c2
-rw-r--r--drivers/ntb/msi.c5
-rw-r--r--drivers/nvdimm/btt_devs.c16
-rw-r--r--drivers/nvdimm/bus.c210
-rw-r--r--drivers/nvdimm/core.c10
-rw-r--r--drivers/nvdimm/dimm_devs.c4
-rw-r--r--drivers/nvdimm/namespace_devs.c36
-rw-r--r--drivers/nvdimm/nd-core.h71
-rw-r--r--drivers/nvdimm/pfn_devs.c29
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region.c24
-rw-r--r--drivers/nvdimm/region_devs.c12
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/nvme/host/core.c242
-rw-r--r--drivers/nvme/host/fabrics.c38
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c73
-rw-r--r--drivers/nvme/host/lightnvm.c45
-rw-r--r--drivers/nvme/host/multipath.c87
-rw-r--r--drivers/nvme/host/nvme.h68
-rw-r--r--drivers/nvme/host/pci.c125
-rw-r--r--drivers/nvme/host/rdma.c77
-rw-r--r--drivers/nvme/host/tcp.c144
-rw-r--r--drivers/nvme/host/trace.c18
-rw-r--r--drivers/nvme/target/admin-cmd.c22
-rw-r--r--drivers/nvme/target/configfs.c1
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/discovery.c4
-rw-r--r--drivers/nvme/target/loop.c38
-rw-r--r--drivers/nvme/target/nvmet.h3
-rw-r--r--drivers/nvme/target/tcp.c24
-rw-r--r--drivers/nvme/target/trace.c18
-rw-r--r--drivers/nvmem/core.c7
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c7
-rw-r--r--drivers/nvmem/imx-ocotp.c7
-rw-r--r--drivers/nvmem/meson-mx-efuse.c3
-rw-r--r--drivers/nvmem/mxs-ocotp.c2
-rw-r--r--drivers/nvmem/nvmem-sysfs.c15
-rw-r--r--drivers/nvmem/sunxi_sid.c1
-rw-r--r--drivers/of/fdt.c14
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_mdio.c8
-rw-r--r--drivers/of/platform.c7
-rw-r--r--drivers/of/resolver.c12
-rw-r--r--drivers/opp/core.c85
-rw-r--r--drivers/opp/of.c30
-rw-r--r--drivers/parisc/dino.c30
-rw-r--r--drivers/parisc/eisa_enumerator.c10
-rw-r--r--drivers/parisc/hppb.c11
-rw-r--r--drivers/parport/Makefile2
-rw-r--r--drivers/parport/parport_serial.c50
-rw-r--r--drivers/pci/controller/pci-hyperv.c10
-rw-r--r--drivers/pci/hotplug/Kconfig9
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c700
-rw-r--r--drivers/pci/pci.c29
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/aspm.c20
-rw-r--r--drivers/pci/pcie/portdrv_core.c66
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c4
-rw-r--r--drivers/pcmcia/i82092.c6
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c65
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c75
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c4
-rw-r--r--drivers/perf/qcom_l2_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c4
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/lantiq/Kconfig11
-rw-r--r--drivers/phy/lantiq/Makefile1
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c494
-rw-r--r--drivers/phy/marvell/Kconfig1
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c4
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c17
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c525
-rw-r--r--drivers/phy/phy-core.c10
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c44
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c2
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c1
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c1
-rw-r--r--drivers/phy/samsung/phy-exynos-pcie.c1
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c1
-rw-r--r--drivers/phy/samsung/phy-exynos5250-sata.c1
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c33
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c120
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c12
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.c9
-rw-r--r--drivers/pinctrl/aspeed/pinmux-aspeed.h12
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c42
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c18
-rw-r--r--drivers/pinctrl/qcom/Kconfig1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c92
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c4
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c5
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c6
-rw-r--r--drivers/platform/x86/acer-wmi.c49
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c534
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/hp-wmi.c47
-rw-r--r--drivers/platform/x86/hp_accel.c1
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c4
-rw-r--r--drivers/platform/x86/intel-hid.c36
-rw-r--r--drivers/platform/x86/intel-vbtn.c20
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c5
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c57
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c26
-rw-r--r--drivers/platform/x86/intel_pmc_core.c21
-rw-r--r--drivers/platform/x86/intel_pmc_core_pltdrv.c20
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c4
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.c2
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c8
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c19
-rw-r--r--drivers/platform/x86/pmc_atom.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c122
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c58
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c1
-rw-r--r--drivers/powercap/idle_inject.c53
-rw-r--r--drivers/powercap/intel_rapl_common.c34
-rw-r--r--drivers/powercap/powercap_sys.c2
-rw-r--r--drivers/pwm/core.c7
-rw-r--r--drivers/ras/Makefile3
-rw-r--r--drivers/ras/cec.c1
-rw-r--r--drivers/ras/debugfs.c2
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/act8865-regulator.c316
-rw-r--r--drivers/regulator/act8945a-regulator.c8
-rw-r--r--drivers/regulator/axp20x-regulator.c10
-rw-r--r--drivers/regulator/core.c58
-rw-r--r--drivers/regulator/da9062-regulator.c4
-rw-r--r--drivers/regulator/da9063-regulator.c4
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/fixed.c83
-rw-r--r--drivers/regulator/helpers.c21
-rw-r--r--drivers/regulator/lm363x-regulator.c10
-rw-r--r--drivers/regulator/lp87565-regulator.c13
-rw-r--r--drivers/regulator/lp8788-ldo.c2
-rw-r--r--drivers/regulator/max77686-regulator.c2
-rw-r--r--drivers/regulator/max8660.c1
-rw-r--r--drivers/regulator/mt6358-regulator.c549
-rw-r--r--drivers/regulator/of_regulator.c11
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c193
-rw-r--r--drivers/regulator/rk808-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/slg51000-regulator.c15
-rw-r--r--drivers/regulator/stm32-booster.c4
-rw-r--r--drivers/regulator/sy8824x.c232
-rw-r--r--drivers/regulator/tps65132-regulator.c4
-rw-r--r--drivers/regulator/twl-regulator.c23
-rw-r--r--drivers/regulator/twl6030-regulator.c21
-rw-r--r--drivers/regulator/uniphier-regulator.c4
-rw-r--r--drivers/reset/Kconfig13
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-imx7.c12
-rw-r--r--drivers/reset/reset-meson.c51
-rw-r--r--drivers/reset/reset-scmi.c124
-rw-r--r--drivers/reset/reset-simple.c3
-rw-r--r--drivers/rtc/interface.c11
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/s390/Makefile3
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/char/Makefile3
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/tape_core.c3
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c18
-rw-r--r--drivers/s390/cio/device.c15
-rw-r--r--drivers/s390/cio/qdio_main.c24
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c28
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c60
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c51
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c10
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h17
-rw-r--r--drivers/s390/crypto/Makefile2
-rw-r--r--drivers/s390/crypto/ap_queue.c1
-rw-r--r--drivers/s390/crypto/pkey_api.c1638
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c52
-rw-r--r--drivers/s390/crypto/zcrypt_api.h7
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c1765
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h217
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c106
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c17
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_mpc.c3
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c24
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c140
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/qla1280.c37
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c11
-rw-r--r--drivers/scsi/scsi_lib.c19
-rw-r--r--drivers/scsi/scsi_pm.c3
-rw-r--r--drivers/scsi/scsi_proc.c9
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sd.h5
-rw-r--r--drivers/scsi/sd_zbc.c12
-rw-r--r--drivers/scsi/ufs/ufshcd.c3
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c5
-rw-r--r--drivers/slimbus/slimbus.h2
-rw-r--r--drivers/sn/Kconfig22
-rw-r--r--drivers/sn/Makefile7
-rw-r--r--drivers/sn/ioc3.c844
-rw-r--r--drivers/soc/amlogic/Kconfig11
-rw-r--r--drivers/soc/amlogic/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c148
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c492
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c7
-rw-r--r--drivers/soc/fsl/dpaa2-console.c2
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/guts.c6
-rw-r--r--drivers/soc/fsl/qbman/bman.c17
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c36
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c22
-rw-r--r--drivers/soc/fsl/qbman/bman_priv.h5
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c63
-rw-r--r--drivers/soc/fsl/qbman/qman.c83
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c68
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c22
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h8
-rw-r--r--drivers/soc/fsl/qe/qe.c165
-rw-r--r--drivers/soc/imx/gpcv2.c2
-rw-r--r--drivers/soc/imx/soc-imx-scu.c39
-rw-r--r--drivers/soc/imx/soc-imx8.c45
-rw-r--r--drivers/soc/ixp4xx/Kconfig4
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c10
-rw-r--r--drivers/soc/qcom/Kconfig8
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c6
-rw-r--r--drivers/soc/qcom/qcom_aoss.c133
-rw-r--r--drivers/soc/qcom/smem.c11
-rw-r--r--drivers/soc/qcom/socinfo.c476
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/rcar-sysc.c7
-rw-r--r--drivers/soc/renesas/rmobile-sysc.c31
-rw-r--r--drivers/soc/samsung/Kconfig6
-rw-r--r--drivers/soc/samsung/Makefile2
-rw-r--r--drivers/soc/samsung/exynos-chipid.c105
-rw-r--r--drivers/soc/ti/pm33xx.c19
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c23
-rw-r--r--drivers/soc/ux500/ux500-soc-id.c5
-rw-r--r--drivers/soundwire/Kconfig7
-rw-r--r--drivers/soundwire/Makefile2
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/spi/Kconfig19
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c1
-rw-r--r--drivers/spi/spi-altera.c4
-rw-r--r--drivers/spi/spi-armada-3700.c5
-rw-r--r--drivers/spi/spi-ath79.c4
-rw-r--r--drivers/spi/spi-atmel.c5
-rw-r--r--drivers/spi/spi-axi-spi-engine.c4
-rw-r--r--drivers/spi/spi-bcm-qspi.c5
-rw-r--r--drivers/spi/spi-bcm2835.c469
-rw-r--r--drivers/spi/spi-bcm2835aux.c5
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-butterfly.c2
-rw-r--r--drivers/spi/spi-cadence.c5
-rw-r--r--drivers/spi/spi-cavium-octeon.c4
-rw-r--r--drivers/spi/spi-clps711x.c4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c18
-rw-r--r--drivers/spi/spi-dw-pci.c20
-rw-r--r--drivers/spi/spi-efm32.c4
-rw-r--r--drivers/spi/spi-ep93xx.c4
-rw-r--r--drivers/spi/spi-fsl-cpm.c6
-rw-r--r--drivers/spi/spi-fsl-dspi.c718
-rw-r--r--drivers/spi/spi-fsl-lib.h3
-rw-r--r--drivers/spi/spi-fsl-qspi.c6
-rw-r--r--drivers/spi/spi-fsl-spi.c193
-rw-r--r--drivers/spi/spi-geni-qcom.c8
-rw-r--r--drivers/spi/spi-gpio.c11
-rw-r--r--drivers/spi/spi-lantiq-ssc.c12
-rw-r--r--drivers/spi/spi-lm70llp.c2
-rw-r--r--drivers/spi/spi-lp8841-rtc.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c4
-rw-r--r--drivers/spi/spi-mt65xx.c54
-rw-r--r--drivers/spi/spi-mt7621.c4
-rw-r--r--drivers/spi/spi-mxs.c4
-rw-r--r--drivers/spi/spi-npcm-fiu.c769
-rw-r--r--drivers/spi/spi-npcm-pspi.c5
-rw-r--r--drivers/spi/spi-nuc900.c429
-rw-r--r--drivers/spi/spi-nxp-fspi.c4
-rw-r--r--drivers/spi/spi-oc-tiny.c4
-rw-r--r--drivers/spi/spi-pic32-sqi.c5
-rw-r--r--drivers/spi/spi-pic32.c12
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/spi/spi-qcom-qspi.c8
-rw-r--r--drivers/spi/spi-rb4xx.c4
-rw-r--r--drivers/spi/spi-s3c24xx.c5
-rw-r--r--drivers/spi/spi-sh-msiof.c5
-rw-r--r--drivers/spi/spi-sh.c4
-rw-r--r--drivers/spi/spi-sifive.c5
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-slave-mt27xx.c1
-rw-r--r--drivers/spi/spi-sprd-adi.c92
-rw-r--r--drivers/spi/spi-sprd.c4
-rw-r--r--drivers/spi/spi-st-ssc4.c4
-rw-r--r--drivers/spi/spi-stm32-qspi.c5
-rw-r--r--drivers/spi/spi-sun4i.c5
-rw-r--r--drivers/spi/spi-sun6i.c5
-rw-r--r--drivers/spi/spi-synquacer.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c4
-rw-r--r--drivers/spi/spi-ti-qspi.c1
-rw-r--r--drivers/spi/spi-uniphier.c90
-rw-r--r--drivers/spi/spi-xlp.c8
-rw-r--r--drivers/spi/spi-zynq-qspi.c7
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/spi/spi.c81
-rw-r--r--drivers/staging/Kconfig7
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/TODO2
-rw-r--r--drivers/staging/android/ion/ion.c36
-rw-r--r--drivers/staging/android/ion/ion.h10
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c3
-rw-r--r--drivers/staging/android/vsoc.c8
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c7
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c80
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h5
-rw-r--r--drivers/staging/erofs/Documentation/filesystems/erofs.txt209
-rw-r--r--drivers/staging/erofs/Kconfig151
-rw-r--r--drivers/staging/erofs/Makefile13
-rw-r--r--drivers/staging/erofs/TODO46
-rw-r--r--drivers/staging/erofs/compress.h62
-rw-r--r--drivers/staging/erofs/data.c400
-rw-r--r--drivers/staging/erofs/decompressor.c335
-rw-r--r--drivers/staging/erofs/dir.c151
-rw-r--r--drivers/staging/erofs/erofs_fs.h322
-rw-r--r--drivers/staging/erofs/include/linux/tagptr.h110
-rw-r--r--drivers/staging/erofs/include/trace/events/erofs.h256
-rw-r--r--drivers/staging/erofs/inode.c332
-rw-r--r--drivers/staging/erofs/internal.h642
-rw-r--r--drivers/staging/erofs/namei.c256
-rw-r--r--drivers/staging/erofs/super.c701
-rw-r--r--drivers/staging/erofs/unzip_pagevec.h169
-rw-r--r--drivers/staging/erofs/unzip_vle.c1591
-rw-r--r--drivers/staging/erofs/unzip_vle.h196
-rw-r--r--drivers/staging/erofs/utils.c353
-rw-r--r--drivers/staging/erofs/xattr.c704
-rw-r--r--drivers/staging/erofs/xattr.h97
-rw-r--r--drivers/staging/erofs/zmap.c463
-rw-r--r--drivers/staging/exfat/Kconfig49
-rw-r--r--drivers/staging/exfat/Makefile10
-rw-r--r--drivers/staging/exfat/TODO12
-rw-r--r--drivers/staging/exfat/exfat.h971
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c136
-rw-r--r--drivers/staging/exfat/exfat_cache.c724
-rw-r--r--drivers/staging/exfat/exfat_core.c3701
-rw-r--r--drivers/staging/exfat/exfat_nls.c404
-rw-r--r--drivers/staging/exfat/exfat_super.c4049
-rw-r--r--drivers/staging/exfat/exfat_upcase.c740
-rw-r--r--drivers/staging/fbtft/fb_bd663474.c2
-rw-r--r--drivers/staging/fbtft/fb_hx8340bn.c2
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c4
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c4
-rw-r--r--drivers/staging/fbtft/fb_pcd8544.c4
-rw-r--r--drivers/staging/fbtft/fb_s6d1121.c4
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c4
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c4
-rw-r--r--drivers/staging/fbtft/fb_upd161704.c2
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c47
-rw-r--r--drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt71
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/TODO1
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h15
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.c51
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/dpsw.h56
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c44
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c309
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h4
-rw-r--r--drivers/staging/gasket/apex_driver.c22
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c12
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c4
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c46
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c46
-rw-r--r--drivers/staging/greybus/Kconfig27
-rw-r--r--drivers/staging/greybus/Makefile22
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/arpc.h109
-rw-r--r--drivers/staging/greybus/audio_apbridgea.c3
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h26
-rw-r--r--drivers/staging/greybus/audio_codec.h4
-rw-r--r--drivers/staging/greybus/audio_gb.c4
-rw-r--r--drivers/staging/greybus/audio_manager.c2
-rw-r--r--drivers/staging/greybus/authentication.c3
-rw-r--r--drivers/staging/greybus/bootrom.c2
-rw-r--r--drivers/staging/greybus/bundle.h89
-rw-r--r--drivers/staging/greybus/camera.c2
-rw-r--r--drivers/staging/greybus/connection.h128
-rw-r--r--drivers/staging/greybus/control.h57
-rw-r--r--drivers/staging/greybus/firmware.h4
-rw-r--r--drivers/staging/greybus/fw-core.c2
-rw-r--r--drivers/staging/greybus/fw-download.c2
-rw-r--r--drivers/staging/greybus/fw-management.c2
-rw-r--r--drivers/staging/greybus/gb-camera.h2
-rw-r--r--drivers/staging/greybus/gbphy.c2
-rw-r--r--drivers/staging/greybus/gbphy.h2
-rw-r--r--drivers/staging/greybus/gpio.c2
-rw-r--r--drivers/staging/greybus/greybus.h152
-rw-r--r--drivers/staging/greybus/greybus_authentication.h48
-rw-r--r--drivers/staging/greybus/greybus_firmware.h48
-rw-r--r--drivers/staging/greybus/greybus_id.h27
-rw-r--r--drivers/staging/greybus/greybus_manifest.h178
-rw-r--r--drivers/staging/greybus/greybus_protocols.h2222
-rw-r--r--drivers/staging/greybus/hd.h82
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/i2c.c24
-rw-r--r--drivers/staging/greybus/interface.h82
-rw-r--r--drivers/staging/greybus/light.c16
-rw-r--r--drivers/staging/greybus/log.c9
-rw-r--r--drivers/staging/greybus/loopback.c9
-rw-r--r--drivers/staging/greybus/manifest.h15
-rw-r--r--drivers/staging/greybus/module.h33
-rw-r--r--drivers/staging/greybus/operation.h224
-rw-r--r--drivers/staging/greybus/power_supply.c3
-rw-r--r--drivers/staging/greybus/pwm.c2
-rw-r--r--drivers/staging/greybus/raw.c3
-rw-r--r--drivers/staging/greybus/sdio.c2
-rw-r--r--drivers/staging/greybus/spi.c2
-rw-r--r--drivers/staging/greybus/spilib.c2
-rw-r--r--drivers/staging/greybus/spilib.h2
-rw-r--r--drivers/staging/greybus/svc.h101
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c2
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/greybus/vibrator.c3
-rw-r--r--drivers/staging/iio/accel/adis16240.c5
-rw-r--r--drivers/staging/iio/adc/ad7192.c175
-rw-r--r--drivers/staging/iio/adc/ad7192.h37
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c12
-rw-r--r--drivers/staging/isdn/hysdn/Kconfig2
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_net.c2
-rw-r--r--drivers/staging/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000/cell_probe.c18
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c18
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c4
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c3
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c8
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/allegro-dvt/allegro-core.c4
-rw-r--r--drivers/staging/media/bcm2048/Kconfig14
-rw-r--r--drivers/staging/media/bcm2048/Makefile2
-rw-r--r--drivers/staging/media/bcm2048/TODO24
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2689
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.h26
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig13
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile11
-rw-r--r--drivers/staging/media/davinci_vpfe/TODO38
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt154
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h1287
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c1852
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h174
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c1038
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h556
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c1070
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h228
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h90
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c2097
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.h200
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif_regs.h291
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c1995
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.h241
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe.h83
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c716
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h90
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c1646
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h150
-rw-r--r--drivers/staging/media/hantro/Kconfig2
-rw-r--r--drivers/staging/media/hantro/Makefile7
-rw-r--r--drivers/staging/media/hantro/hantro.h51
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c110
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c292
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c14
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c503
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c646
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h80
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c11
-rw-r--r--drivers/staging/media/hantro/hantro_vp8.c201
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c41
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c39
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c12
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c14
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c595
-rw-r--r--drivers/staging/media/imx/Kconfig5
-rw-r--r--drivers/staging/media/imx/Makefile3
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c925
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c28
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c4
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c2
-rw-r--r--drivers/staging/media/imx/imx-media.h12
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c12
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c34
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c4
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.h4
-rw-r--r--drivers/staging/media/ipu3/ipu3.c3
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c4
-rw-r--r--drivers/staging/media/omap4iss/iss.c1
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c40
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h2
-rw-r--r--drivers/staging/media/soc_camera/soc_camera.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c63
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h7
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c8
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c28
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig2
-rw-r--r--drivers/staging/most/cdev/cdev.c4
-rw-r--r--drivers/staging/most/core.c4
-rw-r--r--drivers/staging/most/dim2/dim2.c21
-rw-r--r--drivers/staging/most/dim2/hal.c99
-rw-r--r--drivers/staging/most/dim2/hal.h4
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/sound.c5
-rw-r--r--drivers/staging/most/video/video.c3
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c4
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c12
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c5
-rw-r--r--drivers/staging/nvec/nvec.c8
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c2
-rw-r--r--drivers/staging/octeon/ethernet.c16
-rw-r--r--drivers/staging/olpc_dcon/TODO4
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt2
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c141
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c11
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c41
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c19
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c14
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c143
-rw-r--r--drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h881
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h1
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c3
-rw-r--r--drivers/staging/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c12
-rw-r--r--drivers/staging/rtl8192e/rtllib.h5
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c206
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c24
-rw-r--r--drivers/staging/rtl8192u/Kconfig2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h42
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c204
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c635
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c142
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c43
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c12
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h17
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c18
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c11
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c42
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c112
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c59
-rw-r--r--drivers/staging/rtl8712/os_intfs.c35
-rw-r--r--drivers/staging/rtl8712/recv_linux.c18
-rw-r--r--drivers/staging/rtl8712/recv_osdep.h16
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c14
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c54
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c46
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.h8
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h7
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.h4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h8
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c22
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_rf.h3
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c29
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c65
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h20
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rtl8712/usb_osintf.h4
-rw-r--r--drivers/staging/rtl8712/wifi.h8
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/Makefile1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c113
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c1307
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c15
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c121
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c18
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_phy.c157
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c19
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c15
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c7
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h4
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h8
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h8
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h17
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_cfg.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h10
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h77
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h20
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h16
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h14
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c7
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c21
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c779
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.h37
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c5
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c12
-rw-r--r--drivers/staging/rts5208/sd.c28
-rw-r--r--drivers/staging/sm750fb/ddk750.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h3
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c3
-rw-r--r--drivers/staging/uwb/Kconfig (renamed from drivers/uwb/Kconfig)0
-rw-r--r--drivers/staging/uwb/Makefile (renamed from drivers/uwb/Makefile)0
-rw-r--r--drivers/staging/uwb/TODO8
-rw-r--r--drivers/staging/uwb/address.c (renamed from drivers/uwb/address.c)0
-rw-r--r--drivers/staging/uwb/allocator.c (renamed from drivers/uwb/allocator.c)2
-rw-r--r--drivers/staging/uwb/beacon.c (renamed from drivers/uwb/beacon.c)0
-rw-r--r--drivers/staging/uwb/driver.c (renamed from drivers/uwb/driver.c)0
-rw-r--r--drivers/staging/uwb/drp-avail.c (renamed from drivers/uwb/drp-avail.c)0
-rw-r--r--drivers/staging/uwb/drp-ie.c (renamed from drivers/uwb/drp-ie.c)2
-rw-r--r--drivers/staging/uwb/drp.c (renamed from drivers/uwb/drp.c)0
-rw-r--r--drivers/staging/uwb/est.c (renamed from drivers/uwb/est.c)0
-rw-r--r--drivers/staging/uwb/hwa-rc.c (renamed from drivers/uwb/hwa-rc.c)6
-rw-r--r--drivers/staging/uwb/i1480/Makefile (renamed from drivers/uwb/i1480/Makefile)0
-rw-r--r--drivers/staging/uwb/i1480/dfu/Makefile (renamed from drivers/uwb/i1480/dfu/Makefile)0
-rw-r--r--drivers/staging/uwb/i1480/dfu/dfu.c (renamed from drivers/uwb/i1480/dfu/dfu.c)2
-rw-r--r--drivers/staging/uwb/i1480/dfu/i1480-dfu.h (renamed from drivers/uwb/i1480/dfu/i1480-dfu.h)2
-rw-r--r--drivers/staging/uwb/i1480/dfu/mac.c (renamed from drivers/uwb/i1480/dfu/mac.c)2
-rw-r--r--drivers/staging/uwb/i1480/dfu/phy.c (renamed from drivers/uwb/i1480/dfu/phy.c)2
-rw-r--r--drivers/staging/uwb/i1480/dfu/usb.c (renamed from drivers/uwb/i1480/dfu/usb.c)6
-rw-r--r--drivers/staging/uwb/i1480/i1480-est.c (renamed from drivers/uwb/i1480/i1480-est.c)2
-rw-r--r--drivers/staging/uwb/ie-rcv.c (renamed from drivers/uwb/ie-rcv.c)0
-rw-r--r--drivers/staging/uwb/ie.c (renamed from drivers/uwb/ie.c)0
-rw-r--r--drivers/staging/uwb/include/debug-cmd.h57
-rw-r--r--drivers/staging/uwb/include/spec.h767
-rw-r--r--drivers/staging/uwb/include/umc.h192
-rw-r--r--drivers/staging/uwb/include/whci.h102
-rw-r--r--drivers/staging/uwb/lc-dev.c (renamed from drivers/uwb/lc-dev.c)0
-rw-r--r--drivers/staging/uwb/lc-rc.c (renamed from drivers/uwb/lc-rc.c)0
-rw-r--r--drivers/staging/uwb/neh.c (renamed from drivers/uwb/neh.c)0
-rw-r--r--drivers/staging/uwb/pal.c (renamed from drivers/uwb/pal.c)2
-rw-r--r--drivers/staging/uwb/radio.c (renamed from drivers/uwb/radio.c)2
-rw-r--r--drivers/staging/uwb/reset.c (renamed from drivers/uwb/reset.c)0
-rw-r--r--drivers/staging/uwb/rsv.c (renamed from drivers/uwb/rsv.c)2
-rw-r--r--drivers/staging/uwb/scan.c (renamed from drivers/uwb/scan.c)0
-rw-r--r--drivers/staging/uwb/umc-bus.c (renamed from drivers/uwb/umc-bus.c)2
-rw-r--r--drivers/staging/uwb/umc-dev.c (renamed from drivers/uwb/umc-dev.c)2
-rw-r--r--drivers/staging/uwb/umc-drv.c (renamed from drivers/uwb/umc-drv.c)2
-rw-r--r--drivers/staging/uwb/uwb-debug.c (renamed from drivers/uwb/uwb-debug.c)3
-rw-r--r--drivers/staging/uwb/uwb-internal.h (renamed from drivers/uwb/uwb-internal.h)2
-rw-r--r--drivers/staging/uwb/uwb.h817
-rw-r--r--drivers/staging/uwb/uwbd.c (renamed from drivers/uwb/uwbd.c)0
-rw-r--r--drivers/staging/uwb/whc-rc.c (renamed from drivers/uwb/whc-rc.c)6
-rw-r--r--drivers/staging/uwb/whci.c (renamed from drivers/uwb/whci.c)4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c29
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h1
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c38
-rw-r--r--drivers/staging/vt6656/rxtx.c10
-rw-r--r--drivers/staging/vt6656/usbpipe.c2
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,sdio.txt8
-rw-r--r--drivers/staging/wilc1000/microchip,wilc1000,spi.txt8
-rw-r--r--drivers/staging/wilc1000/wilc_hif.c70
-rw-r--r--drivers/staging/wilc1000/wilc_hif.h6
-rw-r--r--drivers/staging/wilc1000/wilc_mon.c3
-rw-r--r--drivers/staging/wilc1000/wilc_netdev.c78
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c18
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c64
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h3
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c135
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h19
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c78
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h4
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h3
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c210
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c44
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c2
-rw-r--r--drivers/staging/wusbcore/Documentation/wusb-cbaf130
-rw-r--r--drivers/staging/wusbcore/Documentation/wusb-design-overview.rst457
-rw-r--r--drivers/staging/wusbcore/Kconfig (renamed from drivers/usb/wusbcore/Kconfig)3
-rw-r--r--drivers/staging/wusbcore/Makefile (renamed from drivers/usb/wusbcore/Makefile)2
-rw-r--r--drivers/staging/wusbcore/TODO8
-rw-r--r--drivers/staging/wusbcore/cbaf.c (renamed from drivers/usb/wusbcore/cbaf.c)6
-rw-r--r--drivers/staging/wusbcore/crypto.c (renamed from drivers/usb/wusbcore/crypto.c)4
-rw-r--r--drivers/staging/wusbcore/dev-sysfs.c (renamed from drivers/usb/wusbcore/dev-sysfs.c)0
-rw-r--r--drivers/staging/wusbcore/devconnect.c (renamed from drivers/usb/wusbcore/devconnect.c)2
-rw-r--r--drivers/staging/wusbcore/host/Kconfig28
-rw-r--r--drivers/staging/wusbcore/host/Makefile3
-rw-r--r--drivers/staging/wusbcore/host/hwa-hc.c (renamed from drivers/usb/host/hwa-hc.c)6
-rw-r--r--drivers/staging/wusbcore/host/whci/Makefile (renamed from drivers/usb/host/whci/Makefile)0
-rw-r--r--drivers/staging/wusbcore/host/whci/asl.c (renamed from drivers/usb/host/whci/asl.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/debug.c (renamed from drivers/usb/host/whci/debug.c)2
-rw-r--r--drivers/staging/wusbcore/host/whci/hcd.c (renamed from drivers/usb/host/whci/hcd.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/hw.c (renamed from drivers/usb/host/whci/hw.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/init.c (renamed from drivers/usb/host/whci/init.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/int.c (renamed from drivers/usb/host/whci/int.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/pzl.c (renamed from drivers/usb/host/whci/pzl.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/qset.c (renamed from drivers/usb/host/whci/qset.c)4
-rw-r--r--drivers/staging/wusbcore/host/whci/whcd.h (renamed from drivers/usb/host/whci/whcd.h)4
-rw-r--r--drivers/staging/wusbcore/host/whci/whci-hc.h (renamed from drivers/usb/host/whci/whci-hc.h)0
-rw-r--r--drivers/staging/wusbcore/host/whci/wusb.c (renamed from drivers/usb/host/whci/wusb.c)4
-rw-r--r--drivers/staging/wusbcore/include/association.h151
-rw-r--r--drivers/staging/wusbcore/include/wusb-wa.h304
-rw-r--r--drivers/staging/wusbcore/include/wusb.h362
-rw-r--r--drivers/staging/wusbcore/mmc.c (renamed from drivers/usb/wusbcore/mmc.c)2
-rw-r--r--drivers/staging/wusbcore/pal.c (renamed from drivers/usb/wusbcore/pal.c)0
-rw-r--r--drivers/staging/wusbcore/reservation.c (renamed from drivers/usb/wusbcore/reservation.c)2
-rw-r--r--drivers/staging/wusbcore/rh.c (renamed from drivers/usb/wusbcore/rh.c)0
-rw-r--r--drivers/staging/wusbcore/security.c (renamed from drivers/usb/wusbcore/security.c)0
-rw-r--r--drivers/staging/wusbcore/wa-hc.c (renamed from drivers/usb/wusbcore/wa-hc.c)0
-rw-r--r--drivers/staging/wusbcore/wa-hc.h (renamed from drivers/usb/wusbcore/wa-hc.h)6
-rw-r--r--drivers/staging/wusbcore/wa-nep.c (renamed from drivers/usb/wusbcore/wa-nep.c)0
-rw-r--r--drivers/staging/wusbcore/wa-rpipe.c (renamed from drivers/usb/wusbcore/wa-rpipe.c)0
-rw-r--r--drivers/staging/wusbcore/wa-xfer.c (renamed from drivers/usb/wusbcore/wa-xfer.c)0
-rw-r--r--drivers/staging/wusbcore/wusbhc.c (renamed from drivers/usb/wusbcore/wusbhc.c)0
-rw-r--r--drivers/staging/wusbcore/wusbhc.h (renamed from drivers/usb/wusbcore/wusbhc.h)4
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c3
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/tee/optee/call.c1
-rw-r--r--drivers/thermal/cpu_cooling.c110
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thunderbolt/Makefile2
-rw-r--r--drivers/thunderbolt/ctl.c23
-rw-r--r--drivers/thunderbolt/eeprom.c6
-rw-r--r--drivers/thunderbolt/icm.c194
-rw-r--r--drivers/thunderbolt/nhi.c134
-rw-r--r--drivers/thunderbolt/nhi.h22
-rw-r--r--drivers/thunderbolt/nhi_ops.c179
-rw-r--r--drivers/thunderbolt/nhi_regs.h37
-rw-r--r--drivers/thunderbolt/switch.c52
-rw-r--r--drivers/thunderbolt/tb_msgs.h16
-rw-r--r--drivers/thunderbolt/tunnel.c4
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/hvc/hvcs.c6
-rw-r--r--drivers/tty/isicom.c1
-rw-r--r--drivers/tty/n_gsm.c25
-rw-r--r--drivers/tty/nozomi.c4
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c173
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c126
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h19
-rw-r--r--drivers/tty/serial/8250/8250_exar.c114
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c4
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c76
-rw-r--r--drivers/tty/serial/8250/8250_moxa.c155
-rw-r--r--drivers/tty/serial/8250/8250_omap.c11
-rw-r--r--drivers/tty/serial/8250/8250_pci.c651
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c20
-rw-r--r--drivers/tty/serial/8250/8250_port.c93
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig15
-rw-r--r--drivers/tty/serial/8250/Makefile2
-rw-r--r--drivers/tty/serial/Kconfig82
-rw-r--r--drivers/tty/serial/Makefile6
-rw-r--r--drivers/tty/serial/amba-pl011.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c51
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c937
-rw-r--r--drivers/tty/serial/fsl_lpuart.c518
-rw-r--r--drivers/tty/serial/icom.c2
-rw-r--r--drivers/tty/serial/imx.c88
-rw-r--r--drivers/tty/serial/ioc3_serial.c2195
-rw-r--r--drivers/tty/serial/ioc4_serial.c2955
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/lantiq.c261
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c42
-rw-r--r--drivers/tty/serial/max310x.c32
-rw-r--r--drivers/tty/serial/mvebu-uart.c12
-rw-r--r--drivers/tty/serial/mxs-auart.c6
-rw-r--r--drivers/tty/serial/netx-serial.c733
-rw-r--r--drivers/tty/serial/owl-uart.c4
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c23
-rw-r--r--drivers/tty/serial/rda-uart.c4
-rw-r--r--drivers/tty/serial/sccnxp.c1
-rw-r--r--drivers/tty/serial/serial-tegra.c407
-rw-r--r--drivers/tty/serial/serial_ks8695.c698
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c36
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h6
-rw-r--r--drivers/tty/serial/sh-sci.c50
-rw-r--r--drivers/tty/serial/sifive.c4
-rw-r--r--drivers/tty/serial/sn_console.c1036
-rw-r--r--drivers/tty/serial/sprd_serial.c58
-rw-r--r--drivers/tty/serial/stm32-usart.c72
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/tty_ldsem.c5
-rw-r--r--drivers/tty/vt/vt.c6
-rw-r--r--drivers/uio/uio_dmem_genirq.c4
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c23
-rw-r--r--drivers/uio/uio_pdrv_genirq.c14
-rw-r--r--drivers/usb/Kconfig39
-rw-r--r--drivers/usb/Makefile4
-rw-r--r--drivers/usb/atm/cxacru.c58
-rw-r--r--drivers/usb/atm/ueagle-atm.c16
-rw-r--r--drivers/usb/cdns3/Kconfig46
-rw-r--r--drivers/usb/cdns3/Makefile16
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c203
-rw-r--r--drivers/usb/cdns3/core.c651
-rw-r--r--drivers/usb/cdns3/core.h98
-rw-r--r--drivers/usb/cdns3/debug.h161
-rw-r--r--drivers/usb/cdns3/drd.c381
-rw-r--r--drivers/usb/cdns3/drd.h167
-rw-r--r--drivers/usb/cdns3/ep0.c886
-rw-r--r--drivers/usb/cdns3/gadget-export.h28
-rw-r--r--drivers/usb/cdns3/gadget.c2744
-rw-r--r--drivers/usb/cdns3/gadget.h1338
-rw-r--r--drivers/usb/cdns3/host-export.h28
-rw-r--r--drivers/usb/cdns3/host.c74
-rw-r--r--drivers/usb/cdns3/trace.c11
-rw-r--r--drivers/usb/cdns3/trace.h493
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci.h12
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c19
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c9
-rw-r--r--drivers/usb/chipidea/core.c97
-rw-r--r--drivers/usb/chipidea/otg.c8
-rw-r--r--drivers/usb/chipidea/udc.c35
-rw-r--r--drivers/usb/class/cdc-acm.c12
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usblp.c13
-rw-r--r--drivers/usb/class/usbtmc.c16
-rw-r--r--drivers/usb/common/Kconfig51
-rw-r--r--drivers/usb/common/Makefile2
-rw-r--r--drivers/usb/common/debug.c268
-rw-r--r--drivers/usb/common/usb-conn-gpio.c284
-rw-r--r--drivers/usb/core/buffer.c10
-rw-r--r--drivers/usb/core/config.c12
-rw-r--r--drivers/usb/core/devio.c118
-rw-r--r--drivers/usb/core/driver.c2
-rw-r--r--drivers/usb/core/file.c10
-rw-r--r--drivers/usb/core/generic.c5
-rw-r--r--drivers/usb/core/hcd-pci.c33
-rw-r--r--drivers/usb/core/hcd.c142
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/phy.c21
-rw-r--r--drivers/usb/core/phy.h1
-rw-r--r--drivers/usb/core/port.c9
-rw-r--r--drivers/usb/core/sysfs.c121
-rw-r--r--drivers/usb/core/usb.c222
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc2/gadget.c9
-rw-r--r--drivers/usb/dwc2/hcd.c8
-rw-r--r--drivers/usb/dwc2/params.c5
-rw-r--r--drivers/usb/dwc2/platform.c4
-rw-r--r--drivers/usb/dwc3/core.c61
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/debug.h252
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c18
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c15
-rw-r--r--drivers/usb/dwc3/dwc3-st.c10
-rw-r--r--drivers/usb/dwc3/gadget.c24
-rw-r--r--drivers/usb/dwc3/host.c22
-rw-r--r--drivers/usb/dwc3/trace.h2
-rw-r--r--drivers/usb/gadget/composite.c39
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c28
-rw-r--r--drivers/usb/gadget/udc/Kconfig3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c7
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c80
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c59
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c15
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h3
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c1
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c4
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c8
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c19
-rw-r--r--drivers/usb/gadget/udc/net2280.c38
-rw-r--r--drivers/usb/gadget/udc/net2280.h3
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c9
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c4
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/Kconfig35
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/ehci-atmel.c3
-rw-r--r--drivers/usb/host/ehci-exynos.c23
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-omap.c4
-rw-r--r--drivers/usb/host/ehci-orion.c3
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c5
-rw-r--r--drivers/usb/host/ehci-st.c4
-rw-r--r--drivers/usb/host/ehci-w90x900.c130
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c6
-rw-r--r--drivers/usb/host/imx21-hcd.c6
-rw-r--r--drivers/usb/host/isp116x-hcd.c6
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/max3421-hcd.c17
-rw-r--r--drivers/usb/host/ohci-exynos.c23
-rw-r--r--drivers/usb/host/ohci-hcd.c17
-rw-r--r--drivers/usb/host/ohci-nxp.c25
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/ohci-platform.c4
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-ps3.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-sa1111.c2
-rw-r--r--drivers/usb/host/ohci-st.c4
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c446
-rw-r--r--drivers/usb/host/oxu210hp.h448
-rw-r--r--drivers/usb/host/pci-quirks.c45
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-platform.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c1
-rw-r--r--drivers/usb/host/xhci-dbgtty.c4
-rw-r--r--drivers/usb/host/xhci-ext-caps.c18
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c1
-rw-r--r--drivers/usb/host/xhci-mtk.c13
-rw-r--r--drivers/usb/host/xhci-mtk.h1
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-plat.c15
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar.c25
-rw-r--r--drivers/usb/host/xhci-rcar.h21
-rw-r--r--drivers/usb/host/xhci-tegra.c10
-rw-r--r--drivers/usb/host/xhci.c15
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/isp1760/isp1760-core.c3
-rw-r--r--drivers/usb/isp1760/isp1760-if.c1
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c29
-rw-r--r--drivers/usb/misc/cytherm.c64
-rw-r--r--drivers/usb/misc/iowarrior.c7
-rw-r--r--drivers/usb/misc/lvstest.c19
-rw-r--r--drivers/usb/misc/rio500.c66
-rw-r--r--drivers/usb/misc/trancevibrator.c15
-rw-r--r--drivers/usb/misc/usb251xb.c15
-rw-r--r--drivers/usb/misc/usbsevseg.c17
-rw-r--r--drivers/usb/misc/yurex.c2
-rw-r--r--drivers/usb/mtu3/Kconfig1
-rw-r--r--drivers/usb/mtu3/mtu3.h5
-rw-r--r--drivers/usb/mtu3/mtu3_core.c4
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c4
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c48
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h6
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c3
-rw-r--r--drivers/usb/musb/musb_core.c24
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c18
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c14
-rw-r--r--drivers/usb/phy/phy-mv-usb.c17
-rw-r--r--drivers/usb/phy/phy-tahvo.c18
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c12
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c2
-rw-r--r--drivers/usb/roles/class.c57
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c54
-rw-r--r--drivers/usb/serial/ftdi_sio.c43
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/storage/debug.h2
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c15
-rw-r--r--drivers/usb/storage/scsiglue.c19
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/class.c16
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c101
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c75
-rw-r--r--drivers/usb/typec/tcpm/wcove.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c15
-rw-r--r--drivers/usb/usb-skeleton.c8
-rw-r--r--drivers/usb/usbip/stub.h7
-rw-r--r--drivers/usb/usbip/stub_dev.c50
-rw-r--r--drivers/usb/usbip/stub_main.c57
-rw-r--r--drivers/usb/usbip/stub_rx.c204
-rw-r--r--drivers/usb/usbip/stub_tx.c99
-rw-r--r--drivers/usb/usbip/usbip_common.c59
-rw-r--r--drivers/usb/usbip/vhci_hcd.c12
-rw-r--r--drivers/usb/usbip/vhci_rx.c3
-rw-r--r--drivers/usb/usbip/vhci_tx.c66
-rw-r--r--drivers/usb/usbip/vudc.h2
-rw-r--r--drivers/usb/usbip/vudc_dev.c9
-rw-r--r--drivers/usb/usbip/vudc_main.c1
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c7
-rw-r--r--drivers/vfio/vfio_iommu_type1.c27
-rw-r--r--drivers/vhost/test.c13
-rw-r--r--drivers/vhost/vhost.c524
-rw-r--r--drivers/vhost/vhost.h41
-rw-r--r--drivers/video/fbdev/Kconfig14
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/acornfb.c1
-rw-r--r--drivers/video/fbdev/atafb.c42
-rw-r--r--drivers/video/fbdev/da8xx-fb.c118
-rw-r--r--drivers/video/fbdev/nuc900fb.c760
-rw-r--r--drivers/video/fbdev/nuc900fb.h51
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c8
-rw-r--r--drivers/video/fbdev/pxafb.c21
-rw-r--r--drivers/video/fbdev/pxafb.h1
-rw-r--r--drivers/video/fbdev/sa1100fb.c27
-rw-r--r--drivers/video/fbdev/sa1100fb.h1
-rw-r--r--drivers/video/fbdev/sm501fb.c37
-rw-r--r--drivers/video/fbdev/w100fb.c23
-rw-r--r--drivers/video/fbdev/wm8505fb.c13
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/w1/masters/Kconfig9
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/mxc_w1.c4
-rw-r--r--drivers/w1/masters/omap_hdq.c4
-rw-r--r--drivers/w1/masters/sgi_w1.c130
-rw-r--r--drivers/w1/slaves/Kconfig6
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds250x.c290
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/privcmd.c35
-rw-r--r--drivers/xen/swiotlb-xen.c42
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c3
-rw-r--r--drivers/xen/xlate_mmu.c32
2868 files changed, 95321 insertions, 86449 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 61cf4ea2c229..8befa53f43be 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -30,8 +30,6 @@ source "drivers/block/Kconfig"
source "drivers/nvme/Kconfig"
-# misc before ide - BLK_DEV_SGIIOC4 depends on SGI_IOC4
-
source "drivers/misc/Kconfig"
source "drivers/ide/Kconfig"
@@ -108,8 +106,6 @@ source "drivers/hid/Kconfig"
source "drivers/usb/Kconfig"
-source "drivers/uwb/Kconfig"
-
source "drivers/mmc/Kconfig"
source "drivers/memstick/Kconfig"
@@ -146,6 +142,8 @@ source "drivers/hv/Kconfig"
source "drivers/xen/Kconfig"
+source "drivers/greybus/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 6d37564e783c..aaef17cc6512 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -100,7 +100,6 @@ obj-$(CONFIG_ZORRO) += zorro/
obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
-obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_USB_SUPPORT) += usb/
@@ -132,7 +131,6 @@ obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
-obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
@@ -148,6 +146,7 @@ obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
+obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5f6158973289..ebe1e9e5fd81 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -323,7 +323,7 @@ config ACPI_NUMA
bool "NUMA support"
depends on NUMA
depends on (X86 || IA64 || ARM64)
- default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64
+ default y if IA64 || ARM64
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index d696f165a50e..60bbc5090abe 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
}
static const struct lpss_device_desc lpt_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
+ | LPSS_SAVE_CTX,
.prv_offset = 0x800,
};
static const struct lpss_device_desc lpt_i2c_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
.prv_offset = 0x800,
};
@@ -236,7 +237,8 @@ static struct property_entry uart_properties[] = {
};
static const struct lpss_device_desc lpt_uart_dev_desc = {
- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
+ | LPSS_SAVE_CTX,
.clk_con_id = "baudclk",
.prv_offset = 0x800,
.setup = lpss_uart_setup,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 24f065114d42..2c4dda0787e8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -279,9 +279,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
if (acpi_duplicate_processor_id(pr->acpi_id)) {
- dev_err(&device->dev,
- "Failed to get unique processor _UID (0x%x)\n",
- pr->acpi_id);
+ if (pr->acpi_id == 0xff)
+ dev_info_once(&device->dev,
+ "Entry not well-defined, consider updating BIOS\n");
+ else
+ dev_err(&device->dev,
+ "Failed to get unique processor _UID (0x%x)\n",
+ pr->acpi_id);
return -ENODEV;
}
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 13d513b81589..1ea52576f0a2 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -134,12 +134,12 @@ struct acpi_namespace_node {
union acpi_operand_object *object; /* Interpreter object */
u8 descriptor_type; /* Differentiate object descriptor types */
u8 type; /* ACPI Type associated with this name */
- u8 flags; /* Miscellaneous flags */
- acpi_owner_id owner_id; /* Node creator */
+ u16 flags; /* Miscellaneous flags */
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
struct acpi_namespace_node *parent; /* Parent node */
struct acpi_namespace_node *child; /* First child */
struct acpi_namespace_node *peer; /* First peer */
+ acpi_owner_id owner_id; /* Node creator */
/*
* The following fields are used by the ASL compiler and disassembler only
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index b2ef703d7df8..8def0e3d690f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -153,8 +153,8 @@ struct acpi_object_method {
} dispatch;
u32 aml_length;
- u8 thread_count;
acpi_owner_id owner_id;
+ u8 thread_count;
};
/* Flags for info_flags field above */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 8a4e6b4aaf2c..218ff4c8b817 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -167,9 +167,9 @@ struct acpi_evaluate_info {
u32 return_flags; /* Used for return value analysis */
u32 return_btype; /* Bitmapped type of the returned object */
u16 param_count; /* Count of the input argument list */
+ u16 node_flags; /* Same as Node->Flags */
u8 pass_number; /* Parser pass number */
u8 return_object_type; /* Object type of the returned object */
- u8 node_flags; /* Same as Node->Flags */
u8 flags; /* General flags */
};
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 9022537567e9..601808be86d1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -686,22 +686,26 @@ void acpi_ut_delete_address_lists(void);
/*
* utxferror - various error/warning output functions
*/
+ACPI_PRINTF_LIKE(5)
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_warning(const char *module_name,
u32 line_number,
char *pathname,
- u8 node_flags, const char *format, ...);
+ u16 node_flags, const char *format, ...);
+ACPI_PRINTF_LIKE(5)
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_info(const char *module_name,
u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...);
+ char *pathname,
+ u16 node_flags, const char *format, ...);
+ACPI_PRINTF_LIKE(5)
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_bios_error(const char *module_name,
u32 line_number,
char *pathname,
- u8 node_flags, const char *format, ...);
+ u16 node_flags, const char *format, ...);
void
acpi_ut_prefixed_namespace_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 7809bd94a18d..47d2e5059849 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -121,7 +121,7 @@ void acpi_db_display_history(void)
for (i = 0; i < acpi_gbl_num_history; i++) {
if (acpi_gbl_history_buffer[history_index].command) {
- acpi_os_printf("%3ld %s\n",
+ acpi_os_printf("%3u %s\n",
acpi_gbl_history_buffer[history_index].
cmd_num,
acpi_gbl_history_buffer[history_index].
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 3e5f95390f0d..55a7e10998d8 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -593,7 +593,7 @@ static u32 acpi_db_get_line(char *input_buffer)
input_buffer)) {
acpi_os_printf
("Buffer overflow while parsing input line (max %u characters)\n",
- sizeof(acpi_gbl_db_parsed_buf));
+ (u32)sizeof(acpi_gbl_db_parsed_buf));
return (0);
}
@@ -853,24 +853,24 @@ acpi_db_command_dispatch(char *input_buffer,
if (param_count == 0) {
acpi_os_printf
- ("Current debug level for file output is: %8.8lX\n",
+ ("Current debug level for file output is: %8.8X\n",
acpi_gbl_db_debug_level);
acpi_os_printf
- ("Current debug level for console output is: %8.8lX\n",
+ ("Current debug level for console output is: %8.8X\n",
acpi_gbl_db_console_debug_level);
} else if (param_count == 2) {
temp = acpi_gbl_db_console_debug_level;
acpi_gbl_db_console_debug_level =
strtoul(acpi_gbl_db_args[1], NULL, 16);
acpi_os_printf
- ("Debug Level for console output was %8.8lX, now %8.8lX\n",
+ ("Debug Level for console output was %8.8X, now %8.8X\n",
temp, acpi_gbl_db_console_debug_level);
} else {
temp = acpi_gbl_db_debug_level;
acpi_gbl_db_debug_level =
strtoul(acpi_gbl_db_args[1], NULL, 16);
acpi_os_printf
- ("Debug Level for file output was %8.8lX, now %8.8lX\n",
+ ("Debug Level for file output was %8.8X, now %8.8X\n",
temp, acpi_gbl_db_debug_level);
}
break;
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index d8b7a0fe92ec..76a15b6ffc5d 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -302,6 +302,10 @@ acpi_status acpi_db_disassemble_method(char *name)
}
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
walk_state->owner_id = obj_desc->method.owner_id;
/* Push start scope on scope stack and make it current */
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index d220168dca01..f9fc84bc3e84 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -394,7 +394,6 @@ void acpi_db_decode_locals(struct acpi_walk_state *walk_state)
u8 display_locals = FALSE;
node = walk_state->method_node;
- obj_desc = walk_state->method_desc;
/* There are no locals for the module-level code case */
diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c
index bf620937c79b..3af88e70238c 100644
--- a/drivers/acpi/acpica/dbstats.c
+++ b/drivers/acpi/acpica/dbstats.c
@@ -341,17 +341,17 @@ acpi_status acpi_db_display_statistics(char *type_arg)
"ACPI_TYPE", "NODES", "OBJECTS");
for (i = 0; i < ACPI_TYPE_NS_NODE_MAX; i++) {
- acpi_os_printf("%16.16s % 10ld% 10ld\n",
+ acpi_os_printf("%16.16s %10u %10u\n",
acpi_ut_get_type_name(i),
acpi_gbl_node_type_count[i],
acpi_gbl_obj_type_count[i]);
}
- acpi_os_printf("%16.16s % 10ld% 10ld\n", "Misc/Unknown",
+ acpi_os_printf("%16.16s %10u %10u\n", "Misc/Unknown",
acpi_gbl_node_type_count_misc,
acpi_gbl_obj_type_count_misc);
- acpi_os_printf("%16.16s % 10ld% 10ld\n", "TOTALS:",
+ acpi_os_printf("%16.16s %10u %10u\n", "TOTALS:",
acpi_gbl_num_nodes, acpi_gbl_num_objects);
break;
@@ -379,16 +379,14 @@ acpi_status acpi_db_display_statistics(char *type_arg)
case CMD_STAT_MISC:
acpi_os_printf("\nMiscellaneous Statistics:\n\n");
- acpi_os_printf("Calls to AcpiPsFind:.. ........% 7ld\n",
+ acpi_os_printf("%-28s: %7u\n", "Calls to AcpiPsFind",
acpi_gbl_ps_find_count);
- acpi_os_printf("Calls to AcpiNsLookup:..........% 7ld\n",
+ acpi_os_printf("%-28s: %7u\n", "Calls to AcpiNsLookup",
acpi_gbl_ns_lookup_count);
- acpi_os_printf("\n");
-
- acpi_os_printf("Mutex usage:\n\n");
+ acpi_os_printf("\nMutex usage:\n\n");
for (i = 0; i < ACPI_NUM_MUTEX; i++) {
- acpi_os_printf("%-28s: % 7ld\n",
+ acpi_os_printf("%-28s: %7u\n",
acpi_ut_get_mutex_name(i),
acpi_gbl_mutex_info[i].use_count);
}
@@ -399,87 +397,87 @@ acpi_status acpi_db_display_statistics(char *type_arg)
acpi_os_printf("\nInternal object sizes:\n\n");
acpi_os_printf("Common %3d\n",
- sizeof(struct acpi_object_common));
+ (u32)sizeof(struct acpi_object_common));
acpi_os_printf("Number %3d\n",
- sizeof(struct acpi_object_integer));
+ (u32)sizeof(struct acpi_object_integer));
acpi_os_printf("String %3d\n",
- sizeof(struct acpi_object_string));
+ (u32)sizeof(struct acpi_object_string));
acpi_os_printf("Buffer %3d\n",
- sizeof(struct acpi_object_buffer));
+ (u32)sizeof(struct acpi_object_buffer));
acpi_os_printf("Package %3d\n",
- sizeof(struct acpi_object_package));
+ (u32)sizeof(struct acpi_object_package));
acpi_os_printf("BufferField %3d\n",
- sizeof(struct acpi_object_buffer_field));
+ (u32)sizeof(struct acpi_object_buffer_field));
acpi_os_printf("Device %3d\n",
- sizeof(struct acpi_object_device));
+ (u32)sizeof(struct acpi_object_device));
acpi_os_printf("Event %3d\n",
- sizeof(struct acpi_object_event));
+ (u32)sizeof(struct acpi_object_event));
acpi_os_printf("Method %3d\n",
- sizeof(struct acpi_object_method));
+ (u32)sizeof(struct acpi_object_method));
acpi_os_printf("Mutex %3d\n",
- sizeof(struct acpi_object_mutex));
+ (u32)sizeof(struct acpi_object_mutex));
acpi_os_printf("Region %3d\n",
- sizeof(struct acpi_object_region));
+ (u32)sizeof(struct acpi_object_region));
acpi_os_printf("PowerResource %3d\n",
- sizeof(struct acpi_object_power_resource));
+ (u32)sizeof(struct acpi_object_power_resource));
acpi_os_printf("Processor %3d\n",
- sizeof(struct acpi_object_processor));
+ (u32)sizeof(struct acpi_object_processor));
acpi_os_printf("ThermalZone %3d\n",
- sizeof(struct acpi_object_thermal_zone));
+ (u32)sizeof(struct acpi_object_thermal_zone));
acpi_os_printf("RegionField %3d\n",
- sizeof(struct acpi_object_region_field));
+ (u32)sizeof(struct acpi_object_region_field));
acpi_os_printf("BankField %3d\n",
- sizeof(struct acpi_object_bank_field));
+ (u32)sizeof(struct acpi_object_bank_field));
acpi_os_printf("IndexField %3d\n",
- sizeof(struct acpi_object_index_field));
+ (u32)sizeof(struct acpi_object_index_field));
acpi_os_printf("Reference %3d\n",
- sizeof(struct acpi_object_reference));
+ (u32)sizeof(struct acpi_object_reference));
acpi_os_printf("Notify %3d\n",
- sizeof(struct acpi_object_notify_handler));
+ (u32)sizeof(struct acpi_object_notify_handler));
acpi_os_printf("AddressSpace %3d\n",
- sizeof(struct acpi_object_addr_handler));
+ (u32)sizeof(struct acpi_object_addr_handler));
acpi_os_printf("Extra %3d\n",
- sizeof(struct acpi_object_extra));
+ (u32)sizeof(struct acpi_object_extra));
acpi_os_printf("Data %3d\n",
- sizeof(struct acpi_object_data));
+ (u32)sizeof(struct acpi_object_data));
acpi_os_printf("\n");
acpi_os_printf("ParseObject %3d\n",
- sizeof(struct acpi_parse_obj_common));
+ (u32)sizeof(struct acpi_parse_obj_common));
acpi_os_printf("ParseObjectNamed %3d\n",
- sizeof(struct acpi_parse_obj_named));
+ (u32)sizeof(struct acpi_parse_obj_named));
acpi_os_printf("ParseObjectAsl %3d\n",
- sizeof(struct acpi_parse_obj_asl));
+ (u32)sizeof(struct acpi_parse_obj_asl));
acpi_os_printf("OperandObject %3d\n",
- sizeof(union acpi_operand_object));
+ (u32)sizeof(union acpi_operand_object));
acpi_os_printf("NamespaceNode %3d\n",
- sizeof(struct acpi_namespace_node));
+ (u32)sizeof(struct acpi_namespace_node));
acpi_os_printf("AcpiObject %3d\n",
- sizeof(union acpi_object));
+ (u32)sizeof(union acpi_object));
acpi_os_printf("\n");
acpi_os_printf("Generic State %3d\n",
- sizeof(union acpi_generic_state));
+ (u32)sizeof(union acpi_generic_state));
acpi_os_printf("Common State %3d\n",
- sizeof(struct acpi_common_state));
+ (u32)sizeof(struct acpi_common_state));
acpi_os_printf("Control State %3d\n",
- sizeof(struct acpi_control_state));
+ (u32)sizeof(struct acpi_control_state));
acpi_os_printf("Update State %3d\n",
- sizeof(struct acpi_update_state));
+ (u32)sizeof(struct acpi_update_state));
acpi_os_printf("Scope State %3d\n",
- sizeof(struct acpi_scope_state));
+ (u32)sizeof(struct acpi_scope_state));
acpi_os_printf("Parse Scope %3d\n",
- sizeof(struct acpi_pscope_state));
+ (u32)sizeof(struct acpi_pscope_state));
acpi_os_printf("Package State %3d\n",
- sizeof(struct acpi_pkg_state));
+ (u32)sizeof(struct acpi_pkg_state));
acpi_os_printf("Thread State %3d\n",
- sizeof(struct acpi_thread_state));
+ (u32)sizeof(struct acpi_thread_state));
acpi_os_printf("Result Values %3d\n",
- sizeof(struct acpi_result_values));
+ (u32)sizeof(struct acpi_result_values));
acpi_os_printf("Notify Info %3d\n",
- sizeof(struct acpi_notify_info));
+ (u32)sizeof(struct acpi_notify_info));
break;
case CMD_STAT_STACK:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 710488ec59e9..04a40d563dd6 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -644,17 +644,17 @@ ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
*
- * RETURN: None
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
* (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
*
******************************************************************************/
-void acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number)
+u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number)
{
ACPI_FUNCTION_TRACE(acpi_dispatch_gpe);
- acpi_ev_detect_gpe(gpe_device, NULL, gpe_number);
+ return acpi_ev_detect_gpe(gpe_device, NULL, gpe_number);
}
ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe)
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 6526b2deeaad..a9bc938a3b55 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -94,7 +94,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[9] = {
"Parameter Count"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
+ {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"}
@@ -269,8 +269,8 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
static struct acpi_exdump_info acpi_ex_dump_node[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
- {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
+ {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(flags), "Flags"},
+ {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
{ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"},
{ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"},
{ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"},
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 2566e2d4c780..3f045b5953b2 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -598,7 +598,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
if (flags & ACPI_NS_PREFIX_MUST_EXIST) {
acpi_os_printf(ACPI_MSG_BIOS_ERROR
"Object does not exist: %4.4s\n",
- &simple_name);
+ (char *)&simple_name);
}
#endif
/* Name not found in ACPI namespace */
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 6eb63db72249..fe9b3639a87d 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -241,7 +241,7 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
node->type = (u8) type;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "%4.4s (%s) [Node %p Owner %X] added to %4.4s (%s) [Node %p]\n",
+ "%4.4s (%s) [Node %p Owner %3.3X] added to %4.4s (%s) [Node %p]\n",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type), node, owner_id,
acpi_ut_get_node_name(parent_node),
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 1b12c172e115..9731d7cf1b83 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -197,7 +197,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/* Now we can print out the pertinent information */
- acpi_os_printf(" %-12s %p %2.2X ",
+ acpi_os_printf(" %-12s %p %3.3X ",
acpi_ut_get_type_name(type), this_node,
this_node->owner_id);
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 8d776256b213..663d85e0adba 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -126,7 +126,7 @@ static const struct acpi_repair_info acpi_ns_repairable_names[] = {
#define ACPI_FDE_FIELD_COUNT 5
#define ACPI_FDE_BYTE_BUFFER_SIZE 5
-#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * sizeof (u32))
+#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * (u32) sizeof (u32))
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 91a4b984f224..309440010ab2 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -750,6 +750,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+
acpi_ns_delete_namespace_by_owner(owner_id);
acpi_ut_release_write_lock(&acpi_gbl_namespace_rw_lock);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 01b1b36c8a8e..5b169b5f0f1a 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -158,7 +158,7 @@ acpi_debug_print(u32 requested_debug_level,
* Display the module name, current line number, thread ID (if requested),
* current procedure nesting level, and the current procedure name
*/
- acpi_os_printf("%9s-%04ld ", module_name, line_number);
+ acpi_os_printf("%9s-%04d ", module_name, line_number);
#ifdef ACPI_APPLICATION
/*
@@ -177,7 +177,7 @@ acpi_debug_print(u32 requested_debug_level,
fill_count = 0;
}
- acpi_os_printf("[%02ld] %*s",
+ acpi_os_printf("[%02d] %*s",
acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
acpi_os_printf("%s%*s: ",
acpi_ut_trim_function_name(function_name), fill_count,
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 075457341bad..918aca7c4db4 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -39,7 +39,7 @@ void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_warning(const char *module_name,
u32 line_number,
char *pathname,
- u8 node_flags, const char *format, ...)
+ u16 node_flags, const char *format, ...)
{
va_list arg_list;
@@ -81,7 +81,7 @@ acpi_ut_predefined_warning(const char *module_name,
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_info(const char *module_name,
u32 line_number,
- char *pathname, u8 node_flags, const char *format, ...)
+ char *pathname, u16 node_flags, const char *format, ...)
{
va_list arg_list;
@@ -124,7 +124,7 @@ void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_bios_error(const char *module_name,
u32 line_number,
char *pathname,
- u8 node_flags, const char *format, ...)
+ u16 node_flags, const char *format, ...)
{
va_list arg_list;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 688c61a90725..ad2b218039d0 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -65,13 +65,14 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
- {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
+ {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8_1}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
{"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
{"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
{"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3}, /* Windows 10 version 1709 - Added 02/2018 */
{"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */
{"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */
+ {"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 5eb8b76ce9d8..d3525ef8ed28 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -38,7 +38,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
if (*owner_id) {
ACPI_ERROR((AE_INFO,
- "Owner ID [0x%2.2X] already exists", *owner_id));
+ "Owner ID [0x%3.3X] already exists", *owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -88,14 +88,14 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
/*
* Construct encoded ID from the index and bit position
*
- * Note: Last [j].k (bit 255) is never used and is marked
+ * Note: Last [j].k (bit 4095) is never used and is marked
* permanently allocated (prevents +1 overflow)
*/
*owner_id =
(acpi_owner_id)((k + 1) + ACPI_MUL_32(j));
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Allocated OwnerId: %2.2X\n",
+ "Allocated OwnerId: 0x%3.3X\n",
(unsigned int)*owner_id));
goto exit;
}
@@ -116,7 +116,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
*/
status = AE_OWNER_ID_LIMIT;
ACPI_ERROR((AE_INFO,
- "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
+ "Could not allocate new OwnerId (4095 max), AE_OWNER_ID_LIMIT"));
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
@@ -153,7 +153,7 @@ void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
/* Zero is not a valid owner_ID */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%3.3X", owner_id));
return_VOID;
}
@@ -179,7 +179,7 @@ void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: 0x%2.2X",
+ "Attempted release of non-allocated OwnerId: 0x%3.3X",
owner_id + 1));
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a66e00fe31fe..777f6f7122b4 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -153,6 +153,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
int ghes_estatus_pool_init(int num_ghes)
{
unsigned long addr, len;
+ int rc;
ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
if (!ghes_estatus_pool)
@@ -164,7 +165,7 @@ int ghes_estatus_pool_init(int num_ghes)
ghes_estatus_pool_size_request = PAGE_ALIGN(len);
addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
if (!addr)
- return -ENOMEM;
+ goto err_pool_alloc;
/*
* New allocation must be visible in all pgd before it can be found by
@@ -172,7 +173,19 @@ int ghes_estatus_pool_init(int num_ghes)
*/
vmalloc_sync_all();
- return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
+ rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
+ if (rc)
+ goto err_pool_add;
+
+ return 0;
+
+err_pool_add:
+ vfree((void *)addr);
+
+err_pool_alloc:
+ gen_pool_destroy(ghes_estatus_pool);
+
+ return -ENOMEM;
}
static int map_gen_v2(struct ghes *ghes)
@@ -483,7 +496,7 @@ static void ghes_do_proc(struct ghes *ghes,
int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
guid_t *sec_type;
- guid_t *fru_id = &NULL_UUID_LE;
+ const guid_t *fru_id = &guid_null;
char *fru_text = "";
sev = ghes_severity(estatus->error_severity);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4551e33fa71..5a7551d060f2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
- if (idx > its->its_count) {
- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+ if (idx >= its->its_count) {
+ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
@@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev,
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- int node = acpi_map_pxm_to_node(smmu->pxm);
+ int dev_node = acpi_map_pxm_to_node(smmu->pxm);
- if (node != NUMA_NO_NODE && !node_online(node))
+ if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
return -EINVAL;
- set_dev_node(dev, node);
+ set_dev_node(dev, dev_node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 15f103d7532b..3b2525908dd8 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -365,8 +365,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
- status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
- ACPI_TYPE_PACKAGE);
+ status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
+ &buffer, ACPI_TYPE_PACKAGE);
+ if (status == AE_NOT_FOUND) /* _PSD is optional */
+ return 0;
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index b2ef4c2ec955..fd66a736621c 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -49,8 +49,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
- (count > uncopied_bytes))
+ (count > uncopied_bytes)) {
+ kfree(buf);
return -EINVAL;
+ }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
@@ -70,6 +72,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
+ kfree(buf);
return count;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaaf9d82..08bb9f2f2d23 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -166,6 +166,10 @@ int acpi_device_set_power(struct acpi_device *device, int state)
|| (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
+ acpi_handle_debug(device->handle, "Power state change: %s -> %s\n",
+ acpi_power_state_string(device->power.state),
+ acpi_power_state_string(state));
+
/* Make sure this is a valid target state */
/* There is a special case for D0 addressed below. */
@@ -232,13 +236,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
+ int cur_state = device->power.state;
+
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
- if (device->power.state == ACPI_STATE_D0) {
+ if (cur_state == ACPI_STATE_D0) {
int psc;
/* Nothing to do here if _PSC is not present. */
@@ -495,7 +501,8 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
goto out;
mutex_lock(&acpi_pm_notifier_lock);
- adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+ adev->wakeup.ws = wakeup_source_register(&adev->dev,
+ dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index c33756ed3304..da1e5c5ce150 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>
@@ -1048,24 +1049,6 @@ void acpi_ec_unblock_transactions(void)
acpi_ec_start(first_ec, true);
}
-void acpi_ec_mark_gpe_for_wake(void)
-{
- if (first_ec && !ec_no_wakeup)
- acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
-}
-
-void acpi_ec_set_gpe_wake_mask(u8 action)
-{
- if (first_ec && !ec_no_wakeup)
- acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
-}
-
-void acpi_ec_dispatch_gpe(void)
-{
- if (first_ec)
- acpi_dispatch_gpe(NULL, first_ec->gpe);
-}
-
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
@@ -1931,7 +1914,7 @@ static int acpi_ec_suspend(struct device *dev)
struct acpi_ec *ec =
acpi_driver_data(to_acpi_device(dev));
- if (acpi_sleep_no_ec_events() && ec_freeze_events)
+ if (!pm_suspend_no_platform() && ec_freeze_events)
acpi_ec_disable_event(ec);
return 0;
}
@@ -1948,8 +1931,7 @@ static int acpi_ec_suspend_noirq(struct device *dev)
ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
- if (acpi_sleep_no_ec_events())
- acpi_ec_enter_noirq(ec);
+ acpi_ec_enter_noirq(ec);
return 0;
}
@@ -1958,8 +1940,7 @@ static int acpi_ec_resume_noirq(struct device *dev)
{
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
- if (acpi_sleep_no_ec_events())
- acpi_ec_leave_noirq(ec);
+ acpi_ec_leave_noirq(ec);
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
@@ -1976,7 +1957,35 @@ static int acpi_ec_resume(struct device *dev)
acpi_ec_enable_event(ec);
return 0;
}
-#endif
+
+void acpi_ec_mark_gpe_for_wake(void)
+{
+ if (first_ec && !ec_no_wakeup)
+ acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
+}
+EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);
+
+void acpi_ec_set_gpe_wake_mask(u8 action)
+{
+ if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
+ acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
+}
+
+bool acpi_ec_dispatch_gpe(void)
+{
+ u32 ret;
+
+ if (!first_ec)
+ return false;
+
+ ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
+ if (ret == ACPI_INTERRUPT_HANDLED) {
+ pm_pr_dbg("EC GPE dispatched\n");
+ return true;
+ }
+ return false;
+}
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops acpi_ec_pm = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 96b7d39a97c6..8f9a28a870b0 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -14,14 +14,18 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
-static __initdata u8 hmat_revision;
+static u8 hmat_revision;
-static __initdata LIST_HEAD(targets);
-static __initdata LIST_HEAD(initiators);
-static __initdata LIST_HEAD(localities);
+static LIST_HEAD(targets);
+static LIST_HEAD(initiators);
+static LIST_HEAD(localities);
+
+static DEFINE_MUTEX(target_lock);
/*
* The defined enum order is used to prioritize attributes to break ties when
@@ -36,11 +40,19 @@ enum locality_types {
static struct memory_locality *localities_types[4];
+struct target_cache {
+ struct list_head node;
+ struct node_cache_attrs cache_attrs;
+};
+
struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct node_hmem_attrs hmem_attrs;
+ struct list_head caches;
+ struct node_cache_attrs cache_attrs;
+ bool registered;
};
struct memory_initiator {
@@ -53,7 +65,7 @@ struct memory_locality {
struct acpi_hmat_locality *hmat_loc;
};
-static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
struct memory_initiator *initiator;
@@ -63,7 +75,7 @@ static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
return NULL;
}
-static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
struct memory_target *target;
@@ -96,9 +108,6 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
{
struct memory_target *target;
- if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
- return;
-
target = find_mem_target(mem_pxm);
if (target)
return;
@@ -110,6 +119,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
target->memory_pxm = mem_pxm;
target->processor_pxm = PXM_INVAL;
list_add_tail(&target->node, &targets);
+ INIT_LIST_HEAD(&target->caches);
}
static __init const char *hmat_data_type(u8 type)
@@ -148,7 +158,7 @@ static __init const char *hmat_data_type_suffix(u8 type)
}
}
-static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
u32 value;
@@ -183,7 +193,7 @@ static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
return value;
}
-static __init void hmat_update_target_access(struct memory_target *target,
+static void hmat_update_target_access(struct memory_target *target,
u8 type, u32 value)
{
switch (type) {
@@ -314,7 +324,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_hmat_cache *cache = (void *)header;
- struct node_cache_attrs cache_attrs;
+ struct memory_target *target;
+ struct target_cache *tcache;
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +339,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
- cache_attrs.size = cache->cache_size;
- cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
- cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+ target = find_mem_target(cache->memory_PD);
+ if (!target)
+ return 0;
+
+ tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+ if (!tcache) {
+ pr_notice_once("Failed to allocate HMAT cache info\n");
+ return 0;
+ }
+
+ tcache->cache_attrs.size = cache->cache_size;
+ tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+ tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
- cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
- cache_attrs.indexing = NODE_CACHE_INDEXED;
+ tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
break;
case ACPI_HMAT_CA_NONE:
default:
- cache_attrs.indexing = NODE_CACHE_OTHER;
+ tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
break;
}
switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
case ACPI_HMAT_CP_WB:
- cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
break;
case ACPI_HMAT_CP_WT:
- cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
break;
case ACPI_HMAT_CP_NONE:
default:
- cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+ tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
break;
}
+ list_add_tail(&tcache->node, &target->caches);
- node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
return 0;
}
@@ -435,7 +456,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return 0;
}
-static __init u32 hmat_initiator_perf(struct memory_target *target,
+static u32 hmat_initiator_perf(struct memory_target *target,
struct memory_initiator *initiator,
struct acpi_hmat_locality *hmat_loc)
{
@@ -473,7 +494,7 @@ static __init u32 hmat_initiator_perf(struct memory_target *target,
hmat_loc->data_type);
}
-static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
bool updated = false;
@@ -517,7 +538,7 @@ static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
return ia->processor_pxm - ib->processor_pxm;
}
-static __init void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
struct memory_initiator *initiator;
@@ -577,29 +598,89 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
}
}
-static __init void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_cache(struct memory_target *target)
+{
+ unsigned mem_nid = pxm_to_node(target->memory_pxm);
+ struct target_cache *tcache;
+
+ list_for_each_entry(tcache, &target->caches, node)
+ node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
+static void hmat_register_target_perf(struct memory_target *target)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
-static __init void hmat_register_targets(void)
+static void hmat_register_target(struct memory_target *target)
{
- struct memory_target *target;
+ int nid = pxm_to_node(target->memory_pxm);
- list_for_each_entry(target, &targets, node) {
+ /*
+ * Skip offline nodes. This can happen when memory
+ * marked EFI_MEMORY_SP, "specific purpose", is applied
+ * to all the memory in a promixity domain leading to
+ * the node being marked offline / unplugged, or if
+ * memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ mutex_lock(&target_lock);
+ if (!target->registered) {
hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
hmat_register_target_perf(target);
+ target->registered = true;
}
+ mutex_unlock(&target_lock);
+}
+
+static void hmat_register_targets(void)
+{
+ struct memory_target *target;
+
+ list_for_each_entry(target, &targets, node)
+ hmat_register_target(target);
+}
+
+static int hmat_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_target *target;
+ struct memory_notify *mnb = arg;
+ int pxm, nid = mnb->status_change_nid;
+
+ if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ return NOTIFY_OK;
+
+ pxm = node_to_pxm(nid);
+ target = find_mem_target(pxm);
+ if (!target)
+ return NOTIFY_OK;
+
+ hmat_register_target(target);
+ return NOTIFY_OK;
}
+static struct notifier_block hmat_callback_nb = {
+ .notifier_call = hmat_callback,
+ .priority = 2,
+};
+
static __init void hmat_free_structures(void)
{
struct memory_target *target, *tnext;
struct memory_locality *loc, *lnext;
struct memory_initiator *initiator, *inext;
+ struct target_cache *tcache, *cnext;
list_for_each_entry_safe(target, tnext, &targets, node) {
+ list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
+ list_del(&tcache->node);
+ kfree(tcache);
+ }
list_del(&target->node);
kfree(target);
}
@@ -658,6 +739,10 @@ static __init int hmat_init(void)
}
}
hmat_register_targets();
+
+ /* Keep the table and structures if the notifier may use them */
+ if (!register_hotmemory_notifier(&hmat_callback_nb))
+ return 0;
out_put:
hmat_free_structures();
acpi_put_table(tbl);
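Dropping __init from the parsing and registration helpers is what makes the hotplug path possible: hmat_register_target() can now run again from a memory notifier when a node comes online after boot, and the registered flag plus target_lock keep the boot-time and hotplug paths from registering the same target twice. A minimal sketch of such a notifier, mirroring hmat_callback() above (my_* names are hypothetical):

/* Sketch: reacting to MEM_ONLINE so per-node attributes can be
 * registered once the node actually exists in sysfs. */
static int my_mem_online_cb(struct notifier_block *nb,
			    unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;

	if (action != MEM_ONLINE || mnb->status_change_nid == NUMA_NO_NODE)
		return NOTIFY_OK;

	/* node is online: safe to register its HMAT-derived attributes */
	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call	= my_mem_online_cb,
	.priority	= 2,	/* after the core memory-hotplug handling */
};
/* register_hotmemory_notifier(&my_mem_nb); */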
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f4c2fe6be4f2..afe6636f9ad3 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -194,9 +194,6 @@ void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
-void acpi_ec_mark_gpe_for_wake(void);
-void acpi_ec_set_gpe_wake_mask(u8 action);
-void acpi_ec_dispatch_gpe(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
@@ -204,6 +201,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
+bool acpi_ec_dispatch_gpe(void);
#endif
@@ -212,11 +210,9 @@ void acpi_ec_flush_work(void);
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
extern bool acpi_s2idle_wakeup(void);
-extern bool acpi_sleep_no_ec_events(void);
extern int acpi_sleep_init(void);
#else
static inline bool acpi_s2idle_wakeup(void) { return false; }
-static inline bool acpi_sleep_no_ec_events(void) { return true; }
static inline int acpi_sleep_init(void) { return -ENXIO; }
#endif
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index c02fa27dd3f3..1413324982f0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
break;
}
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev,
ssize_t rc = -ENXIO;
bool busy;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
device_unlock(dev);
@@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev,
}
mutex_unlock(&acpi_desc->init_mutex);
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
@@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev,
if (val != 1)
return -EINVAL;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
struct acpi_device *adev = data;
struct device *dev = &adev->dev;
- device_lock(dev->parent);
+ nfit_device_lock(dev->parent);
__acpi_nvdimm_notify(dev, event);
- device_unlock(dev->parent);
+ nfit_device_unlock(dev->parent);
}
static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
- device_lock(dev);
- device_unlock(dev);
+ nfit_device_lock(dev);
+ nfit_device_unlock(dev);
/* Bounce the init_mutex to complete initial registration */
mutex_lock(&acpi_desc->init_mutex);
@@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data)
* acpi_nfit_ars_rescan() submissions have had a chance to
* either submit or see ->cancel set.
*/
- device_lock(bus_dev);
- device_unlock(bus_dev);
+ nfit_device_lock(bus_dev);
+ nfit_device_unlock(bus_dev);
flush_workqueue(nfit_wq);
}
@@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
- device_lock(&adev->dev);
+ nfit_device_lock(&adev->dev);
__acpi_nfit_notify(&adev->dev, adev->handle, event);
- device_unlock(&adev->dev);
+ nfit_device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 6ee2b02af73e..24241941181c 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
+#ifdef CONFIG_PROVE_LOCKING
+static inline void nfit_device_lock(struct device *dev)
+{
+ device_lock(dev);
+ mutex_lock(&dev->lockdep_mutex);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->lockdep_mutex);
+ device_unlock(dev);
+}
+#else
+static inline void nfit_device_lock(struct device *dev)
+{
+ device_lock(dev);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+ device_unlock(dev);
+}
+#endif
+
const guid_t *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void acpi_nfit_shutdown(void *data);
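nfit_device_lock() pairs device_lock() with dev->lockdep_mutex so that lockdep, which cannot track the device lock itself, still sees the NFIT critical sections and can flag inversions against other locks; with CONFIG_PROVE_LOCKING disabled the wrappers collapse to plain device_lock()/device_unlock(). A usage sketch under that assumption (example_show is hypothetical):

/* Sketch: a sysfs handler taking the wrapper instead of device_lock()
 * directly, so lockdep validates the section via dev->lockdep_mutex. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	ssize_t rc = -ENXIO;

	nfit_device_lock(dev);
	if (dev_get_drvdata(dev))
		rc = sprintf(buf, "ok\n");
	nfit_device_unlock(dev);
	return rc;
}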
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9c0edf2fc0dd..2f9d0d20b836 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
@@ -80,6 +81,7 @@ struct acpi_ioremap {
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
+#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)
static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
@@ -206,7 +208,7 @@ acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
- list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
if (map->phys <= phys &&
phys + size <= map->phys + map->size)
return map;
@@ -249,7 +251,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
- list_for_each_entry_rcu(map, &acpi_ioremaps, list)
+ list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
if (map->virt <= virt &&
virt + size <= map->virt + map->size)
return map;
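The fourth argument to list_for_each_entry_rcu() is the optional lockdep expression for RCU list debugging: traversal is legal either under rcu_read_lock() or with acpi_ioremap_lock held, and the condition lets lockdep check exactly that instead of warning about a lockless walk. A reduced sketch of the same pattern (lookup_locked is a hypothetical caller that holds the mutex):

/* Sketch: walking an RCU-protected list that is also modified under a
 * mutex; the lockdep condition documents and verifies the rule. */
static struct acpi_ioremap *lookup_locked(acpi_physical_address phys)
{
	struct acpi_ioremap *map;

	lockdep_assert_held(&acpi_ioremap_lock);
	list_for_each_entry_rcu(map, &acpi_ioremaps, list,
				acpi_ioremap_lock_held())
		if (map->phys == phys)
			return map;
	return NULL;
}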
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d2549ae65e1b..dea8a60e18a4 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -449,8 +449,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.
*/
- if (!acpi_pci_irq_valid(dev, pin))
+ if (!acpi_pci_irq_valid(dev, pin)) {
+ kfree(entry);
return 0;
+ }
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
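The pci_irq.c hunk closes a leak: the prt entry is allocated earlier in acpi_pci_irq_enable() and every other exit path frees it, so the early return taken when no valid IRQ is available has to free it as well. A reduced sketch of the pattern being fixed (example_enable is hypothetical; acpi_pci_irq_lookup() may return an allocated entry):

/* Sketch: any object allocated before a conditional bail-out must be
 * released on that path too, as the added kfree() above does. */
static int example_enable(struct pci_dev *dev, int pin)
{
	struct acpi_prt_entry *entry = acpi_pci_irq_lookup(dev, pin);

	if (!entry)
		return -ENOENT;

	if (!acpi_pci_irq_valid(dev, pin)) {
		kfree(entry);		/* was leaked before this fix */
		return 0;
	}

	/* ... program the IRQ, then free entry as the normal path does ... */
	kfree(entry);
	return 0;
}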
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index db11f7771ef1..00a6da2121be 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -661,7 +661,7 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Link %s is referenced\n",
acpi_device_bid(link->device)));
- return (link->irq.active);
+ return link->irq.active;
}
/*
@@ -712,7 +712,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
mutex_unlock(&acpi_link_lock);
- return (link->irq.active);
+ return link->irq.active;
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 1e7ac0bd0d3a..f31544d3656e 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -541,6 +541,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
}
/**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
+ * the table revision isn't new enough.
+ * 1, any passed flag set
+ * 0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_processor *cpu_node = NULL;
+ int ret = -ENOENT;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_pptt_warn_missing();
+ return ret;
+ }
+
+ if (table->revision >= rev)
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+ if (cpu_node)
+ ret = (cpu_node->flags & flag) != 0;
+
+ acpi_put_table(table);
+
+ return ret;
+}
+
+/**
* acpi_find_last_cache_level() - Determines the number of cache levels for a PE
* @cpu: Kernel logical CPU number
*
@@ -605,6 +643,20 @@ int cache_setup_acpi(unsigned int cpu)
}
/**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ * 0, not a thread
+ *          -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
+ * the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+ return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
+/**
* find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
* @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
return ret;
}
-
/**
* find_acpi_cpu_topology_package() - Determine a unique CPU package value
* @cpu: Kernel logical CPU number
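check_acpi_cpu_flag() only trusts a flag when the table revision is at least the one that defined it, which is why acpi_pptt_cpu_is_thread() passes revision 2 for ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD; callers therefore get a tri-state answer and should treat a negative value as "unknown" rather than "not a thread". A sketch of a hypothetical caller:

/* Sketch: consuming the tri-state return of acpi_pptt_cpu_is_thread().
 * A negative value means the PPTT cannot answer, so fall back to some
 * other source of topology information. */
static bool example_cpu_is_smt_thread(unsigned int cpu)
{
	int ret = acpi_pptt_cpu_is_thread(cpu);

	if (ret < 0)
		return false;	/* unknown: caller decides the fallback */
	return ret == 1;
}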
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index aea8d674a33d..08da9c29f1e9 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -284,6 +284,29 @@ static int acpi_processor_stop(struct device *dev)
return 0;
}
+bool acpi_processor_cpufreq_init;
+
+static int acpi_processor_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int cpu = policy->cpu;
+
+ if (event == CPUFREQ_CREATE_POLICY) {
+ acpi_thermal_cpufreq_init(cpu);
+ acpi_processor_ppc_init(cpu);
+ } else if (event == CPUFREQ_REMOVE_POLICY) {
+ acpi_processor_ppc_exit(cpu);
+ acpi_thermal_cpufreq_exit(cpu);
+ }
+
+ return 0;
+}
+
+static struct notifier_block acpi_processor_notifier_block = {
+ .notifier_call = acpi_processor_notifier,
+};
+
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
@@ -310,8 +333,12 @@ static int __init acpi_processor_driver_init(void)
cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
NULL, acpi_soft_cpu_dead);
- acpi_thermal_cpufreq_init();
- acpi_processor_ppc_init();
+ if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER)) {
+ acpi_processor_cpufreq_init = true;
+ acpi_processor_ignore_ppc_init();
+ }
+
acpi_processor_throttling_init();
return 0;
err:
@@ -324,8 +351,12 @@ static void __exit acpi_processor_driver_exit(void)
if (acpi_disabled)
return;
- acpi_processor_ppc_exit();
- acpi_thermal_cpufreq_exit();
+ if (acpi_processor_cpufreq_init) {
+ cpufreq_unregister_notifier(&acpi_processor_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ acpi_processor_cpufreq_init = false;
+ }
+
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
driver_unregister(&acpi_processor_driver);
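Rather than each ACPI sub-driver hooking CPUFREQ_ADJUST, a single policy notifier now reacts to policy creation and removal and drives the per-CPU setup; the relevant events are CPUFREQ_CREATE_POLICY and CPUFREQ_REMOVE_POLICY. A condensed sketch of the pattern (my_* names are hypothetical):

/* Sketch: per-policy setup/teardown keyed off cpufreq policy events,
 * in the shape of acpi_processor_notifier() above. */
static int my_policy_notifier(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_CREATE_POLICY)
		pr_debug("setting up CPU%u\n", policy->cpu);
	else if (event == CPUFREQ_REMOVE_POLICY)
		pr_debug("tearing down CPU%u\n", policy->cpu);
	return 0;
}

static struct notifier_block my_policy_nb = {
	.notifier_call = my_policy_notifier,
};
/* cpufreq_register_notifier(&my_policy_nb, CPUFREQ_POLICY_NOTIFIER); */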
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ee87cb6f6e59..2261713d1aec 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -50,57 +50,13 @@ module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
"limited by BIOS, this should help");
-#define PPC_REGISTERED 1
-#define PPC_IN_USE 2
-
-static int acpi_processor_ppc_status;
-
-static int acpi_processor_ppc_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- struct acpi_processor *pr;
- unsigned int ppc = 0;
-
- if (ignore_ppc < 0)
- ignore_ppc = 0;
-
- if (ignore_ppc)
- return 0;
-
- if (event != CPUFREQ_ADJUST)
- return 0;
-
- mutex_lock(&performance_mutex);
-
- pr = per_cpu(processors, policy->cpu);
- if (!pr || !pr->performance)
- goto out;
-
- ppc = (unsigned int)pr->performance_platform_limit;
-
- if (ppc >= pr->performance->state_count)
- goto out;
-
- cpufreq_verify_within_limits(policy, 0,
- pr->performance->states[ppc].
- core_frequency * 1000);
-
- out:
- mutex_unlock(&performance_mutex);
-
- return 0;
-}
-
-static struct notifier_block acpi_ppc_notifier_block = {
- .notifier_call = acpi_processor_ppc_notifier,
-};
+static bool acpi_processor_ppc_in_use;
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
-
+ int ret;
if (!pr)
return -EINVAL;
@@ -112,7 +68,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
if (status != AE_NOT_FOUND)
- acpi_processor_ppc_status |= PPC_IN_USE;
+ acpi_processor_ppc_in_use = true;
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
@@ -124,6 +80,17 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
pr->performance_platform_limit = (int)ppc;
+ if (ppc >= pr->performance->state_count ||
+ unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+ return 0;
+
+ ret = dev_pm_qos_update_request(&pr->perflib_req,
+ pr->performance->states[ppc].core_frequency * 1000);
+ if (ret < 0) {
+ pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
return 0;
}
@@ -184,23 +151,32 @@ int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);
-void acpi_processor_ppc_init(void)
+void acpi_processor_ignore_ppc_init(void)
{
- if (!cpufreq_register_notifier
- (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
- acpi_processor_ppc_status |= PPC_REGISTERED;
- else
- printk(KERN_DEBUG
- "Warning: Processor Platform Limit not supported.\n");
+ if (ignore_ppc < 0)
+ ignore_ppc = 0;
+}
+
+void acpi_processor_ppc_init(int cpu)
+{
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
+ INT_MAX);
+ if (ret < 0) {
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
+ ret);
+ return;
+ }
}
-void acpi_processor_ppc_exit(void)
+void acpi_processor_ppc_exit(int cpu)
{
- if (acpi_processor_ppc_status & PPC_REGISTERED)
- cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- acpi_processor_ppc_status &= ~PPC_REGISTERED;
+ dev_pm_qos_remove_request(&pr->perflib_req);
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
@@ -477,7 +453,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
static int is_done = 0;
int result;
- if (!(acpi_processor_ppc_status & PPC_REGISTERED))
+ if (!acpi_processor_cpufreq_init)
return -EBUSY;
if (!try_module_get(calling_module))
@@ -513,7 +489,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
* we can allow the cpufreq driver to be rmmod'ed. */
is_done = 1;
- if (!(acpi_processor_ppc_status & PPC_IN_USE))
+ if (!acpi_processor_ppc_in_use)
module_put(calling_module);
return 0;
@@ -742,7 +718,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
{
struct acpi_processor *pr;
- if (!(acpi_processor_ppc_status & PPC_REGISTERED))
+ if (!acpi_processor_cpufreq_init)
return -EINVAL;
mutex_lock(&performance_mutex);
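The _PPC limit is now enforced through a per-device PM QoS request instead of clamping the policy from a notifier: the request is added with an effectively unlimited value and tightened whenever firmware lowers the limit, and the cpufreq core aggregates it with any other constraints. A minimal sketch of that lifecycle, assuming the same DEV_PM_QOS_MAX_FREQUENCY request type used in the hunk (example_* names are hypothetical, frequencies in kHz):

/* Sketch: lifetime of a per-CPU maximum-frequency QoS request. */
static struct dev_pm_qos_request example_req;

static void example_init(int cpu)
{
	dev_pm_qos_add_request(get_cpu_device(cpu), &example_req,
			       DEV_PM_QOS_MAX_FREQUENCY, INT_MAX);
}

static void example_limit(s32 khz)
{
	if (dev_pm_qos_request_active(&example_req))
		dev_pm_qos_update_request(&example_req, khz);
}

static void example_exit(void)
{
	dev_pm_qos_remove_request(&example_req);
}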
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 50fb0107375e..ec2638f1df4f 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -35,7 +35,6 @@ ACPI_MODULE_NAME("processor_thermal");
#define CPUFREQ_THERMAL_MAX_STEP 3
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
-static unsigned int acpi_thermal_cpufreq_is_init = 0;
#define reduction_pctg(cpu) \
per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
@@ -61,35 +60,11 @@ static int phys_package_first_cpu(int cpu)
static int cpu_has_cpufreq(unsigned int cpu)
{
struct cpufreq_policy policy;
- if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
+ if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
return 0;
return 1;
}
-static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
-
- if (event != CPUFREQ_ADJUST)
- goto out;
-
- max_freq = (
- policy->cpuinfo.max_freq *
- (100 - reduction_pctg(policy->cpu) * 20)
- ) / 100;
-
- cpufreq_verify_within_limits(policy, 0, max_freq);
-
- out:
- return 0;
-}
-
-static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
- .notifier_call = acpi_thermal_cpufreq_notifier,
-};
-
static int cpufreq_get_max_state(unsigned int cpu)
{
if (!cpu_has_cpufreq(cpu))
@@ -108,7 +83,10 @@ static int cpufreq_get_cur_state(unsigned int cpu)
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- int i;
+ struct cpufreq_policy *policy;
+ struct acpi_processor *pr;
+ unsigned long max_freq;
+ int i, ret;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -121,33 +99,53 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
* frequency.
*/
for_each_online_cpu(i) {
- if (topology_physical_package_id(i) ==
+ if (topology_physical_package_id(i) !=
topology_physical_package_id(cpu))
- cpufreq_update_policy(i);
+ continue;
+
+ pr = per_cpu(processors, i);
+
+ if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+ continue;
+
+ policy = cpufreq_cpu_get(i);
+ if (!policy)
+ return -EINVAL;
+
+ max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
+
+ cpufreq_cpu_put(policy);
+
+ ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
}
return 0;
}
-void acpi_thermal_cpufreq_init(void)
+void acpi_thermal_cpufreq_init(int cpu)
{
- int i;
-
- i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
- if (!i)
- acpi_thermal_cpufreq_is_init = 1;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
+ INT_MAX);
+ if (ret < 0) {
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
+ ret);
+ return;
+ }
}
-void acpi_thermal_cpufreq_exit(void)
+void acpi_thermal_cpufreq_exit(int cpu)
{
- if (acpi_thermal_cpufreq_is_init)
- cpufreq_unregister_notifier
- (&acpi_thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- acpi_thermal_cpufreq_is_init = 0;
+ dev_pm_qos_remove_request(&pr->thermal_req);
}
-
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index ea3d700da3ca..3eacf474e1e3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -39,6 +39,12 @@ static const guid_t prp_guids[] = {
/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
+ /* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
+ GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
+ 0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
+ /* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
+ GUID_INIT(0x6c501103, 0xc189, 0x4296,
+ 0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
};
/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
@@ -1210,7 +1216,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
/**
- * acpi_graph_get_remote_enpoint - Parses and returns remote end of an endpoint
+ * acpi_graph_get_remote_endpoint - Parses and returns remote end of an endpoint
* @fwnode: Endpoint firmware node pointing to a remote device
* @endpoint: Firmware node of remote endpoint is filled here if not %NULL
*
diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h
index 06372a37df10..c3522bb82792 100644
--- a/drivers/acpi/sbshc.h
+++ b/drivers/acpi/sbshc.h
@@ -15,8 +15,6 @@ enum acpi_smb_protocol {
SMBUS_BLOCK_PROCESS_CALL = 0xd,
};
-static const u8 SMBUS_PEC = 0x80;
-
enum acpi_sbs_device_addr {
ACPI_SBS_CHARGER = 0x9,
ACPI_SBS_MANAGER = 0xa,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0e28270b0fd8..aad6be5c0af0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
+ /*
+ * Although we call __add_memory(), which is documented to require the
+ * device_hotplug_lock, it is not necessary here because this is early
+ * boot code, when userspace and other code paths cannot trigger
+ * hotplug/hotunplug operations.
+ */
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index f0fe7c15d657..9fa77d72ef27 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -89,6 +89,10 @@ bool acpi_sleep_state_supported(u8 sleep_state)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool sleep_no_lps0 __read_mostly;
+module_param(sleep_no_lps0, bool, 0644);
+MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
u32 acpi_target_system_state(void)
@@ -158,11 +162,11 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static bool acpi_sleep_no_lps0;
+static bool acpi_sleep_default_s3;
-static int __init init_no_lps0(const struct dmi_system_id *d)
+static int __init init_default_s3(const struct dmi_system_id *d)
{
- acpi_sleep_no_lps0 = true;
+ acpi_sleep_default_s3 = true;
return 0;
}
@@ -363,7 +367,7 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
* S0 Idle firmware interface.
*/
{
- .callback = init_no_lps0,
+ .callback = init_default_s3,
.ident = "Dell XPS13 9360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -376,7 +380,7 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
* https://bugzilla.kernel.org/show_bug.cgi?id=199057).
*/
{
- .callback = init_no_lps0,
+ .callback = init_default_s3,
.ident = "ThinkPad X1 Tablet(2016)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -524,8 +528,9 @@ static void acpi_pm_end(void)
acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
+#define sleep_no_lps0 (1)
#define acpi_target_sleep_state ACPI_STATE_S0
-#define acpi_sleep_no_lps0 (false)
+#define acpi_sleep_default_s3 (1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
@@ -691,7 +696,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
.recover = acpi_pm_finish,
};
-static bool s2idle_in_progress;
static bool s2idle_wakeup;
/*
@@ -904,42 +908,43 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
- if (acpi_sleep_no_lps0) {
- acpi_handle_info(adev->handle,
- "Low Power S0 Idle interface disabled\n");
- return 0;
- }
-
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0;
guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
/* Check if the _DSM is present and as expected. */
out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
- if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
- char bitmask = *(char *)out_obj->buffer.pointer;
-
- lps0_dsm_func_mask = bitmask;
- lps0_device_handle = adev->handle;
- /*
- * Use suspend-to-idle by default if the default
- * suspend mode was not set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
-
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- bitmask);
-
- acpi_ec_mark_gpe_for_wake();
- } else {
+ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
acpi_handle_debug(adev->handle,
"_DSM function 0 evaluation failed\n");
+ return 0;
}
+
+ lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+
ACPI_FREE(out_obj);
+ acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+ lps0_dsm_func_mask);
+
+ lps0_device_handle = adev->handle;
+
lpi_device_get_constraints();
+ /*
+ * Use suspend-to-idle by default if the default suspend mode was not
+ * set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
+ */
+ acpi_ec_mark_gpe_for_wake();
+
return 0;
}
@@ -951,98 +956,110 @@ static struct acpi_scan_handler lps0_handler = {
static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
- s2idle_in_progress = true;
return 0;
}
static int acpi_s2idle_prepare(void)
{
- if (lps0_device_handle) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-
+ if (acpi_sci_irq_valid()) {
+ enable_irq_wake(acpi_sci_irq);
acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
}
- if (acpi_sci_irq_valid())
- enable_irq_wake(acpi_sci_irq);
-
acpi_enable_wakeup_devices(ACPI_STATE_S0);
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
+
+ s2idle_wakeup = true;
return 0;
}
-static void acpi_s2idle_wake(void)
+static int acpi_s2idle_prepare_late(void)
{
- if (!lps0_device_handle)
- return;
+ if (!lps0_device_handle || sleep_no_lps0)
+ return 0;
if (pm_debug_messages_on)
lpi_check_constraints();
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+
+ return 0;
+}
+
+static void acpi_s2idle_wake(void)
+{
+ /*
+ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
+ * not triggered while suspended, so bail out.
+ */
+ if (!acpi_sci_irq_valid() ||
+ irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ return;
+
/*
- * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
- * that the SCI has triggered while suspended, so cancel the wakeup in
- * case it has not been a wakeup event (the GPEs will be checked later).
+ * If there are EC events to process, the wakeup may be a spurious one
+ * coming from the EC.
*/
- if (acpi_sci_irq_valid() &&
- !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
+ if (acpi_ec_dispatch_gpe()) {
+ /*
+ * Cancel the wakeup and process all pending events in case
+ * there are any wakeup ones in there.
+ *
+ * Note that if any non-EC GPEs are active at this point, the
+ * SCI will retrigger after the rearming below, so no events
+ * should be missed by canceling the wakeup here.
+ */
pm_system_cancel_wakeup();
- s2idle_wakeup = true;
/*
- * On some platforms with the LPS0 _DSM device noirq resume
- * takes too much time for EC wakeup events to survive, so look
- * for them now.
+ * The EC driver uses the system workqueue and an additional
+ * special one, so those need to be flushed too.
*/
- acpi_ec_dispatch_gpe();
+ acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
+ acpi_ec_flush_work();
+ acpi_os_wait_events_complete(); /* synchronize Notify handling */
+
+ rearm_wake_irq(acpi_sci_irq);
}
}
-static void acpi_s2idle_sync(void)
+static void acpi_s2idle_restore_early(void)
{
- /*
- * Process all pending events in case there are any wakeup ones.
- *
- * The EC driver uses the system workqueue and an additional special
- * one, so those need to be flushed too.
- */
- acpi_os_wait_events_complete(); /* synchronize SCI IRQ handling */
- acpi_ec_flush_work();
- acpi_os_wait_events_complete(); /* synchronize Notify handling */
- s2idle_wakeup = false;
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
}
static void acpi_s2idle_restore(void)
{
+ s2idle_wakeup = false;
+
acpi_enable_all_runtime_gpes();
acpi_disable_wakeup_devices(ACPI_STATE_S0);
- if (acpi_sci_irq_valid())
- disable_irq_wake(acpi_sci_irq);
-
- if (lps0_device_handle) {
+ if (acpi_sci_irq_valid()) {
acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
-
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ disable_irq_wake(acpi_sci_irq);
}
}
static void acpi_s2idle_end(void)
{
- s2idle_in_progress = false;
acpi_scan_lock_release();
}
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
+ .prepare_late = acpi_s2idle_prepare_late,
.wake = acpi_s2idle_wake,
- .sync = acpi_s2idle_sync,
+ .restore_early = acpi_s2idle_restore_early,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
@@ -1063,7 +1080,6 @@ static void acpi_sleep_suspend_setup(void)
}
#else /* !CONFIG_SUSPEND */
-#define s2idle_in_progress (false)
#define s2idle_wakeup (false)
#define lps0_device_handle (NULL)
static inline void acpi_sleep_suspend_setup(void) {}
@@ -1074,11 +1090,6 @@ bool acpi_s2idle_wakeup(void)
return s2idle_wakeup;
}
-bool acpi_sleep_no_ec_events(void)
-{
- return !s2idle_in_progress || !lps0_device_handle;
-}
-
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 00f12a86ecbd..d831a61e0010 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -225,13 +225,9 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
if (!tz)
return -EINVAL;
- if (!acpi_has_method(tz->device->handle, "_SCP")) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
+ if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
+ "_SCP", mode)))
return -ENODEV;
- } else if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
- "_SCP", mode))) {
- return -ENODEV;
- }
return 0;
}
@@ -463,8 +459,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
}
- if ((flag & ACPI_TRIPS_DEVICES)
- && acpi_has_method(tz->device->handle, "_TZD")) {
+ if (flag & ACPI_TRIPS_DEVICES) {
memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
NULL, &devices);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index ba277cd5c7fa..697a6b12d6b9 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -69,11 +69,11 @@ static const struct always_present_id always_present_ids[] = {
* after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
* *and* _STA has been called at least 3 times since.
*/
- ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
- ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), {
+ ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
}),
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index aa64eece77a6..57d3b2e2d007 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -134,22 +134,13 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
}
#ifdef CONFIG_TEGRA_IOMMU_SMMU
-static int tegra_ahb_match_by_smmu(struct device *dev, const void *data)
-{
- struct tegra_ahb *ahb = dev_get_drvdata(dev);
- const struct device_node *dn = data;
-
- return (ahb->dev->of_node == dn) ? 1 : 0;
-}
-
int tegra_ahb_enable_smmu(struct device_node *dn)
{
struct device *dev;
u32 val;
struct tegra_ahb *ahb;
- dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
- tegra_ahb_match_by_smmu);
+ dev = driver_find_device_by_of_node(&tegra_ahb_driver.driver, dn);
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 38a59a630cd4..c0a491277aca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -122,7 +122,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
-static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -196,30 +196,8 @@ static inline void binder_stats_created(enum binder_stat_types type)
atomic_inc(&binder_stats.obj_created[type]);
}
-struct binder_transaction_log_entry {
- int debug_id;
- int debug_id_done;
- int call_type;
- int from_proc;
- int from_thread;
- int target_handle;
- int to_proc;
- int to_thread;
- int to_node;
- int data_size;
- int offsets_size;
- int return_error_line;
- uint32_t return_error;
- uint32_t return_error_param;
- const char *context_name;
-};
-struct binder_transaction_log {
- atomic_t cur;
- bool full;
- struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -480,6 +458,7 @@ enum binder_deferred_state {
* @inner_lock: can nest under outer_lock and/or node lock
* @outer_lock: no nesting under inner or node lock
* Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry: process-specific binderfs log file
*
* Bookkeeping structure for binder processes
*/
@@ -509,6 +488,7 @@ struct binder_proc {
struct binder_context *context;
spinlock_t inner_lock;
spinlock_t outer_lock;
+ struct dentry *binderfs_entry;
};
enum {
@@ -2988,7 +2968,7 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
- if (target_node && target_proc == proc) {
+ if (target_node && target_proc->pid == proc->pid) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3239,7 +3219,8 @@ static void binder_transaction(struct binder_proc *proc,
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
- sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
+ sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
+ ALIGN(secctx_sz, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -5229,6 +5210,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
+ struct binderfs_info *info;
+ struct dentry *binder_binderfs_dir_entry_proc = NULL;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
@@ -5243,11 +5226,14 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
/* binderfs stashes devices in i_private */
- if (is_binderfs_device(nodp))
+ if (is_binderfs_device(nodp)) {
binder_dev = nodp->i_private;
- else
+ info = nodp->i_sb->s_fs_info;
+ binder_binderfs_dir_entry_proc = info->proc_log_dir;
+ } else {
binder_dev = container_of(filp->private_data,
struct binder_device, miscdev);
+ }
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
@@ -5278,6 +5264,35 @@ static int binder_open(struct inode *nodp, struct file *filp)
&proc_fops);
}
+ if (binder_binderfs_dir_entry_proc) {
+ char strbuf[11];
+ struct dentry *binderfs_entry;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * Similar to debugfs, the process specific log file is shared
+ * between contexts. If the file has already been created for a
+ * process, the following binderfs_create_file() call will
+ * fail with error code EEXIST if another context of the same
+ * process invoked binder_open(). This is ok since, as with
+ * debugfs, the log file will contain information on all
+ * contexts of a given PID.
+ */
+ binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
+ strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
+ if (!IS_ERR(binderfs_entry)) {
+ proc->binderfs_entry = binderfs_entry;
+ } else {
+ int error;
+
+ error = PTR_ERR(binderfs_entry);
+ if (error != -EEXIST) {
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
+ }
+ }
+ }
+
return 0;
}
@@ -5317,6 +5332,12 @@ static int binder_release(struct inode *nodp, struct file *filp)
struct binder_proc *proc = filp->private_data;
debugfs_remove(proc->debugfs_entry);
+
+ if (proc->binderfs_entry) {
+ binderfs_remove_file(proc->binderfs_entry);
+ proc->binderfs_entry = NULL;
+ }
+
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
return 0;
@@ -5906,7 +5927,7 @@ static void print_binder_proc_stats(struct seq_file *m,
}
-static int state_show(struct seq_file *m, void *unused)
+int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -5945,7 +5966,7 @@ static int state_show(struct seq_file *m, void *unused)
return 0;
}
-static int stats_show(struct seq_file *m, void *unused)
+int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -5961,7 +5982,7 @@ static int stats_show(struct seq_file *m, void *unused)
return 0;
}
-static int transactions_show(struct seq_file *m, void *unused)
+int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -6017,7 +6038,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
"\n" : " (incomplete)\n");
}
-static int transaction_log_show(struct seq_file *m, void *unused)
+int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
unsigned int log_cur = atomic_read(&log->cur);
@@ -6049,11 +6070,6 @@ const struct file_operations binder_fops = {
.release = binder_release,
};
-DEFINE_SHOW_ATTRIBUTE(state);
-DEFINE_SHOW_ATTRIBUTE(stats);
-DEFINE_SHOW_ATTRIBUTE(transactions);
-DEFINE_SHOW_ATTRIBUTE(transaction_log);
-
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6107,30 +6123,31 @@ static int __init binder_init(void)
0444,
binder_debugfs_dir_entry_root,
NULL,
- &state_fops);
+ &binder_state_fops);
debugfs_create_file("stats",
0444,
binder_debugfs_dir_entry_root,
NULL,
- &stats_fops);
+ &binder_stats_fops);
debugfs_create_file("transactions",
0444,
binder_debugfs_dir_entry_root,
NULL,
- &transactions_fops);
+ &binder_transactions_fops);
debugfs_create_file("transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
- &transaction_log_fops);
+ &binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
- &transaction_log_fops);
+ &binder_transaction_log_fops);
}
- if (strcmp(binder_devices_param, "") != 0) {
+ if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
+ strcmp(binder_devices_param, "") != 0) {
/*
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 045b3e42d98b..bd47f7f72075 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -35,15 +35,63 @@ struct binder_device {
struct inode *binderfs_inode;
};
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
extern const struct file_operations binder_fops;
+extern char *binder_devices_param;
+
#ifdef CONFIG_ANDROID_BINDERFS
extern bool is_binderfs_device(const struct inode *inode);
+extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
+ const struct file_operations *fops,
+ void *data);
+extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
return false;
}
+static inline struct dentry *binderfs_create_file(struct dentry *dir,
+ const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ return NULL;
+}
+static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
@@ -55,4 +103,42 @@ static inline int __init init_binderfs(void)
}
#endif
+int binder_stats_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_stats);
+
+int binder_state_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_state);
+
+int binder_transactions_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transactions);
+
+int binder_transaction_log_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int debug_id_done;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
+ const char *context_name;
+};
+
+struct binder_transaction_log {
+ atomic_t cur;
+ bool full;
+ struct binder_transaction_log_entry entry[32];
+};
+
+extern struct binder_transaction_log binder_transaction_log;
+extern struct binder_transaction_log binder_transaction_log_failed;
#endif /* _LINUX_BINDER_INTERNAL_H */
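Moving the show functions and their DEFINE_SHOW_ATTRIBUTE() invocations into the shared header is what lets binderfs reuse binder_stats_fops and friends: the macro wraps a single seq_file show callback into a read-only struct file_operations named <name>_fops. A minimal sketch of the pattern (example is a hypothetical name):

/* Sketch: DEFINE_SHOW_ATTRIBUTE(example) generates example_open() and
 * example_fops around this one callback. */
static int example_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "hello from a read-only seq_file\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);
/* usage: debugfs_create_file("example", 0444, parent, NULL, &example_fops); */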
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e773f45d19d9..e2580e5316a2 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -48,45 +48,23 @@ static dev_t binderfs_dev;
static DEFINE_MUTEX(binderfs_minors_mutex);
static DEFINE_IDA(binderfs_minors);
-/**
- * binderfs_mount_opts - mount options for binderfs
- * @max: maximum number of allocatable binderfs binder devices
- */
-struct binderfs_mount_opts {
- int max;
-};
-
enum {
Opt_max,
+ Opt_stats_mode,
Opt_err
};
+enum binderfs_stats_mode {
+ STATS_NONE,
+ STATS_GLOBAL,
+};
+
static const match_table_t tokens = {
{ Opt_max, "max=%d" },
+ { Opt_stats_mode, "stats=%s" },
{ Opt_err, NULL }
};
-/**
- * binderfs_info - information about a binderfs mount
- * @ipc_ns: The ipc namespace the binderfs mount belongs to.
- * @control_dentry: This records the dentry of this binderfs mount
- * binder-control device.
- * @root_uid: uid that needs to be used when a new binder device is
- * created.
- * @root_gid: gid that needs to be used when a new binder device is
- * created.
- * @mount_opts: The mount options in use.
- * @device_count: The current number of allocated binder devices.
- */
-struct binderfs_info {
- struct ipc_namespace *ipc_ns;
- struct dentry *control_dentry;
- kuid_t root_uid;
- kgid_t root_gid;
- struct binderfs_mount_opts mount_opts;
- int device_count;
-};
-
static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
@@ -186,8 +164,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
req->major = MAJOR(binderfs_dev);
req->minor = minor;
- ret = copy_to_user(userp, req, sizeof(*req));
- if (ret) {
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
ret = -EFAULT;
goto err;
}
@@ -272,7 +249,7 @@ static void binderfs_evict_inode(struct inode *inode)
clear_inode(inode);
- if (!device)
+ if (!S_ISCHR(inode->i_mode) || !device)
return;
mutex_lock(&binderfs_minors_mutex);
@@ -291,8 +268,9 @@ static void binderfs_evict_inode(struct inode *inode)
static int binderfs_parse_mount_opts(char *data,
struct binderfs_mount_opts *opts)
{
- char *p;
+ char *p, *stats;
opts->max = BINDERFS_MAX_MINOR;
+ opts->stats_mode = STATS_NONE;
while ((p = strsep(&data, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
@@ -312,6 +290,22 @@ static int binderfs_parse_mount_opts(char *data,
opts->max = max_devices;
break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ stats = match_strdup(&args[0]);
+ if (!stats)
+ return -ENOMEM;
+
+ if (strcmp(stats, "global") != 0) {
+ kfree(stats);
+ return -EINVAL;
+ }
+
+ opts->stats_mode = STATS_GLOBAL;
+ kfree(stats);
+ break;
default:
pr_err("Invalid mount options\n");
return -EINVAL;
@@ -323,8 +317,21 @@ static int binderfs_parse_mount_opts(char *data,
static int binderfs_remount(struct super_block *sb, int *flags, char *data)
{
+ int prev_stats_mode, ret;
struct binderfs_info *info = sb->s_fs_info;
- return binderfs_parse_mount_opts(data, &info->mount_opts);
+
+ prev_stats_mode = info->mount_opts.stats_mode;
+ ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+ if (ret)
+ return ret;
+
+ if (prev_stats_mode != info->mount_opts.stats_mode) {
+ pr_err("Binderfs stats mode cannot be changed during a remount\n");
+ info->mount_opts.stats_mode = prev_stats_mode;
+ return -EINVAL;
+ }
+
+ return 0;
}
static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
@@ -334,6 +341,8 @@ static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
info = root->d_sb->s_fs_info;
if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
seq_printf(seq, ",max=%d", info->mount_opts.max);
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ seq_printf(seq, ",stats=global");
return 0;
}
@@ -462,11 +471,192 @@ static const struct inode_operations binderfs_dir_inode_operations = {
.unlink = binderfs_unlink,
};
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "stats",
+ &binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "state",
+ &binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
+ &binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "failed_transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log_failed);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct binderfs_info *info;
struct inode *inode = NULL;
+ struct binderfs_device device_info = { 0 };
+ const char *name;
+ size_t len;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -521,7 +711,25 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root)
return -ENOMEM;
- return binderfs_binder_ctl_create(sb);
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ return init_binder_logs(sb);
+
+ return 0;
}
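
For illustration only (this sketch is not part of the patch): the device-list handling added to binderfs_fill_super() and init_binderfs() is plain strcspn()-based tokenization of the comma-separated binder_devices_param string. Below is a minimal userspace sketch of the same loop; MAX_NAME stands in for BINDERFS_MAX_NAME, the kernel's strscpy() is replaced by a bounded memcpy(), and the input string is only an example.

#include <stdio.h>
#include <string.h>

#define MAX_NAME 255                    /* stands in for BINDERFS_MAX_NAME */

int main(void)
{
        const char *name = "binder,hwbinder,vndbinder";  /* example input */
        char buf[MAX_NAME + 1];
        size_t len;

        for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
                if (len > MAX_NAME)
                        return 1;       /* mirrors the -E2BIG check */
                memcpy(buf, name, len); /* bounded copy of one token */
                buf[len] = '\0';
                printf("device: %s\n", buf);
                name += len;
                if (*name == ',')
                        name++;
        }
        return 0;
}
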
static struct dentry *binderfs_mount(struct file_system_type *fs_type,
@@ -553,6 +761,18 @@ static struct file_system_type binder_fs_type = {
int __init init_binderfs(void)
{
int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
/* Allocate new major number for binderfs. */
ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 85357f27a66b..753985c01517 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -160,37 +160,6 @@ static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
-static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
-{
- int rc;
-
- if (using_dac &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
- return 0;
-}
-
static void acard_ahci_pci_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
@@ -471,9 +440,12 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
}
/* initialize adapter */
- rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
- if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK((hpriv->cap & HOST_CAP_64) ? 64 : 32));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
+ }
rc = ahci_reset_controller(host);
if (rc)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f7652baa6337..dd92faf197d5 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -65,6 +65,12 @@ enum board_ids {
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
+ /*
+ * board IDs for Intel chipsets that support more than 6 ports
+ * *and* end up needing the PCS quirk.
+ */
+ board_ahci_pcs7,
+
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
@@ -220,6 +226,12 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
+ [board_ahci_pcs7] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
};
static const struct pci_device_id ahci_pci_tbl[] = {
@@ -264,26 +276,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -623,30 +635,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
-static int ahci_pci_reset_controller(struct ata_host *host)
-{
- struct pci_dev *pdev = to_pci_dev(host->dev);
- int rc;
-
- rc = ahci_reset_controller(host);
- if (rc)
- return rc;
-
- if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
- struct ahci_host_priv *hpriv = host->private_data;
- u16 tmp16;
-
- /* configure PCS */
- pci_read_config_word(pdev, 0x92, &tmp16);
- if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
- tmp16 |= hpriv->port_map;
- pci_write_config_word(pdev, 0x92, tmp16);
- }
- }
-
- return 0;
-}
-
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -849,7 +837,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ahci_pci_reset_controller(host);
+ rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -884,7 +872,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_pci_reset_controller(host);
+ rc = ahci_reset_controller(host);
if (rc)
return rc;
@@ -901,40 +889,23 @@ static int ahci_pci_device_resume(struct device *dev)
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
+ const int dma_bits = using_dac ? 64 : 32;
int rc;
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
+ *
+ * XXX: manipulating the DMA mask from platform code is completely
+ * bogus, platform code should use dev->bus_dma_mask instead.
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;
- if (using_dac &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
- return 0;
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
+ if (rc)
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return rc;
}
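
As an aside, not taken from the patch: the recurring cleanup across the ATA drivers in this series replaces the old dma_set_mask() + dma_set_coherent_mask() pair with a single dma_set_mask_and_coherent() call. A minimal sketch of the resulting pattern, with a placeholder function name and the 64-/32-bit decision left to the caller:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: set the streaming and coherent DMA masks in one call. */
static int example_configure_dma(struct pci_dev *pdev, bool supports_64bit)
{
        int rc;

        rc = dma_set_mask_and_coherent(&pdev->dev,
                                       DMA_BIT_MASK(supports_64bit ? 64 : 32));
        if (rc)
                dev_err(&pdev->dev, "DMA enable failed\n");
        return rc;
}
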
static void ahci_pci_print_info(struct ata_host *host)
@@ -1619,6 +1590,34 @@ update_policy:
ap->target_lpm_policy = policy;
}
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+ const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
+ u16 tmp16;
+
+ /*
+ * Only apply the 6-port PCS quirk for known legacy platforms.
+ */
+ if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+ if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+ return;
+
+ /*
+ * port_map is determined from the PORTS_IMPL PCI register, which is
+ * implemented as a write or write-once register. If the register
+ * isn't programmed, ahci automatically generates it from the number
+ * of ports, which is good enough for PCS programming. It is
+ * otherwise expected that platform firmware enables the ports
+ * before the OS boots.
+ */
+ pci_read_config_word(pdev, PCS_6, &tmp16);
+ if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+ tmp16 |= hpriv->port_map;
+ pci_write_config_word(pdev, PCS_6, tmp16);
+ }
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1731,6 +1730,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
+ /*
+ * If platform firmware failed to enable ports, try to enable
+ * them here.
+ */
+ ahci_intel_pcs_quirk(pdev, hpriv);
+
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1840,7 +1845,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_pci_reset_controller(host);
+ rc = ahci_reset_controller(host);
if (rc)
return rc;
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 0570629d719d..3dbf398c92ea 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -247,6 +247,8 @@ enum {
ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
ICH_MAP = 0x90, /* ICH MAP register */
+ PCS_6 = 0x92, /* 6 port PCS */
+ PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
EM_MAX_SLOTS = 8,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index e4c45d3cca79..bff369d9a1a7 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -175,7 +175,6 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
static bool ahci_em_messages __read_mostly = true;
-EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 72312ad2e142..9e9583a6bba9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
hpriv->phys[port] = NULL;
rc = 0;
break;
+ case -EPROBE_DEFER:
+ /* Do not complain yet */
+ break;
default:
dev_err(dev,
@@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->mmio = devm_ioremap_resource(dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(hpriv->mmio)) {
- dev_err(dev, "no mmio space\n");
rc = PTR_ERR(hpriv->mmio);
goto err_out;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 391ac0503dc0..76d0f9de767b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1786,6 +1786,21 @@ nothing_to_do:
return 1;
}
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+ struct request *rq = scmd->request;
+ u32 req_blocks;
+
+ if (!blk_rq_is_passthrough(rq))
+ return true;
+
+ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+ if (n_blocks > req_blocks)
+ return false;
+
+ return true;
+}
+
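
A quick worked example, not from the patch: ata_check_nblocks() rejects passthrough commands whose CDB requests more sectors than the request actually provides buffer space for. A userspace sketch of the same arithmetic with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* The CDB block count must not exceed the bytes backing the request. */
static bool check_nblocks(unsigned int req_bytes, unsigned int sector_size,
                          unsigned int n_blocks)
{
        return n_blocks <= req_bytes / sector_size;
}

int main(void)
{
        /* 2048-byte buffer, 512-byte sectors, CDB asks for 8 sectors */
        printf("%s\n", check_nblocks(2048, 512, 8) ? "ok" : "reject");
        return 0;
}
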
/**
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
* @qc: Storage for translated ATA taskfile
@@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_10_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_6:
case WRITE_6:
@@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
*/
if (!n_block)
n_block = 256;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_16:
case WRITE_16:
@@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_16_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
default:
DPRINTK("no-byte command\n");
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 10aa27882142..4ed682da52ae 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int offset;
unsigned char *buf;
+ if (!qc->cursg) {
+ qc->curbytes = qc->nbytes;
+ return;
+ }
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
@@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
+ if (!qc->cursg)
+ ap->hsm_task_state = HSM_ST_LAST;
qc->cursg_ofs = 0;
}
}
@@ -3147,15 +3153,9 @@ void ata_pci_bmdma_init(struct ata_host *host)
* ->sff_irq_clear method. Try to initialize bmdma_addr
* regardless of dma masks.
*/
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
ata_bmdma_nodma(host, "failed to set dma mask");
- if (!rc) {
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- ata_bmdma_nodma(host,
- "failed to set consistent dma mask");
- }
/* request and iomap DMA region */
rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2dd9af..eefda51f97d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc;
struct ata_taskfile tf;
- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 2b9ed4ddef8d..cfd0cf2cbca6 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -463,12 +463,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
atp867x_fixup(host);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
- return rc;
+ return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
static int atp867x_init_one(struct pci_dev *pdev,
diff --git a/drivers/ata/pata_buddha.c b/drivers/ata/pata_buddha.c
index 11a8044ff633..27d4c417fc60 100644
--- a/drivers/ata/pata_buddha.c
+++ b/drivers/ata/pata_buddha.c
@@ -18,7 +18,9 @@
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/mm.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/types.h>
#include <linux/zorro.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -29,7 +31,7 @@
#include <asm/setup.h>
#define DRV_NAME "pata_buddha"
-#define DRV_VERSION "0.1.0"
+#define DRV_VERSION "0.1.1"
#define BUDDHA_BASE1 0x800
#define BUDDHA_BASE2 0xa00
@@ -47,11 +49,11 @@ enum {
BOARD_XSURF
};
-static unsigned int buddha_bases[3] __initdata = {
+static unsigned int buddha_bases[3] = {
BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3
};
-static unsigned int xsurf_bases[2] __initdata = {
+static unsigned int xsurf_bases[2] = {
XSURF_BASE1, XSURF_BASE2
};
@@ -145,111 +147,151 @@ static struct ata_port_operations pata_xsurf_ops = {
.set_mode = pata_buddha_set_mode,
};
-static int __init pata_buddha_init_one(void)
+static int pata_buddha_probe(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
- struct zorro_dev *z = NULL;
+ static const char * const board_name[] = {
+ "Buddha", "Catweasel", "X-Surf"
+ };
+ struct ata_host *host;
+ void __iomem *buddha_board;
+ unsigned long board;
+ unsigned int type = ent->driver_data;
+ unsigned int nr_ports = (type == BOARD_CATWEASEL) ? 3 : 2;
+ void *old_drvdata;
+ int i;
+
+ dev_info(&z->dev, "%s IDE controller\n", board_name[type]);
+
+ board = z->resource.start;
+
+ if (type != BOARD_XSURF) {
+ if (!devm_request_mem_region(&z->dev,
+ board + BUDDHA_BASE1,
+ 0x800, DRV_NAME))
+ return -ENXIO;
+ } else {
+ if (!devm_request_mem_region(&z->dev,
+ board + XSURF_BASE1,
+ 0x1000, DRV_NAME))
+ return -ENXIO;
+ if (!devm_request_mem_region(&z->dev,
+ board + XSURF_BASE2,
+ 0x1000, DRV_NAME)) {
+ }
+ }
+
+ /* Workaround for X-Surf: Save drvdata in case zorro8390 has set it */
+ if (type == BOARD_XSURF)
+ old_drvdata = dev_get_drvdata(&z->dev);
+
+ /* allocate host */
+ host = ata_host_alloc(&z->dev, nr_ports);
+ if (type == BOARD_XSURF)
+ dev_set_drvdata(&z->dev, old_drvdata);
+ if (!host)
+ return -ENXIO;
+
+ buddha_board = ZTWO_VADDR(board);
+
+ /* enable the board IRQ on Buddha/Catweasel */
+ if (type != BOARD_XSURF)
+ z_writeb(0, buddha_board + BUDDHA_IRQ_MR);
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- static const char *board_name[]
- = { "Buddha", "Catweasel", "X-Surf" };
- struct ata_host *host;
- void __iomem *buddha_board;
- unsigned long board;
- unsigned int type, nr_ports = 2;
- int i;
-
- if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
- type = BOARD_BUDDHA;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) {
- type = BOARD_CATWEASEL;
- nr_ports++;
- } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF) {
- type = BOARD_XSURF;
- } else
- continue;
-
- dev_info(&z->dev, "%s IDE controller\n", board_name[type]);
-
- board = z->resource.start;
+ for (i = 0; i < nr_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *base, *irqport;
+ unsigned long ctl = 0;
if (type != BOARD_XSURF) {
- if (!devm_request_mem_region(&z->dev,
- board + BUDDHA_BASE1,
- 0x800, DRV_NAME))
- continue;
+ ap->ops = &pata_buddha_ops;
+ base = buddha_board + buddha_bases[i];
+ ctl = BUDDHA_CONTROL;
+ irqport = buddha_board + BUDDHA_IRQ + i * 0x40;
} else {
- if (!devm_request_mem_region(&z->dev,
- board + XSURF_BASE1,
- 0x1000, DRV_NAME))
- continue;
- if (!devm_request_mem_region(&z->dev,
- board + XSURF_BASE2,
- 0x1000, DRV_NAME))
- continue;
+ ap->ops = &pata_xsurf_ops;
+ base = buddha_board + xsurf_bases[i];
+ /* X-Surf has no CS1* (Control/AltStat) */
+ irqport = buddha_board + XSURF_IRQ;
}
- /* allocate host */
- host = ata_host_alloc(&z->dev, nr_ports);
- if (!host)
- continue;
-
- buddha_board = ZTWO_VADDR(board);
-
- /* enable the board IRQ on Buddha/Catweasel */
- if (type != BOARD_XSURF)
- z_writeb(0, buddha_board + BUDDHA_IRQ_MR);
-
- for (i = 0; i < nr_ports; i++) {
- struct ata_port *ap = host->ports[i];
- void __iomem *base, *irqport;
- unsigned long ctl = 0;
-
- if (type != BOARD_XSURF) {
- ap->ops = &pata_buddha_ops;
- base = buddha_board + buddha_bases[i];
- ctl = BUDDHA_CONTROL;
- irqport = buddha_board + BUDDHA_IRQ + i * 0x40;
- } else {
- ap->ops = &pata_xsurf_ops;
- base = buddha_board + xsurf_bases[i];
- /* X-Surf has no CS1* (Control/AltStat) */
- irqport = buddha_board + XSURF_IRQ;
- }
-
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
-
- ap->ioaddr.data_addr = base;
- ap->ioaddr.error_addr = base + 2 + 1 * 4;
- ap->ioaddr.feature_addr = base + 2 + 1 * 4;
- ap->ioaddr.nsect_addr = base + 2 + 2 * 4;
- ap->ioaddr.lbal_addr = base + 2 + 3 * 4;
- ap->ioaddr.lbam_addr = base + 2 + 4 * 4;
- ap->ioaddr.lbah_addr = base + 2 + 5 * 4;
- ap->ioaddr.device_addr = base + 2 + 6 * 4;
- ap->ioaddr.status_addr = base + 2 + 7 * 4;
- ap->ioaddr.command_addr = base + 2 + 7 * 4;
-
- if (ctl) {
- ap->ioaddr.altstatus_addr = base + ctl;
- ap->ioaddr.ctl_addr = base + ctl;
- }
-
- ap->private_data = (void *)irqport;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", board,
- ctl ? board + buddha_bases[i] + ctl : 0);
+ ap->pio_mask = ATA_PIO4;
+ ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+
+ ap->ioaddr.data_addr = base;
+ ap->ioaddr.error_addr = base + 2 + 1 * 4;
+ ap->ioaddr.feature_addr = base + 2 + 1 * 4;
+ ap->ioaddr.nsect_addr = base + 2 + 2 * 4;
+ ap->ioaddr.lbal_addr = base + 2 + 3 * 4;
+ ap->ioaddr.lbam_addr = base + 2 + 4 * 4;
+ ap->ioaddr.lbah_addr = base + 2 + 5 * 4;
+ ap->ioaddr.device_addr = base + 2 + 6 * 4;
+ ap->ioaddr.status_addr = base + 2 + 7 * 4;
+ ap->ioaddr.command_addr = base + 2 + 7 * 4;
+
+ if (ctl) {
+ ap->ioaddr.altstatus_addr = base + ctl;
+ ap->ioaddr.ctl_addr = base + ctl;
}
- ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt,
- IRQF_SHARED, &pata_buddha_sht);
+ ap->private_data = (void *)irqport;
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", board,
+ ctl ? board + buddha_bases[i] + ctl : 0);
}
+ ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt,
+ IRQF_SHARED, &pata_buddha_sht);
+
return 0;
}
-module_init(pata_buddha_init_one);
+static void pata_buddha_remove(struct zorro_dev *z)
+{
+ struct ata_host *host = dev_get_drvdata(&z->dev);
+
+ ata_host_detach(host);
+}
+
+static const struct zorro_device_id pata_buddha_zorro_tbl[] = {
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA, BOARD_BUDDHA},
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL, BOARD_CATWEASEL},
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, pata_buddha_zorro_tbl);
+
+static struct zorro_driver pata_buddha_driver = {
+ .name = "pata_buddha",
+ .id_table = pata_buddha_zorro_tbl,
+ .probe = pata_buddha_probe,
+ .remove = pata_buddha_remove,
+};
+
+/*
+ * We cannot have a modalias for X-Surf boards, as it competes with the
+ * zorro8390 network driver. As a stopgap measure until we have proper
+ * MFD support for this board, we manually attach to it late after Zorro
+ * has enumerated its boards.
+ */
+static int __init pata_buddha_late_init(void)
+{
+ struct zorro_dev *z = NULL;
+
+ /* Auto-bind to regular boards */
+ zorro_register_driver(&pata_buddha_driver);
+
+ /* Manually bind to all X-Surf boards */
+ while ((z = zorro_find_device(ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, z))) {
+ static struct zorro_device_id xsurf_ent = {
+ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, BOARD_XSURF
+ };
+
+ pata_buddha_probe(z, &xsurf_ent);
+ }
+
+ return 0;
+}
+late_initcall(pata_buddha_late_init);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for Buddha/Catweasel/X-Surf PATA");
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 099a5c68a4c9..9052148b306d 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -155,14 +155,10 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
return -ENODEV;
}
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
- return -ENODEV;
- }
/* Map IO ports and initialize host accordingly */
iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b2fc023783b1..83974d5eb387 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -221,10 +221,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 607db1f05f9a..f9255d6fd194 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -123,10 +123,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return rc;
host->iomap = pcim_iomap_table(dev);
- rc = dma_set_mask(&dev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&dev->dev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(dev);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b656e1536855..de834fbb6dfe 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -722,11 +722,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
return rc;
host->iomap = pcim_iomap_table(pdev);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 7c37f2ff09e4..deae466395de 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
static int rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
- struct rb532_cf_info *info = ah->private_data;
ata_host_detach(ah);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c14071be4f55..7ab9aea3b630 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -374,10 +374,7 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->iomap = pcim_iomap_table(pdev);
/* Setup DMA masks */
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(pdev);
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index c5bbb07aa7d9..cb490531b62e 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -572,23 +572,6 @@ static void adma_host_init(struct ata_host *host, unsigned int chip_id)
adma_reset_engine(host->ports[port_no]);
}
-static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
-{
- int rc;
-
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
- return rc;
- }
- return 0;
-}
-
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -619,9 +602,11 @@ static int adma_ata_init_one(struct pci_dev *pdev,
host->iomap = pcim_iomap_table(pdev);
mmio_base = host->iomap[ADMA_MMIO_BAR];
- rc = adma_set_dma_masks(pdev, mmio_base);
- if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
+ }
for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
struct ata_port *ap = host->ports[port_no];
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 790968497dfe..7f99e23bff88 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -862,18 +862,12 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set dma_mask. This device doesn't support 64-bit addressing. */
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
- return rc;
- }
-
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
dev_err(&pdev->dev, "failed to initialize controller\n");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index da585d2bded6..ad385a113391 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4314,38 +4314,6 @@ static struct pci_driver mv_pci_driver = {
};
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
- int rc;
-
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
-
- return rc;
-}
-
/**
* mv_print_info - Dump key info to kernel log for perusal.
* @host: ATA host to print info about
@@ -4430,9 +4398,11 @@ static int mv_pci_init_one(struct pci_dev *pdev,
host->iomap = pcim_iomap_table(pdev);
hpriv->base = host->iomap[MV_PRIMARY_BAR];
- rc = pci_go_64(pdev);
- if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
+ }
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index b44b4b64354c..56946012d113 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1122,14 +1122,10 @@ static int nv_adma_port_start(struct ata_port *ap)
/*
* Now that the legacy PRD and padding buffer are allocated we can
- * try to raise the DMA mask to allocate the CPB/APRD table.
+ * raise the DMA mask to allocate the CPB/APRD table.
*/
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
- return rc;
- }
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
pp->adma_dma_mask = *dev->dma_mask;
mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f4dfec3b6e42..5fd464765ddc 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -1230,10 +1230,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
/* initialize adapter */
pdc_host_init(host);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 865e5c58bd94..c53c5a47204d 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -537,33 +537,13 @@ static void qs_host_init(struct ata_host *host, unsigned int chip_id)
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
u32 bus_info = readl(mmio_base + QS_HID_HPHY);
- int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
-
- if (have_64bit_bus &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
- return 0;
+ int dma_bits = (bus_info & QS_HPHY_64BIT) ? 64 : 32;
+ int rc;
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
+ if (rc)
+ dev_err(&pdev->dev, "%d-bit DMA enable failed\n", dma_bits);
+ return rc;
}
static int qs_ata_init_one(struct pci_dev *pdev,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 25b6a52be5ab..e6fbae2f645a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -757,10 +757,7 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->iomap = pcim_iomap_table(pdev);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 98aad8206921..7bef82de53ca 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1301,28 +1301,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host->iomap = iomap;
/* configure and activate the device */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "32-bit DMA enable failed\n");
- return rc;
- }
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return rc;
}
/* Set max read request size to 4096. This slightly increases
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index b903d55c6c20..f8552559db7f 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -471,10 +471,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
ata_port_pbar_desc(ap, 5, offset, "port");
}
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ae8e374d0a77..2277ba0c9c7f 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1470,10 +1470,7 @@ static int pdc_sata_init_one(struct pci_dev *pdev,
}
/* configure and activate */
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
+ rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index fcb9245b184f..c7891cc84ea0 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -505,14 +505,7 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
for (i = 0; i < host->n_ports; i++)
vt6421_init_addrs(host->ports[i]);
- rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- return 0;
+ return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index fd401e9164ef..8fa952cb9f7f 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -371,10 +371,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
/*
* Use 32 bit DMA mask, because 64 bit address support is poor.
*/
- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
- return rc;
- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
make the card work).
config ATM_NICSTAR_USE_IDT77105
- bool "Use IDT77015 PHY driver (25Mbps)"
+ bool "Use IDT77105 PHY driver (25Mbps)"
depends on ATM_NICSTAR
help
Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 302cf0ba1600..8c7a996d1f16 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -63,6 +63,7 @@
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
}
if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
board = ia_cmds.status;
- if ((board < 0) || (board > iadev_count))
- board = 0;
+
+ if ((board < 0) || (board > iadev_count))
+ board = 0;
+ board = array_index_nospec(board, iadev_count + 1);
+
iadev = ia_dev[board];
switch (ia_cmds.cmd) {
case MEMDUMP:
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..b8313a04422d 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -97,7 +97,7 @@ config CFAG12864B
say Y. You also need the ks0108 LCD Controller driver.
For help about how to wire your LCD to the parallel port,
- check Documentation/auxdisplay/cfag12864b
+ check Documentation/admin-guide/auxdisplay/cfag12864b.rst
Depends on the x86 arch and the framebuffer support.
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
choice
prompt "Backlight initial state"
default CHARLCD_BL_FLASH
+ ---help---
+ Select the initial backlight state on boot or module load.
+
+ Previously, there was no option for this: the backlight flashed
+ briefly on init. Now you can also turn it off/on.
config CHARLCD_BL_OFF
bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
#include <generated/utsrelease.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define LCD_MINOR 156
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
new file mode 100644
index 000000000000..00911ad0f3de
--- /dev/null
+++ b/drivers/auxdisplay/charlcd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
+struct charlcd {
+ const struct charlcd_ops *ops;
+ const unsigned char *char_conv; /* Optional */
+
+ int ifwidth; /* 4-bit or 8-bit (default) */
+ int height;
+ int width;
+ int bwidth; /* Default set by charlcd_alloc() */
+ int hwidth; /* Default set by charlcd_alloc() */
+
+ void *drvdata; /* Set by charlcd_alloc() */
+};
+
+struct charlcd_ops {
+ /* Required */
+ void (*write_cmd)(struct charlcd *lcd, int cmd);
+ void (*write_data)(struct charlcd *lcd, int data);
+
+ /* Optional */
+ void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
+ void (*clear_fast)(struct charlcd *lcd);
+ void (*backlight)(struct charlcd *lcd, int on);
+};
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
+
+int charlcd_register(struct charlcd *lcd);
+int charlcd_unregister(struct charlcd *lcd);
+
+void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
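
To show how the now-private header is consumed (sketch, not part of the patch), a user of the API such as hd44780.c or panel.c supplies the two required charlcd_ops hooks and registers an allocated struct charlcd. my_write_cmd()/my_write_data() and the panel geometry below are placeholders:

#include <linux/errno.h>
#include "charlcd.h"

/* Required hooks: push one command/data byte to the controller. */
static void my_write_cmd(struct charlcd *lcd, int cmd)
{
        /* write @cmd to the (hypothetical) instruction register */
}

static void my_write_data(struct charlcd *lcd, int data)
{
        /* write @data to the (hypothetical) data register */
}

static const struct charlcd_ops my_ops = {
        .write_cmd  = my_write_cmd,
        .write_data = my_write_data,
        /* write_cmd_raw4/clear_fast/backlight are optional */
};

static int my_lcd_setup(void)
{
        struct charlcd *lcd;
        int ret;

        lcd = charlcd_alloc(0);         /* no private driver data */
        if (!lcd)
                return -ENOMEM;

        lcd->ops = &my_ops;
        lcd->ifwidth = 8;               /* 8-bit bus */
        lcd->width = 16;                /* e.g. a 16x2 panel */
        lcd->height = 2;

        ret = charlcd_register(lcd);
        if (ret)
                charlcd_free(lcd);
        return ret;
}
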
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
#include <linux/property.h>
#include <linux/slab.h>
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
enum hd44780_pin {
/* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
struct ht16k33_fbdev fbdev;
};
-static struct fb_fix_screeninfo ht16k33_fb_fix = {
+static const struct fb_fix_screeninfo ht16k33_fb_fix = {
.id = DRIVER_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo ht16k33_fb_var = {
+static const struct fb_var_screeninfo ht16k33_fb_var = {
.xres = HT16K33_MATRIX_LED_MAX_ROWS,
.yres = HT16K33_MATRIX_LED_MAX_COLS,
.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <misc/charlcd.h>
+#include "charlcd.h"
#define KEYPAD_MINOR 185
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
return;
err_lcd_unreg:
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index dc404492381d..28b92e3cc570 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY
help
Enable support for architectures' common topology code: e.g., parsing
CPU capacity information from DT, usage of such information for
- appropriate scaling, sysfs interface for changing capacity values at
+ appropriate scaling, sysfs interface for reading capacity values at
runtime.
endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 63c1e76739f1..1eb81f113786 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -15,6 +15,11 @@
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
@@ -174,7 +179,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
if (!raw_capacity)
return 0;
- if (val != CPUFREQ_NOTIFY)
+ if (val != CPUFREQ_CREATE_POLICY)
return 0;
pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
@@ -241,3 +246,296 @@ static void parsing_done_workfn(struct work_struct *work)
#else
core_initcall(free_raw_capacity);
#endif
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (cpu >= 0)
+ topology_parse_cpu_capacity(cpu_node, cpu);
+ else
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+
+ of_node_put(cpu_node);
+ return cpu;
+}
+
+static int __init parse_core(struct device_node *core, int package_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%pOF: Can't get CPU for thread\n",
+ t);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int package_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, core_id++);
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%pOF: empty cluster\n", cluster);
+
+ if (leaf)
+ package_id++;
+
+ return 0;
+}
+
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided, cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ topology_normalize_cpu_scale();
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu)
+ if (cpu_topology[cpu].package_id == -1)
+ ret = -EINVAL;
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+#endif
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Find the smaller of NUMA, core or LLC siblings */
+ if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+ /* not NUMA in package, let's use the package siblings */
+ core_mask = &cpu_topology[cpu].core_sibling;
+ }
+ if (cpu_topology[cpu].llc_id != -1) {
+ if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+ core_mask = &cpu_topology[cpu].llc_sibling;
+ }
+
+ return core_mask;
+}
+
+void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ /* update core and thread sibling masks */
+ for_each_online_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
+ }
+
+ if (cpuid_topo->package_id != cpu_topo->package_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->package_id = -1;
+ cpu_topo->llc_id = -1;
+
+ clear_cpu_topology(cpu);
+ }
+}
+
+void remove_cpu_topology(unsigned int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
+
+__weak int __init parse_acpi_topology(void)
+{
+ return 0;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_acpi_topology())
+ reset_cpu_topology();
+ else if (of_have_populated_dt() && parse_dt_topology())
+ reset_cpu_topology();
+}
+#endif
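
Sketch of the intended call order, not part of the patch: the topology helpers consolidated here are meant to be driven from architecture SMP code. The wrapper names below are made up; the helpers themselves are the ones defined above and declared in <linux/arch_topology.h>.

#include <linux/arch_topology.h>
#include <linux/init.h>

/* Boot: parse ACPI or DT once and populate cpu_topology[]. */
static void __init example_boot_topology(void)
{
        init_cpu_topology();
}

/* CPU hotplug (online): rebuild core/thread/LLC sibling masks. */
static void example_cpu_online(unsigned int cpu)
{
        update_siblings_masks(cpu);
}

/* CPU hotplug (offline): drop @cpu from all sibling masks. */
static void example_cpu_offline(unsigned int cpu)
{
        remove_cpu_topology(cpu);
}
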
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b405436ee28e..0d32544b6f91 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -165,6 +165,7 @@ static inline int devtmpfs_init(void) { return 0; }
/* Device links support */
extern int device_links_read_lock(void);
extern void device_links_read_unlock(int idx);
+extern int device_links_read_lock_held(void);
extern int device_links_check_suppliers(struct device *dev);
extern void device_links_driver_bound(struct device *dev);
extern void device_links_driver_cleanup(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index df3cac739813..a1d1e8256324 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -342,30 +342,6 @@ struct device *bus_find_device(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
-static int match_name(struct device *dev, const void *data)
-{
- const char *name = data;
-
- return sysfs_streq(name, dev_name(dev));
-}
-
-/**
- * bus_find_device_by_name - device iterator for locating a particular device of a specific name
- * @bus: bus type
- * @start: Device to begin with
- * @name: name of the device to match
- *
- * This is similar to the bus_find_device() function above, but it handles
- * searching by a name automatically, no need to write another strcmp matching
- * function.
- */
-struct device *bus_find_device_by_name(struct bus_type *bus,
- struct device *start, const char *name)
-{
- return bus_find_device(bus, start, (void *)name, match_name);
-}
-EXPORT_SYMBOL_GPL(bus_find_device_by_name);
-
/**
* subsys_find_device_by_id - find a device with a specific enumeration number
* @subsys: subsystem
diff --git a/drivers/base/core.c b/drivers/base/core.c
index da84a73f2ba6..2db62d98e395 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -68,6 +68,11 @@ void device_links_read_unlock(int idx)
{
srcu_read_unlock(&device_links_srcu, idx);
}
+
+int device_links_read_lock_held(void)
+{
+ return srcu_read_lock_held(&device_links_srcu);
+}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);
@@ -91,6 +96,13 @@ void device_links_read_unlock(int not_used)
{
up_read(&device_links_lock);
}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+int device_links_read_lock_held(void)
+{
+ return lockdep_is_held(&device_links_lock);
+}
+#endif
#endif /* !CONFIG_SRCU */
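
Sketch of the intended use, not part of the patch: the new device_links_read_lock_held() helper exists so RCU list walks over device links can carry a lockdep condition. A hedged example, assuming the four-argument list_for_each_entry_rcu() form and compilation inside drivers/base (the helper is declared in base.h):

#include <linux/device.h>
#include <linux/rculist.h>
#include "base.h"       /* device_links_read_lock_held() */

static void walk_suppliers(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
                                device_links_read_lock_held())
                dev_dbg(dev, "supplier: %s\n", dev_name(link->supplier));
}
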
/**
@@ -124,6 +136,50 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
}
+static void device_link_init_status(struct device_link *link,
+ struct device *consumer,
+ struct device *supplier)
+{
+ switch (supplier->links.status) {
+ case DL_DEV_PROBING:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * A consumer driver can create a link to a supplier
+ * that has not completed its probing yet as long as it
+ * knows that the supplier is already functional (for
+ * example, it has just acquired some resources from the
+ * supplier).
+ */
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+}
+
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
@@ -165,6 +221,13 @@ void device_pm_move_to_tail(struct device *dev)
device_links_read_unlock(idx);
}
+#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
+ DL_FLAG_AUTOREMOVE_SUPPLIER | \
+ DL_FLAG_AUTOPROBE_CONSUMER)
+
+#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
+
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
@@ -179,9 +242,9 @@ void device_pm_move_to_tail(struct device *dev)
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
- * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by
- * the driver core and, in particular, the caller of this function is expected
- * to drop the reference to the link acquired by it directly.
+ * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
+ * expected to release the link returned by it directly with the help of either
+ * device_link_del() or device_link_remove().
*
* If that flag is not set, however, the caller of this function is handing the
* management of the link over to the driver core entirely and its return value
@@ -201,9 +264,16 @@ void device_pm_move_to_tail(struct device *dev)
* be used to request the driver core to automatically probe for a consumer
* driver after successfully binding a driver to the supplier device.
*
- * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
- * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
- * will cause NULL to be returned upfront.
+ * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
+ * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
+ * the same time is invalid and will cause NULL to be returned upfront.
+ * However, if a device link between the given @consumer and @supplier pair
+ * exists already when this function is called for them, the existing link will
+ * be returned regardless of its current type and status (the link's flags may
+ * be modified then). The caller of this function is then expected to treat
+ * the link as though it has just been created, so (in particular) if
+ * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
+ * explicitly when not needed any more (as stated above).
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
@@ -219,11 +289,8 @@ struct device_link *device_link_add(struct device *consumer,
{
struct device_link *link;
- if (!consumer || !supplier ||
- (flags & DL_FLAG_STATELESS &&
- flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
- DL_FLAG_AUTOREMOVE_SUPPLIER |
- DL_FLAG_AUTOPROBE_CONSUMER)) ||
+ if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+ (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -236,6 +303,9 @@ struct device_link *device_link_add(struct device *consumer,
}
}
+ if (!(flags & DL_FLAG_STATELESS))
+ flags |= DL_FLAG_MANAGED;
+
device_links_write_lock();
device_pm_lock();
@@ -262,15 +332,6 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
- /*
- * Don't return a stateless link if the caller wants a stateful
- * one and vice versa.
- */
- if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
- link = NULL;
- goto out;
- }
-
if (flags & DL_FLAG_PM_RUNTIME) {
if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
@@ -281,6 +342,7 @@ struct device_link *device_link_add(struct device *consumer,
}
if (flags & DL_FLAG_STATELESS) {
+ link->flags |= DL_FLAG_STATELESS;
kref_get(&link->kref);
goto out;
}
@@ -299,6 +361,11 @@ struct device_link *device_link_add(struct device *consumer,
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
+ if (!(link->flags & DL_FLAG_MANAGED)) {
+ kref_get(&link->kref);
+ link->flags |= DL_FLAG_MANAGED;
+ device_link_init_status(link, consumer, supplier);
+ }
goto out;
}
@@ -325,48 +392,10 @@ struct device_link *device_link_add(struct device *consumer,
kref_init(&link->kref);
/* Determine the initial link state. */
- if (flags & DL_FLAG_STATELESS) {
+ if (flags & DL_FLAG_STATELESS)
link->status = DL_STATE_NONE;
- } else {
- switch (supplier->links.status) {
- case DL_DEV_PROBING:
- switch (consumer->links.status) {
- case DL_DEV_PROBING:
- /*
- * A consumer driver can create a link to a
- * supplier that has not completed its probing
- * yet as long as it knows that the supplier is
- * already functional (for example, it has just
- * acquired some resources from the supplier).
- */
- link->status = DL_STATE_CONSUMER_PROBE;
- break;
- default:
- link->status = DL_STATE_DORMANT;
- break;
- }
- break;
- case DL_DEV_DRIVER_BOUND:
- switch (consumer->links.status) {
- case DL_DEV_PROBING:
- link->status = DL_STATE_CONSUMER_PROBE;
- break;
- case DL_DEV_DRIVER_BOUND:
- link->status = DL_STATE_ACTIVE;
- break;
- default:
- link->status = DL_STATE_AVAILABLE;
- break;
- }
- break;
- case DL_DEV_UNBINDING:
- link->status = DL_STATE_SUPPLIER_UNBIND;
- break;
- default:
- link->status = DL_STATE_DORMANT;
- break;
- }
- }
+ else
+ device_link_init_status(link, consumer, supplier);
/*
* Some callers expect the link creation during consumer driver probe to
@@ -528,7 +557,7 @@ static void device_links_missing_supplier(struct device *dev)
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
@@ -538,7 +567,7 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -563,7 +592,7 @@ int device_links_check_suppliers(struct device *dev)
*
* Also change the status of @dev's links to suppliers to "active".
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
@@ -572,7 +601,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
@@ -593,7 +622,7 @@ void device_links_driver_bound(struct device *dev)
}
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
@@ -605,6 +634,13 @@ void device_links_driver_bound(struct device *dev)
device_links_write_unlock();
}
+static void device_link_drop_managed(struct device_link *link)
+{
+ link->flags &= ~DL_FLAG_MANAGED;
+ WRITE_ONCE(link->status, DL_STATE_NONE);
+ kref_put(&link->kref, __device_link_del);
+}
+
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
@@ -615,18 +651,18 @@ void device_links_driver_bound(struct device *dev)
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
- __device_link_del(&link->kref);
+ device_link_drop_managed(link);
else if (link->status == DL_STATE_CONSUMER_PROBE ||
link->status == DL_STATE_ACTIVE)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
@@ -643,7 +679,7 @@ static void __device_links_no_driver(struct device *dev)
* %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_no_driver(struct device *dev)
{
@@ -652,7 +688,7 @@ void device_links_no_driver(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
/*
@@ -680,7 +716,7 @@ void device_links_no_driver(struct device *dev)
* invoke %__device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
@@ -689,7 +725,7 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
@@ -702,7 +738,7 @@ void device_links_driver_cleanup(struct device *dev)
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
- __device_link_del(&link->kref);
+ device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
@@ -724,7 +760,7 @@ void device_links_driver_cleanup(struct device *dev)
*
* Return 'false' if there are no probing or active consumers.
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
@@ -734,7 +770,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@@ -764,7 +800,7 @@ bool device_links_busy(struct device *dev)
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
@@ -776,7 +812,7 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (link->flags & DL_FLAG_STATELESS)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
status = link->status;
@@ -1663,6 +1699,9 @@ void device_initialize(struct device *dev)
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
+#ifdef CONFIG_PROVE_LOCKING
+ mutex_init(&dev->lockdep_mutex);
+#endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
@@ -1820,12 +1859,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
+ unsigned int ref;
+
/* see if we live in a "glue" directory */
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
- if (!kobject_has_children(glue_dir))
+ /*
+ * There is a race condition between removing glue directory
+ * and adding a new device under the glue directory.
+ *
+ * CPU1: CPU2:
+ *
+ * device_add()
+ * get_device_parent()
+ * class_dir_create_and_add()
+ * kobject_add_internal()
+ * create_dir() // create glue_dir
+ *
+ * device_add()
+ * get_device_parent()
+ * kobject_get() // get glue_dir
+ *
+ * device_del()
+ * cleanup_glue_dir()
+ * kobject_del(glue_dir)
+ *
+ * kobject_add()
+ * kobject_add_internal()
+ * create_dir() // in glue_dir
+ * sysfs_create_dir_ns()
+ * kernfs_create_dir_ns(sd)
+ *
+ * sysfs_remove_dir() // glue_dir->sd=NULL
+ * sysfs_put() // free glue_dir->sd
+ *
+ * // sd is freed
+ * kernfs_new_node(sd)
+ * kernfs_get(glue_dir)
+ * kernfs_add_one()
+ * kernfs_put()
+ *
+ * Before CPU1 removes the last child device under the glue dir, if CPU2
+ * adds a new device under the glue dir, the glue_dir kobject reference
+ * count will be increased to 2 in kobject_get(k), and CPU2 will have
+ * already called kernfs_create_dir_ns(). Meanwhile, CPU1 calls
+ * sysfs_remove_dir() and sysfs_put(), so glue_dir->sd ends up freed.
+ *
+ * CPU2 will then see a stale "empty" but still potentially used
+ * glue dir around in kernfs_new_node().
+ *
+ * To avoid this, make sure that the kernfs_node for glue_dir is
+ * released on CPU1 only when the refcount of the glue_dir kobj is 1.
+ */
+ ref = kref_read(&glue_dir->kref);
+ if (!kobject_has_children(glue_dir) && !--ref)
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
@@ -2211,6 +2301,24 @@ void put_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(put_device);
+bool kill_device(struct device *dev)
+{
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ lockdep_assert_held(&dev->mutex);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
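
As a hedged illustration of the intended calling convention (not taken from the patch), a bus-specific teardown path would mark the device dead under the device lock before unregistering it, so that a racing asynchronous probe cannot start; the helper name is hypothetical.

/* Hedged sketch, not part of the patch: tearing a device down safely. */
static void example_bus_disable_device(struct device *dev)
{
	bool killed;

	device_lock(dev);
	killed = kill_device(dev);	/* false if it was already dead */
	device_unlock(dev);

	if (killed)
		device_unregister(dev);
}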
+
/**
* device_del - delete device from system.
* @dev: device.
@@ -2230,15 +2338,8 @@ void device_del(struct device *dev)
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
- /*
- * Hold the device lock and set the "dead" flag to guarantee that
- * the update behavior is consistent with the other bitfields near
- * it and that we cannot have an asynchronous probe routine trying
- * to run while we are tearing out the bus/class/sysfs from
- * underneath the device.
- */
device_lock(dev);
- dev->p->dead = true;
+ kill_device(dev);
device_unlock(dev);
/* Notify clients of device removal. This call must come
@@ -2867,13 +2968,6 @@ struct device *device_create_with_groups(struct class *class,
}
EXPORT_SYMBOL_GPL(device_create_with_groups);
-static int __match_devt(struct device *dev, const void *data)
-{
- const dev_t *devt = data;
-
- return dev->devt == *devt;
-}
-
/**
* device_destroy - removes a device that was created with device_create()
* @class: pointer to the struct class that this device was registered with
@@ -2886,7 +2980,7 @@ void device_destroy(struct class *class, dev_t devt)
{
struct device *dev;
- dev = class_find_device(class, NULL, &devt, __match_devt);
+ dev = class_find_device_by_devt(class, devt);
if (dev) {
put_device(dev);
device_unregister(dev);
@@ -3357,8 +3451,38 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+int device_match_name(struct device *dev, const void *name)
+{
+ return sysfs_streq(dev_name(dev), name);
+}
+EXPORT_SYMBOL_GPL(device_match_name);
+
int device_match_of_node(struct device *dev, const void *np)
{
return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
+
+int device_match_fwnode(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+EXPORT_SYMBOL_GPL(device_match_fwnode);
+
+int device_match_devt(struct device *dev, const void *pdevt)
+{
+ return dev->devt == *(dev_t *)pdevt;
+}
+EXPORT_SYMBOL_GPL(device_match_devt);
+
+int device_match_acpi_dev(struct device *dev, const void *adev)
+{
+ return ACPI_COMPANION(dev) == adev;
+}
+EXPORT_SYMBOL(device_match_acpi_dev);
+
+int device_match_any(struct device *dev, const void *unused)
+{
+ return 1;
+}
+EXPORT_SYMBOL_GPL(device_match_any);
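
These helpers are meant to be passed to lookup routines such as bus_find_device() and class_find_device(), which take a const void * cookie in this series. A hedged usage sketch follows; the device name is hypothetical.

/* Hedged sketch, not part of the patch: generic match helpers in a lookup. */
#include <linux/device.h>
#include <linux/platform_device.h>

static void example_lookup(void)
{
	struct device *dev;

	dev = bus_find_device(&platform_bus_type, NULL, "soc-thermal",
			      device_match_name);
	if (dev) {
		/* bus_find_device() returned a referenced device. */
		put_device(dev);
	}
}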
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 994a90747420..d811e60610d3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -554,9 +554,16 @@ re_probe:
goto probe_failed;
}
+ if (device_add_groups(dev, drv->dev_groups)) {
+ dev_err(dev, "device_add_groups() failed\n");
+ goto dev_groups_failed;
+ }
+
if (test_remove) {
test_remove = false;
+ device_remove_groups(dev, drv->dev_groups);
+
if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
@@ -584,6 +591,11 @@ re_probe:
drv->bus->name, __func__, dev_name(dev), drv->name);
goto done;
+dev_groups_failed:
+ if (dev->bus->remove)
+ dev->bus->remove(dev);
+ else if (drv->remove)
+ drv->remove(dev);
probe_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
@@ -1114,6 +1126,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
pm_runtime_put_sync(dev);
+ device_remove_groups(dev, drv->dev_groups);
+
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
index 09f28479b243..14e2178e09f8 100644
--- a/drivers/base/devcon.c
+++ b/drivers/base/devcon.c
@@ -12,9 +12,6 @@
static DEFINE_MUTEX(devcon_lock);
static LIST_HEAD(devcon_list);
-typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
- void *data);
-
static void *
fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
void *data, devcon_match_fn_t match)
@@ -61,6 +58,34 @@ fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
}
/**
+ * fwnode_connection_find_match - Find connection from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @fwnode and another
+ * device node. @match will be used to convert the connection description to
+ * data the caller is expecting to be returned.
+ */
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ void *ret;
+
+ if (!fwnode || !match)
+ return NULL;
+
+ ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
+ if (ret)
+ return ret;
+
+ return fwnode_devcon_match(fwnode, con_id, data, match);
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
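
A hedged sketch of how a caller might use this (not from the patch): the match callback has the devcon_match_fn_t signature and converts a matching connection into whatever handle the caller wants returned. All names below are hypothetical.

/* Hedged sketch, not part of the patch: a devcon_match_fn_t callback. */

/* Hypothetical helper: map a fwnode to a driver-private handle. */
static void *example_lookup_switch(struct fwnode_handle *fwnode)
{
	return NULL;	/* placeholder */
}

static void *example_switch_match(struct device_connection *con, int ep,
				  void *data)
{
	return example_lookup_switch(con->fwnode);
}

static void *example_find_switch(struct fwnode_handle *fwnode)
{
	return fwnode_connection_find_match(fwnode, "orientation-switch",
					    NULL, example_switch_match);
}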
+
+/**
* device_connection_find_match - Find physical connection to a device
* @dev: Device with the connection
* @con_id: Identifier for the connection
@@ -83,15 +108,9 @@ void *device_connection_find_match(struct device *dev, const char *con_id,
if (!match)
return NULL;
- if (fwnode) {
- ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
- if (ret)
- return ret;
-
- ret = fwnode_devcon_match(fwnode, con_id, data, match);
- if (ret)
- return ret;
- }
+ ret = fwnode_connection_find_match(fwnode, con_id, data, match);
+ if (ret)
+ return ret;
mutex_lock(&devcon_lock);
@@ -133,19 +152,13 @@ static struct bus_type *generic_match_buses[] = {
NULL,
};
-static int device_fwnode_match(struct device *dev, const void *fwnode)
-{
- return dev_fwnode(dev) == fwnode;
-}
-
static void *device_connection_fwnode_match(struct device_connection *con)
{
struct bus_type *bus;
struct device *dev;
for (bus = generic_match_buses[0]; bus; bus++) {
- dev = bus_find_device(bus, NULL, (void *)con->fwnode,
- device_fwnode_match);
+ dev = bus_find_device_by_fwnode(bus, con->fwnode);
if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id)))
return dev;
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index f1a3353f3494..e42d0b514384 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -164,16 +164,7 @@ static struct class devcd_class = {
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen)
{
- if (offset > datalen)
- return -EINVAL;
-
- if (offset + count > datalen)
- count = datalen - offset;
-
- if (count)
- memcpy(buffer, ((u8 *)data) + offset, count);
-
- return count;
+ return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}
static void devcd_freev(void *data)
@@ -323,7 +314,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
EXPORT_SYMBOL_GPL(dev_coredumpm);
/**
- * dev_coredumpmsg - create device coredump that uses scatterlist as data
+ * dev_coredumpsg - create device coredump that uses scatterlist as data
* parameter
* @dev: the struct device for the crashed device
* @table: the dump data
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7048a41973ed..7ecd590e67fe 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
-int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
-int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
#endif
#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 506a0175a5a7..b6c6c7d97d5b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -40,25 +40,6 @@ struct device platform_bus = {
EXPORT_SYMBOL_GPL(platform_bus);
/**
- * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
- * @pdev: platform device
- *
- * This is called before platform_device_add() such that any pdev_archdata may
- * be setup before the platform_notifier is called. So if a user needs to
- * manipulate any relevant information in the pdev_archdata they can do:
- *
- * platform_device_alloc()
- * ... manipulate ...
- * platform_device_add()
- *
- * And if they don't care they can just call platform_device_register() and
- * everything will just work out.
- */
-void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
-{
-}
-
-/**
* platform_get_resource - get a resource for a device
* @dev: platform device
* @type: resource type
@@ -99,12 +80,7 @@ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
#endif /* CONFIG_HAS_IOMEM */
-/**
- * platform_get_irq - get an IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- */
-int platform_get_irq(struct platform_device *dev, unsigned int num)
+static int __platform_get_irq(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
@@ -157,15 +133,70 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
- if (num == 0 && has_acpi_companion(&dev->dev))
- return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+ if (num == 0 && has_acpi_companion(&dev->dev)) {
+ int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+
+ /* Our callers expect -ENXIO for missing IRQs. */
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
return -ENXIO;
#endif
}
+
+/**
+ * platform_get_irq - get an IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device and prints an error message if finding the
+ * IRQ fails. Device drivers should check the return value for errors so as to
+ * not pass a negative integer value to the request_irq() APIs.
+ *
+ * Example:
+ * int irq = platform_get_irq(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq(struct platform_device *dev, unsigned int num)
+{
+ int ret;
+
+ ret = __platform_get_irq(dev, num);
+ if (ret < 0 && ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "IRQ index %u not found\n", num);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
+ * platform_get_irq_optional - get an optional IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device. Device drivers should check the return
+ * value for errors so as to not pass a negative integer value to the
+ * request_irq() APIs. This is the same as platform_get_irq(), except that it
+ * does not print an error message if an IRQ cannot be obtained.
+ *
+ * Example:
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ return __platform_get_irq(dev, num);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_optional);
+
+/**
* platform_irq_count - Count the number of IRQs a platform device uses
* @dev: platform device
*
@@ -175,7 +206,7 @@ int platform_irq_count(struct platform_device *dev)
{
int ret, nr = 0;
- while ((ret = platform_get_irq(dev, nr)) >= 0)
+ while ((ret = __platform_get_irq(dev, nr)) >= 0)
nr++;
if (ret == -EPROBE_DEFER)
@@ -228,7 +259,11 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
- return r ? r->start : -ENXIO;
+ if (r)
+ return r->start;
+
+ dev_err(&dev->dev, "IRQ %s not found\n", name);
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
@@ -259,6 +294,20 @@ struct platform_object {
char name[];
};
+/*
+ * Set up a default DMA mask for platform devices if they weren't
+ * previously set by the architecture / DT.
+ */
+static void setup_pdev_dma_masks(struct platform_device *pdev)
+{
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!pdev->dma_mask)
+ pdev->dma_mask = DMA_BIT_MASK(32);
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dma_mask;
+}
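
The default installed here is a 32-bit mask; a driver whose hardware can address more is still expected to widen it itself. A hedged sketch (driver name hypothetical):

/* Hedged sketch, not part of the patch: widening the 32-bit default. */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_dma_probe(struct platform_device *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}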
+
/**
* platform_device_put - destroy a platform device
* @pdev: platform device to free
@@ -305,7 +354,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
pa->pdev.id = id;
device_initialize(&pa->pdev.dev);
pa->pdev.dev.release = platform_device_release;
- arch_setup_pdev_archdata(&pa->pdev);
+ setup_pdev_dma_masks(&pa->pdev);
}
return pa ? &pa->pdev : NULL;
@@ -507,7 +556,7 @@ EXPORT_SYMBOL_GPL(platform_device_del);
int platform_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
- arch_setup_pdev_archdata(pdev);
+ setup_pdev_dma_masks(pdev);
return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
@@ -1197,6 +1246,20 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
+/**
+ * platform_find_device_by_driver - Find a platform device with a given
+ * driver.
+ * @start: The device to start the search from.
+ * @drv: The device driver to look for.
+ */
+struct device *platform_find_device_by_driver(struct device *start,
+ const struct device_driver *drv)
+{
+ return bus_find_device(&platform_bus_type, start, drv,
+ (void *)platform_match);
+}
+EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
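
A hedged usage sketch (not from the patch); the driver structure is hypothetical and the returned reference belongs to the caller:

/* Hedged sketch, not part of the patch: looking up a device by its driver. */
static struct platform_driver example_driver;	/* hypothetical */

static struct device *example_find_component(void)
{
	/* The caller owns the returned reference and must put_device() it. */
	return platform_find_device_by_driver(NULL, &example_driver.driver);
}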
+
int __init platform_bus_init(void)
{
int error;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index e1bb691cf8f1..ec5bb190b9d0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
-obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index b063bc41b0a9..cc85e87eaf05 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -149,29 +149,24 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
return ret;
}
+static int genpd_runtime_suspend(struct device *dev);
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
* and checks that the PM domain pointer is a real generic PM domain.
* Any failure results in NULL being returned.
*/
-static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
+static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
return NULL;
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (&gpd->domain == dev->pm_domain) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
+ /* A genpd always has its ->runtime_suspend() callback assigned. */
+ if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
+ return pd_to_genpd(dev->pm_domain);
- return genpd;
+ return NULL;
}
/*
@@ -385,8 +380,8 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
unsigned int prev;
int ret;
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
return -ENODEV;
if (unlikely(!genpd->set_performance_state))
@@ -1610,7 +1605,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
*/
int pm_genpd_remove_device(struct device *dev)
{
- struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
+ struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
if (!genpd)
return -EINVAL;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7fb2c39bc725..134a8af51511 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -716,7 +716,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
-void dpm_noirq_resume_devices(pm_message_t state)
+static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -760,13 +760,6 @@ void dpm_noirq_resume_devices(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
-void dpm_noirq_end(void)
-{
- resume_device_irqs();
- device_wakeup_disarm_wake_irqs();
- cpuidle_resume();
-}
-
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -777,7 +770,11 @@ void dpm_noirq_end(void)
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
- dpm_noirq_end();
+
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
+
+ cpuidle_resume();
}
static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
@@ -1291,11 +1288,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
- if (pm_wakeup_pending()) {
- async_error = -EBUSY;
- goto Complete;
- }
-
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@@ -1362,14 +1354,7 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
-void dpm_noirq_begin(void)
-{
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
-}
-
-int dpm_noirq_suspend_devices(pm_message_t state)
+static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
@@ -1426,7 +1411,11 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
- dpm_noirq_begin();
+ cpuidle_pause();
+
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index ec33fbdb919b..39a06a0cfdaa 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -149,3 +149,21 @@ static inline void device_pm_init(struct device *dev)
device_pm_sleep_init(dev);
pm_runtime_init(dev);
}
+
+#ifdef CONFIG_PM_SLEEP
+
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b75335508d2c..48616f358854 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -287,7 +287,8 @@ static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
int retval;
if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
@@ -309,7 +310,8 @@ static void rpm_put_suppliers(struct device *dev)
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
continue;
@@ -1624,7 +1626,7 @@ void pm_runtime_remove(struct device *dev)
* runtime PM references to the device, drop the usage counter of the device
* (as many times as needed).
*
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links with the DL_FLAG_MANAGED flag unset are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
@@ -1640,8 +1642,9 @@ void pm_runtime_clean_up_links(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
+ device_links_read_lock_held()) {
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
while (refcount_dec_not_one(&link->rpm_active))
@@ -1662,7 +1665,8 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
refcount_inc(&link->rpm_active);
@@ -1683,7 +1687,8 @@ void pm_runtime_put_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
if (link->supplier_preactivated) {
link->supplier_preactivated = false;
if (refcount_dec_not_one(&link->rpm_active))
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 1b9c281cbe41..d7d82db2e4bc 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -667,8 +668,13 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_wakeup;
}
+ rc = pm_wakeup_source_sysfs_add(dev);
+ if (rc)
+ goto err_latency;
return 0;
+ err_latency:
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index ee31d4f8d856..5817b51d2b15 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -72,22 +72,7 @@ static struct wakeup_source deleted_ws = {
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
-/**
- * wakeup_source_prepare - Prepare a new wakeup source for initialization.
- * @ws: Wakeup source to prepare.
- * @name: Pointer to the name of the new wakeup source.
- *
- * Callers must ensure that the @name string won't be freed when @ws is still in
- * use.
- */
-void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
-{
- if (ws) {
- memset(ws, 0, sizeof(*ws));
- ws->name = name;
- }
-}
-EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+static DEFINE_IDA(wakeup_ida);
/**
* wakeup_source_create - Create a struct wakeup_source object.
@@ -96,13 +81,31 @@ EXPORT_SYMBOL_GPL(wakeup_source_prepare);
struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
+ const char *ws_name;
+ int id;
- ws = kmalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
- return NULL;
+ goto err_ws;
+
+ ws_name = kstrdup_const(name, GFP_KERNEL);
+ if (!ws_name)
+ goto err_name;
+ ws->name = ws_name;
+
+ id = ida_alloc(&wakeup_ida, GFP_KERNEL);
+ if (id < 0)
+ goto err_id;
+ ws->id = id;
- wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
return ws;
+
+err_id:
+ kfree_const(ws->name);
+err_name:
+ kfree(ws);
+err_ws:
+ return NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
@@ -134,6 +137,13 @@ static void wakeup_source_record(struct wakeup_source *ws)
spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
+static void wakeup_source_free(struct wakeup_source *ws)
+{
+ ida_free(&wakeup_ida, ws->id);
+ kfree_const(ws->name);
+ kfree(ws);
+}
+
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
@@ -147,8 +157,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
__pm_relax(ws);
wakeup_source_record(ws);
- kfree_const(ws->name);
- kfree(ws);
+ wakeup_source_free(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
@@ -200,16 +209,26 @@ EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
+ * @dev: Device this wakeup source is associated with (or NULL if virtual).
* @name: Name of the wakeup source to register.
*/
-struct wakeup_source *wakeup_source_register(const char *name)
+struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
{
struct wakeup_source *ws;
+ int ret;
ws = wakeup_source_create(name);
- if (ws)
+ if (ws) {
+ if (!dev || device_is_registered(dev)) {
+ ret = wakeup_source_sysfs_add(dev, ws);
+ if (ret) {
+ wakeup_source_free(ws);
+ return NULL;
+ }
+ }
wakeup_source_add(ws);
-
+ }
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
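
With the @dev argument, the wakeup source gets a wakeupN entry in the new "wakeup" class parented to that device. A hedged sketch of registration plus event reporting (names hypothetical):

/* Hedged sketch, not part of the patch: a device-backed wakeup source. */
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct example_ctx {
	struct wakeup_source *ws;
};

static int example_setup_wakeup(struct device *dev, struct example_ctx *ctx)
{
	ctx->ws = wakeup_source_register(dev, "example-irq");
	return ctx->ws ? 0 : -ENOMEM;
}

static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_ctx *ctx = data;

	/* Report the wakeup event and stay awake for up to 100 ms. */
	pm_wakeup_ws_event(ctx->ws, 100, false);
	return IRQ_HANDLED;
}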
@@ -222,6 +241,7 @@ void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
+ wakeup_source_sysfs_remove(ws);
wakeup_source_destroy(ws);
}
}
@@ -265,7 +285,7 @@ int device_wakeup_enable(struct device *dev)
if (pm_suspend_target_state != PM_SUSPEND_ON)
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
- ws = wakeup_source_register(dev_name(dev));
+ ws = wakeup_source_register(dev, dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -859,7 +879,7 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_system_cancel_wakeup(void)
{
- atomic_dec(&pm_abort_suspend);
+ atomic_dec_if_positive(&pm_abort_suspend);
}
void pm_wakeup_clear(bool reset)
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
new file mode 100644
index 000000000000..c7734914d914
--- /dev/null
+++ b/drivers/base/power/wakeup_stats.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wakeup statistics in sysfs
+ *
+ * Copyright (c) 2019 Linux Foundation
+ * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2019 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "power.h"
+
+static struct class *wakeup_class;
+
+#define wakeup_attr(_name) \
+static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wakeup_source *ws = dev_get_drvdata(dev); \
+ \
+ return sprintf(buf, "%lu\n", ws->_name); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+wakeup_attr(active_count);
+wakeup_attr(event_count);
+wakeup_attr(wakeup_count);
+wakeup_attr(expire_count);
+
+static ssize_t active_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time =
+ ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
+
+ return sprintf(buf, "%lld\n", ktime_to_ms(active_time));
+}
+static DEVICE_ATTR_RO(active_time_ms);
+
+static ssize_t total_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t total_time = ws->total_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(total_time));
+}
+static DEVICE_ATTR_RO(total_time_ms);
+
+static ssize_t max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t max_time = ws->max_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ if (active_time > max_time)
+ max_time = active_time;
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(max_time));
+}
+static DEVICE_ATTR_RO(max_time_ms);
+
+static ssize_t last_change_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lld\n", ktime_to_ms(ws->last_time));
+}
+static DEVICE_ATTR_RO(last_change_ms);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ws->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t prevent_suspend_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t prevent_sleep_time = ws->prevent_sleep_time;
+
+ if (ws->active && ws->autosleep_enabled) {
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(ktime_get(), ws->start_prevent_time));
+ }
+ return sprintf(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+}
+static DEVICE_ATTR_RO(prevent_suspend_time_ms);
+
+static struct attribute *wakeup_source_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_active_count.attr,
+ &dev_attr_event_count.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_expire_count.attr,
+ &dev_attr_active_time_ms.attr,
+ &dev_attr_total_time_ms.attr,
+ &dev_attr_max_time_ms.attr,
+ &dev_attr_last_change_ms.attr,
+ &dev_attr_prevent_suspend_time_ms.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wakeup_source);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *wakeup_source_device_create(struct device *parent,
+ struct wakeup_source *ws)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = MKDEV(0, 0);
+ dev->class = wakeup_class;
+ dev->parent = parent;
+ dev->groups = wakeup_source_groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, ws);
+ device_set_pm_not_required(dev);
+
+ retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
+ * @parent: Device the wakeup source is associated with (or NULL if virtual).
+ * @ws: Wakeup source to be added in sysfs.
+ */
+int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
+{
+ struct device *dev;
+
+ dev = wakeup_source_device_create(parent, ws);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ ws->dev = dev;
+
+ return 0;
+}
+
+/**
+ * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
+ * for a device if they're missing.
+ * @parent: Device the wakeup source is associated with
+ */
+int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ if (!parent->power.wakeup || parent->power.wakeup->dev)
+ return 0;
+
+ return wakeup_source_sysfs_add(parent, parent->power.wakeup);
+}
+
+/**
+ * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
+ * @ws: Wakeup source to be removed from sysfs.
+ */
+void wakeup_source_sysfs_remove(struct wakeup_source *ws)
+{
+ device_unregister(ws->dev);
+}
+
+static int __init wakeup_sources_sysfs_init(void)
+{
+ wakeup_class = class_create(THIS_MODULE, "wakeup");
+
+ return PTR_ERR_OR_ZERO(wakeup_class);
+}
+postcore_initcall(wakeup_sources_sysfs_init);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
config REGMAP_SOUNDWIRE
tristate
- depends on SOUNDWIRE_BUS
+ depends on SOUNDWIRE
config REGMAP_SCCB
tristate
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index e5e1b3a01b1a..e72843fe41df 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -588,14 +588,6 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
}
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
- if (!map->debugfs) {
- dev_warn(map->dev,
- "Failed to create %s debugfs directory\n", name);
-
- kfree(map->debugfs_name);
- map->debugfs_name = NULL;
- return;
- }
debugfs_create_file("name", 0400, map->debugfs,
map, &regmap_name_fops);
@@ -672,10 +664,6 @@ void regmap_debugfs_initcall(void)
struct regmap_debugfs_node *node, *tmp;
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
- if (!regmap_debugfs_root) {
- pr_warn("regmap: Failed to create debugfs root\n");
- return;
- }
mutex_lock(&regmap_debugfs_early_lock);
list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index c9dc70ceca5f..3d64c9331a82 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -370,7 +370,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
- pm_runtime_put(map->dev);
goto exit;
}
}
@@ -425,8 +424,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
dev_err(map->dev,
"Failed to read IRQ status %d\n",
ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
goto exit;
}
}
@@ -478,8 +475,6 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
dev_err(map->dev,
"Failed to read IRQ status: %d\n",
ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
goto exit;
}
}
@@ -513,10 +508,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
}
+exit:
if (chip->runtime_pm)
pm_runtime_put(map->dev);
-exit:
if (chip->handle_post_irq)
chip->handle_post_irq(chip->irq_drv_data);
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 10b280f30217..7c0c5ca5953d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -33,6 +33,7 @@ static struct bus_type soc_bus_type = {
static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(serial_number, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
@@ -57,6 +58,9 @@ static umode_t soc_attribute_mode(struct kobject *kobj,
if ((attr == &dev_attr_revision.attr)
&& (soc_dev->attr->revision != NULL))
return attr->mode;
+ if ((attr == &dev_attr_serial_number.attr)
+ && (soc_dev->attr->serial_number != NULL))
+ return attr->mode;
if ((attr == &dev_attr_soc_id.attr)
&& (soc_dev->attr->soc_id != NULL))
return attr->mode;
@@ -77,6 +81,8 @@ static ssize_t soc_info_get(struct device *dev,
return sprintf(buf, "%s\n", soc_dev->attr->family);
if (attr == &dev_attr_revision)
return sprintf(buf, "%s\n", soc_dev->attr->revision);
+ if (attr == &dev_attr_serial_number)
+ return sprintf(buf, "%s\n", soc_dev->attr->serial_number);
if (attr == &dev_attr_soc_id)
return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
@@ -87,6 +93,7 @@ static ssize_t soc_info_get(struct device *dev,
static struct attribute *soc_attr[] = {
&dev_attr_machine.attr,
&dev_attr_family.attr,
+ &dev_attr_serial_number.attr,
&dev_attr_soc_id.attr,
&dev_attr_revision.attr,
NULL,
@@ -157,6 +164,7 @@ out2:
out1:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
@@ -166,6 +174,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
+EXPORT_SYMBOL_GPL(soc_device_unregister);
static int __init soc_bus_register(void)
{
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index e7b3aa3bd55a..a1f3f0994f9f 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(is_software_node);
static struct swnode *
software_node_to_swnode(const struct software_node *node)
{
- struct swnode *swnode;
+ struct swnode *swnode = NULL;
struct kobject *k;
if (!node)
@@ -620,6 +620,43 @@ static const struct fwnode_operations software_node_ops = {
/* -------------------------------------------------------------------------- */
+/**
+ * software_node_find_by_name - Find software node by name
+ * @parent: Parent of the software node
+ * @name: Name of the software node
+ *
+ * The function will find a node that is a child of @parent and that is named
+ * @name. If no node is found, the function returns NULL.
+ *
+ * NOTE: you will need to drop the reference with fwnode_handle_put() after use.
+ */
+const struct software_node *
+software_node_find_by_name(const struct software_node *parent, const char *name)
+{
+ struct swnode *swnode = NULL;
+ struct kobject *k;
+
+ if (!name)
+ return NULL;
+
+ spin_lock(&swnode_kset->list_lock);
+
+ list_for_each_entry(k, &swnode_kset->list, entry) {
+ swnode = kobj_to_swnode(k);
+ if (parent == swnode->node->parent && swnode->node->name &&
+ !strcmp(name, swnode->node->name)) {
+ kobject_get(&swnode->kobj);
+ break;
+ }
+ swnode = NULL;
+ }
+
+ spin_unlock(&swnode_kset->list_lock);
+
+ return swnode ? swnode->node : NULL;
+}
+EXPORT_SYMBOL_GPL(software_node_find_by_name);
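
A hedged usage sketch (not from the patch); the node name is hypothetical, and the reference taken by the lookup is dropped through the node's fwnode as the comment above suggests:

/* Hedged sketch, not part of the patch: finding a root-level software node. */
static void example_use_swnode(void)
{
	const struct software_node *node;

	node = software_node_find_by_name(NULL, "example-node");
	if (!node)
		return;

	/* ... use the node ... */

	fwnode_handle_put(software_node_fwnode(node));
}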
+
static int
software_node_register_properties(struct software_node *node,
const struct property_entry *properties)
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 5b49f1b33ebe..e2ea2356da06 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
}
flush_scheduled_work();
- /* pass one: without sleeping, do aoedev_downdev */
+ /* pass one: do aoedev_downdev, which might sleep */
+restart1:
spin_lock_irqsave(&devlist_lock, flags);
for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL)
+ goto cont;
+
if (exiting) {
/* unconditionally take each device down */
} else if (specified) {
@@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
|| d->ref)
goto cont;
+ spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&devlist_lock, flags);
aoedev_downdev(d);
d->flags |= DEVFL_TKILL;
+ goto restart1;
cont:
spin_unlock(&d->lock);
}
@@ -348,7 +355,7 @@ cont:
/* pass two: call freedev, which might sleep,
* for aoedevs marked with DEVFL_TKILL
*/
-restart:
+restart2:
spin_lock_irqsave(&devlist_lock, flags);
for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
@@ -357,7 +364,7 @@ restart:
spin_unlock(&d->lock);
spin_unlock_irqrestore(&devlist_lock, flags);
freedev(d);
- goto restart;
+ goto restart2;
}
spin_unlock(&d->lock);
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 85f20e371f2f..bd7d3bb8b890 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+ /* Fall through */
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
thi->name[0],
resource->name);
+ allow_kernel_signal(DRBD_SIGKILL);
+ allow_kernel_signal(SIGXCPU);
restart:
retval = thi->function(thi);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 90ebfcae0ce6..2b3103c30857 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+ struct shash_desc *desc;
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
+ desc = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(connection->cram_hmac_tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ rv = -1;
+ goto fail;
+ }
desc->tfm = connection->cram_hmac_tfm;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
- shash_desc_zero(desc);
+ if (desc) {
+ shash_desc_zero(desc);
+ kfree(desc);
+ }
return rv;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 0469aceaa230..485865fd0412 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive,
v.native_format = UDP->native_format;
mutex_unlock(&floppy_mutex);
- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
return 0;
}
@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll,
v.bufblocks = UDRS->bufblocks;
mutex_unlock(&floppy_mutex);
- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
return -EFAULT;
return 0;
Eintr:
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44c9985f352a..1410fa893653 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -885,7 +885,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
static int loop_kthread_worker_fn(void *worker_ptr)
{
- current->flags |= PF_LESS_THROTTLE;
+ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
return kthread_worker_fn(worker_ptr);
}
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file;
struct inode *inode;
struct address_space *mapping;
+ struct block_device *claimed_bdev = NULL;
int lo_flags = 0;
int error;
loff_t size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- bdgrab(bdev);
- error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
- if (error)
+ claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+ if (IS_ERR(claimed_bdev)) {
+ error = PTR_ERR(claimed_bdev);
goto out_putf;
+ }
}
error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mutex_unlock(&loop_ctl_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
out_putf:
fput(file);
out:
@@ -1753,6 +1755,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
+ case LOOP_SET_DIRECT_IO:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9bcde2325893..a8e3815295fe 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -108,6 +108,7 @@ struct nbd_device {
struct nbd_config *config;
struct mutex config_lock;
struct gendisk *disk;
+ struct workqueue_struct *recv_workq;
struct list_head list;
struct task_struct *task_recv;
@@ -121,6 +122,7 @@ struct nbd_cmd {
struct mutex lock;
int index;
int cookie;
+ int retries;
blk_status_t status;
unsigned long flags;
u32 cmd_cookie;
@@ -138,7 +140,6 @@ static struct dentry *nbd_dbg_dir;
static unsigned int nbds_max = 16;
static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -344,6 +345,22 @@ static void sock_shutdown(struct nbd_device *nbd)
dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
+static u32 req_to_nbd_cmd_type(struct request *req)
+{
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
+ return NBD_CMD_TRIM;
+ case REQ_OP_FLUSH:
+ return NBD_CMD_FLUSH;
+ case REQ_OP_WRITE:
+ return NBD_CMD_WRITE;
+ case REQ_OP_READ:
+ return NBD_CMD_READ;
+ default:
+ return U32_MAX;
+ }
+}
+
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
bool reserved)
{
@@ -357,8 +374,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
- if (!mutex_trylock(&cmd->lock))
+ if (!mutex_trylock(&cmd->lock)) {
+ nbd_config_put(nbd);
return BLK_EH_RESET_TIMER;
+ }
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
@@ -389,10 +408,25 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_config_put(nbd);
return BLK_EH_DONE;
}
- } else {
- dev_err_ratelimited(nbd_to_dev(nbd),
- "Connection timed out\n");
}
+
+ if (!nbd->tag_set.timeout) {
+ /*
+ * Userspace sets timeout=0 to disable socket disconnection,
+ * so just warn and reset the timer.
+ */
+ cmd->retries++;
+ dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
+ req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
+ (unsigned long long)blk_rq_pos(req) << 9,
+ blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
+
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+ return BLK_EH_RESET_TIMER;
+ }
+
+ dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@@ -480,22 +514,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
- switch (req_op(req)) {
- case REQ_OP_DISCARD:
- type = NBD_CMD_TRIM;
- break;
- case REQ_OP_FLUSH:
- type = NBD_CMD_FLUSH;
- break;
- case REQ_OP_WRITE:
- type = NBD_CMD_WRITE;
- break;
- case REQ_OP_READ:
- type = NBD_CMD_READ;
- break;
- default:
+ type = req_to_nbd_cmd_type(req);
+ if (type == U32_MAX)
return -EIO;
- }
if (rq_data_dir(req) == WRITE &&
(config->flags & NBD_FLAG_READ_ONLY)) {
@@ -526,6 +547,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
}
cmd->index = index;
cmd->cookie = nsock->cookie;
+ cmd->retries = 0;
request.type = htonl(type | nbd_cmd_flags);
if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -1036,7 +1058,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
- queue_work(recv_workqueue, &args->work);
+ queue_work(nbd->recv_workq, &args->work);
atomic_inc(&config->live_connections);
wake_up(&config->conn_wait);
@@ -1137,6 +1159,10 @@ static void nbd_config_put(struct nbd_device *nbd)
kfree(nbd->config);
nbd->config = NULL;
+ if (nbd->recv_workq)
+ destroy_workqueue(nbd->recv_workq);
+ nbd->recv_workq = NULL;
+
nbd->tag_set.timeout = 0;
nbd->disk->queue->limits.discard_granularity = 0;
nbd->disk->queue->limits.discard_alignment = 0;
@@ -1165,6 +1191,14 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
+ nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI |
+ WQ_UNBOUND, 0, nbd->index);
+ if (!nbd->recv_workq) {
+ dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+ return -ENOMEM;
+ }
+
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
nbd->task_recv = current;
@@ -1195,7 +1229,7 @@ static int nbd_start_device(struct nbd_device *nbd)
INIT_WORK(&args->work, recv_work);
args->nbd = nbd;
args->index = i;
- queue_work(recv_workqueue, &args->work);
+ queue_work(nbd->recv_workq, &args->work);
}
nbd_size_update(nbd);
return error;
@@ -1215,8 +1249,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret)
+ if (ret) {
sock_shutdown(nbd);
+ flush_workqueue(nbd->recv_workq);
+ }
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(bdev);
/* user requested, ignore socket errors */
@@ -1231,7 +1267,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
- kill_bdev(bdev);
+ __invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
@@ -1246,6 +1282,13 @@ static bool nbd_is_valid_blksize(unsigned long blksize)
return true;
}
+static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
+{
+ nbd->tag_set.timeout = timeout * HZ;
+ if (timeout)
+ blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
+}
+
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
@@ -1276,10 +1319,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_size_set(nbd, config->blksize, arg);
return 0;
case NBD_SET_TIMEOUT:
- if (arg) {
- nbd->tag_set.timeout = arg * HZ;
- blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
- }
+ nbd_set_cmd_timeout(nbd, arg);
return 0;
case NBD_SET_FLAGS:
@@ -1799,11 +1839,9 @@ again:
if (ret)
goto out;
- if (info->attrs[NBD_ATTR_TIMEOUT]) {
- u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
- nbd->tag_set.timeout = timeout * HZ;
- blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
- }
+ if (info->attrs[NBD_ATTR_TIMEOUT])
+ nbd_set_cmd_timeout(nbd,
+ nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
config->dead_conn_timeout =
nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
@@ -1875,6 +1913,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
nbd_disconnect(nbd);
nbd_clear_sock(nbd);
mutex_unlock(&nbd->config_lock);
+ /*
+ * Make sure recv thread has finished, so it does not drop the last
+ * config ref and try to destroy the workqueue from inside the work
+ * queue.
+ */
+ flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -1971,11 +2015,9 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
if (ret)
goto out;
- if (info->attrs[NBD_ATTR_TIMEOUT]) {
- u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
- nbd->tag_set.timeout = timeout * HZ;
- blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
- }
+ if (info->attrs[NBD_ATTR_TIMEOUT])
+ nbd_set_cmd_timeout(nbd,
+ nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
config->dead_conn_timeout =
nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
@@ -2261,20 +2303,12 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
- recv_workqueue = alloc_workqueue("knbd-recv",
- WQ_MEM_RECLAIM | WQ_HIGHPRI |
- WQ_UNBOUND, 0);
- if (!recv_workqueue)
- return -ENOMEM;
- if (register_blkdev(NBD_MAJOR, "nbd")) {
- destroy_workqueue(recv_workqueue);
+ if (register_blkdev(NBD_MAJOR, "nbd"))
return -EIO;
- }
if (genl_register_family(&nbd_genl_family)) {
unregister_blkdev(NBD_MAJOR, "nbd");
- destroy_workqueue(recv_workqueue);
return -EINVAL;
}
nbd_dbg_init();
@@ -2316,7 +2350,6 @@ static void __exit nbd_cleanup(void)
idr_destroy(&nbd_index_idr);
genl_unregister_family(&nbd_genl_family);
- destroy_workqueue(recv_workqueue);
unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index a1b9929bd911..a235c45e22a7 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -2,6 +2,9 @@
#ifndef __BLK_NULL_BLK_H
#define __BLK_NULL_BLK_H
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
@@ -90,13 +93,13 @@ int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
int null_zone_report(struct gendisk *disk, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones);
-void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
- unsigned int nr_sectors);
-void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
+blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector,
+ sector_t nr_sectors);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
- pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
+ pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
@@ -106,10 +109,11 @@ static inline int null_zone_report(struct gendisk *disk, sector_t sector,
{
return -EOPNOTSUPP;
}
-static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
- unsigned int nr_sectors)
+static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
+ enum req_opf op, sector_t sector,
+ sector_t nr_sectors)
{
+ return BLK_STS_NOTSUPP;
}
-static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 99328ded60d1..0e7da5015ccd 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -141,8 +141,8 @@ static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
-static int nr_devices = 1;
-module_param(nr_devices, int, 0444);
+static unsigned int nr_devices = 1;
+module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
static bool g_blocking;
@@ -1133,93 +1133,61 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true);
}
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
- int err = 0;
+ blk_status_t sts = BLK_STS_OK;
+ struct request *rq = cmd->rq;
- if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
- struct request *rq = cmd->rq;
-
- if (!hrtimer_active(&nullb->bw_timer))
- hrtimer_restart(&nullb->bw_timer);
-
- if (atomic_long_sub_return(blk_rq_bytes(rq),
- &nullb->cur_bytes) < 0) {
- null_stop_queue(nullb);
- /* race with timer */
- if (atomic_long_read(&nullb->cur_bytes) > 0)
- null_restart_queue_async(nullb);
- /* requeue request */
- return BLK_STS_DEV_RESOURCE;
- }
- }
+ if (!hrtimer_active(&nullb->bw_timer))
+ hrtimer_restart(&nullb->bw_timer);
- if (nullb->dev->badblocks.shift != -1) {
- int bad_sectors;
- sector_t sector, size, first_bad;
- bool is_flush = true;
-
- if (dev->queue_mode == NULL_Q_BIO &&
- bio_op(cmd->bio) != REQ_OP_FLUSH) {
- is_flush = false;
- sector = cmd->bio->bi_iter.bi_sector;
- size = bio_sectors(cmd->bio);
- }
- if (dev->queue_mode != NULL_Q_BIO &&
- req_op(cmd->rq) != REQ_OP_FLUSH) {
- is_flush = false;
- sector = blk_rq_pos(cmd->rq);
- size = blk_rq_sectors(cmd->rq);
- }
- if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
- size, &first_bad, &bad_sectors)) {
- cmd->error = BLK_STS_IOERR;
- goto out;
- }
+ if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
+ null_stop_queue(nullb);
+ /* race with timer */
+ if (atomic_long_read(&nullb->cur_bytes) > 0)
+ null_restart_queue_async(nullb);
+ /* requeue request */
+ sts = BLK_STS_DEV_RESOURCE;
}
+ return sts;
+}
- if (dev->memory_backed) {
- if (dev->queue_mode == NULL_Q_BIO) {
- if (bio_op(cmd->bio) == REQ_OP_FLUSH)
- err = null_handle_flush(nullb);
- else
- err = null_handle_bio(cmd);
- } else {
- if (req_op(cmd->rq) == REQ_OP_FLUSH)
- err = null_handle_flush(nullb);
- else
- err = null_handle_rq(cmd);
- }
- }
- cmd->error = errno_to_blk_status(err);
+static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
+ sector_t sector,
+ sector_t nr_sectors)
+{
+ struct badblocks *bb = &cmd->nq->dev->badblocks;
+ sector_t first_bad;
+ int bad_sectors;
- if (!cmd->error && dev->zoned) {
- sector_t sector;
- unsigned int nr_sectors;
- enum req_opf op;
+ if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
+ return BLK_STS_IOERR;
- if (dev->queue_mode == NULL_Q_BIO) {
- op = bio_op(cmd->bio);
- sector = cmd->bio->bi_iter.bi_sector;
- nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
- } else {
- op = req_op(cmd->rq);
- sector = blk_rq_pos(cmd->rq);
- nr_sectors = blk_rq_sectors(cmd->rq);
- }
+ return BLK_STS_OK;
+}
- if (op == REQ_OP_WRITE)
- null_zone_write(cmd, sector, nr_sectors);
- else if (op == REQ_OP_ZONE_RESET)
- null_zone_reset(cmd, sector);
- }
-out:
+static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
+ enum req_opf op)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ int err;
+
+ if (dev->queue_mode == NULL_Q_BIO)
+ err = null_handle_bio(cmd);
+ else
+ err = null_handle_rq(cmd);
+
+ return errno_to_blk_status(err);
+}
+
+static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
+{
/* Complete IO by inline, softirq or timer */
- switch (dev->irqmode) {
+ switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (dev->queue_mode) {
+ switch (cmd->nq->dev->queue_mode) {
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
@@ -1238,6 +1206,40 @@ out:
null_cmd_end_timer(cmd);
break;
}
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+ sector_t nr_sectors, enum req_opf op)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct nullb *nullb = dev->nullb;
+ blk_status_t sts;
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
+ sts = null_handle_throttled(cmd);
+ if (sts != BLK_STS_OK)
+ return sts;
+ }
+
+ if (op == REQ_OP_FLUSH) {
+ cmd->error = errno_to_blk_status(null_handle_flush(nullb));
+ goto out;
+ }
+
+ if (nullb->dev->badblocks.shift != -1) {
+ cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (cmd->error != BLK_STS_OK)
+ goto out;
+ }
+
+ if (dev->memory_backed)
+ cmd->error = null_handle_memory_backed(cmd, op);
+
+ if (!cmd->error && dev->zoned)
+ cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
+
+out:
+ nullb_complete_cmd(cmd);
return BLK_STS_OK;
}
@@ -1280,6 +1282,8 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
+ sector_t sector = bio->bi_iter.bi_sector;
+ sector_t nr_sectors = bio_sectors(bio);
struct nullb *nullb = q->queuedata;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
@@ -1287,7 +1291,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
cmd = alloc_cmd(nq, 1);
cmd->bio = bio;
- null_handle_cmd(cmd);
+ null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
return BLK_QC_T_NONE;
}
@@ -1311,7 +1315,7 @@ static bool should_requeue_request(struct request *rq)
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
- pr_info("null: rq %p timed out\n", rq);
+ pr_info("rq %p timed out\n", rq);
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
@@ -1321,6 +1325,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
struct nullb_queue *nq = hctx->driver_data;
+ sector_t nr_sectors = blk_rq_sectors(bd->rq);
+ sector_t sector = blk_rq_pos(bd->rq);
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
@@ -1349,7 +1355,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (should_timeout_request(bd->rq))
return BLK_STS_OK;
- return null_handle_cmd(cmd);
+ return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
}
static const struct blk_mq_ops null_mq_ops = {
@@ -1688,6 +1694,9 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
nullb->q->limits.zoned = BLK_ZONED_HM;
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
+ blk_queue_required_elevator_features(nullb->q,
+ ELEVATOR_F_ZBD_SEQ_WRITE);
}
nullb->q->queuedata = nullb;
@@ -1739,28 +1748,28 @@ static int __init null_init(void)
struct nullb_device *dev;
if (g_bs > PAGE_SIZE) {
- pr_warn("null_blk: invalid block size\n");
- pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+ pr_warn("invalid block size\n");
+ pr_warn("defaults block size to %lu\n", PAGE_SIZE);
g_bs = PAGE_SIZE;
}
if (!is_power_of_2(g_zone_size)) {
- pr_err("null_blk: zone_size must be power-of-two\n");
+ pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
- pr_err("null_blk: invalid home_node value\n");
+ pr_err("invalid home_node value\n");
g_home_node = NUMA_NO_NODE;
}
if (g_queue_mode == NULL_Q_RQ) {
- pr_err("null_blk: legacy IO path no longer available\n");
+ pr_err("legacy IO path no longer available\n");
return -EINVAL;
}
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
- pr_warn("null_blk: submit_queues param is set to %u.\n",
+ pr_warn("submit_queues param is set to %u.\n",
nr_online_nodes);
g_submit_queues = nr_online_nodes;
}
@@ -1803,7 +1812,7 @@ static int __init null_init(void)
}
}
- pr_info("null: module loaded\n");
+ pr_info("module loaded\n");
return 0;
err_dev:
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index cb28d93f2bd1..eabc116832a7 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -17,7 +17,7 @@ int null_zone_init(struct nullb_device *dev)
unsigned int i;
if (!is_power_of_2(dev->zone_size)) {
- pr_err("null_blk: zone_size must be power-of-two\n");
+ pr_err("zone_size must be power-of-two\n");
return -EINVAL;
}
@@ -31,7 +31,7 @@ int null_zone_init(struct nullb_device *dev)
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
- pr_info("null_blk: changed the number of conventional zones to %u",
+ pr_info("changed the number of conventional zones to %u",
dev->zone_nr_conv);
}
@@ -84,7 +84,7 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
return 0;
}
-void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
@@ -95,14 +95,12 @@ void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
cmd->error = BLK_STS_IOERR;
- break;
+ return BLK_STS_IOERR;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
/* Writes must be at the write pointer position */
- if (sector != zone->wp) {
- cmd->error = BLK_STS_IOERR;
- break;
- }
+ if (sector != zone->wp)
+ return BLK_STS_IOERR;
if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
@@ -115,22 +113,51 @@ void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
break;
default:
/* Invalid zone condition */
- cmd->error = BLK_STS_IOERR;
- break;
+ return BLK_STS_IOERR;
}
+ return BLK_STS_OK;
}
-void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
+ size_t i;
+
+ switch (req_op(cmd->rq)) {
+ case REQ_OP_ZONE_RESET_ALL:
+ for (i = 0; i < dev->nr_zones; i++) {
+ if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
+ zone[i].cond = BLK_ZONE_COND_EMPTY;
+ zone[i].wp = zone[i].start;
+ }
+ break;
+ case REQ_OP_ZONE_RESET:
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
- cmd->error = BLK_STS_IOERR;
- return;
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ break;
+ default:
+ cmd->error = BLK_STS_NOTSUPP;
+ break;
}
+ return BLK_STS_OK;
+}
- zone->cond = BLK_ZONE_COND_EMPTY;
- zone->wp = zone->start;
+blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
+ sector_t sector, sector_t nr_sectors)
+{
+ switch (op) {
+ case REQ_OP_WRITE:
+ return null_zone_write(cmd, sector, nr_sectors);
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return null_zone_reset(cmd, sector);
+ default:
+ return BLK_STS_OK;
+ }
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 001dbdcbf355..636bfea2de6f 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,8 +314,8 @@ static void pcd_init_units(void)
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
- put_disk(disk);
disk->queue = NULL;
+ put_disk(disk);
continue;
}
@@ -723,9 +723,9 @@ static int pcd_detect(void)
k = 0;
if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
cd = pcd;
- if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer,
- PI_PCD, verbose, cd->name)) {
- if (!pcd_probe(cd, -1, id) && cd->disk) {
+ if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
+ pcd_buffer, PI_PCD, verbose, cd->name)) {
+ if (!pcd_probe(cd, -1, id)) {
cd->present = 1;
k++;
} else
@@ -736,11 +736,13 @@ static int pcd_detect(void)
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
+ if (!cd->disk)
+ continue;
if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pcd_buffer, PI_PCD, verbose, cd->name))
continue;
- if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) {
+ if (!pcd_probe(cd, conf[D_SLV], id)) {
cd->present = 1;
k++;
} else
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 1e9c50a7256c..6b7d4cab3687 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -300,8 +300,8 @@ static void __init pf_init_units(void)
disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
- put_disk(disk);
disk->queue = NULL;
+ put_disk(disk);
continue;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3327192bb71f..c8fb886aebd4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3038,6 +3038,17 @@ again:
}
return true;
case RBD_OBJ_READ_PARENT:
+ /*
+ * The parent image is read only up to the overlap -- zero-fill
+ * from the overlap to the end of the request.
+ */
+ if (!*result) {
+ u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
+
+ if (obj_overlap < obj_req->ex.oe_len)
+ rbd_obj_zero_range(obj_req, obj_overlap,
+ obj_req->ex.oe_len - obj_overlap);
+ }
return true;
default:
BUG();
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3ac6a5d18071..b90dbcd99c03 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
}
+ err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
- return err;
+ goto fail;
}
return 0;
@@ -1007,8 +1008,7 @@ fail:
}
kfree(req);
}
- return -ENOMEM;
-
+ return err;
}
static int connect_ring(struct backend_info *be)
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index a0e84538cec8..1fa58c059cbf 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
usb_free_urb(urb);
- return 0;
+ return err;
}
static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8b33128dccee..0875470a7806 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
return 0;
}
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err;
+
+ bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+ skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+ NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
static void qca_tlv_check_data(struct rome_config *config,
const struct firmware *fw)
{
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
BT_DBG("Length\t\t : %d bytes", length);
config->dnld_mode = ROME_SKIP_EVT_NONE;
+ config->dnld_type = ROME_SKIP_EVT_NONE;
switch (config->type) {
case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
evt = skb_put(skb, sizeof(*evt));
evt->ncmd = 1;
- evt->opcode = QCA_HCI_CC_OPCODE;
+ evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
*/
if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
config->dnld_type == ROME_SKIP_EVT_VSE)
- return qca_inject_cmd_complete_event(hdev);
+ ret = qca_inject_cmd_complete_event(hdev);
out:
release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
+ /* Give the controller some time to get ready to receive the NVM */
+ msleep(10);
+
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name)
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6a291a7a5d96..69c5315a65fd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
+#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
#define EDL_CMD_REQ_RES_EVT (0x00)
#define EDL_PATCH_VER_RES_EVT (0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
const char *firmware_name);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
return false;
}
+
+static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3876fee6ad13..ba4149054304 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -1170,10 +1173,6 @@ static int btusb_open(struct hci_dev *hdev)
}
data->intf->needs_remote_wakeup = 1;
- /* device specific wakeup source enabled and required for USB
- * remote wakeup while host is suspended
- */
- device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@@ -1238,7 +1237,6 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
- device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
@@ -2762,8 +2760,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skipped */
- if (fw_size < 30)
+ if (fw_size < 30) {
+ err = -EINVAL;
goto err_release_fw;
+ }
fw_size -= 30;
fw_ptr += 30;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
if (!bcm)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
intel = kzalloc(sizeof(*intel), GFP_KERNEL);
if (!intel)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+ /* serdev nodes check if the needed operations are present */
+ if (hu->serdev)
+ return true;
+
+ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+ return true;
+
+ return false;
+}
+
/* Flow control or un-flow control the device */
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
{
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
if (!mrvl)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..31dec435767b 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work)
ws_awake_device);
struct hci_uart *hu = qca->hu;
unsigned long retrans_delay;
+ unsigned long flags;
BT_DBG("hu %p wq awake device", hu);
/* Vote for serial clock */
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
/* Send wake indication to device */
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
@@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work)
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
@@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work)
struct qca_data *qca = container_of(work, struct qca_data,
ws_awake_rx);
struct hci_uart *hu = qca->hu;
+ unsigned long flags;
BT_DBG("hu %p wq awake rx", hu);
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
/* Always acknowledge device wake up,
@@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
qca->ibs_sent_wacks++;
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
@@ -473,6 +475,9 @@ static int qca_open(struct hci_uart *hu)
BT_DBG("hu %p qca_open", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@@ -702,7 +707,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
unsigned long flags;
struct qca_data *qca = hu->priv;
- BT_DBG("hu %p want to sleep", hu);
+ BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
@@ -717,7 +722,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
break;
case HCI_IBS_RX_ASLEEP:
- /* Fall through */
+ break;
default:
/* Any other state is illegal */
@@ -909,7 +914,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
if (hdr->evt == HCI_EV_VENDOR)
complete(&qca->drop_ev_comp);
- kfree(skb);
+ kfree_skb(skb);
return 0;
}
@@ -1383,6 +1388,9 @@ static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
+ /* Perform pre shutdown command */
+ qca_send_pre_shutdown_cmd(hdev);
+
qca_power_shutdown(hu);
return 0;
}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_work(struct work_struct *work);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1851112ccc29..6b331061d34b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -29,6 +29,16 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config MOXTET
+ tristate "CZ.NIC Turris Mox module configuration bus"
+ depends on SPI_MASTER && OF
+ help
+ Say yes here to add support for the module configuration bus found
+ on CZ.NIC's Turris Mox. This is needed for the ability to discover
+ the order in which the modules are connected and to get/set some of
+ their settings. For example the GPIOs on Mox SFP module are
+ configured through this bus.
+
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ca300b1914ce..16b43d3468c6 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
+obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 8ad77246f322..cc7bb900f524 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -330,7 +330,6 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
fsl_mc_resource_free(resource);
- device_link_del(mc_adev->consumer_link);
mc_adev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 3ae574a58cce..d9629fc13a15 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -255,7 +255,6 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
fsl_destroy_mc_io(mc_io);
fsl_mc_resource_free(resource);
- device_link_del(dpmcp_dev->consumer_link);
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
size_t pdata_size;
};
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct acpi_device *child;
+
+ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+ list_for_each_entry(child, &adev->children, node)
+ acpi_device_clear_enumerated(child);
+}
+
/*
* hisi_lpc_acpi_probe - probe children for ACPI FW
* @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
return 0;
fail:
- device_for_each_child(hostdev, NULL,
- hisi_lpc_acpi_remove_subdev);
+ hisi_lpc_acpi_remove(hostdev);
return ret;
}
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
{
return -ENODEV;
}
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
#endif // CONFIG_ACPI
/*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
range->fwnode = dev->fwnode;
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
+ range->hostdata = lpcdev;
+ range->ops = &hisi_lpc_ops;
+ lpcdev->io_host = range;
ret = logic_pio_register_range(range);
if (ret) {
dev_err(dev, "register IO range failed (%d)!\n", ret);
return ret;
}
- lpcdev->io_host = range;
/* register the LPC host PIO resources */
if (acpi_device)
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
+ if (ret) {
+ logic_pio_unregister_range(range);
return ret;
+ }
- lpcdev->io_host->hostdata = lpcdev;
- lpcdev->io_host->ops = &hisi_lpc_ops;
+ dev_set_drvdata(dev, lpcdev);
io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
return ret;
}
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+ struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+ if (acpi_device)
+ hisi_lpc_acpi_remove(dev);
+ else
+ of_platform_depopulate(dev);
+
+ logic_pio_unregister_range(range);
+
+ return 0;
+}
+
static const struct of_device_id hisi_lpc_of_match[] = {
{ .compatible = "hisilicon,hip06-lpc", },
{ .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
},
.probe = hisi_lpc_probe,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index db74334ca5ef..28bb65a5613f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -19,6 +19,8 @@ struct imx_weim_devtype {
unsigned int cs_count;
unsigned int cs_regs_count;
unsigned int cs_stride;
+ unsigned int wcr_offset;
+ unsigned int wcr_bcm;
};
static const struct imx_weim_devtype imx1_weim_devtype = {
@@ -37,6 +39,8 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_count = 4,
.cs_regs_count = 6,
.cs_stride = 0x18,
+ .wcr_offset = 0x90,
+ .wcr_bcm = BIT(0),
};
static const struct imx_weim_devtype imx51_weim_devtype = {
@@ -72,7 +76,7 @@ static const struct of_device_id weim_id_table[] = {
};
MODULE_DEVICE_TABLE(of, weim_id_table);
-static int __init imx_weim_gpr_setup(struct platform_device *pdev)
+static int imx_weim_gpr_setup(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct property *prop;
@@ -122,10 +126,10 @@ err:
}
/* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device *dev,
- struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype,
- struct cs_timing_state *ts)
+static int weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
@@ -183,8 +187,7 @@ static int __init weim_timing_setup(struct device *dev,
return 0;
}
-static int __init weim_parse_dt(struct platform_device *pdev,
- void __iomem *base)
+static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
{
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
@@ -192,6 +195,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
struct device_node *child;
int ret, have_child = 0;
struct cs_timing_state ts = {};
+ u32 reg;
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -199,6 +203,17 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
+ if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
+ if (devtype->wcr_bcm) {
+ reg = readl(base + devtype->wcr_offset);
+ writel(reg | devtype->wcr_bcm,
+ base + devtype->wcr_offset);
+ } else {
+ dev_err(&pdev->dev, "burst clk mode not supported.\n");
+ return -EINVAL;
+ }
+ }
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
if (ret)
@@ -217,7 +232,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
-static int __init weim_probe(struct platform_device *pdev)
+static int weim_probe(struct platform_device *pdev)
{
struct resource *res;
struct clk *clk;
@@ -254,8 +269,9 @@ static struct platform_driver weim_driver = {
.name = "imx-weim",
.of_match_table = weim_id_table,
},
+ .probe = weim_probe,
};
-module_platform_driver_probe(weim_driver, weim_probe);
+module_platform_driver(weim_driver);
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_DESCRIPTION("i.MX EIM Controller Driver");
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
new file mode 100644
index 000000000000..36cf13eee6b8
--- /dev/null
+++ b/drivers/bus/moxtet.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox module configuration bus driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <dt-bindings/bus/moxtet.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moxtet.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+
+/*
+ * @name: module name for sysfs
+ * @hwirq_base: base index for IRQ for this module (-1 if no IRQs)
+ * @nirqs: how many interrupts does the shift register provide
+ * @desc: module description for kernel log
+ */
+static const struct {
+ const char *name;
+ int hwirq_base;
+ int nirqs;
+ const char *desc;
+} mox_module_table[] = {
+ /* do not change order of this array! */
+ { NULL, 0, 0, NULL },
+ { "sfp", -1, 0, "MOX D (SFP cage)" },
+ { "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
+ { "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
+ { "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
+ { "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
+ { "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
+};
+
+static inline bool mox_module_known(unsigned int id)
+{
+ return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
+}
+
+static inline const char *mox_module_name(unsigned int id)
+{
+ if (mox_module_known(id))
+ return mox_module_table[id].name;
+ else
+ return "unknown";
+}
+
+#define DEF_MODULE_ATTR(name, fmt, ...) \
+static ssize_t \
+module_##name##_show(struct device *dev, struct device_attribute *a, \
+ char *buf) \
+{ \
+ struct moxtet_device *mdev = to_moxtet_device(dev); \
+ return sprintf(buf, (fmt), __VA_ARGS__); \
+} \
+static DEVICE_ATTR_RO(module_##name)
+
+DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
+DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
+DEF_MODULE_ATTR(description, "%s\n",
+ mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
+ : "");
+
+static struct attribute *moxtet_dev_attrs[] = {
+ &dev_attr_module_id.attr,
+ &dev_attr_module_name.attr,
+ &dev_attr_module_description.attr,
+ NULL,
+};
+
+static const struct attribute_group moxtet_dev_group = {
+ .attrs = moxtet_dev_attrs,
+};
+
+static const struct attribute_group *moxtet_dev_groups[] = {
+ &moxtet_dev_group,
+ NULL,
+};
+
+static int moxtet_match(struct device *dev, struct device_driver *drv)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const enum turris_mox_module_id *t;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!tdrv->id_table)
+ return 0;
+
+ for (t = tdrv->id_table; *t; ++t)
+ if (*t == mdev->id)
+ return 1;
+
+ return 0;
+}
+
+struct bus_type moxtet_bus_type = {
+ .name = "moxtet",
+ .dev_groups = moxtet_dev_groups,
+ .match = moxtet_match,
+};
+EXPORT_SYMBOL_GPL(moxtet_bus_type);
+
+int __moxtet_register_driver(struct module *owner,
+ struct moxtet_driver *mdrv)
+{
+ mdrv->driver.owner = owner;
+ mdrv->driver.bus = &moxtet_bus_type;
+ return driver_register(&mdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__moxtet_register_driver);
+
+static int moxtet_dev_check(struct device *dev, void *data)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_device *new_dev = data;
+
+ if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
+ mdev->idx == new_dev->idx)
+ return -EBUSY;
+ return 0;
+}
+
+static void moxtet_dev_release(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+
+ put_device(mdev->moxtet->dev);
+ kfree(mdev);
+}
+
+static struct moxtet_device *
+moxtet_alloc_device(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+
+ if (!get_device(moxtet->dev))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ put_device(moxtet->dev);
+ return NULL;
+ }
+
+ dev->moxtet = moxtet;
+ dev->dev.parent = moxtet->dev;
+ dev->dev.bus = &moxtet_bus_type;
+ dev->dev.release = moxtet_dev_release;
+
+ device_initialize(&dev->dev);
+
+ return dev;
+}
+
+static int moxtet_add_device(struct moxtet_device *dev)
+{
+ static DEFINE_MUTEX(add_mutex);
+ int ret;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
+ return -EINVAL;
+
+ dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
+ dev->idx);
+
+ mutex_lock(&add_mutex);
+
+ ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
+ moxtet_dev_check);
+ if (ret)
+ goto done;
+
+ ret = device_add(&dev->dev);
+ if (ret < 0)
+ dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
+ dev_name(dev->moxtet->dev), ret);
+
+done:
+ mutex_unlock(&add_mutex);
+ return ret;
+}
+
+static int __unregister(struct device *dev, void *null)
+{
+ if (dev->of_node) {
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_put(dev->of_node);
+ }
+
+ device_unregister(dev);
+
+ return 0;
+}
+
+static struct moxtet_device *
+of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
+{
+ struct moxtet_device *dev;
+ u32 val;
+ int ret;
+
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev,
+ "Moxtet device alloc error for %pOF\n", nc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(nc, "reg", &val);
+ if (ret) {
+ dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
+ nc, ret);
+ goto err_put;
+ }
+
+ dev->idx = val;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
+ nc, dev->idx);
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ dev->id = moxtet->modules[dev->idx];
+
+ if (!dev->id) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
+ dev->idx);
+ ret = -ENODEV;
+ goto err_put;
+ }
+
+ of_node_get(nc);
+ dev->dev.of_node = nc;
+
+ ret = moxtet_add_device(dev);
+ if (ret) {
+ dev_err(moxtet->dev,
+ "Moxtet device register error for %pOF\n", nc);
+ of_node_put(nc);
+ goto err_put;
+ }
+
+ return dev;
+
+err_put:
+ put_device(&dev->dev);
+ return ERR_PTR(ret);
+}
+
+static void of_register_moxtet_devices(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ struct device_node *nc;
+
+ if (!moxtet->dev->of_node)
+ return;
+
+ for_each_available_child_of_node(moxtet->dev->of_node, nc) {
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ dev = of_register_moxtet_device(moxtet, nc);
+ if (IS_ERR(dev)) {
+ dev_warn(moxtet->dev,
+ "Failed to create Moxtet device for %pOF\n",
+ nc);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
+ }
+}
+
+static void
+moxtet_register_devices_from_topology(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ int i, ret;
+
+ for (i = 0; i < moxtet->count; ++i) {
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
+ i);
+ continue;
+ }
+
+ dev->idx = i;
+ dev->id = moxtet->modules[i];
+
+ ret = moxtet_add_device(dev);
+ if (ret && ret != -EBUSY) {
+ put_device(&dev->dev);
+ dev_err(moxtet->dev,
+ "Moxtet device %u register error: %i\n", i,
+ ret);
+ }
+ }
+}
+
+/*
+ * @nsame: how many modules with same id are already in moxtet->modules
+ */
+static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
+{
+ int i, first;
+ struct moxtet_irqpos *pos;
+
+ first = mox_module_table[id].hwirq_base +
+ nsame * mox_module_table[id].nirqs;
+
+ if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
+ return -EINVAL;
+
+ for (i = 0; i < mox_module_table[id].nirqs; ++i) {
+ pos = &moxtet->irq.position[first + i];
+ pos->idx = idx;
+ pos->bit = i;
+ moxtet->irq.exists |= BIT(first + i);
+ }
+
+ return 0;
+}
+
+static int moxtet_find_topology(struct moxtet *moxtet)
+{
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int cnts[TURRIS_MOX_MODULE_LAST];
+ int i, ret;
+
+ memset(cnts, 0, sizeof(cnts));
+
+ ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
+ if (ret < 0)
+ return ret;
+
+ if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
+ dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
+ } else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
+ dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
+ } else {
+ dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
+ buf[0]);
+ return -ENODEV;
+ }
+
+ moxtet->count = 0;
+
+ for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
+ int id;
+
+ if (buf[i] == 0xff)
+ break;
+
+ id = buf[i] & 0xf;
+
+ moxtet->modules[i-1] = id;
+ ++moxtet->count;
+
+ if (mox_module_known(id)) {
+ dev_info(moxtet->dev, "Found %s module\n",
+ mox_module_table[id].desc);
+
+ if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
+ dev_err(moxtet->dev,
+ " Cannot set IRQ for module %s\n",
+ mox_module_table[id].desc);
+ } else {
+ dev_warn(moxtet->dev,
+ "Unknown Moxtet module found (ID 0x%02x)\n",
+ id);
+ }
+ }
+
+ return 0;
+}
+
+static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
+{
+ struct spi_transfer xfer = {
+ .rx_buf = buf,
+ .tx_buf = moxtet->tx,
+ .len = moxtet->count + 1
+ };
+ int ret;
+
+ mutex_lock(&moxtet->lock);
+
+ ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+
+int moxtet_device_read(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ return buf[mdev->idx + 1] >> 4;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_read);
+
+int moxtet_device_write(struct device *dev, u8 val)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ moxtet->tx[moxtet->count - mdev->idx] = val;
+
+ ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_write);
+
+int moxtet_device_written(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ return moxtet->tx[moxtet->count - mdev->idx];
+}
+EXPORT_SYMBOL_GPL(moxtet_device_written);
+
+#ifdef CONFIG_DEBUG_FS
+static int moxtet_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
+ if (ret < 0)
+ return ret;
+
+ n = moxtet->count + 1;
+ bin2hex(hex, bin, n);
+
+ hex[2*n] = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
+}
+
+static const struct file_operations input_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = input_read,
+ .llseek = no_llseek,
+};
+
+static ssize_t output_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
+ u8 *p = hex;
+ int i;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
+
+ mutex_unlock(&moxtet->lock);
+
+ *p++ = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
+}
+
+static ssize_t output_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ ssize_t res;
+ loff_t dummy = 0;
+ int err, i;
+
+ if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
+ return -EINVAL;
+
+ res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
+ if (res < 0)
+ return res;
+
+ if (len % 2 == 1 && hex[len - 1] != '\n')
+ return -EINVAL;
+
+ err = hex2bin(bin, hex, moxtet->count);
+ if (err < 0)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ moxtet->tx[moxtet->count - i] = bin[i];
+
+ err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return err < 0 ? err : len;
+}
+
+static const struct file_operations output_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = output_read,
+ .write = output_write,
+ .llseek = no_llseek,
+};
+
+static int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("moxtet", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
+ &input_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
+ &output_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ moxtet->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+ debugfs_remove_recursive(moxtet->debugfs_root);
+}
+#else
+static inline int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ return 0;
+}
+
+static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+}
+#endif
+
+static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct moxtet *moxtet = d->host_data;
+
+ if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
+ dev_err(moxtet->dev, "Invalid hw irq number\n");
+ return -EINVAL;
+ }
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
+
+ return 0;
+}
+
+static int moxtet_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct moxtet *moxtet = d->host_data;
+ int irq;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ irq = intspec[0];
+
+ if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
+ return -EINVAL;
+
+ *out_hwirq = irq;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops moxtet_irq_domain = {
+ .map = moxtet_irq_domain_map,
+ .xlate = moxtet_irq_domain_xlate,
+};
+
+static void moxtet_irq_mask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked |= BIT(d->hwirq);
+}
+
+static void moxtet_irq_unmask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked &= ~BIT(d->hwirq);
+}
+
+static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+ struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
+ int id;
+
+ id = moxtet->modules[pos->idx];
+
+ seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ pos->bit);
+}
+
+static const struct irq_chip moxtet_irq_chip = {
+ .name = "moxtet",
+ .irq_mask = moxtet_irq_mask,
+ .irq_unmask = moxtet_irq_unmask,
+ .irq_print_chip = moxtet_irq_print_chip,
+};
+
+static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
+{
+ struct moxtet_irqpos *pos = moxtet->irq.position;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int i, ret;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ *map = 0;
+
+ for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
+ if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
+ set_bit(i, map);
+ }
+
+ return 0;
+}
+
+static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
+{
+ struct moxtet *moxtet = data;
+ unsigned long set;
+ int nhandled = 0, i, sub_irq, ret;
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+
+ do {
+ for_each_set_bit(i, &set, MOXTET_NIRQS) {
+ sub_irq = irq_find_mapping(moxtet->irq.domain, i);
+ handle_nested_irq(sub_irq);
+ dev_dbg(moxtet->dev, "%i irq\n", i);
+ ++nhandled;
+ }
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+ } while (set);
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void moxtet_irq_free(struct moxtet *moxtet)
+{
+ int i, irq;
+
+ for (i = 0; i < MOXTET_NIRQS; ++i) {
+ if (moxtet->irq.exists & BIT(i)) {
+ irq = irq_find_mapping(moxtet->irq.domain, i);
+ irq_dispose_mapping(irq);
+ }
+ }
+
+ irq_domain_remove(moxtet->irq.domain);
+}
+
+static int moxtet_irq_setup(struct moxtet *moxtet)
+{
+ int i, ret;
+
+ moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
+ if (moxtet->irq.domain == NULL) {
+ dev_err(moxtet->dev, "Could not add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MOXTET_NIRQS; ++i)
+ if (moxtet->irq.exists & BIT(i))
+ irq_create_mapping(moxtet->irq.domain, i);
+
+ moxtet->irq.chip = moxtet_irq_chip;
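+ /* start with every interrupt masked */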
+ moxtet->irq.masked = ~0;
+
+ ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
+ IRQF_ONESHOT, "moxtet", moxtet);
+ if (ret < 0)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ moxtet_irq_free(moxtet);
+ return ret;
+}
+
+static int moxtet_probe(struct spi_device *spi)
+{
+ struct moxtet *moxtet;
+ int ret;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
+ GFP_KERNEL);
+ if (!moxtet)
+ return -ENOMEM;
+
+ moxtet->dev = &spi->dev;
+ spi_set_drvdata(spi, moxtet);
+
+ mutex_init(&moxtet->lock);
+
+ moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
+ if (moxtet->dev_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (moxtet->dev_irq <= 0) {
+ dev_err(moxtet->dev, "No IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ ret = moxtet_find_topology(moxtet);
+ if (ret < 0)
+ return ret;
+
+ if (moxtet->irq.exists) {
+ ret = moxtet_irq_setup(moxtet);
+ if (ret < 0)
+ return ret;
+ }
+
+ of_register_moxtet_devices(moxtet);
+ moxtet_register_devices_from_topology(moxtet);
+
+ ret = moxtet_register_debugfs(moxtet);
+ if (ret < 0)
+ dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
+ ret);
+
+ return 0;
+}
+
+static int moxtet_remove(struct spi_device *spi)
+{
+ struct moxtet *moxtet = spi_get_drvdata(spi);
+
+ free_irq(moxtet->dev_irq, moxtet);
+
+ moxtet_irq_free(moxtet);
+
+ moxtet_unregister_debugfs(moxtet);
+
+ device_for_each_child(moxtet->dev, NULL, __unregister);
+
+ mutex_destroy(&moxtet->lock);
+
+ return 0;
+}
+
+static const struct of_device_id moxtet_dt_ids[] = {
+ { .compatible = "cznic,moxtet" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
+
+static struct spi_driver moxtet_spi_driver = {
+ .driver = {
+ .name = "moxtet",
+ .of_match_table = moxtet_dt_ids,
+ },
+ .probe = moxtet_probe,
+ .remove = moxtet_remove,
+};
+
+static int __init moxtet_init(void)
+{
+ int ret;
+
+ ret = bus_register(&moxtet_bus_type);
+ if (ret < 0) {
+ pr_err("moxtet bus registration failed: %d\n", ret);
+ goto error;
+ }
+
+ ret = spi_register_driver(&moxtet_spi_driver);
+ if (ret < 0) {
+ pr_err("moxtet spi driver registration failed: %d\n", ret);
+ goto error_bus;
+ }
+
+ return 0;
+
+error_bus:
+ bus_unregister(&moxtet_bus_type);
+error:
+ return ret;
+}
+postcore_initcall_sync(moxtet_init);
+
+static void __exit moxtet_exit(void)
+{
+ spi_unregister_driver(&moxtet_spi_driver);
+ bus_unregister(&moxtet_bus_type);
+}
+module_exit(moxtet_exit);
+
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1b76d9585902..be79d6c6a4e4 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -651,10 +651,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return PTR_ERR(rsb->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
rsb->clk = devm_clk_get(dev, NULL);
if (IS_ERR(rsb->clk)) {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..2db474ab4c6b 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
*best_mode = SYSC_IDLE_SMART_WKUP;
else if (idlemodes & BIT(SYSC_IDLE_SMART))
*best_mode = SYSC_IDLE_SMART;
- else if (idlemodes & SYSC_IDLE_FORCE)
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
*best_mode = SYSC_IDLE_FORCE;
else
return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
if (error)
return 0;
- if (val)
- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
- else
- ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
return 0;
}
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
error = sysc_init_dts_quirks(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_map_and_check_registers(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_sysc_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_idlemodes(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_syss_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_pdata(ddata);
if (error)
- goto unprepare;
+ return error;
sysc_init_early_quirks(ddata);
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
error = sysc_init_resets(ddata);
if (error)
- return error;
+ goto unprepare;
error = sysc_init_module(ddata);
if (error)
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index e845c1a93f21..f70dedace20b 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -176,7 +176,6 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_system_bus_priv *priv;
- struct resource *regs;
const __be32 *ranges;
u32 cells, addr, size;
u64 paddr;
@@ -186,8 +185,7 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, regs);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e866885a405..df0fc997dc3e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -26,28 +26,6 @@ config DEVKMEM
kind of kernel debugging operations.
When in doubt, say "N".
-config SGI_SNSC
- bool "SGI Altix system controller communication support"
- depends on (IA64_SGI_SN2 || IA64_GENERIC)
- help
- If you have an SGI Altix and you want to enable system
- controller communication from user space (you want this!),
- say Y. Otherwise, say N.
-
-config SGI_TIOCX
- bool "SGI TIO CX driver support"
- depends on (IA64_SGI_SN2 || IA64_GENERIC)
- help
- If you have an SGI Altix and you have fpga devices attached
- to your TIO, say Y here, otherwise say N.
-
-config SGI_MBCS
- tristate "SGI FPGA Core Services driver support"
- depends on SGI_TIOCX
- help
- If you have an SGI Altix with an attached SABrick
- say Y or M here, otherwise say N.
-
source "drivers/tty/serial/Kconfig"
source "drivers/tty/serdev/Kconfig"
@@ -573,3 +551,12 @@ config RANDOM_TRUST_CPU
has not installed a hidden back door to compromise the CPU's
random number generation facilities. This can also be configured
at boot with "random.trust_cpu=on/off".
+
+config RANDOM_TRUST_BOOTLOADER
+ bool "Trust the bootloader to initialize Linux's CRNG"
+ help
+ Some bootloaders can provide entropy to increase the kernel's initial
+ device randomness. Say Y here to assume the entropy provided by the
+ bootloader is trustworthy so it will be added to the kernel's entropy
+ pool. Otherwise, say N here so it will be regarded as device input that
+ only mixes the entropy pool.
\ No newline at end of file
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fbea7dd12932..7c5ea6f9df14 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,11 +9,9 @@ obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
-obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
-obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_PRINTER) += lp.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 6231714ef3c8..812d6aa6e013 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -111,14 +111,14 @@ config AGP_VIA
config AGP_I460
tristate "Intel 460GX chipset support"
- depends on AGP && (IA64_DIG || IA64_GENERIC)
+ depends on AGP && IA64
help
This option gives you AGP GART support for the Intel 460GX chipset
for IA64 processors.
config AGP_HP_ZX1
tristate "HP ZX1 chipset AGP support"
- depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+ depends on AGP && IA64
help
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
@@ -150,13 +150,6 @@ config AGP_EFFICEON
This option gives you AGP support for the Transmeta Efficeon
series processors with integrated northbridges.
-config AGP_SGI_TIOCA
- tristate "SGI TIO chipset AGP support"
- depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
- help
- This option gives you AGP GART support for the SGI TIO chipset
- for IA64 processors.
-
config INTEL_GTT
tristate
depends on X86 && PCI
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 4a786ffd9dee..cb2497d157f6 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
-obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
deleted file mode 100644
index e7d5bdc02d93..000000000000
--- a/drivers/char/agp/sgi-agp.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/*
- * SGI TIOCA AGPGART routines.
- *
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/agp_backend.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/io.h>
-#include <asm/sn/pcidev.h>
-#include <asm/sn/pcibus_provider_defs.h>
-#include <asm/sn/tioca_provider.h>
-#include "agp.h"
-
-extern int agp_memory_reserved;
-extern uint32_t tioca_gart_found;
-extern struct list_head tioca_list;
-static struct agp_bridge_data **sgi_tioca_agp_bridges;
-
-/*
- * The aperature size and related information is set up at TIOCA init time.
- * Values for this table will be extracted and filled in at
- * sgi_tioca_fetch_size() time.
- */
-
-static struct aper_size_info_fixed sgi_tioca_sizes[] = {
- {0, 0, 0},
-};
-
-static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
-{
- struct page *page;
- int nid;
- struct tioca_kernel *info =
- (struct tioca_kernel *)bridge->dev_private_data;
-
- nid = info->ca_closest_node;
- page = alloc_pages_node(nid, GFP_KERNEL, 0);
- if (!page)
- return NULL;
-
- get_page(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page;
-}
-
-/*
- * Flush GART tlb's. Cannot selectively flush based on memory so the mem
- * arg is ignored.
- */
-
-static void sgi_tioca_tlbflush(struct agp_memory *mem)
-{
- tioca_tlbflush(mem->bridge->dev_private_data);
-}
-
-/*
- * Given an address of a host physical page, turn it into a valid gart
- * entry.
- */
-static unsigned long
-sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
- int type)
-{
- return tioca_physpage_to_gart(addr);
-}
-
-static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
- tioca_fastwrite_enable(bridge->dev_private_data);
-}
-
-/*
- * sgi_tioca_configure() doesn't have anything to do since the base CA driver
- * has alreay set up the GART.
- */
-
-static int sgi_tioca_configure(void)
-{
- return 0;
-}
-
-/*
- * Determine gfx aperature size. This has already been determined by the
- * CA driver init, so just need to set agp_bridge values accordingly.
- */
-
-static int sgi_tioca_fetch_size(void)
-{
- struct tioca_kernel *info =
- (struct tioca_kernel *)agp_bridge->dev_private_data;
-
- sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
- sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
-
- return sgi_tioca_sizes[0].size;
-}
-
-static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
-{
- struct tioca_kernel *info =
- (struct tioca_kernel *)bridge->dev_private_data;
-
- bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
- bridge->gatt_table = bridge->gatt_table_real;
- bridge->gatt_bus_addr = info->ca_gfxgart_base;
-
- return 0;
-}
-
-static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
-{
- return 0;
-}
-
-static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int num_entries;
- size_t i;
- off_t j;
- void *temp;
- struct agp_bridge_data *bridge;
- u64 *table;
-
- bridge = mem->bridge;
- if (!bridge)
- return -EINVAL;
-
- table = (u64 *)bridge->gatt_table;
-
- temp = bridge->current_size;
-
- switch (bridge->driver->size_type) {
- case U8_APER_SIZE:
- num_entries = A_SIZE_8(temp)->num_entries;
- break;
- case U16_APER_SIZE:
- num_entries = A_SIZE_16(temp)->num_entries;
- break;
- case U32_APER_SIZE:
- num_entries = A_SIZE_32(temp)->num_entries;
- break;
- case FIXED_APER_SIZE:
- num_entries = A_SIZE_FIX(temp)->num_entries;
- break;
- case LVL2_APER_SIZE:
- return -EINVAL;
- default:
- num_entries = 0;
- break;
- }
-
- num_entries -= agp_memory_reserved / PAGE_SIZE;
- if (num_entries < 0)
- num_entries = 0;
-
- if (type != 0 || mem->type != 0) {
- return -EINVAL;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- return -EINVAL;
-
- j = pg_start;
-
- while (j < (pg_start + mem->page_count)) {
- if (table[j])
- return -EBUSY;
- j++;
- }
-
- if (!mem->is_flushed) {
- bridge->driver->cache_flush();
- mem->is_flushed = true;
- }
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- table[j] =
- bridge->driver->mask_memory(bridge,
- page_to_phys(mem->pages[i]),
- mem->type);
- }
-
- bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- size_t i;
- struct agp_bridge_data *bridge;
- u64 *table;
-
- bridge = mem->bridge;
- if (!bridge)
- return -EINVAL;
-
- if (type != 0 || mem->type != 0) {
- return -EINVAL;
- }
-
- table = (u64 *)bridge->gatt_table;
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- table[i] = 0;
- }
-
- bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static void sgi_tioca_cache_flush(void)
-{
-}
-
-/*
- * Cleanup. Nothing to do as the CA driver owns the GART.
- */
-
-static void sgi_tioca_cleanup(void)
-{
-}
-
-static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
-{
- struct agp_bridge_data *bridge;
-
- list_for_each_entry(bridge, &agp_bridges, list) {
- if (bridge->dev->bus == pdev->bus)
- break;
- }
- return bridge;
-}
-
-const struct agp_bridge_driver sgi_tioca_driver = {
- .owner = THIS_MODULE,
- .size_type = U16_APER_SIZE,
- .configure = sgi_tioca_configure,
- .fetch_size = sgi_tioca_fetch_size,
- .cleanup = sgi_tioca_cleanup,
- .tlb_flush = sgi_tioca_tlbflush,
- .mask_memory = sgi_tioca_mask_memory,
- .agp_enable = sgi_tioca_agp_enable,
- .cache_flush = sgi_tioca_cache_flush,
- .create_gatt_table = sgi_tioca_create_gatt_table,
- .free_gatt_table = sgi_tioca_free_gatt_table,
- .insert_memory = sgi_tioca_insert_memory,
- .remove_memory = sgi_tioca_remove_memory,
- .alloc_by_type = agp_generic_alloc_by_type,
- .free_by_type = agp_generic_free_by_type,
- .agp_alloc_page = sgi_tioca_alloc_page,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .cant_use_aperture = true,
- .needs_scratch_page = false,
- .num_aperture_sizes = 1,
-};
-
-static int agp_sgi_init(void)
-{
- unsigned int j;
- struct tioca_kernel *info;
- struct pci_dev *pdev = NULL;
-
- if (tioca_gart_found)
- printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
- else
- return 0;
-
- sgi_tioca_agp_bridges = kmalloc_array(tioca_gart_found,
- sizeof(struct agp_bridge_data *),
- GFP_KERNEL);
- if (!sgi_tioca_agp_bridges)
- return -ENOMEM;
-
- j = 0;
- list_for_each_entry(info, &tioca_list, ca_list) {
- if (list_empty(info->ca_devices))
- continue;
- list_for_each_entry(pdev, info->ca_devices, bus_list) {
- u8 cap_ptr;
-
- if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
- continue;
- cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
- if (!cap_ptr)
- continue;
- }
- sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
- printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
- sgi_tioca_agp_bridges[j]);
- if (sgi_tioca_agp_bridges[j]) {
- sgi_tioca_agp_bridges[j]->dev = pdev;
- sgi_tioca_agp_bridges[j]->dev_private_data = info;
- sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
- sgi_tioca_agp_bridges[j]->gart_bus_addr =
- info->ca_gfxap_base;
- sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) | /* 126 requests */
- (0x1 << 9) | /* SBA supported */
- (0x1 << 5) | /* 64-bit addresses supported */
- (0x1 << 4) | /* FW supported */
- (0x1 << 3) | /* AGP 3.0 mode */
- 0x2; /* 8x transfer only */
- sgi_tioca_agp_bridges[j]->current_size =
- sgi_tioca_agp_bridges[j]->previous_size =
- (void *)&sgi_tioca_sizes[0];
- agp_add_bridge(sgi_tioca_agp_bridges[j]);
- }
- j++;
- }
-
- agp_find_bridge = &sgi_tioca_find_bridge;
- return 0;
-}
-
-static void agp_sgi_cleanup(void)
-{
- kfree(sgi_tioca_agp_bridges);
- sgi_tioca_agp_bridges = NULL;
-}
-
-module_init(agp_sgi_init);
-module_exit(agp_sgi_cleanup);
-
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5c39f20378b8..9ac6671bb514 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
unsigned long long m;
m = hpets->hp_tick_freq + (dis >> 1);
- do_div(m, dis);
- return (unsigned long)m;
+ return div64_ul(m, dis);
}
static int
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
struct ipmb_request_elem *queue_elem;
struct ipmb_msg msg;
- ssize_t ret;
+ ssize_t ret = 0;
memset(&msg, 0, sizeof(msg));
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
deleted file mode 100644
index 0a31b60bee7b..000000000000
--- a/drivers/char/mbcs.c
+++ /dev/null
@@ -1,831 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * MOATB Core Services driver.
- */
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mm.h>
-#include <linux/uio.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/tiocx.h>
-#include "mbcs.h"
-
-#define MBCS_DEBUG 0
-#if MBCS_DEBUG
-#define DBG(fmt...) printk(KERN_ALERT fmt)
-#else
-#define DBG(fmt...)
-#endif
-static DEFINE_MUTEX(mbcs_mutex);
-static int mbcs_major;
-
-static LIST_HEAD(soft_list);
-
-/*
- * file operations
- */
-static const struct file_operations mbcs_ops = {
- .owner = THIS_MODULE,
- .open = mbcs_open,
- .llseek = mbcs_sram_llseek,
- .read = mbcs_sram_read,
- .write = mbcs_sram_write,
- .mmap = mbcs_gscr_mmap,
-};
-
-struct mbcs_callback_arg {
- int minor;
- struct cx_dev *cx_dev;
-};
-
-static inline void mbcs_getdma_init(struct getdma *gdma)
-{
- memset(gdma, 0, sizeof(struct getdma));
- gdma->DoneIntEnable = 1;
-}
-
-static inline void mbcs_putdma_init(struct putdma *pdma)
-{
- memset(pdma, 0, sizeof(struct putdma));
- pdma->DoneIntEnable = 1;
-}
-
-static inline void mbcs_algo_init(struct algoblock *algo_soft)
-{
- memset(algo_soft, 0, sizeof(struct algoblock));
-}
-
-static inline void mbcs_getdma_set(void *mmr,
- uint64_t hostAddr,
- uint64_t localAddr,
- uint64_t localRamSel,
- uint64_t numPkts,
- uint64_t amoEnable,
- uint64_t intrEnable,
- uint64_t peerIO,
- uint64_t amoHostDest,
- uint64_t amoModType, uint64_t intrHostDest,
- uint64_t intrVector)
-{
- union dma_control rdma_control;
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union dma_localaddr local_addr;
- union dma_hostaddr host_addr;
-
- rdma_control.dma_control_reg = 0;
- amo_dest.dma_amo_dest_reg = 0;
- intr_dest.intr_dest_reg = 0;
- local_addr.dma_localaddr_reg = 0;
- host_addr.dma_hostaddr_reg = 0;
-
- host_addr.dma_sys_addr = hostAddr;
- MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
-
- local_addr.dma_ram_addr = localAddr;
- local_addr.dma_ram_sel = localRamSel;
- MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
-
- rdma_control.dma_op_length = numPkts;
- rdma_control.done_amo_en = amoEnable;
- rdma_control.done_int_en = intrEnable;
- rdma_control.pio_mem_n = peerIO;
- MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
-
- amo_dest.dma_amo_sys_addr = amoHostDest;
- amo_dest.dma_amo_mod_type = amoModType;
- MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
-
- intr_dest.address = intrHostDest;
- intr_dest.int_vector = intrVector;
- MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
-
-}
-
-static inline void mbcs_putdma_set(void *mmr,
- uint64_t hostAddr,
- uint64_t localAddr,
- uint64_t localRamSel,
- uint64_t numPkts,
- uint64_t amoEnable,
- uint64_t intrEnable,
- uint64_t peerIO,
- uint64_t amoHostDest,
- uint64_t amoModType,
- uint64_t intrHostDest, uint64_t intrVector)
-{
- union dma_control wdma_control;
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union dma_localaddr local_addr;
- union dma_hostaddr host_addr;
-
- wdma_control.dma_control_reg = 0;
- amo_dest.dma_amo_dest_reg = 0;
- intr_dest.intr_dest_reg = 0;
- local_addr.dma_localaddr_reg = 0;
- host_addr.dma_hostaddr_reg = 0;
-
- host_addr.dma_sys_addr = hostAddr;
- MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
-
- local_addr.dma_ram_addr = localAddr;
- local_addr.dma_ram_sel = localRamSel;
- MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
-
- wdma_control.dma_op_length = numPkts;
- wdma_control.done_amo_en = amoEnable;
- wdma_control.done_int_en = intrEnable;
- wdma_control.pio_mem_n = peerIO;
- MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
-
- amo_dest.dma_amo_sys_addr = amoHostDest;
- amo_dest.dma_amo_mod_type = amoModType;
- MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
-
- intr_dest.address = intrHostDest;
- intr_dest.int_vector = intrVector;
- MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
-
-}
-
-static inline void mbcs_algo_set(void *mmr,
- uint64_t amoHostDest,
- uint64_t amoModType,
- uint64_t intrHostDest,
- uint64_t intrVector, uint64_t algoStepCount)
-{
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union algo_step step;
-
- step.algo_step_reg = 0;
- intr_dest.intr_dest_reg = 0;
- amo_dest.dma_amo_dest_reg = 0;
-
- amo_dest.dma_amo_sys_addr = amoHostDest;
- amo_dest.dma_amo_mod_type = amoModType;
- MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
-
- intr_dest.address = intrHostDest;
- intr_dest.int_vector = intrVector;
- MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
-
- step.alg_step_cnt = algoStepCount;
- MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
-}
-
-static inline int mbcs_getdma_start(struct mbcs_soft *soft)
-{
- void *mmr_base;
- struct getdma *gdma;
- uint64_t numPkts;
- union cm_control cm_control;
-
- mmr_base = soft->mmr_base;
- gdma = &soft->getdma;
-
- /* check that host address got setup */
- if (!gdma->hostAddr)
- return -1;
-
- numPkts =
- (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
-
- /* program engine */
- mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
- gdma->localAddr,
- (gdma->localAddr < MB2) ? 0 :
- (gdma->localAddr < MB4) ? 1 :
- (gdma->localAddr < MB6) ? 2 : 3,
- numPkts,
- gdma->DoneAmoEnable,
- gdma->DoneIntEnable,
- gdma->peerIO,
- gdma->amoHostDest,
- gdma->amoModType,
- gdma->intrHostDest, gdma->intrVector);
-
- /* start engine */
- cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.rd_dma_go = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
-
- return 0;
-
-}
-
-static inline int mbcs_putdma_start(struct mbcs_soft *soft)
-{
- void *mmr_base;
- struct putdma *pdma;
- uint64_t numPkts;
- union cm_control cm_control;
-
- mmr_base = soft->mmr_base;
- pdma = &soft->putdma;
-
- /* check that host address got setup */
- if (!pdma->hostAddr)
- return -1;
-
- numPkts =
- (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
-
- /* program engine */
- mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
- pdma->localAddr,
- (pdma->localAddr < MB2) ? 0 :
- (pdma->localAddr < MB4) ? 1 :
- (pdma->localAddr < MB6) ? 2 : 3,
- numPkts,
- pdma->DoneAmoEnable,
- pdma->DoneIntEnable,
- pdma->peerIO,
- pdma->amoHostDest,
- pdma->amoModType,
- pdma->intrHostDest, pdma->intrVector);
-
- /* start engine */
- cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.wr_dma_go = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
-
- return 0;
-
-}
-
-static inline int mbcs_algo_start(struct mbcs_soft *soft)
-{
- struct algoblock *algo_soft = &soft->algo;
- void *mmr_base = soft->mmr_base;
- union cm_control cm_control;
-
- if (mutex_lock_interruptible(&soft->algolock))
- return -ERESTARTSYS;
-
- atomic_set(&soft->algo_done, 0);
-
- mbcs_algo_set(mmr_base,
- algo_soft->amoHostDest,
- algo_soft->amoModType,
- algo_soft->intrHostDest,
- algo_soft->intrVector, algo_soft->algoStepCount);
-
- /* start algorithm */
- cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.alg_done_int_en = 1;
- cm_control.alg_go = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
-
- mutex_unlock(&soft->algolock);
-
- return 0;
-}
-
-static inline ssize_t
-do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
- size_t len, loff_t * off)
-{
- int rv = 0;
-
- if (mutex_lock_interruptible(&soft->dmawritelock))
- return -ERESTARTSYS;
-
- atomic_set(&soft->dmawrite_done, 0);
-
- soft->putdma.hostAddr = hostAddr;
- soft->putdma.localAddr = *off;
- soft->putdma.bytes = len;
-
- if (mbcs_putdma_start(soft) < 0) {
- DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
- "mbcs_putdma_start failed\n");
- rv = -EAGAIN;
- goto dmawrite_exit;
- }
-
- if (wait_event_interruptible(soft->dmawrite_queue,
- atomic_read(&soft->dmawrite_done))) {
- rv = -ERESTARTSYS;
- goto dmawrite_exit;
- }
-
- rv = len;
- *off += len;
-
-dmawrite_exit:
- mutex_unlock(&soft->dmawritelock);
-
- return rv;
-}
-
-static inline ssize_t
-do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
- size_t len, loff_t * off)
-{
- int rv = 0;
-
- if (mutex_lock_interruptible(&soft->dmareadlock))
- return -ERESTARTSYS;
-
- atomic_set(&soft->dmawrite_done, 0);
-
- soft->getdma.hostAddr = hostAddr;
- soft->getdma.localAddr = *off;
- soft->getdma.bytes = len;
-
- if (mbcs_getdma_start(soft) < 0) {
- DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
- rv = -EAGAIN;
- goto dmaread_exit;
- }
-
- if (wait_event_interruptible(soft->dmaread_queue,
- atomic_read(&soft->dmaread_done))) {
- rv = -ERESTARTSYS;
- goto dmaread_exit;
- }
-
- rv = len;
- *off += len;
-
-dmaread_exit:
- mutex_unlock(&soft->dmareadlock);
-
- return rv;
-}
-
-static int mbcs_open(struct inode *ip, struct file *fp)
-{
- struct mbcs_soft *soft;
- int minor;
-
- mutex_lock(&mbcs_mutex);
- minor = iminor(ip);
-
- /* Nothing protects access to this list... */
- list_for_each_entry(soft, &soft_list, list) {
- if (soft->nasid == minor) {
- fp->private_data = soft->cxdev;
- mutex_unlock(&mbcs_mutex);
- return 0;
- }
- }
-
- mutex_unlock(&mbcs_mutex);
- return -ENODEV;
-}
-
-static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
-{
- struct cx_dev *cx_dev = fp->private_data;
- struct mbcs_soft *soft = cx_dev->soft;
- uint64_t hostAddr;
- int rv = 0;
-
- hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
- if (hostAddr == 0)
- return -ENOMEM;
-
- rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
- if (rv < 0)
- goto exit;
-
- if (copy_to_user(buf, (void *)hostAddr, len))
- rv = -EFAULT;
-
- exit:
- free_pages(hostAddr, get_order(len));
-
- return rv;
-}
-
-static ssize_t
-mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
-{
- struct cx_dev *cx_dev = fp->private_data;
- struct mbcs_soft *soft = cx_dev->soft;
- uint64_t hostAddr;
- int rv = 0;
-
- hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
- if (hostAddr == 0)
- return -ENOMEM;
-
- if (copy_from_user((void *)hostAddr, buf, len)) {
- rv = -EFAULT;
- goto exit;
- }
-
- rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
-
- exit:
- free_pages(hostAddr, get_order(len));
-
- return rv;
-}
-
-static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
-{
- return generic_file_llseek_size(filp, off, whence, MAX_LFS_FILESIZE,
- MBCS_SRAM_SIZE);
-}
-
-static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
-{
- uint64_t mmr_base;
-
- mmr_base = (uint64_t) (soft->mmr_base + offset);
-
- return mmr_base;
-}
-
-static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
-{
- soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
-}
-
-static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
-{
- soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
-}
-
-static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct cx_dev *cx_dev = fp->private_data;
- struct mbcs_soft *soft = cx_dev->soft;
-
- if (vma->vm_pgoff != 0)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* Remap-pfn-range will mark the range VM_IO */
- if (remap_pfn_range(vma,
- vma->vm_start,
- __pa(soft->gscr_addr) >> PAGE_SHIFT,
- PAGE_SIZE,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
-/**
- * mbcs_completion_intr_handler - Primary completion handler.
- * @irq: irq
- * @arg: soft struct for device
- *
- */
-static irqreturn_t
-mbcs_completion_intr_handler(int irq, void *arg)
-{
- struct mbcs_soft *soft = (struct mbcs_soft *)arg;
- void *mmr_base;
- union cm_status cm_status;
- union cm_control cm_control;
-
- mmr_base = soft->mmr_base;
- cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
-
- if (cm_status.rd_dma_done) {
- /* stop dma-read engine, clear status */
- cm_control.cm_control_reg =
- MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.rd_dma_clr = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
- cm_control.cm_control_reg);
- atomic_set(&soft->dmaread_done, 1);
- wake_up(&soft->dmaread_queue);
- }
- if (cm_status.wr_dma_done) {
- /* stop dma-write engine, clear status */
- cm_control.cm_control_reg =
- MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.wr_dma_clr = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
- cm_control.cm_control_reg);
- atomic_set(&soft->dmawrite_done, 1);
- wake_up(&soft->dmawrite_queue);
- }
- if (cm_status.alg_done) {
- /* clear status */
- cm_control.cm_control_reg =
- MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.alg_done_clr = 1;
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
- cm_control.cm_control_reg);
- atomic_set(&soft->algo_done, 1);
- wake_up(&soft->algo_queue);
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * mbcs_intr_alloc - Allocate interrupts.
- * @dev: device pointer
- *
- */
-static int mbcs_intr_alloc(struct cx_dev *dev)
-{
- struct sn_irq_info *sn_irq;
- struct mbcs_soft *soft;
- struct getdma *getdma;
- struct putdma *putdma;
- struct algoblock *algo;
-
- soft = dev->soft;
- getdma = &soft->getdma;
- putdma = &soft->putdma;
- algo = &soft->algo;
-
- soft->get_sn_irq = NULL;
- soft->put_sn_irq = NULL;
- soft->algo_sn_irq = NULL;
-
- sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
- if (sn_irq == NULL)
- return -EAGAIN;
- soft->get_sn_irq = sn_irq;
- getdma->intrHostDest = sn_irq->irq_xtalkaddr;
- getdma->intrVector = sn_irq->irq_irq;
- if (request_irq(sn_irq->irq_irq,
- (void *)mbcs_completion_intr_handler, IRQF_SHARED,
- "MBCS get intr", (void *)soft)) {
- tiocx_irq_free(soft->get_sn_irq);
- return -EAGAIN;
- }
-
- sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
- if (sn_irq == NULL) {
- free_irq(soft->get_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->get_sn_irq);
- return -EAGAIN;
- }
- soft->put_sn_irq = sn_irq;
- putdma->intrHostDest = sn_irq->irq_xtalkaddr;
- putdma->intrVector = sn_irq->irq_irq;
- if (request_irq(sn_irq->irq_irq,
- (void *)mbcs_completion_intr_handler, IRQF_SHARED,
- "MBCS put intr", (void *)soft)) {
- tiocx_irq_free(soft->put_sn_irq);
- free_irq(soft->get_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->get_sn_irq);
- return -EAGAIN;
- }
-
- sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
- if (sn_irq == NULL) {
- free_irq(soft->put_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->put_sn_irq);
- free_irq(soft->get_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->get_sn_irq);
- return -EAGAIN;
- }
- soft->algo_sn_irq = sn_irq;
- algo->intrHostDest = sn_irq->irq_xtalkaddr;
- algo->intrVector = sn_irq->irq_irq;
- if (request_irq(sn_irq->irq_irq,
- (void *)mbcs_completion_intr_handler, IRQF_SHARED,
- "MBCS algo intr", (void *)soft)) {
- tiocx_irq_free(soft->algo_sn_irq);
- free_irq(soft->put_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->put_sn_irq);
- free_irq(soft->get_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->get_sn_irq);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-/**
- * mbcs_intr_dealloc - Remove interrupts.
- * @dev: device pointer
- *
- */
-static void mbcs_intr_dealloc(struct cx_dev *dev)
-{
- struct mbcs_soft *soft;
-
- soft = dev->soft;
-
- free_irq(soft->get_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->get_sn_irq);
- free_irq(soft->put_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->put_sn_irq);
- free_irq(soft->algo_sn_irq->irq_irq, soft);
- tiocx_irq_free(soft->algo_sn_irq);
-}
-
-static inline int mbcs_hw_init(struct mbcs_soft *soft)
-{
- void *mmr_base = soft->mmr_base;
- union cm_control cm_control;
- union cm_req_timeout cm_req_timeout;
- uint64_t err_stat;
-
- cm_req_timeout.cm_req_timeout_reg =
- MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
-
- cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
- MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
- cm_req_timeout.cm_req_timeout_reg);
-
- mbcs_gscr_pioaddr_set(soft);
- mbcs_debug_pioaddr_set(soft);
-
- /* clear errors */
- err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
- MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
- MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
-
- /* enable interrupts */
- /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
- MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
-
- /* arm status regs and clear engines */
- cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
- cm_control.rearm_stat_regs = 1;
- cm_control.alg_clr = 1;
- cm_control.wr_dma_clr = 1;
- cm_control.rd_dma_clr = 1;
-
- MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
-
- return 0;
-}
-
-static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cx_dev *cx_dev = to_cx_dev(dev);
- struct mbcs_soft *soft = cx_dev->soft;
- uint64_t debug0;
-
- /*
- * By convention, the first debug register contains the
- * algorithm number and revision.
- */
- debug0 = *(uint64_t *) soft->debug_addr;
-
- return sprintf(buf, "0x%x 0x%x\n",
- upper_32_bits(debug0), lower_32_bits(debug0));
-}
-
-static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- int n;
- struct cx_dev *cx_dev = to_cx_dev(dev);
- struct mbcs_soft *soft = cx_dev->soft;
-
- if (count <= 0)
- return 0;
-
- n = simple_strtoul(buf, NULL, 0);
-
- if (n == 1) {
- mbcs_algo_start(soft);
- if (wait_event_interruptible(soft->algo_queue,
- atomic_read(&soft->algo_done)))
- return -ERESTARTSYS;
- }
-
- return count;
-}
-
-DEVICE_ATTR(algo, 0644, show_algo, store_algo);
-
-/**
- * mbcs_probe - Initialize for device
- * @dev: device pointer
- * @device_id: id table pointer
- *
- */
-static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
-{
- struct mbcs_soft *soft;
-
- dev->soft = NULL;
-
- soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
- if (soft == NULL)
- return -ENOMEM;
-
- soft->nasid = dev->cx_id.nasid;
- list_add(&soft->list, &soft_list);
- soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
- dev->soft = soft;
- soft->cxdev = dev;
-
- init_waitqueue_head(&soft->dmawrite_queue);
- init_waitqueue_head(&soft->dmaread_queue);
- init_waitqueue_head(&soft->algo_queue);
-
- mutex_init(&soft->dmawritelock);
- mutex_init(&soft->dmareadlock);
- mutex_init(&soft->algolock);
-
- mbcs_getdma_init(&soft->getdma);
- mbcs_putdma_init(&soft->putdma);
- mbcs_algo_init(&soft->algo);
-
- mbcs_hw_init(soft);
-
- /* Allocate interrupts */
- mbcs_intr_alloc(dev);
-
- device_create_file(&dev->dev, &dev_attr_algo);
-
- return 0;
-}
-
-static int mbcs_remove(struct cx_dev *dev)
-{
- if (dev->soft) {
- mbcs_intr_dealloc(dev);
- kfree(dev->soft);
- }
-
- device_remove_file(&dev->dev, &dev_attr_algo);
-
- return 0;
-}
-
-static const struct cx_device_id mbcs_id_table[] = {
- {
- .part_num = MBCS_PART_NUM,
- .mfg_num = MBCS_MFG_NUM,
- },
- {
- .part_num = MBCS_PART_NUM_ALG0,
- .mfg_num = MBCS_MFG_NUM,
- },
- {0, 0}
-};
-
-MODULE_DEVICE_TABLE(cx, mbcs_id_table);
-
-static struct cx_drv mbcs_driver = {
- .name = DEVICE_NAME,
- .id_table = mbcs_id_table,
- .probe = mbcs_probe,
- .remove = mbcs_remove,
-};
-
-static void __exit mbcs_exit(void)
-{
- unregister_chrdev(mbcs_major, DEVICE_NAME);
- cx_driver_unregister(&mbcs_driver);
-}
-
-static int __init mbcs_init(void)
-{
- int rv;
-
- if (!ia64_platform_is("sn2"))
- return -ENODEV;
-
- // Put driver into chrdevs[]. Get major number.
- rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
- if (rv < 0) {
- DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
- return rv;
- }
- mbcs_major = rv;
-
- return cx_driver_register(&mbcs_driver);
-}
-
-module_init(mbcs_init);
-module_exit(mbcs_exit);
-
-MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
-MODULE_DESCRIPTION("Driver for MOATB Core Services");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
deleted file mode 100644
index 1a36884c48b5..000000000000
--- a/drivers/char/mbcs.h
+++ /dev/null
@@ -1,553 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef __MBCS_H__
-#define __MBCS_H__
-
-/*
- * General macros
- */
-#define MB (1024*1024)
-#define MB2 (2*MB)
-#define MB4 (4*MB)
-#define MB6 (6*MB)
-
-/*
- * Offsets and masks
- */
-#define MBCS_CM_ID 0x0000 /* Identification */
-#define MBCS_CM_STATUS 0x0008 /* Status */
-#define MBCS_CM_ERROR_DETAIL1 0x0010 /* Error Detail1 */
-#define MBCS_CM_ERROR_DETAIL2 0x0018 /* Error Detail2 */
-#define MBCS_CM_CONTROL 0x0020 /* Control */
-#define MBCS_CM_REQ_TOUT 0x0028 /* Request Time-out */
-#define MBCS_CM_ERR_INT_DEST 0x0038 /* Error Interrupt Destination */
-#define MBCS_CM_TARG_FL 0x0050 /* Target Flush */
-#define MBCS_CM_ERR_STAT 0x0060 /* Error Status */
-#define MBCS_CM_CLR_ERR_STAT 0x0068 /* Clear Error Status */
-#define MBCS_CM_ERR_INT_EN 0x0070 /* Error Interrupt Enable */
-#define MBCS_RD_DMA_SYS_ADDR 0x0100 /* Read DMA System Address */
-#define MBCS_RD_DMA_LOC_ADDR 0x0108 /* Read DMA Local Address */
-#define MBCS_RD_DMA_CTRL 0x0110 /* Read DMA Control */
-#define MBCS_RD_DMA_AMO_DEST 0x0118 /* Read DMA AMO Destination */
-#define MBCS_RD_DMA_INT_DEST 0x0120 /* Read DMA Interrupt Destination */
-#define MBCS_RD_DMA_AUX_STAT 0x0130 /* Read DMA Auxiliary Status */
-#define MBCS_WR_DMA_SYS_ADDR 0x0200 /* Write DMA System Address */
-#define MBCS_WR_DMA_LOC_ADDR 0x0208 /* Write DMA Local Address */
-#define MBCS_WR_DMA_CTRL 0x0210 /* Write DMA Control */
-#define MBCS_WR_DMA_AMO_DEST 0x0218 /* Write DMA AMO Destination */
-#define MBCS_WR_DMA_INT_DEST 0x0220 /* Write DMA Interrupt Destination */
-#define MBCS_WR_DMA_AUX_STAT 0x0230 /* Write DMA Auxiliary Status */
-#define MBCS_ALG_AMO_DEST 0x0300 /* Algorithm AMO Destination */
-#define MBCS_ALG_INT_DEST 0x0308 /* Algorithm Interrupt Destination */
-#define MBCS_ALG_OFFSETS 0x0310
-#define MBCS_ALG_STEP 0x0318 /* Algorithm Step */
-
-#define MBCS_GSCR_START 0x0000000
-#define MBCS_DEBUG_START 0x0100000
-#define MBCS_RAM0_START 0x0200000
-#define MBCS_RAM1_START 0x0400000
-#define MBCS_RAM2_START 0x0600000
-
-#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL
-//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL
-
-#define MBCS_SRAM_SIZE (1024*1024)
-#define MBCS_CACHELINE_SIZE 128
-
-/*
- * MMR get's and put's
- */
-#define MBCS_MMR_ADDR(mmr_base, offset)((uint64_t *)(mmr_base + offset))
-#define MBCS_MMR_SET(mmr_base, offset, value) { \
- uint64_t *mbcs_mmr_set_u64p, readback; \
- mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset); \
- *mbcs_mmr_set_u64p = value; \
- readback = *mbcs_mmr_set_u64p; \
-}
-#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset)
-#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0)
-
-/*
- * MBCS mmr structures
- */
-union cm_id {
- uint64_t cm_id_reg;
- struct {
- uint64_t always_one:1, // 0
- mfg_id:11, // 11:1
- part_num:16, // 27:12
- bitstream_rev:8, // 35:28
- :28; // 63:36
- };
-};
-
-union cm_status {
- uint64_t cm_status_reg;
- struct {
- uint64_t pending_reads:8, // 7:0
- pending_writes:8, // 15:8
- ice_rsp_credits:8, // 23:16
- ice_req_credits:8, // 31:24
- cm_req_credits:8, // 39:32
- :1, // 40
- rd_dma_in_progress:1, // 41
- rd_dma_done:1, // 42
- :1, // 43
- wr_dma_in_progress:1, // 44
- wr_dma_done:1, // 45
- alg_waiting:1, // 46
- alg_pipe_running:1, // 47
- alg_done:1, // 48
- :3, // 51:49
- pending_int_reqs:8, // 59:52
- :3, // 62:60
- alg_half_speed_sel:1; // 63
- };
-};
-
-union cm_error_detail1 {
- uint64_t cm_error_detail1_reg;
- struct {
- uint64_t packet_type:4, // 3:0
- source_id:2, // 5:4
- data_size:2, // 7:6
- tnum:8, // 15:8
- byte_enable:8, // 23:16
- gfx_cred:8, // 31:24
- read_type:2, // 33:32
- pio_or_memory:1, // 34
- head_cw_error:1, // 35
- :12, // 47:36
- head_error_bit:1, // 48
- data_error_bit:1, // 49
- :13, // 62:50
- valid:1; // 63
- };
-};
-
-union cm_error_detail2 {
- uint64_t cm_error_detail2_reg;
- struct {
- uint64_t address:56, // 55:0
- :8; // 63:56
- };
-};
-
-union cm_control {
- uint64_t cm_control_reg;
- struct {
- uint64_t cm_id:2, // 1:0
- :2, // 3:2
- max_trans:5, // 8:4
- :3, // 11:9
- address_mode:1, // 12
- :7, // 19:13
- credit_limit:8, // 27:20
- :5, // 32:28
- rearm_stat_regs:1, // 33
- prescalar_byp:1, // 34
- force_gap_war:1, // 35
- rd_dma_go:1, // 36
- wr_dma_go:1, // 37
- alg_go:1, // 38
- rd_dma_clr:1, // 39
- wr_dma_clr:1, // 40
- alg_clr:1, // 41
- :2, // 43:42
- alg_wait_step:1, // 44
- alg_done_amo_en:1, // 45
- alg_done_int_en:1, // 46
- :1, // 47
- alg_sram0_locked:1, // 48
- alg_sram1_locked:1, // 49
- alg_sram2_locked:1, // 50
- alg_done_clr:1, // 51
- :12; // 63:52
- };
-};
-
-union cm_req_timeout {
- uint64_t cm_req_timeout_reg;
- struct {
- uint64_t time_out:24, // 23:0
- :40; // 63:24
- };
-};
-
-union intr_dest {
- uint64_t intr_dest_reg;
- struct {
- uint64_t address:56, // 55:0
- int_vector:8; // 63:56
- };
-};
-
-union cm_error_status {
- uint64_t cm_error_status_reg;
- struct {
- uint64_t ecc_sbe:1, // 0
- ecc_mbe:1, // 1
- unsupported_req:1, // 2
- unexpected_rsp:1, // 3
- bad_length:1, // 4
- bad_datavalid:1, // 5
- buffer_overflow:1, // 6
- request_timeout:1, // 7
- :8, // 15:8
- head_inv_data_size:1, // 16
- rsp_pactype_inv:1, // 17
- head_sb_err:1, // 18
- missing_head:1, // 19
- head_inv_rd_type:1, // 20
- head_cmd_err_bit:1, // 21
- req_addr_align_inv:1, // 22
- pio_req_addr_inv:1, // 23
- req_range_dsize_inv:1, // 24
- early_term:1, // 25
- early_tail:1, // 26
- missing_tail:1, // 27
- data_flit_sb_err:1, // 28
- cm2hcm_req_cred_of:1, // 29
- cm2hcm_rsp_cred_of:1, // 30
- rx_bad_didn:1, // 31
- rd_dma_err_rsp:1, // 32
- rd_dma_tnum_tout:1, // 33
- rd_dma_multi_tnum_tou:1, // 34
- wr_dma_err_rsp:1, // 35
- wr_dma_tnum_tout:1, // 36
- wr_dma_multi_tnum_tou:1, // 37
- alg_data_overflow:1, // 38
- alg_data_underflow:1, // 39
- ram0_access_conflict:1, // 40
- ram1_access_conflict:1, // 41
- ram2_access_conflict:1, // 42
- ram0_perr:1, // 43
- ram1_perr:1, // 44
- ram2_perr:1, // 45
- int_gen_rsp_err:1, // 46
- int_gen_tnum_tout:1, // 47
- rd_dma_prog_err:1, // 48
- wr_dma_prog_err:1, // 49
- :14; // 63:50
- };
-};
-
-union cm_clr_error_status {
- uint64_t cm_clr_error_status_reg;
- struct {
- uint64_t clr_ecc_sbe:1, // 0
- clr_ecc_mbe:1, // 1
- clr_unsupported_req:1, // 2
- clr_unexpected_rsp:1, // 3
- clr_bad_length:1, // 4
- clr_bad_datavalid:1, // 5
- clr_buffer_overflow:1, // 6
- clr_request_timeout:1, // 7
- :8, // 15:8
- clr_head_inv_data_siz:1, // 16
- clr_rsp_pactype_inv:1, // 17
- clr_head_sb_err:1, // 18
- clr_missing_head:1, // 19
- clr_head_inv_rd_type:1, // 20
- clr_head_cmd_err_bit:1, // 21
- clr_req_addr_align_in:1, // 22
- clr_pio_req_addr_inv:1, // 23
- clr_req_range_dsize_i:1, // 24
- clr_early_term:1, // 25
- clr_early_tail:1, // 26
- clr_missing_tail:1, // 27
- clr_data_flit_sb_err:1, // 28
- clr_cm2hcm_req_cred_o:1, // 29
- clr_cm2hcm_rsp_cred_o:1, // 30
- clr_rx_bad_didn:1, // 31
- clr_rd_dma_err_rsp:1, // 32
- clr_rd_dma_tnum_tout:1, // 33
- clr_rd_dma_multi_tnum:1, // 34
- clr_wr_dma_err_rsp:1, // 35
- clr_wr_dma_tnum_tout:1, // 36
- clr_wr_dma_multi_tnum:1, // 37
- clr_alg_data_overflow:1, // 38
- clr_alg_data_underflo:1, // 39
- clr_ram0_access_confl:1, // 40
- clr_ram1_access_confl:1, // 41
- clr_ram2_access_confl:1, // 42
- clr_ram0_perr:1, // 43
- clr_ram1_perr:1, // 44
- clr_ram2_perr:1, // 45
- clr_int_gen_rsp_err:1, // 46
- clr_int_gen_tnum_tout:1, // 47
- clr_rd_dma_prog_err:1, // 48
- clr_wr_dma_prog_err:1, // 49
- :14; // 63:50
- };
-};
-
-union cm_error_intr_enable {
- uint64_t cm_error_intr_enable_reg;
- struct {
- uint64_t int_en_ecc_sbe:1, // 0
- int_en_ecc_mbe:1, // 1
- int_en_unsupported_re:1, // 2
- int_en_unexpected_rsp:1, // 3
- int_en_bad_length:1, // 4
- int_en_bad_datavalid:1, // 5
- int_en_buffer_overflo:1, // 6
- int_en_request_timeou:1, // 7
- :8, // 15:8
- int_en_head_inv_data_:1, // 16
- int_en_rsp_pactype_in:1, // 17
- int_en_head_sb_err:1, // 18
- int_en_missing_head:1, // 19
- int_en_head_inv_rd_ty:1, // 20
- int_en_head_cmd_err_b:1, // 21
- int_en_req_addr_align:1, // 22
- int_en_pio_req_addr_i:1, // 23
- int_en_req_range_dsiz:1, // 24
- int_en_early_term:1, // 25
- int_en_early_tail:1, // 26
- int_en_missing_tail:1, // 27
- int_en_data_flit_sb_e:1, // 28
- int_en_cm2hcm_req_cre:1, // 29
- int_en_cm2hcm_rsp_cre:1, // 30
- int_en_rx_bad_didn:1, // 31
- int_en_rd_dma_err_rsp:1, // 32
- int_en_rd_dma_tnum_to:1, // 33
- int_en_rd_dma_multi_t:1, // 34
- int_en_wr_dma_err_rsp:1, // 35
- int_en_wr_dma_tnum_to:1, // 36
- int_en_wr_dma_multi_t:1, // 37
- int_en_alg_data_overf:1, // 38
- int_en_alg_data_under:1, // 39
- int_en_ram0_access_co:1, // 40
- int_en_ram1_access_co:1, // 41
- int_en_ram2_access_co:1, // 42
- int_en_ram0_perr:1, // 43
- int_en_ram1_perr:1, // 44
- int_en_ram2_perr:1, // 45
- int_en_int_gen_rsp_er:1, // 46
- int_en_int_gen_tnum_t:1, // 47
- int_en_rd_dma_prog_er:1, // 48
- int_en_wr_dma_prog_er:1, // 49
- :14; // 63:50
- };
-};
-
-struct cm_mmr {
- union cm_id id;
- union cm_status status;
- union cm_error_detail1 err_detail1;
- union cm_error_detail2 err_detail2;
- union cm_control control;
- union cm_req_timeout req_timeout;
- uint64_t reserved1[1];
- union intr_dest int_dest;
- uint64_t reserved2[2];
- uint64_t targ_flush;
- uint64_t reserved3[1];
- union cm_error_status err_status;
- union cm_clr_error_status clr_err_status;
- union cm_error_intr_enable int_enable;
-};
-
-union dma_hostaddr {
- uint64_t dma_hostaddr_reg;
- struct {
- uint64_t dma_sys_addr:56, // 55:0
- :8; // 63:56
- };
-};
-
-union dma_localaddr {
- uint64_t dma_localaddr_reg;
- struct {
- uint64_t dma_ram_addr:21, // 20:0
- dma_ram_sel:2, // 22:21
- :41; // 63:23
- };
-};
-
-union dma_control {
- uint64_t dma_control_reg;
- struct {
- uint64_t dma_op_length:16, // 15:0
- :18, // 33:16
- done_amo_en:1, // 34
- done_int_en:1, // 35
- :1, // 36
- pio_mem_n:1, // 37
- :26; // 63:38
- };
-};
-
-union dma_amo_dest {
- uint64_t dma_amo_dest_reg;
- struct {
- uint64_t dma_amo_sys_addr:56, // 55:0
- dma_amo_mod_type:3, // 58:56
- :5; // 63:59
- };
-};
-
-union rdma_aux_status {
- uint64_t rdma_aux_status_reg;
- struct {
- uint64_t op_num_pacs_left:17, // 16:0
- :5, // 21:17
- lrsp_buff_empty:1, // 22
- :17, // 39:23
- pending_reqs_left:6, // 45:40
- :18; // 63:46
- };
-};
-
-struct rdma_mmr {
- union dma_hostaddr host_addr;
- union dma_localaddr local_addr;
- union dma_control control;
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union rdma_aux_status aux_status;
-};
-
-union wdma_aux_status {
- uint64_t wdma_aux_status_reg;
- struct {
- uint64_t op_num_pacs_left:17, // 16:0
- :4, // 20:17
- lreq_buff_empty:1, // 21
- :18, // 39:22
- pending_reqs_left:6, // 45:40
- :18; // 63:46
- };
-};
-
-struct wdma_mmr {
- union dma_hostaddr host_addr;
- union dma_localaddr local_addr;
- union dma_control control;
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union wdma_aux_status aux_status;
-};
-
-union algo_step {
- uint64_t algo_step_reg;
- struct {
- uint64_t alg_step_cnt:16, // 15:0
- :48; // 63:16
- };
-};
-
-struct algo_mmr {
- union dma_amo_dest amo_dest;
- union intr_dest intr_dest;
- union {
- uint64_t algo_offset_reg;
- struct {
- uint64_t sram0_offset:7, // 6:0
- reserved0:1, // 7
- sram1_offset:7, // 14:8
- reserved1:1, // 15
- sram2_offset:7, // 22:16
- reserved2:14; // 63:23
- };
- } sram_offset;
- union algo_step step;
-};
-
-struct mbcs_mmr {
- struct cm_mmr cm;
- uint64_t reserved1[17];
- struct rdma_mmr rdDma;
- uint64_t reserved2[25];
- struct wdma_mmr wrDma;
- uint64_t reserved3[25];
- struct algo_mmr algo;
- uint64_t reserved4[156];
-};
-
-/*
- * defines
- */
-#define DEVICE_NAME "mbcs"
-#define MBCS_PART_NUM 0xfff0
-#define MBCS_PART_NUM_ALG0 0xf001
-#define MBCS_MFG_NUM 0x1
-
-struct algoblock {
- uint64_t amoHostDest;
- uint64_t amoModType;
- uint64_t intrHostDest;
- uint64_t intrVector;
- uint64_t algoStepCount;
-};
-
-struct getdma {
- uint64_t hostAddr;
- uint64_t localAddr;
- uint64_t bytes;
- uint64_t DoneAmoEnable;
- uint64_t DoneIntEnable;
- uint64_t peerIO;
- uint64_t amoHostDest;
- uint64_t amoModType;
- uint64_t intrHostDest;
- uint64_t intrVector;
-};
-
-struct putdma {
- uint64_t hostAddr;
- uint64_t localAddr;
- uint64_t bytes;
- uint64_t DoneAmoEnable;
- uint64_t DoneIntEnable;
- uint64_t peerIO;
- uint64_t amoHostDest;
- uint64_t amoModType;
- uint64_t intrHostDest;
- uint64_t intrVector;
-};
-
-struct mbcs_soft {
- struct list_head list;
- struct cx_dev *cxdev;
- int major;
- int nasid;
- void *mmr_base;
- wait_queue_head_t dmawrite_queue;
- wait_queue_head_t dmaread_queue;
- wait_queue_head_t algo_queue;
- struct sn_irq_info *get_sn_irq;
- struct sn_irq_info *put_sn_irq;
- struct sn_irq_info *algo_sn_irq;
- struct getdma getdma;
- struct putdma putdma;
- struct algoblock algo;
- uint64_t gscr_addr; // pio addr
- uint64_t ram0_addr; // pio addr
- uint64_t ram1_addr; // pio addr
- uint64_t ram2_addr; // pio addr
- uint64_t debug_addr; // pio addr
- atomic_t dmawrite_done;
- atomic_t dmaread_done;
- atomic_t algo_done;
- struct mutex dmawritelock;
- struct mutex dmareadlock;
- struct mutex algolock;
-};
-
-static int mbcs_open(struct inode *ip, struct file *fp);
-static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
- loff_t * off);
-static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
- loff_t * off);
-static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
-static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
-
-#endif // __MBCS_H__
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index b08dc50f9f26..9eb564c002f6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
+static inline bool should_stop_iteration(void)
+{
+ if (need_resched())
+ cond_resched();
+ return fatal_signal_pending(current);
+}
+
/*
* This funcion reads the *physical* memory. The f_pos points directly to the
* memory location.
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
p += sz;
count -= sz;
read += sz;
+ if (should_stop_iteration())
+ break;
}
kfree(bounce);
@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
read += sz;
low_count -= sz;
count -= sz;
+ if (should_stop_iteration()) {
+ count = 0;
+ break;
+ }
}
}
@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
buf += sz;
read += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
buf += sz;
virtr += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index e75c9df7c2d8..a9d9f074fbd6 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -9,11 +9,8 @@
*
* This driver exports the SN special memory (mspec) facility to user
* processes.
- * There are three types of memory made available thru this driver:
- * fetchops, uncached and cached.
- *
- * Fetchops are atomic memory operations that are implemented in the
- * memory controller on SGI SN hardware.
+ * There are two types of memory made available thru this driver:
+ * uncached and cached.
*
* Uncached are used for memory write combining feature of the ia64
* cpu.
@@ -46,16 +43,8 @@
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/mspec.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/io.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/shubio.h>
-#define FETCHOP_ID "SGI Fetchop,"
#define CACHED_ID "Cached,"
#define UNCACHED_ID "Uncached"
#define REVISION "4.0"
@@ -65,17 +54,10 @@
* Page types allocated by the device.
*/
enum mspec_page_type {
- MSPEC_FETCHOP = 1,
- MSPEC_CACHED,
+ MSPEC_CACHED = 2,
MSPEC_UNCACHED
};
-#ifdef CONFIG_SGI_SN
-static int is_sn2;
-#else
-#define is_sn2 0
-#endif
-
/*
* One of these structures is allocated when an mspec region is mmaped. The
* structure is pointed to by the vma->vm_private_data field in the vma struct.
@@ -96,39 +78,6 @@ struct vma_data {
unsigned long maddr[0]; /* Array of MSPEC addresses. */
};
-/* used on shub2 to clear FOP cache in the HUB */
-static unsigned long scratch_page[MAX_NUMNODES];
-#define SH2_AMO_CACHE_ENTRIES 4
-
-static inline int
-mspec_zero_block(unsigned long addr, int len)
-{
- int status;
-
- if (is_sn2) {
- if (is_shub2()) {
- int nid;
- void *p;
- int i;
-
- nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
- p = (void *)TO_AMO(scratch_page[nid]);
-
- for (i=0; i < SH2_AMO_CACHE_ENTRIES; i++) {
- FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
- p += FETCHOP_VAR_SIZE;
- }
- }
-
- status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
- BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
- } else {
- memset((char *) addr, 0, len);
- status = 0;
- }
- return status;
-}
-
/*
* mspec_open
*
@@ -173,11 +122,8 @@ mspec_close(struct vm_area_struct *vma)
*/
my_page = vdata->maddr[index];
vdata->maddr[index] = 0;
- if (!mspec_zero_block(my_page, PAGE_SIZE))
- uncached_free_page(my_page, 1);
- else
- printk(KERN_WARNING "mspec_close(): "
- "failed to zero page %ld\n", my_page);
+ memset((char *)my_page, 0, PAGE_SIZE);
+ uncached_free_page(my_page, 1);
}
kvfree(vdata);
@@ -213,11 +159,7 @@ mspec_fault(struct vm_fault *vmf)
spin_unlock(&vdata->lock);
}
- if (vdata->type == MSPEC_FETCHOP)
- paddr = TO_AMO(maddr);
- else
- paddr = maddr & ~__IA64_UNCACHED_OFFSET;
-
+ paddr = maddr & ~__IA64_UNCACHED_OFFSET;
pfn = paddr >> PAGE_SHIFT;
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
@@ -269,7 +211,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
vma->vm_private_data = vdata;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
+ if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
@@ -277,12 +219,6 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
}
static int
-fetchop_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return mspec_mmap(file, vma, MSPEC_FETCHOP);
-}
-
-static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
return mspec_mmap(file, vma, MSPEC_CACHED);
@@ -294,18 +230,6 @@ uncached_mmap(struct file *file, struct vm_area_struct *vma)
return mspec_mmap(file, vma, MSPEC_UNCACHED);
}
-static const struct file_operations fetchop_fops = {
- .owner = THIS_MODULE,
- .mmap = fetchop_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice fetchop_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sgi_fetchop",
- .fops = &fetchop_fops
-};
-
static const struct file_operations cached_fops = {
.owner = THIS_MODULE,
.mmap = cached_mmap,
@@ -339,89 +263,32 @@ static int __init
mspec_init(void)
{
int ret;
- int nid;
-
- /*
- * The fetchop device only works on SN2 hardware, uncached and cached
- * memory drivers should both be valid on all ia64 hardware
- */
-#ifdef CONFIG_SGI_SN
- if (ia64_platform_is("sn2")) {
- is_sn2 = 1;
- if (is_shub2()) {
- ret = -ENOMEM;
- for_each_node_state(nid, N_ONLINE) {
- int actual_nid;
- int nasid;
- unsigned long phys;
-
- scratch_page[nid] = uncached_alloc_page(nid, 1);
- if (scratch_page[nid] == 0)
- goto free_scratch_pages;
- phys = __pa(scratch_page[nid]);
- nasid = get_node_number(phys);
- actual_nid = nasid_to_cnodeid(nasid);
- if (actual_nid != nid)
- goto free_scratch_pages;
- }
- }
- ret = misc_register(&fetchop_miscdev);
- if (ret) {
- printk(KERN_ERR
- "%s: failed to register device %i\n",
- FETCHOP_ID, ret);
- goto free_scratch_pages;
- }
- }
-#endif
ret = misc_register(&cached_miscdev);
if (ret) {
printk(KERN_ERR "%s: failed to register device %i\n",
CACHED_ID, ret);
- if (is_sn2)
- misc_deregister(&fetchop_miscdev);
- goto free_scratch_pages;
+ return ret;
}
ret = misc_register(&uncached_miscdev);
if (ret) {
printk(KERN_ERR "%s: failed to register device %i\n",
UNCACHED_ID, ret);
misc_deregister(&cached_miscdev);
- if (is_sn2)
- misc_deregister(&fetchop_miscdev);
- goto free_scratch_pages;
+ return ret;
}
- printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
- MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
- CACHED_ID, UNCACHED_ID);
+ printk(KERN_INFO "%s %s initialized devices: %s %s\n",
+ MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID);
return 0;
-
- free_scratch_pages:
- for_each_node(nid) {
- if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid], 1);
- }
- return ret;
}
static void __exit
mspec_exit(void)
{
- int nid;
-
misc_deregister(&uncached_miscdev);
misc_deregister(&cached_miscdev);
- if (is_sn2) {
- misc_deregister(&fetchop_miscdev);
-
- for_each_node(nid) {
- if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid], 1);
- }
- }
}
module_init(mspec_init);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f0a8adca1eee..c86f18aa8985 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -737,7 +737,7 @@ static int pp_release(struct inode *inode, struct file *file)
"negotiated back to compatibility mode because user-space forgot\n");
}
- if (pp->flags & PP_CLAIMED) {
+ if ((pp->flags & PP_CLAIMED) && pp->pdev) {
struct ieee1284_info *info;
info = &pp->pdev->port->ieee1284;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5b799aa973a3..d3beed084c0a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2447,3 +2447,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/* Handle the random seed passed in by the bootloader.
+ * If the seed is trustworthy, it is credited as entropy in the same way as
+ * input from a hardware RNG; otherwise it is only mixed in as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+ add_hwgenerator_randomness(buf, size, size * 8);
+ else
+ add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
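
A hypothetical caller sketch for the new interface: an early-boot path that has obtained a seed blob from the firmware or bootloader (the seed buffer and the example_ helper name are assumptions for illustration) hands it to add_bootloader_randomness() and lets the CONFIG_RANDOM_TRUST_BOOTLOADER policy decide whether the bytes are credited as entropy:

#include <linux/init.h>
#include <linux/random.h>

/* Hypothetical example: feed a bootloader-provided seed into the pools. */
static void __init example_consume_boot_seed(const void *seed, unsigned int len)
{
	if (!seed || !len)
		return;

	/*
	 * Credited as hardware-RNG entropy when
	 * CONFIG_RANDOM_TRUST_BOOTLOADER=y, otherwise only mixed in as
	 * device data without an entropy credit.
	 */
	add_bootloader_randomness(seed, len);
}
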
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
deleted file mode 100644
index 5918ea7499bb..000000000000
--- a/drivers/char/snsc.c
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * SN Platform system controller communication support
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * System controller communication driver
- *
- * This driver allows a user process to communicate with the system
- * controller (a.k.a. "IRouter") network in an SGI SN system.
- */
-
-#include <linux/interrupt.h>
-#include <linux/sched/signal.h>
-#include <linux/device.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/module.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/nodepda.h>
-#include "snsc.h"
-
-#define SYSCTL_BASENAME "snsc"
-
-#define SCDRV_BUFSZ 2048
-#define SCDRV_TIMEOUT 1000
-
-static DEFINE_MUTEX(scdrv_mutex);
-static irqreturn_t
-scdrv_interrupt(int irq, void *subch_data)
-{
- struct subch_data_s *sd = subch_data;
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&sd->sd_rlock, flags);
- spin_lock(&sd->sd_wlock);
- status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
-
- if (status > 0) {
- if (status & SAL_IROUTER_INTR_RECV) {
- wake_up(&sd->sd_rq);
- }
- if (status & SAL_IROUTER_INTR_XMIT) {
- ia64_sn_irtr_intr_disable
- (sd->sd_nasid, sd->sd_subch,
- SAL_IROUTER_INTR_XMIT);
- wake_up(&sd->sd_wq);
- }
- }
- spin_unlock(&sd->sd_wlock);
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
- return IRQ_HANDLED;
-}
-
-/*
- * scdrv_open
- *
- * Reserve a subchannel for system controller communication.
- */
-
-static int
-scdrv_open(struct inode *inode, struct file *file)
-{
- struct sysctl_data_s *scd;
- struct subch_data_s *sd;
- int rv;
-
- /* look up device info for this device file */
- scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
-
- /* allocate memory for subchannel data */
- sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
- if (sd == NULL) {
- printk("%s: couldn't allocate subchannel data\n",
- __func__);
- return -ENOMEM;
- }
-
- /* initialize subch_data_s fields */
- sd->sd_nasid = scd->scd_nasid;
- sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
-
- if (sd->sd_subch < 0) {
- kfree(sd);
- printk("%s: couldn't allocate subchannel\n", __func__);
- return -EBUSY;
- }
-
- spin_lock_init(&sd->sd_rlock);
- spin_lock_init(&sd->sd_wlock);
- init_waitqueue_head(&sd->sd_rq);
- init_waitqueue_head(&sd->sd_wq);
- sema_init(&sd->sd_rbs, 1);
- sema_init(&sd->sd_wbs, 1);
-
- file->private_data = sd;
-
- /* hook this subchannel up to the system controller interrupt */
- mutex_lock(&scdrv_mutex);
- rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt,
- IRQF_SHARED, SYSCTL_BASENAME, sd);
- if (rv) {
- ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
- kfree(sd);
- printk("%s: irq request failed (%d)\n", __func__, rv);
- mutex_unlock(&scdrv_mutex);
- return -EBUSY;
- }
- mutex_unlock(&scdrv_mutex);
- return 0;
-}
-
-/*
- * scdrv_release
- *
- * Release a previously-reserved subchannel.
- */
-
-static int
-scdrv_release(struct inode *inode, struct file *file)
-{
- struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
- int rv;
-
- /* free the interrupt */
- free_irq(SGI_UART_VECTOR, sd);
-
- /* ask SAL to close the subchannel */
- rv = ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
-
- kfree(sd);
- return rv;
-}
-
-/*
- * scdrv_read
- *
- * Called to read bytes from the open IRouter pipe.
- *
- */
-
-static inline int
-read_status_check(struct subch_data_s *sd, int *len)
-{
- return ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch, sd->sd_rb, len);
-}
-
-static ssize_t
-scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
-{
- int status;
- int len;
- unsigned long flags;
- struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
-
- /* try to get control of the read buffer */
- if (down_trylock(&sd->sd_rbs)) {
- /* somebody else has it now;
- * if we're non-blocking, then exit...
- */
- if (file->f_flags & O_NONBLOCK) {
- return -EAGAIN;
- }
- /* ...or if we want to block, then do so here */
- if (down_interruptible(&sd->sd_rbs)) {
- /* something went wrong with wait */
- return -ERESTARTSYS;
- }
- }
-
- /* anything to read? */
- len = CHUNKSIZE;
- spin_lock_irqsave(&sd->sd_rlock, flags);
- status = read_status_check(sd, &len);
-
- /* if not, and we're blocking I/O, loop */
- while (status < 0) {
- DECLARE_WAITQUEUE(wait, current);
-
- if (file->f_flags & O_NONBLOCK) {
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
- up(&sd->sd_rbs);
- return -EAGAIN;
- }
-
- len = CHUNKSIZE;
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&sd->sd_rq, &wait);
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_rq, &wait);
- if (signal_pending(current)) {
- /* wait was interrupted */
- up(&sd->sd_rbs);
- return -ERESTARTSYS;
- }
-
- spin_lock_irqsave(&sd->sd_rlock, flags);
- status = read_status_check(sd, &len);
- }
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-
- if (len > 0) {
- /* we read something in the last read_status_check(); copy
- * it out to user space
- */
- if (count < len) {
- pr_debug("%s: only accepting %d of %d bytes\n",
- __func__, (int) count, len);
- }
- len = min((int) count, len);
- if (copy_to_user(buf, sd->sd_rb, len))
- len = -EFAULT;
- }
-
- /* release the read buffer and wake anyone who might be
- * waiting for it
- */
- up(&sd->sd_rbs);
-
- /* return the number of characters read in */
- return len;
-}
-
-/*
- * scdrv_write
- *
- * Writes a chunk of an IRouter packet (or other system controller data)
- * to the system controller.
- *
- */
-static inline int
-write_status_check(struct subch_data_s *sd, int count)
-{
- return ia64_sn_irtr_send(sd->sd_nasid, sd->sd_subch, sd->sd_wb, count);
-}
-
-static ssize_t
-scdrv_write(struct file *file, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- unsigned long flags;
- int status;
- struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
-
- /* try to get control of the write buffer */
- if (down_trylock(&sd->sd_wbs)) {
- /* somebody else has it now;
- * if we're non-blocking, then exit...
- */
- if (file->f_flags & O_NONBLOCK) {
- return -EAGAIN;
- }
- /* ...or if we want to block, then do so here */
- if (down_interruptible(&sd->sd_wbs)) {
- /* something went wrong with wait */
- return -ERESTARTSYS;
- }
- }
-
- count = min((int) count, CHUNKSIZE);
- if (copy_from_user(sd->sd_wb, buf, count)) {
- up(&sd->sd_wbs);
- return -EFAULT;
- }
-
- /* try to send the buffer */
- spin_lock_irqsave(&sd->sd_wlock, flags);
- status = write_status_check(sd, count);
-
- /* if we failed, and we want to block, then loop */
- while (status <= 0) {
- DECLARE_WAITQUEUE(wait, current);
-
- if (file->f_flags & O_NONBLOCK) {
- spin_unlock_irqrestore(&sd->sd_wlock, flags);
- up(&sd->sd_wbs);
- return -EAGAIN;
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&sd->sd_wq, &wait);
- spin_unlock_irqrestore(&sd->sd_wlock, flags);
-
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_wq, &wait);
- if (signal_pending(current)) {
- /* wait was interrupted */
- up(&sd->sd_wbs);
- return -ERESTARTSYS;
- }
-
- spin_lock_irqsave(&sd->sd_wlock, flags);
- status = write_status_check(sd, count);
- }
- spin_unlock_irqrestore(&sd->sd_wlock, flags);
-
- /* release the write buffer and wake anyone who's waiting for it */
- up(&sd->sd_wbs);
-
- /* return the number of characters accepted (should be the complete
- * "chunk" as requested)
- */
- if ((status >= 0) && (status < count)) {
- pr_debug("Didn't accept the full chunk; %d of %d\n",
- status, (int) count);
- }
- return status;
-}
-
-static __poll_t
-scdrv_poll(struct file *file, struct poll_table_struct *wait)
-{
- __poll_t mask = 0;
- int status = 0;
- struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
- unsigned long flags;
-
- poll_wait(file, &sd->sd_rq, wait);
- poll_wait(file, &sd->sd_wq, wait);
-
- spin_lock_irqsave(&sd->sd_rlock, flags);
- spin_lock(&sd->sd_wlock);
- status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
- spin_unlock(&sd->sd_wlock);
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-
- if (status > 0) {
- if (status & SAL_IROUTER_INTR_RECV) {
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- if (status & SAL_IROUTER_INTR_XMIT) {
- mask |= EPOLLOUT | EPOLLWRNORM;
- }
- }
-
- return mask;
-}
-
-static const struct file_operations scdrv_fops = {
- .owner = THIS_MODULE,
- .read = scdrv_read,
- .write = scdrv_write,
- .poll = scdrv_poll,
- .open = scdrv_open,
- .release = scdrv_release,
- .llseek = noop_llseek,
-};
-
-static struct class *snsc_class;
-
-/*
- * scdrv_init
- *
- * Called at boot time to initialize the system controller communication
- * facility.
- */
-int __init
-scdrv_init(void)
-{
- geoid_t geoid;
- cnodeid_t cnode;
- char devname[32];
- char *devnamep;
- struct sysctl_data_s *scd;
- void *salbuf;
- dev_t first_dev, dev;
- nasid_t event_nasid;
-
- if (!ia64_platform_is("sn2"))
- return -ENODEV;
-
- event_nasid = ia64_sn_get_console_nasid();
-
- snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
- if (IS_ERR(snsc_class)) {
- printk("%s: failed to allocate class\n", __func__);
- return PTR_ERR(snsc_class);
- }
-
- if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
- SYSCTL_BASENAME) < 0) {
- printk("%s: failed to register SN system controller device\n",
- __func__);
- return -ENODEV;
- }
-
- for (cnode = 0; cnode < num_cnodes; cnode++) {
- geoid = cnodeid_get_geoid(cnode);
- devnamep = devname;
- format_module_id(devnamep, geo_module(geoid),
- MODULE_FORMAT_BRIEF);
- devnamep = devname + strlen(devname);
- sprintf(devnamep, "^%d#%d", geo_slot(geoid),
- geo_slab(geoid));
-
- /* allocate sysctl device data */
- scd = kzalloc(sizeof (struct sysctl_data_s),
- GFP_KERNEL);
- if (!scd) {
- printk("%s: failed to allocate device info"
- "for %s/%s\n", __func__,
- SYSCTL_BASENAME, devname);
- continue;
- }
-
- /* initialize sysctl device data fields */
- scd->scd_nasid = cnodeid_to_nasid(cnode);
- if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
- printk("%s: failed to allocate driver buffer"
- "(%s%s)\n", __func__,
- SYSCTL_BASENAME, devname);
- kfree(scd);
- continue;
- }
-
- if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
- SCDRV_BUFSZ) < 0) {
- printk
- ("%s: failed to initialize SAL for"
- " system controller communication"
- " (%s/%s): outdated PROM?\n",
- __func__, SYSCTL_BASENAME, devname);
- kfree(scd);
- kfree(salbuf);
- continue;
- }
-
- dev = first_dev + cnode;
- cdev_init(&scd->scd_cdev, &scdrv_fops);
- if (cdev_add(&scd->scd_cdev, dev, 1)) {
- printk("%s: failed to register system"
- " controller device (%s%s)\n",
- __func__, SYSCTL_BASENAME, devname);
- kfree(scd);
- kfree(salbuf);
- continue;
- }
-
- device_create(snsc_class, NULL, dev, NULL,
- "%s", devname);
-
- ia64_sn_irtr_intr_enable(scd->scd_nasid,
- 0 /*ignored */ ,
- SAL_IROUTER_INTR_RECV);
-
- /* on the console nasid, prepare to receive
- * system controller environmental events
- */
- if(scd->scd_nasid == event_nasid) {
- scdrv_event_init(scd);
- }
- }
- return 0;
-}
-device_initcall(scdrv_init);
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
deleted file mode 100644
index e8c52c882b21..000000000000
--- a/drivers/char/snsc.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * SN Platform system controller communication support
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * This file contains macros and data types for communication with the
- * system controllers in SGI SN systems.
- */
-
-#ifndef _SN_SYSCTL_H_
-#define _SN_SYSCTL_H_
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/semaphore.h>
-#include <asm/sn/types.h>
-
-#define CHUNKSIZE 127
-
-/* This structure is used to track an open subchannel. */
-struct subch_data_s {
- nasid_t sd_nasid; /* node on which the subchannel was opened */
- int sd_subch; /* subchannel number */
- spinlock_t sd_rlock; /* monitor lock for rsv */
- spinlock_t sd_wlock; /* monitor lock for wsv */
- wait_queue_head_t sd_rq; /* wait queue for readers */
- wait_queue_head_t sd_wq; /* wait queue for writers */
- struct semaphore sd_rbs; /* semaphore for read buffer */
- struct semaphore sd_wbs; /* semaphore for write buffer */
-
- char sd_rb[CHUNKSIZE]; /* read buffer */
- char sd_wb[CHUNKSIZE]; /* write buffer */
-};
-
-struct sysctl_data_s {
- struct cdev scd_cdev; /* Character device info */
- nasid_t scd_nasid; /* Node on which subchannels are opened. */
-};
-
-
-/* argument types */
-#define IR_ARG_INT 0x00 /* 4-byte integer (big-endian) */
-#define IR_ARG_ASCII 0x01 /* null-terminated ASCII string */
-#define IR_ARG_UNKNOWN 0x80 /* unknown data type. The low
- * 7 bits will contain the data
- * length. */
-#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f
-
-
-/* system controller event codes */
-#define EV_CLASS_MASK 0xf000ul
-#define EV_SEVERITY_MASK 0x0f00ul
-#define EV_COMPONENT_MASK 0x00fful
-
-#define EV_CLASS_POWER 0x1000ul
-#define EV_CLASS_FAN 0x2000ul
-#define EV_CLASS_TEMP 0x3000ul
-#define EV_CLASS_ENV 0x4000ul
-#define EV_CLASS_TEST_FAULT 0x5000ul
-#define EV_CLASS_TEST_WARNING 0x6000ul
-#define EV_CLASS_PWRD_NOTIFY 0x8000ul
-
-/* ENV class codes */
-#define ENV_PWRDN_PEND 0x4101ul
-
-#define EV_SEVERITY_POWER_STABLE 0x0000ul
-#define EV_SEVERITY_POWER_LOW_WARNING 0x0100ul
-#define EV_SEVERITY_POWER_HIGH_WARNING 0x0200ul
-#define EV_SEVERITY_POWER_HIGH_FAULT 0x0300ul
-#define EV_SEVERITY_POWER_LOW_FAULT 0x0400ul
-
-#define EV_SEVERITY_FAN_STABLE 0x0000ul
-#define EV_SEVERITY_FAN_WARNING 0x0100ul
-#define EV_SEVERITY_FAN_FAULT 0x0200ul
-
-#define EV_SEVERITY_TEMP_STABLE 0x0000ul
-#define EV_SEVERITY_TEMP_ADVISORY 0x0100ul
-#define EV_SEVERITY_TEMP_CRITICAL 0x0200ul
-#define EV_SEVERITY_TEMP_FAULT 0x0300ul
-
-void scdrv_event_init(struct sysctl_data_s *);
-
-#endif /* _SN_SYSCTL_H_ */
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
deleted file mode 100644
index e452673dff66..000000000000
--- a/drivers/char/snsc_event.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * SN Platform system controller communication support
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * System controller event handler
- *
- * These routines deal with environmental events arriving from the
- * system controllers.
- */
-
-#include <linux/interrupt.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/unaligned.h>
-#include "snsc.h"
-
-static struct subch_data_s *event_sd;
-
-void scdrv_event(unsigned long);
-DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
-
-/*
- * scdrv_event_interrupt
- *
- * Pull incoming environmental events off the physical link to the
- * system controller and put them in a temporary holding area in SAL.
- * Schedule scdrv_event() to move them along to their ultimate
- * destination.
- */
-static irqreturn_t
-scdrv_event_interrupt(int irq, void *subch_data)
-{
- struct subch_data_s *sd = subch_data;
- unsigned long flags;
- int status;
-
- spin_lock_irqsave(&sd->sd_rlock, flags);
- status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
-
- if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
- tasklet_schedule(&sn_sysctl_event);
- }
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- * scdrv_parse_event
- *
- * Break an event (as read from SAL) into useful pieces so we can decide
- * what to do with it.
- */
-static int
-scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
-{
- char *desc_end;
-
- /* record event source address */
- *src = get_unaligned_be32(event);
- event += 4; /* move on to event code */
-
- /* record the system controller's event code */
- *code = get_unaligned_be32(event);
- event += 4; /* move on to event arguments */
-
- /* how many arguments are in the packet? */
- if (*event++ != 2) {
- /* if not 2, give up */
- return -1;
- }
-
- /* parse out the ESP code */
- if (*event++ != IR_ARG_INT) {
- /* not an integer argument, so give up */
- return -1;
- }
- *esp_code = get_unaligned_be32(event);
- event += 4;
-
- /* parse out the event description */
- if (*event++ != IR_ARG_ASCII) {
- /* not an ASCII string, so give up */
- return -1;
- }
- event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */
- event += 2; /* skip leading CR/LF */
- desc_end = desc + sprintf(desc, "%s", event);
-
- /* strip trailing CR/LF (if any) */
- for (desc_end--;
- (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
- desc_end--) {
- *desc_end = '\0';
- }
-
- return 0;
-}
-
-
-/*
- * scdrv_event_severity
- *
- * Figure out how urgent a message we should write to the console/syslog
- * via printk.
- */
-static char *
-scdrv_event_severity(int code)
-{
- int ev_class = (code & EV_CLASS_MASK);
- int ev_severity = (code & EV_SEVERITY_MASK);
- char *pk_severity = KERN_NOTICE;
-
- switch (ev_class) {
- case EV_CLASS_POWER:
- switch (ev_severity) {
- case EV_SEVERITY_POWER_LOW_WARNING:
- case EV_SEVERITY_POWER_HIGH_WARNING:
- pk_severity = KERN_WARNING;
- break;
- case EV_SEVERITY_POWER_HIGH_FAULT:
- case EV_SEVERITY_POWER_LOW_FAULT:
- pk_severity = KERN_ALERT;
- break;
- }
- break;
- case EV_CLASS_FAN:
- switch (ev_severity) {
- case EV_SEVERITY_FAN_WARNING:
- pk_severity = KERN_WARNING;
- break;
- case EV_SEVERITY_FAN_FAULT:
- pk_severity = KERN_CRIT;
- break;
- }
- break;
- case EV_CLASS_TEMP:
- switch (ev_severity) {
- case EV_SEVERITY_TEMP_ADVISORY:
- pk_severity = KERN_WARNING;
- break;
- case EV_SEVERITY_TEMP_CRITICAL:
- pk_severity = KERN_CRIT;
- break;
- case EV_SEVERITY_TEMP_FAULT:
- pk_severity = KERN_ALERT;
- break;
- }
- break;
- case EV_CLASS_ENV:
- pk_severity = KERN_ALERT;
- break;
- case EV_CLASS_TEST_FAULT:
- pk_severity = KERN_ALERT;
- break;
- case EV_CLASS_TEST_WARNING:
- pk_severity = KERN_WARNING;
- break;
- case EV_CLASS_PWRD_NOTIFY:
- pk_severity = KERN_ALERT;
- break;
- }
-
- return pk_severity;
-}
-
-
-/*
- * scdrv_dispatch_event
- *
- * Do the right thing with an incoming event. That's often nothing
- * more than printing it to the system log. For power-down notifications
- * we start a graceful shutdown.
- */
-static void
-scdrv_dispatch_event(char *event, int len)
-{
- static int snsc_shutting_down = 0;
- int code, esp_code, src, class;
- char desc[CHUNKSIZE];
- char *severity;
-
- if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
- /* ignore uninterpretible event */
- return;
- }
-
- /* how urgent is the message? */
- severity = scdrv_event_severity(code);
-
- class = (code & EV_CLASS_MASK);
-
- if (class == EV_CLASS_PWRD_NOTIFY || code == ENV_PWRDN_PEND) {
- if (snsc_shutting_down)
- return;
-
- snsc_shutting_down = 1;
-
- /* give a message for each type of event */
- if (class == EV_CLASS_PWRD_NOTIFY)
- printk(KERN_NOTICE "Power off indication received."
- " Sending SIGPWR to init...\n");
- else if (code == ENV_PWRDN_PEND)
- printk(KERN_CRIT "WARNING: Shutting down the system"
- " due to a critical environmental condition."
- " Sending SIGPWR to init...\n");
-
- /* give a SIGPWR signal to init proc */
- kill_cad_pid(SIGPWR, 0);
- } else {
- /* print to system log */
- printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
- }
-}
-
-
-/*
- * scdrv_event
- *
- * Called as a tasklet when an event arrives from the L1. Read the event
- * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
- * to send it on its way. Keep trying to read events until SAL indicates
- * that there are no more immediately available.
- */
-void
-scdrv_event(unsigned long dummy)
-{
- int status;
- int len;
- unsigned long flags;
- struct subch_data_s *sd = event_sd;
-
- /* anything to read? */
- len = CHUNKSIZE;
- spin_lock_irqsave(&sd->sd_rlock, flags);
- status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
- sd->sd_rb, &len);
-
- while (!(status < 0)) {
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
- scdrv_dispatch_event(sd->sd_rb, len);
- len = CHUNKSIZE;
- spin_lock_irqsave(&sd->sd_rlock, flags);
- status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
- sd->sd_rb, &len);
- }
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-}
-
-
-/*
- * scdrv_event_init
- *
- * Sets up a system controller subchannel to begin receiving event
- * messages. This is sort of a specialized version of scdrv_open()
- * in drivers/char/sn_sysctl.c.
- */
-void
-scdrv_event_init(struct sysctl_data_s *scd)
-{
- int rv;
-
- event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
- if (event_sd == NULL) {
- printk(KERN_WARNING "%s: couldn't allocate subchannel info"
- " for event monitoring\n", __func__);
- return;
- }
-
- /* initialize subch_data_s fields */
- event_sd->sd_nasid = scd->scd_nasid;
- spin_lock_init(&event_sd->sd_rlock);
-
- /* ask the system controllers to send events to this node */
- event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
-
- if (event_sd->sd_subch < 0) {
- kfree(event_sd);
- printk(KERN_WARNING "%s: couldn't open event subchannel\n",
- __func__);
- return;
- }
-
- /* hook event subchannel up to the system controller interrupt */
- rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
- IRQF_SHARED, "system controller events", event_sd);
- if (rv) {
- printk(KERN_WARNING "%s: irq request failed (%d)\n",
- __func__, rv);
- ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
- kfree(event_sd);
- return;
- }
-}
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 0bdc602f0d48..98f3150e0048 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -373,7 +373,7 @@ static int tosh_get_machine_id(void __iomem *bios)
value. This has been verified on a Satellite Pro 430CDT,
Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
#if TOSH_DEBUG
- printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
+ pr_debug("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
#endif
bx = 0xe6f5;
@@ -417,7 +417,7 @@ static int tosh_probe(void)
for (i=0;i<7;i++) {
if (readb(bios+0xe010+i)!=signature[i]) {
- printk("toshiba: not a supported Toshiba laptop\n");
+ pr_err("toshiba: not a supported Toshiba laptop\n");
iounmap(bios);
return -ENODEV;
}
@@ -433,7 +433,7 @@ static int tosh_probe(void)
/* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
- printk("toshiba: not a supported Toshiba laptop\n");
+ pr_err("toshiba: not a supported Toshiba laptop\n");
iounmap(bios);
return -ENODEV;
}
@@ -486,7 +486,7 @@ static int __init toshiba_init(void)
if (tosh_probe())
return -ENODEV;
- printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n");
+ pr_info("Toshiba System Management Mode driver v" TOSH_VERSION "\n");
/* set the port to use for Fn status if not specified as a parameter */
if (tosh_fn==0x00)
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 88a3c06fc153..9c37047f4b56 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -164,6 +164,11 @@ config TCG_VTPM_PROXY
/dev/vtpmX and a server-side file descriptor on which the vTPM
can receive commands.
+config TCG_FTPM_TEE
+ tristate "TEE based fTPM Interface"
+ depends on TEE && OPTEE
+ help
+	  This driver proxies for a firmware TPM running in a Trusted
+	  Execution Environment (TEE).
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a01c4cab902a..c354cdff9c62 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index d47ad10a35fe..3d6d394a8661 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip)
return chip->ops->go_idle(chip);
}
+static void tpm_clk_enable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, true);
+}
+
+static void tpm_clk_disable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+}
+
/**
* tpm_chip_start() - power on the TPM
* @chip: a TPM chip to use
@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip)
{
int ret;
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, true);
+ tpm_clk_enable(chip);
if (chip->locality == -1) {
ret = tpm_request_locality(chip);
if (ret) {
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
return ret;
}
}
@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip)
ret = tpm_cmd_ready(chip);
if (ret) {
tpm_relinquish_locality(chip);
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
return ret;
}
@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip)
{
tpm_go_idle(chip);
tpm_relinquish_locality(chip);
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_stop);
@@ -278,12 +287,9 @@ static void tpm_devs_release(struct device *dev)
* @dev: device to which the chip is associated.
*
* Issues a TPM2_Shutdown command prior to loss of power, as required by the
- * TPM 2.0 spec.
- * Then, calls bus- and device- specific shutdown code.
+ * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code.
*
- * XXX: This codepath relies on the fact that sysfs is not enabled for
- * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
- * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ * Return: always 0 (i.e. success)
*/
static int tpm_class_shutdown(struct device *dev)
{
@@ -545,6 +551,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
return hwrng_register(&chip->hwrng);
}
+static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
+ tpm2_get_pcr_allocation(chip) :
+ tpm1_get_pcr_allocation(chip);
+
+ if (rc > 0)
+ return -ENODEV;
+
+ return rc;
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -564,6 +584,12 @@ int tpm_chip_register(struct tpm_chip *chip)
if (rc)
return rc;
rc = tpm_auto_startup(chip);
+ if (rc) {
+ tpm_chip_stop(chip);
+ return rc;
+ }
+
+ rc = tpm_get_pcr_allocation(chip);
tpm_chip_stop(chip);
if (rc)
return rc;
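
After this change tpm_chip_register() keeps the chip powered across both steps: start the chip once, run auto-startup, read back the allocated PCR banks through the new tpm_get_pcr_allocation() dispatcher, and only then stop the chip. A condensed sketch of that sequence (error handling abbreviated; the behaviour is equivalent to the hunk above):

/* Condensed view of the registration-time startup sequence. */
static int example_register_sequence(struct tpm_chip *chip)
{
	int rc;

	rc = tpm_chip_start(chip);	/* clk enable, request locality, cmd_ready */
	if (rc)
		return rc;

	rc = tpm_auto_startup(chip);	/* TPM 1.x or TPM 2.0 startup commands */
	if (!rc)
		rc = tpm_get_pcr_allocation(chip); /* cache the allocated PCR banks */

	tpm_chip_stop(chip);		/* go idle, relinquish locality, clk disable */
	return rc;
}

Moving the bank lookup here lets both tpm1_get_pcr_allocation() and tpm2_get_pcr_allocation() run while the chip is already started, which is why the call is dropped from tpm2_auto_startup() further down.
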
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index d9caedda075b..edfa89160010 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -329,16 +329,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- /* XXX: If you wish to remove this restriction, you must first update
- * tpm_sysfs to explicitly lock chip->ops.
- */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return;
- /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
- * is called before ops is null'd and the sysfs core synchronizes this
- * removal so that no callbacks are running or can run again
- */
WARN_ON(chip->groups_cnt != 0);
chip->groups[chip->groups_cnt++] = &tpm_dev_group;
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index e503ffc3aa39..a7fea3e0ca86 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc, size_t min_cap_length);
int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm1_get_pcr_allocation(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index faacbe1ffa1a..149e953ca369 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
goto out;
}
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
- rc = -ENOMEM;
- goto out;
- }
-
- chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
- chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
- chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
- chip->nr_allocated_banks = 1;
-
return rc;
out:
if (rc > 0)
@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
return rc;
}
+/**
+ * tpm1_get_pcr_allocation() - initialize the allocated bank
+ * @chip: TPM chip to use.
+ *
+ * The function initializes the single SHA1-allocated bank used to extend PCRs.
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+{
+ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks)
+ return -ENOMEM;
+
+ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+ chip->nr_allocated_banks = 1;
+
+ return 0;
+}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d103545e4055..ba9acae83bff 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
u8 pcr_select[3];
} __packed;
-static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
{
struct tpm2_pcr_selection pcr_selection;
struct tpm_buf buf;
@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
goto out;
}
- rc = tpm2_get_pcr_allocation(chip);
- if (rc)
- goto out;
-
rc = tpm2_get_cc_attrs_tbl(chip);
out:
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
new file mode 100644
index 000000000000..6640a14dbe48
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+ UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+ 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf: the buffer to store data.
+ * @count: the number of bytes to read.
+ *
+ * Return:
+ * In case of success the number of bytes received.
+ * On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t len;
+
+ len = pvt_data->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+ __func__, count, len);
+ return -EIO;
+ }
+
+ memcpy(buf, pvt_data->resp_buf, len);
+ pvt_data->resp_len = 0;
+
+ return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf: the buffer to send.
+ * @len: the number of bytes to send.
+ *
+ * Return:
+ * In case of success, returns 0.
+ * On failure, -errno
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t resp_len;
+ int rc;
+ u8 *temp_buf;
+ struct tpm_header *resp_header;
+ struct tee_ioctl_invoke_arg transceive_args;
+ struct tee_param command_params[4];
+ struct tee_shm *shm = pvt_data->shm;
+
+ if (len > MAX_COMMAND_SIZE) {
+ dev_err(&chip->dev,
+ "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+ __func__, len);
+ return -EIO;
+ }
+
+ memset(&transceive_args, 0, sizeof(transceive_args));
+ memset(command_params, 0, sizeof(command_params));
+ pvt_data->resp_len = 0;
+
+ /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+ transceive_args = (struct tee_ioctl_invoke_arg) {
+ .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+ .session = pvt_data->session,
+ .num_params = 4,
+ };
+
+ /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+ command_params[0] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+ .u.memref = {
+ .shm = shm,
+ .size = len,
+ .shm_offs = 0,
+ },
+ };
+
+ temp_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+ memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+ memcpy(temp_buf, buf, len);
+
+ command_params[1] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ .u.memref = {
+ .shm = shm,
+ .size = MAX_RESPONSE_SIZE,
+ .shm_offs = MAX_COMMAND_SIZE,
+ },
+ };
+
+ rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+ command_params);
+ if ((rc < 0) || (transceive_args.ret != 0)) {
+ dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+ __func__, transceive_args.ret);
+ return (rc < 0) ? rc : transceive_args.ret;
+ }
+
+ temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+
+ resp_header = (struct tpm_header *)temp_buf;
+ resp_len = be32_to_cpu(resp_header->length);
+
+ /* sanity check resp_len */
+ if (resp_len < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "%s: tpm response header too small\n",
+ __func__);
+ return -EIO;
+ }
+ if (resp_len > MAX_RESPONSE_SIZE) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+ __func__, resp_len);
+ return -EIO;
+ }
+
+ /* sanity checks look good, cache the response */
+ memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+ pvt_data->resp_len = resp_len;
+
+ return 0;
+}
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
+ return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return 0;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = ftpm_tee_tpm_op_recv,
+ .send = ftpm_tee_tpm_op_send,
+ .cancel = ftpm_tee_tpm_op_cancel,
+ .status = ftpm_tee_tpm_op_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /*
+ * Currently this driver only supports the GP-compliant OP-TEE based fTPM TA.
+ */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ftpm_tee_probe - initialize the fTPM
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct ftpm_tee_private *pvt_data = NULL;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+ GFP_KERNEL);
+ if (!pvt_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, pvt_data);
+
+ /* Open context with TEE driver */
+ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data->ctx)) {
+ if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+ return -EPROBE_DEFER;
+ dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+ return PTR_ERR(pvt_data->ctx);
+ }
+
+ /* Open a session with fTPM TA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+ __func__, sess_arg.ret);
+ rc = -EINVAL;
+ goto out_tee_session;
+ }
+ pvt_data->session = sess_arg.session;
+
+ /* Allocate dynamic shared memory with fTPM TA */
+ pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
+ MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(pvt_data->shm)) {
+ dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+ rc = -ENOMEM;
+ goto out_shm_alloc;
+ }
+
+ /* Allocate new struct tpm_chip instance */
+ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+ if (IS_ERR(chip)) {
+ dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+ rc = PTR_ERR(chip);
+ goto out_chip_alloc;
+ }
+
+ pvt_data->chip = chip;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ /* Create a character device for the fTPM */
+ rc = tpm_chip_register(pvt_data->chip);
+ if (rc) {
+ dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+ __func__, rc);
+ goto out_chip;
+ }
+
+ return 0;
+
+out_chip:
+ put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+ tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+ tee_client_close_context(pvt_data->ctx);
+
+ return rc;
+}
+
+/**
+ * ftpm_tee_remove - remove the TPM device
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * 0 always.
+ */
+static int ftpm_tee_remove(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ /* Release the chip */
+ tpm_chip_unregister(pvt_data->chip);
+
+ /* frees chip */
+ put_device(&pvt_data->chip->dev);
+
+ /* Free the shared memory pool */
+ tee_shm_free(pvt_data->shm);
+
+ /* close the existing session with fTPM TA*/
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+ /* close the context with TEE driver */
+ tee_client_close_context(pvt_data->ctx);
+
+ /* memory allocated with devm_kzalloc() is freed automatically */
+
+ return 0;
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+ { .compatible = "microsoft,ftpm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_driver = {
+ .driver = {
+ .name = "ftpm-tee",
+ .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ },
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+};
+
+module_platform_driver(ftpm_tee_driver);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
new file mode 100644
index 000000000000..f98daa7bf68c
--- /dev/null
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The TA function (TAF) IDs implemented in this TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI (1)
+
+/* max. buffer size supported by fTPM */
+#define MAX_COMMAND_SIZE 4096
+#define MAX_RESPONSE_SIZE 4096
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip: struct tpm_chip instance registered with tpm framework.
+ * @state: internal state
+ * @session: fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx: TEE context handler.
+ * @shm: Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+ struct tpm_chip *chip;
+ u32 session;
+ size_t resp_len;
+ u8 resp_buf[MAX_RESPONSE_SIZE];
+ struct tee_context *ctx;
+ struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index c3181ea9f271..270f43acbb77 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -980,6 +980,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
goto out_err;
}
+ tpm_chip_start(chip);
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
@@ -989,6 +991,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
} else {
tpm_tis_probe_irq(chip, intmask);
}
+ tpm_chip_stop(chip);
}
rc = tpm_chip_register(chip);
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
continue;
div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+ if (div > GENERATED_MAX_DIV + 1)
+ div = GENERATED_MAX_DIV + 1;
clk_generated_best_diff(req, parent, parent_rate, div,
&best_diff, &best_rate);
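
The new clamp in clk_generated_determine_rate() matters when the requested rate is far below the parent rate: DIV_ROUND_CLOSEST() can then produce a divider larger than the hardware field allows, so it is capped at GENERATED_MAX_DIV + 1 before the best-rate comparison. A small worked sketch of the arithmetic (the 255 limit and the example rates are illustrative assumptions, not taken from the driver):

#include <linux/kernel.h>

#define EXAMPLE_MAX_DIV	255	/* stands in for GENERATED_MAX_DIV */

static unsigned long example_pick_div(unsigned long parent_rate,
				      unsigned long requested_rate)
{
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, requested_rate);

	/* e.g. 100 MHz parent / 32 kHz request -> div = 3125, clamped to 256 */
	if (div > EXAMPLE_MAX_DIV + 1)
		div = EXAMPLE_MAX_DIV + 1;

	return div;
}
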
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index a2287c770d5c..886f7c5df51a 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -69,7 +69,7 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct scmi_clk *clk = to_scmi_clk(hw);
- return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate);
+ return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
}
static int scmi_clk_enable(struct clk_hw *hw)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..ca99e9db6575 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
return NULL;
}
+#ifdef CONFIG_OF
+static int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name, struct of_phandle_args *out_args);
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
+#else
+static inline int of_parse_clkspec(const struct device_node *np, int index,
+ const char *name,
+ struct of_phandle_args *out_args)
+{
+ return -ENOENT;
+}
+static inline struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
/**
* clk_core_get - Find the clk_core parent of a clk
* @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
* };
*
* Returns: -ENOENT when the provider can't be found or the clk doesn't
- * exist in the provider. -EINVAL when the name can't be found. NULL when the
- * provider knows about the clk but it isn't provided on this system.
+ * exist in the provider or the name can't be found in the DT node or
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
+ * isn't provided on this system.
* A valid clk_core pointer when the clk can be found in the provider.
*/
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
struct device *dev = core->dev;
const char *dev_id = dev ? dev_name(dev) : NULL;
struct device_node *np = core->of_node;
+ struct of_phandle_args clkspec;
- if (np && (name || index >= 0))
- hw = of_clk_get_hw(np, index, name);
-
- /*
- * If the DT search above couldn't find the provider or the provider
- * didn't know about this clk, fallback to looking up via clkdev based
- * clk_lookups
- */
- if (PTR_ERR(hw) == -ENOENT && name)
+ if (np && (name || index >= 0) &&
+ !of_parse_clkspec(np, index, name, &clkspec)) {
+ hw = of_clk_get_hw_from_clkspec(&clkspec);
+ of_node_put(clkspec.np);
+ } else if (name) {
+ /*
+ * If the DT search above couldn't find the provider fallback to
+ * looking up via clkdev based clk_lookups.
+ */
hw = clk_find_hw(dev_id, name);
+ }
if (IS_ERR(hw))
return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
parent = ERR_PTR(-EPROBE_DEFER);
} else {
parent = clk_core_get(core, index);
- if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
break;
/* Fallback to comparing globally unique names */
- if (!strcmp(parent->name, core->parents[i].name))
+ if (core->parents[i].name &&
+ !strcmp(parent->name, core->parents[i].name))
break;
}
@@ -2487,6 +2510,12 @@ runtime_put:
return ret;
}
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
+{
+ return clk_core_set_parent_nolock(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_set_parent);
+
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
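
The newly exported clk_hw_set_parent() lets a clock provider re-parent one of its own clk_hw objects without the consumer-side clk API detour. A minimal, hypothetical provider-side sketch (the mux and backup-parent handles are placeholders for clocks the driver registered itself; because the core call is the nolock variant, this must run where the clk framework already holds the prepare lock, e.g. from within one of the provider's own clk ops):

#include <linux/clk-provider.h>

/*
 * Hypothetical helper: park a mux the provider owns on a backup parent,
 * e.g. before reprogramming the main PLL from a set_rate callback.
 */
static int example_park_on_backup(struct clk_hw *mux_hw,
				  struct clk_hw *backup_parent_hw)
{
	return clk_hw_set_parent(mux_hw, backup_parent_hw);
}
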
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 0eaf41848280..1ac0c7990392 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -14,6 +14,12 @@ config CLK_IMX8MM
help
Build the driver for i.MX8MM CCM Clock Driver
+config CLK_IMX8MN
+ bool "IMX8MN CCM Clock Driver"
+ depends on ARCH_MXC && ARM64
+ help
+ Build the driver for i.MX8MN CCM Clock Driver
+
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
depends on ARCH_MXC && ARM64
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 05641c64b317..77a3d714f1d5 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_MXC_CLK_SCU) += \
clk-lpcg-scu.o
obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
+obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 6b8e75df994d..43fa9c361fcb 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -26,23 +26,6 @@ static u32 share_count_dcss;
static u32 share_count_pdm;
static u32 share_count_nand;
-#define PLL_1416X_RATE(_rate, _m, _p, _s) \
- { \
- .rate = (_rate), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- }
-
-#define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \
- { \
- .rate = (_rate), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- .kdiv = (_k), \
- }
-
static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
PLL_1416X_RATE(1800000000U, 225, 3, 0),
PLL_1416X_RATE(1600000000U, 200, 3, 0),
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
new file mode 100644
index 000000000000..07481a53e336
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018-2019 NXP.
+ */
+
+#include <dt-bindings/clock/imx8mn-clock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_sai5;
+static u32 share_count_sai6;
+static u32 share_count_sai7;
+static u32 share_count_disp;
+static u32 share_count_pdm;
+static u32 share_count_nand;
+
+enum {
+ ARM_PLL,
+ GPU_PLL,
+ VPU_PLL,
+ SYS_PLL1,
+ SYS_PLL2,
+ SYS_PLL3,
+ DRAM_PLL,
+ AUDIO_PLL1,
+ AUDIO_PLL2,
+ VIDEO_PLL2,
+ NR_PLLS,
+};
+
+static const struct imx_pll14xx_rate_table imx8mn_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+static const struct imx_pll14xx_rate_table imx8mn_audiopll_tbl[] = {
+ PLL_1443X_RATE(786432000U, 655, 5, 2, 23593),
+ PLL_1443X_RATE(722534400U, 301, 5, 1, 3670),
+};
+
+static const struct imx_pll14xx_rate_table imx8mn_videopll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+};
+
+static const struct imx_pll14xx_rate_table imx8mn_drampll_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+};
+
+static struct imx_pll14xx_clk imx8mn_audio_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx8mn_audiopll_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_video_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx8mn_videopll_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_dram_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx8mn_drampll_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_arm_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx8mn_pll1416x_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_gpu_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx8mn_pll1416x_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_vpu_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx8mn_pll1416x_tbl,
+};
+
+static struct imx_pll14xx_clk imx8mn_sys_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx8mn_pll1416x_tbl,
+};
+
+static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
+static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
+static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
+static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
+static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
+
+static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
+ "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m",
+ "audio_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mn_gpu_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mn_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "sys_pll1_100m",};
+
+static const char * const imx8mn_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out",
+ "video_pll1_out", "sys_pll3_out", };
+
+static const char * const imx8mn_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m",
+ "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll1_out", };
+
+static const char * const imx8mn_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m",
+ "sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+ "clk_ext1", "clk_ext4", };
+
+static const char * const imx8mn_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m",
+						"sys_pll3_out", "sys_pll1_40m", "audio_pll2_out",
+ "clk_ext1", "clk_ext3", };
+
+static const char * const imx8mn_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mn_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mn_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out",
+ "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mn_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", };
+
+static const char * const imx8mn_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m",
+ "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mn_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out",
+ "audio_pll1_out", "video_pll1_out", };
+
+static const char * const imx8mn_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m",
+ "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll1_out", "sys_pll1_266m", };
+
+static const char * const imx8mn_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out",
+ "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m",
+ "sys_pll3_out", "clk_ext4", };
+
+static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext3", "clk_ext4", };
+
+static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+ "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
+ "clk_ext2", "clk_ext3", };
+
+static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+ "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out",
+ "video_pll1_out", "clk_ext4", };
+
+static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "clk_ext1", "clk_ext2", "clk_ext3",
+ "clk_ext4", "video_pll1_out", };
+
+static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m",
+ "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out",
+ "audio_pll2_out", };
+
+static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out",
+ "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out",
+ "sys_pll2_250m", "video_pll1_out", };
+
+static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+						"sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m",
+						"sys_pll3_out", "sys_pll1_100m", };
+
+static const char * const imx8mn_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+ "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mn_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+ "audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mn_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mn_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mn_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mn_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
+ "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
+ "audio_pll2_out", "sys_pll1_133m", };
+
+static const char * const imx8mn_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mn_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext4", "audio_pll2_out", };
+
+static const char * const imx8mn_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m",
+ "sys_pll2_100m", "sys_pll3_out", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m",
+ "sys_pll2_100m", "sys_pll2_200m", "clk_ext2",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mn_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mn_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mn_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext1",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mn_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+						"sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m",
+ "sys_pll1_40m", "sys_pll3_out", "clk_ext2",
+ "sys_pll1_80m", "video_pll1_out", };
+
+static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m",
+ "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out",
+ "sys_pll1_80m", "sys_pll2_166m", };
+
+static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out",
+ "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m",
+ "sys_pll2_500m", "sys_pll1_100m", };
+
+static const char * const imx8mn_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m",
+ "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m",
+						"audio_pll2_out", "sys_pll1_100m", };
+
+static const char * const imx8mn_camera_pixel_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2",
+ "audio_pll2_out", "video_pll1_out", };
+
+static const char * const imx8mn_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m",
+ "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
+ "sys_pll2_250m", "audio_pll2_out", };
+
+static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "clk_ext3", "audio_pll2_out", };
+
+static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m",
+						"sys_pll1_200m", "audio_pll2_out", "vpu_pll",
+ "sys_pll1_80m", };
+static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
+ "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
+ "video_pll1_out", "osc_32k", };
+
+static struct clk *clks[IMX8MN_CLK_END];
+static struct clk_onecell_data clk_data;
+
+static int imx8mn_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ int ret;
+
+ clks[IMX8MN_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX8MN_CLK_24M] = of_clk_get_by_name(np, "osc_24m");
+ clks[IMX8MN_CLK_32K] = of_clk_get_by_name(np, "osc_32k");
+ clks[IMX8MN_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
+ clks[IMX8MN_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
+ clks[IMX8MN_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
+ clks[IMX8MN_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop");
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base)) {
+ ret = -ENOMEM;
+ goto unregister_clks;
+ }
+
+ clks[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mn_audio_pll);
+ clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mn_audio_pll);
+ clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mn_video_pll);
+ clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mn_dram_pll);
+ clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mn_gpu_pll);
+ clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mn_vpu_pll);
+ clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mn_arm_pll);
+ clks[IMX8MN_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mn_sys_pll);
+ clks[IMX8MN_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mn_sys_pll);
+ clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mn_sys_pll);
+
+ /* PLL bypass out */
+ clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* unbypass all the plls */
+ clk_set_parent(clks[IMX8MN_AUDIO_PLL1_BYPASS], clks[IMX8MN_AUDIO_PLL1]);
+ clk_set_parent(clks[IMX8MN_AUDIO_PLL2_BYPASS], clks[IMX8MN_AUDIO_PLL2]);
+ clk_set_parent(clks[IMX8MN_VIDEO_PLL1_BYPASS], clks[IMX8MN_VIDEO_PLL1]);
+ clk_set_parent(clks[IMX8MN_DRAM_PLL_BYPASS], clks[IMX8MN_DRAM_PLL]);
+ clk_set_parent(clks[IMX8MN_GPU_PLL_BYPASS], clks[IMX8MN_GPU_PLL]);
+ clk_set_parent(clks[IMX8MN_VPU_PLL_BYPASS], clks[IMX8MN_VPU_PLL]);
+ clk_set_parent(clks[IMX8MN_ARM_PLL_BYPASS], clks[IMX8MN_ARM_PLL]);
+ clk_set_parent(clks[IMX8MN_SYS_PLL1_BYPASS], clks[IMX8MN_SYS_PLL1]);
+ clk_set_parent(clks[IMX8MN_SYS_PLL2_BYPASS], clks[IMX8MN_SYS_PLL2]);
+ clk_set_parent(clks[IMX8MN_SYS_PLL3_BYPASS], clks[IMX8MN_SYS_PLL3]);
+
+ /* PLL out gate */
+ clks[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13);
+ clks[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13);
+ clks[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13);
+ clks[IMX8MN_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13);
+ clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13);
+ clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13);
+ clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13);
+ clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13);
+ clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13);
+ clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13);
+
+ /* SYS PLL fixed output */
+ clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
+ clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
+ clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
+ clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
+ clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
+ clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
+ clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
+ clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
+
+ clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
+ clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
+ clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
+ clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
+ clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
+ clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
+ clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
+ clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
+
+ np = dev->of_node;
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base))) {
+ ret = PTR_ERR(base);
+ goto unregister_clks;
+ }
+
+ /* CORE */
+ clks[IMX8MN_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
+ clks[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels));
+ clks[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels));
+ clks[IMX8MN_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
+ clks[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+ clks[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+
+ clks[IMX8MN_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+ clks[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ /* BUS */
+ clks[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+ clks[IMX8MN_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+ clks[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+ clks[IMX8MN_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+ clks[IMX8MN_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+ clks[IMX8MN_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+ clks[IMX8MN_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+ clks[IMX8MN_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+ clks[IMX8MN_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+
+ clks[IMX8MN_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+ clks[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+ clks[IMX8MN_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ clks[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+ clks[IMX8MN_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
+ clks[IMX8MN_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mn_dram_alt_sels, base + 0xa000);
+ clks[IMX8MN_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mn_dram_apb_sels, base + 0xa080);
+ clks[IMX8MN_CLK_DISP_PIXEL] = imx8m_clk_composite("disp_pixel", imx8mn_disp_pixel_sels, base + 0xa500);
+ clks[IMX8MN_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mn_sai2_sels, base + 0xa600);
+ clks[IMX8MN_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mn_sai3_sels, base + 0xa680);
+ clks[IMX8MN_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mn_sai5_sels, base + 0xa780);
+ clks[IMX8MN_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mn_sai6_sels, base + 0xa800);
+ clks[IMX8MN_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mn_spdif1_sels, base + 0xa880);
+ clks[IMX8MN_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mn_enet_ref_sels, base + 0xa980);
+ clks[IMX8MN_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mn_enet_timer_sels, base + 0xaa00);
+ clks[IMX8MN_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mn_enet_phy_sels, base + 0xaa80);
+ clks[IMX8MN_CLK_NAND] = imx8m_clk_composite("nand", imx8mn_nand_sels, base + 0xab00);
+ clks[IMX8MN_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mn_qspi_sels, base + 0xab80);
+ clks[IMX8MN_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mn_usdhc1_sels, base + 0xac00);
+ clks[IMX8MN_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mn_usdhc2_sels, base + 0xac80);
+ clks[IMX8MN_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mn_i2c1_sels, base + 0xad00);
+ clks[IMX8MN_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mn_i2c2_sels, base + 0xad80);
+ clks[IMX8MN_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mn_i2c3_sels, base + 0xae00);
+ clks[IMX8MN_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mn_i2c4_sels, base + 0xae80);
+ clks[IMX8MN_CLK_UART1] = imx8m_clk_composite("uart1", imx8mn_uart1_sels, base + 0xaf00);
+ clks[IMX8MN_CLK_UART2] = imx8m_clk_composite("uart2", imx8mn_uart2_sels, base + 0xaf80);
+ clks[IMX8MN_CLK_UART3] = imx8m_clk_composite("uart3", imx8mn_uart3_sels, base + 0xb000);
+ clks[IMX8MN_CLK_UART4] = imx8m_clk_composite("uart4", imx8mn_uart4_sels, base + 0xb080);
+ clks[IMX8MN_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mn_usb_core_sels, base + 0xb100);
+ clks[IMX8MN_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mn_usb_phy_sels, base + 0xb180);
+ clks[IMX8MN_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mn_ecspi1_sels, base + 0xb280);
+ clks[IMX8MN_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mn_ecspi2_sels, base + 0xb300);
+ clks[IMX8MN_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mn_pwm1_sels, base + 0xb380);
+ clks[IMX8MN_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400);
+ clks[IMX8MN_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480);
+ clks[IMX8MN_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500);
+ clks[IMX8MN_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mn_wdog_sels, base + 0xb900);
+ clks[IMX8MN_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980);
+ clks[IMX8MN_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mn_clko1_sels, base + 0xba00);
+ clks[IMX8MN_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mn_clko2_sels, base + 0xba80);
+ clks[IMX8MN_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mn_dsi_core_sels, base + 0xbb00);
+ clks[IMX8MN_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mn_dsi_phy_sels, base + 0xbb80);
+ clks[IMX8MN_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mn_dsi_dbi_sels, base + 0xbc00);
+ clks[IMX8MN_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mn_usdhc3_sels, base + 0xbc80);
+ clks[IMX8MN_CLK_CAMERA_PIXEL] = imx8m_clk_composite("camera_pixel", imx8mn_camera_pixel_sels, base + 0xbd00);
+ clks[IMX8MN_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mn_csi1_phy_sels, base + 0xbd80);
+ clks[IMX8MN_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mn_csi2_phy_sels, base + 0xbf00);
+ clks[IMX8MN_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mn_csi2_esc_sels, base + 0xbf80);
+ clks[IMX8MN_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mn_ecspi3_sels, base + 0xc180);
+ clks[IMX8MN_CLK_PDM] = imx8m_clk_composite("pdm", imx8mn_pdm_sels, base + 0xc200);
+ clks[IMX8MN_CLK_SAI7] = imx8m_clk_composite("sai7", imx8mn_sai7_sels, base + 0xc300);
+
+ clks[IMX8MN_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ clks[IMX8MN_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ clks[IMX8MN_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ clks[IMX8MN_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MN_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0);
+ clks[IMX8MN_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0);
+ clks[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0);
+ clks[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0);
+ clks[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0);
+ clks[IMX8MN_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ clks[IMX8MN_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ clks[IMX8MN_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ clks[IMX8MN_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ clks[IMX8MN_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ clks[IMX8MN_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ clks[IMX8MN_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ clks[IMX8MN_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ clks[IMX8MN_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ clks[IMX8MN_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ clks[IMX8MN_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ clks[IMX8MN_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MN_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MN_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MN_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MN_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MN_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MN_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MN_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MN_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MN_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
+ clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+ clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ clks[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ clks[IMX8MN_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ clks[IMX8MN_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ clks[IMX8MN_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0);
+ clks[IMX8MN_CLK_ASRC_ROOT] = imx_clk_gate4("asrc_root_clk", "audio_ahb", base + 0x4580, 0);
+ clks[IMX8MN_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MN_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm);
+ clks[IMX8MN_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_disp);
+ clks[IMX8MN_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_disp);
+ clks[IMX8MN_CLK_CAMERA_PIXEL_ROOT] = imx_clk_gate2_shared2("camera_pixel_clk", "camera_pixel", base + 0x45d0, 0, &share_count_disp);
+ clks[IMX8MN_CLK_DISP_PIXEL_ROOT] = imx_clk_gate2_shared2("disp_pixel_clk", "disp_pixel", base + 0x45d0, 0, &share_count_disp);
+ clks[IMX8MN_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0);
+ clks[IMX8MN_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0);
+ clks[IMX8MN_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ clks[IMX8MN_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+ clks[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0);
+ clks[IMX8MN_CLK_SAI7_ROOT] = imx_clk_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7);
+
+ clks[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
+ clks[IMX8MN_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div",
+ clks[IMX8MN_CLK_A53_DIV],
+ clks[IMX8MN_CLK_A53_SRC],
+ clks[IMX8MN_ARM_PLL_OUT],
+ clks[IMX8MN_CLK_24M]);
+
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ if (ret < 0) {
+ dev_err(dev, "failed to register clks for i.MX8MN\n");
+ goto unregister_clks;
+ }
+
+ return 0;
+
+unregister_clks:
+ imx_unregister_clocks(clks, ARRAY_SIZE(clks));
+
+ return ret;
+}
+
+static const struct of_device_id imx8mn_clk_of_match[] = {
+ { .compatible = "fsl,imx8mn-ccm" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8mn_clk_of_match);
+
+static struct platform_driver imx8mn_clk_driver = {
+ .probe = imx8mn_clocks_probe,
+ .driver = {
+ .name = "imx8mn-ccm",
+ .of_match_table = of_match_ptr(imx8mn_clk_of_match),
+ },
+};
+module_platform_driver(imx8mn_clk_driver);
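The new CCM driver above exposes every clock through a single clk_onecell_data provider, so consumers address them by the IMX8MN_CLK_* indices from dt-bindings/clock/imx8mn-clock.h. A minimal consumer sketch follows; the device, the "ipg" clock-name and its index binding are illustrative assumptions, not part of this patch:

/* Hypothetical consumer of the i.MX8MN CCM provider; "example" names are
 * illustrative only. The consumer node is assumed to carry something like
 * clocks = <&clk IMX8MN_CLK_IPG_ROOT>; clock-names = "ipg";
 */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg;

	/* Resolved through of_clk_src_onecell_get() into clks[] above. */
	ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	return clk_prepare_enable(ipg);
}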
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index fb6edf1b8aa2..c0aff7ca6374 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -72,6 +72,11 @@ static const struct imx8qxp_lpcg_data imx8qxp_lpcg_adma[] = {
{ IMX_ADMA_LPCG_I2C2_CLK, "i2c2_lpcg_clk", "i2c2_clk", 0, ADMA_LPI2C_2_LPCG, 0, 0, },
{ IMX_ADMA_LPCG_I2C3_IPG_CLK, "i2c3_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPI2C_3_LPCG, 16, 0, },
{ IMX_ADMA_LPCG_I2C3_CLK, "i2c3_lpcg_clk", "i2c3_clk", 0, ADMA_LPI2C_3_LPCG, 0, 0, },
+
+ { IMX_ADMA_LPCG_DSP_CORE_CLK, "dsp_lpcg_core_clk", "dma_ipg_clk_root", 0, ADMA_HIFI_LPCG, 28, 0, },
+ { IMX_ADMA_LPCG_DSP_IPG_CLK, "dsp_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_HIFI_LPCG, 20, 0, },
+ { IMX_ADMA_LPCG_DSP_ADB_CLK, "dsp_lpcg_adb_clk", "dma_ipg_clk_root", 0, ADMA_HIFI_LPCG, 16, 0, },
+ { IMX_ADMA_LPCG_OCRAM_IPG_CLK, "ocram_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_OCRAM_LPCG, 16, 0, },
};
static const struct imx8qxp_ss_lpcg imx8qxp_ss_adma = {
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index f628071f6605..cfc05e43c343 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -14,6 +14,14 @@
DEFINE_SPINLOCK(imx_ccm_lock);
+void imx_unregister_clocks(struct clk *clks[], unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ clk_unregister(clks[i]);
+}
+
void __init imx_mmdc_mask_handshake(void __iomem *ccm_base,
unsigned int chn)
{
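imx_unregister_clocks() is the teardown counterpart used by the i.MX8MN probe error path above. A hedged sketch of the intended register-then-unwind pattern, with hypothetical "example" clocks standing in for the real tables:

/* Hedged sketch of the register-then-unwind pattern; every "example"
 * identifier is hypothetical. */
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/of.h>

#include "clk.h"

static struct clk *example_clks[2];
static struct clk_onecell_data example_clk_data = {
	.clks = example_clks,
	.clk_num = ARRAY_SIZE(example_clks),
};

static int example_register(struct device_node *np, void __iomem *base)
{
	int ret;

	example_clks[0] = imx_clk_fixed("example_dummy", 0);
	example_clks[1] = imx_clk_gate("example_gate", "example_dummy", base, 13);

	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
	if (ret) {
		/* Drop whatever was registered before the failure. */
		imx_unregister_clocks(example_clks, ARRAY_SIZE(example_clks));
		return ret;
	}

	return 0;
}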
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index d94d9cb079d3..bb4ec1b33faf 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -12,6 +12,7 @@ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
void imx_register_uart_clocks(struct clk ** const clks[]);
void imx_register_uart_clocks_hws(struct clk_hw ** const hws[]);
void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn);
+void imx_unregister_clocks(struct clk *clks[], unsigned int count);
extern void imx_cscmr1_fixup(u32 *val);
@@ -153,6 +154,23 @@ enum imx_pllv3_type {
struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
const char *parent_name, void __iomem *base, u32 div_mask);
+#define PLL_1416X_RATE(_rate, _m, _p, _s) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ }
+
struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
void __iomem *base);
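Moving PLL_1416X_RATE()/PLL_1443X_RATE() from clk-imx8mm.c into clk.h lets the i.MX8MM and i.MX8MN drivers share them. Each invocation is just a designated initializer for struct imx_pll14xx_rate_table; for instance, the first 1416x table entry used above expands to the equivalent of:

/* Equivalent expansion of PLL_1416X_RATE(1800000000U, 225, 3, 0); the
 * "example_entry" name is illustrative. */
#include "clk.h"

static const struct imx_pll14xx_rate_table example_entry = {
	.rate = 1800000000U,
	.mdiv = 225,
	.pdiv = 3,
	.sdiv = 0,
};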
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
};
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
static const struct mtk_fixed_factor top_divs[] = {
- FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
- 2),
FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
2),
FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+ int i;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+ clk_mt8183_top_init_early);
+
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
- struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
+ top_clk_data);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt8183_clk_lock, clk_data);
+ node, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ top_clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
}
static int clk_mt8183_infra_probe(struct platform_device *pdev)
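The MT8183 change splits topckgen registration in two: CLK_OF_DECLARE_DRIVER runs clk_mt8183_top_init_early() at of_clk_init() time so clk13m (the timer parent) is available early, and every other slot in top_clk_data is pre-filled with ERR_PTR(-EPROBE_DEFER) until the platform driver probe replaces it. A hedged sketch of what that means for a consumer; the device and clock-name are illustrative:

/* Hedged sketch: a consumer probing before clk_mt8183_top_probe() has run.
 * of_clk_src_onecell_get() hands back whatever sits in the slot, so a
 * placeholder ERR_PTR(-EPROBE_DEFER) propagates as a probe deferral. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *top;

	top = devm_clk_get(&pdev->dev, "example-top");
	if (IS_ERR(top))
		return PTR_ERR(top);	/* -EPROBE_DEFER triggers a retry */

	return clk_prepare_enable(top);
}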
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index a6b20e123e0c..dabeb435d067 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config COMMON_CLK_MESON_INPUT
- tristate
-
config COMMON_CLK_MESON_REGMAP
tristate
select REGMAP
@@ -33,13 +30,15 @@ config COMMON_CLK_MESON_VID_PLL_DIV
config COMMON_CLK_MESON_AO_CLKC
tristate
select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_INPUT
select RESET_CONTROLLER
config COMMON_CLK_MESON_EE_CLKC
tristate
select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_INPUT
+
+config COMMON_CLK_MESON_CPU_DYNDIV
+ tristate
+ select COMMON_CLK_MESON_REGMAP
config COMMON_CLK_MESON8B
bool
@@ -86,7 +85,6 @@ config COMMON_CLK_AXG
config COMMON_CLK_AXG_AUDIO
tristate "Meson AXG Audio Clock Controller Driver"
depends on ARCH_MESON
- select COMMON_CLK_MESON_INPUT
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PHASE
select COMMON_CLK_MESON_SCLK_DIV
@@ -104,6 +102,7 @@ config COMMON_CLK_G12A
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_EE_CLKC
+ select COMMON_CLK_MESON_CPU_DYNDIV
select MFD_SYSCON
help
Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index bc35a4efd6b7..3939f218587a 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,9 +2,9 @@
# Amlogic clock drivers
obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
+obj-$(CONFIG_COMMON_CLK_MESON_CPU_DYNDIV) += clk-cpu-dyndiv.o
obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
-obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o
obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 0086f31288eb..b488b40c9d0e 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -18,8 +18,6 @@
#include "clk-regmap.h"
#include "clk-dualdiv.h"
-#define IN_PREFIX "ao-in-"
-
/*
* AO Configuration Clock registers offsets
* Register offsets from the data sheet must be multiplied by 4.
@@ -42,7 +40,9 @@ static struct clk_regmap axg_aoclk_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "axg_ao_" #_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
+ .parent_data = &(const struct clk_parent_data) { \
+ .fw_name = "mpeg-clk", \
+ }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -64,7 +64,9 @@ static struct clk_regmap axg_aoclk_cts_oscin = {
.hw.init = &(struct clk_init_data){
.name = "cts_oscin",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -77,7 +79,9 @@ static struct clk_regmap axg_aoclk_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_oscin" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_cts_oscin.hw
+ },
.num_parents = 1,
},
};
@@ -124,7 +128,9 @@ static struct clk_regmap axg_aoclk_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_names = (const char *[]){ "axg_ao_32k_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_32k_pre.hw
+ },
.num_parents = 1,
},
};
@@ -139,8 +145,10 @@ static struct clk_regmap axg_aoclk_32k_sel = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "axg_ao_32k_div",
- "axg_ao_32k_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_32k_div.hw,
+ &axg_aoclk_32k_pre.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -154,7 +162,9 @@ static struct clk_regmap axg_aoclk_32k = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "axg_ao_32k_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_32k_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -170,8 +180,10 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "axg_ao_32k",
- IN_PREFIX "ext_32k-0" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .hw = &axg_aoclk_32k.hw },
+ { .fw_name = "ext_32k-0", },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -187,8 +199,10 @@ static struct clk_regmap axg_aoclk_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
- "axg_ao_cts_rtc_oscin"},
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "mpeg-clk", },
+ { .hw = &axg_aoclk_cts_rtc_oscin.hw },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -203,8 +217,10 @@ static struct clk_regmap axg_aoclk_saradc_mux = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "axg_ao_clk81" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &axg_aoclk_clk81.hw },
+ },
.num_parents = 2,
},
};
@@ -218,7 +234,9 @@ static struct clk_regmap axg_aoclk_saradc_div = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_saradc_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "axg_ao_saradc_mux" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_saradc_mux.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -232,7 +250,9 @@ static struct clk_regmap axg_aoclk_saradc_gate = {
.hw.init = &(struct clk_init_data){
.name = "axg_ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "axg_ao_saradc_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_aoclk_saradc_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -290,12 +310,6 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = {
.num = NR_CLKS,
};
-static const struct meson_aoclk_input axg_aoclk_inputs[] = {
- { .name = "xtal", .required = true },
- { .name = "mpeg-clk", .required = true },
- { .name = "ext-32k-0", .required = false },
-};
-
static const struct meson_aoclk_data axg_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(axg_aoclk_reset),
@@ -303,9 +317,6 @@ static const struct meson_aoclk_data axg_aoclkc_data = {
.num_clks = ARRAY_SIZE(axg_aoclk_regmap),
.clks = axg_aoclk_regmap,
.hw_data = &axg_aoclk_onecell_data,
- .inputs = axg_aoclk_inputs,
- .num_inputs = ARRAY_SIZE(axg_aoclk_inputs),
- .input_prefix = IN_PREFIX,
};
static const struct of_device_id axg_aoclkc_match_table[] = {
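The remaining meson changes replace .parent_names strings (and the clk-input/IN_PREFIX indirection) with the newer parent descriptions: .parent_hws points straight at in-driver clk_hw objects, while .parent_data entries with .fw_name are resolved against the controller's own DT clock-names. A stand-alone sketch of the two forms, with illustrative identifiers:

/* Illustrative clk_init_data mirroring the conversion above; the "example"
 * identifiers and the "xtal" fw_name binding are assumptions. */
#include <linux/clk-provider.h>

static struct clk_hw example_internal_parent;

static const struct clk_init_data example_mux_init = {
	.name = "example_mux",
	.ops = &clk_mux_ops,
	.parent_data = (const struct clk_parent_data[]) {
		{ .fw_name = "xtal" },			/* from the node's clock-names */
		{ .hw = &example_internal_parent },	/* another clock in this driver */
	},
	.num_parents = 2,
};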
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 8028ff6f6610..741df7e955ca 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include "axg-audio.h"
-#include "clk-input.h"
#include "clk-regmap.h"
#include "clk-phase.h"
#include "sclk-div.h"
@@ -24,7 +23,7 @@
#define AUD_SLV_SCLK_COUNT 10
#define AUD_SLV_LRCLK_COUNT 10
-#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+#define AUD_GATE(_name, _reg, _bit, _phws, _iflags) \
struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
@@ -33,13 +32,13 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ _pname }, \
+ .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
-#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pdata, _iflags) \
struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_mux_data){ \
.offset = (_reg), \
@@ -50,13 +49,13 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data){ \
.name = "aud_"#_name, \
.ops = &clk_regmap_mux_ops, \
- .parent_names = (_pnames), \
- .num_parents = ARRAY_SIZE(_pnames), \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
-#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _phws, _iflags) \
struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_div_data){ \
.offset = (_reg), \
@@ -67,15 +66,27 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data){ \
.name = "aud_"#_name, \
.ops = &clk_regmap_divider_ops, \
- .parent_names = (const char *[]) { _pname }, \
+ .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
}
#define AUD_PCLK_GATE(_name, _bit) \
- AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "audio_pclk", 0)
-
+struct clk_regmap aud_##_name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (AUDIO_CLK_GATE_EN), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "aud_"#_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_data = &(const struct clk_parent_data) { \
+ .fw_name = "pclk", \
+ }, \
+ .num_parents = 1, \
+ }, \
+}
/* Audio peripheral clocks */
static AUD_PCLK_GATE(ddr_arb, 0);
static AUD_PCLK_GATE(pdm, 1);
@@ -100,14 +111,20 @@ static AUD_PCLK_GATE(power_detect, 19);
static AUD_PCLK_GATE(spdifout_b, 21);
/* Audio Master Clocks */
-static const char * const mst_mux_parent_names[] = {
- "aud_mst_in0", "aud_mst_in1", "aud_mst_in2", "aud_mst_in3",
- "aud_mst_in4", "aud_mst_in5", "aud_mst_in6", "aud_mst_in7",
+static const struct clk_parent_data mst_mux_parent_data[] = {
+ { .fw_name = "mst_in0", },
+ { .fw_name = "mst_in1", },
+ { .fw_name = "mst_in2", },
+ { .fw_name = "mst_in3", },
+ { .fw_name = "mst_in4", },
+ { .fw_name = "mst_in5", },
+ { .fw_name = "mst_in6", },
+ { .fw_name = "mst_in7", },
};
#define AUD_MST_MUX(_name, _reg, _flag) \
AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
- mst_mux_parent_names, CLK_SET_RATE_PARENT)
+ mst_mux_parent_data, 0)
#define AUD_MST_MCLK_MUX(_name, _reg) \
AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
@@ -129,7 +146,7 @@ static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
#define AUD_MST_DIV(_name, _reg, _flag) \
AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
- "aud_"#_name"_sel", CLK_SET_RATE_PARENT) \
+ aud_##_name##_sel, CLK_SET_RATE_PARENT) \
#define AUD_MST_MCLK_DIV(_name, _reg) \
AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
@@ -150,7 +167,7 @@ static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
#define AUD_MST_MCLK_GATE(_name, _reg) \
- AUD_GATE(_name, _reg, 31, "aud_"#_name"_div", \
+ AUD_GATE(_name, _reg, 31, aud_##_name##_div, \
CLK_SET_RATE_PARENT)
static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
@@ -168,7 +185,7 @@ static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
/* Sample Clocks */
#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
- "aud_mst_"#_name"_mclk", 0)
+ aud_mst_##_name##_mclk, 0)
static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
@@ -178,7 +195,7 @@ static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
- _hi_shift, _hi_width, _pname, _iflags) \
+ _hi_shift, _hi_width, _phws, _iflags) \
struct clk_regmap aud_##_name = { \
.data = &(struct meson_sclk_div_data) { \
.div = { \
@@ -195,7 +212,7 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_sclk_div_ops, \
- .parent_names = (const char *[]) { _pname }, \
+ .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
@@ -203,7 +220,7 @@ struct clk_regmap aud_##_name = { \
#define AUD_MST_SCLK_DIV(_name, _reg) \
AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
- "aud_mst_"#_name"_sclk_pre_en", \
+ aud_mst_##_name##_sclk_pre_en, \
CLK_SET_RATE_PARENT)
static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
@@ -214,8 +231,8 @@ static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
#define AUD_MST_SCLK_POST_EN(_name, _reg) \
- AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
- "aud_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+ AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ aud_mst_##_name##_sclk_div, CLK_SET_RATE_PARENT)
static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
@@ -224,8 +241,8 @@ static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
- _pname, _iflags) \
+#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+ _phws, _iflags) \
struct clk_regmap aud_##_name = { \
.data = &(struct meson_clk_triphase_data) { \
.ph0 = { \
@@ -247,7 +264,7 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_clk_triphase_ops, \
- .parent_names = (const char *[]) { _pname }, \
+ .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
@@ -255,7 +272,7 @@ struct clk_regmap aud_##_name = { \
#define AUD_MST_SCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
- "aud_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+ aud_mst_##_name##_sclk_post_en, CLK_SET_RATE_PARENT)
static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
@@ -266,7 +283,7 @@ static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
#define AUD_MST_LRCLK_DIV(_name, _reg) \
AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
- "aud_mst_"#_name"_sclk_post_en", 0) \
+ aud_mst_##_name##_sclk_post_en, 0) \
static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
@@ -277,7 +294,7 @@ static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
#define AUD_MST_LRCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
- "aud_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+ aud_mst_##_name##_lrclk_div, CLK_SET_RATE_PARENT)
static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
@@ -286,19 +303,29 @@ static AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-static const char * const tdm_sclk_parent_names[] = {
- "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
- "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
- "aud_slv_sclk0", "aud_slv_sclk1", "aud_slv_sclk2",
- "aud_slv_sclk3", "aud_slv_sclk4", "aud_slv_sclk5",
- "aud_slv_sclk6", "aud_slv_sclk7", "aud_slv_sclk8",
- "aud_slv_sclk9"
+static const struct clk_parent_data tdm_sclk_parent_data[] = {
+ { .hw = &aud_mst_a_sclk.hw, },
+ { .hw = &aud_mst_b_sclk.hw, },
+ { .hw = &aud_mst_c_sclk.hw, },
+ { .hw = &aud_mst_d_sclk.hw, },
+ { .hw = &aud_mst_e_sclk.hw, },
+ { .hw = &aud_mst_f_sclk.hw, },
+ { .fw_name = "slv_sclk0", },
+ { .fw_name = "slv_sclk1", },
+ { .fw_name = "slv_sclk2", },
+ { .fw_name = "slv_sclk3", },
+ { .fw_name = "slv_sclk4", },
+ { .fw_name = "slv_sclk5", },
+ { .fw_name = "slv_sclk6", },
+ { .fw_name = "slv_sclk7", },
+ { .fw_name = "slv_sclk8", },
+ { .fw_name = "slv_sclk9", },
};
#define AUD_TDM_SCLK_MUX(_name, _reg) \
AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
CLK_MUX_ROUND_CLOSEST, \
- tdm_sclk_parent_names, 0)
+ tdm_sclk_parent_data, 0)
static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
@@ -310,7 +337,7 @@ static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
- "aud_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+ aud_tdm##_name##_sclk_sel, CLK_SET_RATE_PARENT)
static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
@@ -322,7 +349,7 @@ static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
- "aud_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+ aud_tdm##_name##_sclk_pre_en, CLK_SET_RATE_PARENT)
static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
@@ -344,8 +371,9 @@ static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
.hw.init = &(struct clk_init_data) { \
.name = "aud_tdm"#_name"_sclk", \
.ops = &meson_clk_phase_ops, \
- .parent_names = (const char *[]) \
- { "aud_tdm"#_name"_sclk_post_en" }, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &aud_tdm##_name##_sclk_post_en.hw \
+ }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
}, \
@@ -359,19 +387,29 @@ static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-static const char * const tdm_lrclk_parent_names[] = {
- "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
- "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
- "aud_slv_lrclk0", "aud_slv_lrclk1", "aud_slv_lrclk2",
- "aud_slv_lrclk3", "aud_slv_lrclk4", "aud_slv_lrclk5",
- "aud_slv_lrclk6", "aud_slv_lrclk7", "aud_slv_lrclk8",
- "aud_slv_lrclk9"
+static const struct clk_parent_data tdm_lrclk_parent_data[] = {
+ { .hw = &aud_mst_a_lrclk.hw, },
+ { .hw = &aud_mst_b_lrclk.hw, },
+ { .hw = &aud_mst_c_lrclk.hw, },
+ { .hw = &aud_mst_d_lrclk.hw, },
+ { .hw = &aud_mst_e_lrclk.hw, },
+ { .hw = &aud_mst_f_lrclk.hw, },
+ { .fw_name = "slv_lrclk0", },
+ { .fw_name = "slv_lrclk1", },
+ { .fw_name = "slv_lrclk2", },
+ { .fw_name = "slv_lrclk3", },
+ { .fw_name = "slv_lrclk4", },
+ { .fw_name = "slv_lrclk5", },
+ { .fw_name = "slv_lrclk6", },
+ { .fw_name = "slv_lrclk7", },
+ { .fw_name = "slv_lrclk8", },
+ { .fw_name = "slv_lrclk9", },
};
-#define AUD_TDM_LRLCK(_name, _reg) \
- AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_lrclk_parent_names, 0)
+#define AUD_TDM_LRLCK(_name, _reg) \
+ AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_data, 0)
static AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
static AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
@@ -386,39 +424,51 @@ static AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \
CLK_SET_RATE_NO_REPARENT)
-static const char * const mclk_pad_ctrl_parent_names[] = {
- "aud_mst_a_mclk", "aud_mst_b_mclk", "aud_mst_c_mclk",
- "aud_mst_d_mclk", "aud_mst_e_mclk", "aud_mst_f_mclk",
+static const struct clk_parent_data mclk_pad_ctrl_parent_data[] = {
+ { .hw = &aud_mst_a_mclk.hw },
+ { .hw = &aud_mst_b_mclk.hw },
+ { .hw = &aud_mst_c_mclk.hw },
+ { .hw = &aud_mst_d_mclk.hw },
+ { .hw = &aud_mst_e_mclk.hw },
+ { .hw = &aud_mst_f_mclk.hw },
};
static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0,
- mclk_pad_ctrl_parent_names);
+ mclk_pad_ctrl_parent_data);
static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4,
- mclk_pad_ctrl_parent_names);
-
-static const char * const lrclk_pad_ctrl_parent_names[] = {
- "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
- "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
+ mclk_pad_ctrl_parent_data);
+
+static const struct clk_parent_data lrclk_pad_ctrl_parent_data[] = {
+ { .hw = &aud_mst_a_lrclk.hw },
+ { .hw = &aud_mst_b_lrclk.hw },
+ { .hw = &aud_mst_c_lrclk.hw },
+ { .hw = &aud_mst_d_lrclk.hw },
+ { .hw = &aud_mst_e_lrclk.hw },
+ { .hw = &aud_mst_f_lrclk.hw },
};
static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16,
- lrclk_pad_ctrl_parent_names);
+ lrclk_pad_ctrl_parent_data);
static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20,
- lrclk_pad_ctrl_parent_names);
+ lrclk_pad_ctrl_parent_data);
static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24,
- lrclk_pad_ctrl_parent_names);
-
-static const char * const sclk_pad_ctrl_parent_names[] = {
- "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
- "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
+ lrclk_pad_ctrl_parent_data);
+
+static const struct clk_parent_data sclk_pad_ctrl_parent_data[] = {
+ { .hw = &aud_mst_a_sclk.hw },
+ { .hw = &aud_mst_b_sclk.hw },
+ { .hw = &aud_mst_c_sclk.hw },
+ { .hw = &aud_mst_d_sclk.hw },
+ { .hw = &aud_mst_e_sclk.hw },
+ { .hw = &aud_mst_f_sclk.hw },
};
static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0,
- sclk_pad_ctrl_parent_names);
+ sclk_pad_ctrl_parent_data);
static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4,
- sclk_pad_ctrl_parent_names);
+ sclk_pad_ctrl_parent_data);
static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
- sclk_pad_ctrl_parent_names);
+ sclk_pad_ctrl_parent_data);
/*
* Array of all clocks provided by this provider
@@ -868,54 +918,6 @@ static int devm_clk_get_enable(struct device *dev, char *id)
return 0;
}
-static int axg_register_clk_hw_input(struct device *dev,
- const char *name)
-{
- char *clk_name;
- struct clk_hw *hw;
- int err = 0;
-
- clk_name = kasprintf(GFP_KERNEL, "aud_%s", name);
- if (!clk_name)
- return -ENOMEM;
-
- hw = meson_clk_hw_register_input(dev, name, clk_name, 0);
- if (IS_ERR(hw)) {
- /* It is ok if an input clock is missing */
- if (PTR_ERR(hw) == -ENOENT) {
- dev_dbg(dev, "%s not provided", name);
- } else {
- err = PTR_ERR(hw);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get %s clock", name);
- }
- }
-
- kfree(clk_name);
- return err;
-}
-
-static int axg_register_clk_hw_inputs(struct device *dev,
- const char *basename,
- unsigned int count)
-{
- char *name;
- int i, ret;
-
- for (i = 0; i < count; i++) {
- name = kasprintf(GFP_KERNEL, "%s%d", basename, i);
- if (!name)
- return -ENOMEM;
-
- ret = axg_register_clk_hw_input(dev, name);
- kfree(name);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -963,29 +965,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
return ret;
}
- /* Register the peripheral input clock */
- hw = meson_clk_hw_register_input(dev, "pclk", "audio_pclk", 0);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- /* Register optional input master clocks */
- ret = axg_register_clk_hw_inputs(dev, "mst_in",
- AUD_MST_IN_COUNT);
- if (ret)
- return ret;
-
- /* Register optional input slave sclks */
- ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
- AUD_SLV_SCLK_COUNT);
- if (ret)
- return ret;
-
- /* Register optional input slave lrclks */
- ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
- AUD_SLV_LRCLK_COUNT);
- if (ret)
- return ret;
-
/* Populate regmap for the regmap backed clocks */
for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++)
aud_clk_regmaps[i]->map = map;
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 3ddd0efc9ee0..13fc0006f63d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include "clk-input.h"
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
@@ -59,7 +58,9 @@ static struct clk_regmap axg_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -74,7 +75,9 @@ static struct clk_regmap axg_fixed_pll = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fixed_pll_dco.hw
+ },
.num_parents = 1,
/*
* This clock won't ever change at runtime so
@@ -114,7 +117,9 @@ static struct clk_regmap axg_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -129,7 +134,9 @@ static struct clk_regmap axg_sys_pll = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "sys_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_sys_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -215,7 +222,9 @@ static struct clk_regmap axg_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -230,7 +239,9 @@ static struct clk_regmap axg_gp0_pll = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_gp0_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -284,7 +295,9 @@ static struct clk_regmap axg_hifi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -299,7 +312,9 @@ static struct clk_regmap axg_hifi_pll = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "hifi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_hifi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -311,7 +326,7 @@ static struct clk_fixed_factor axg_fclk_div2_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -324,7 +339,9 @@ static struct clk_regmap axg_fclk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fclk_div2_div.hw
+ },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
},
@@ -336,7 +353,7 @@ static struct clk_fixed_factor axg_fclk_div3_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -349,7 +366,9 @@ static struct clk_regmap axg_fclk_div3 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div3_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fclk_div3_div.hw
+ },
.num_parents = 1,
/*
* FIXME:
@@ -372,7 +391,7 @@ static struct clk_fixed_factor axg_fclk_div4_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -385,7 +404,9 @@ static struct clk_regmap axg_fclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div4_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fclk_div4_div.hw
+ },
.num_parents = 1,
},
};
@@ -396,7 +417,7 @@ static struct clk_fixed_factor axg_fclk_div5_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -409,7 +430,9 @@ static struct clk_regmap axg_fclk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div5_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fclk_div5_div.hw
+ },
.num_parents = 1,
},
};
@@ -420,7 +443,9 @@ static struct clk_fixed_factor axg_fclk_div7_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -433,7 +458,9 @@ static struct clk_regmap axg_fclk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div7_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fclk_div7_div.hw
+ },
.num_parents = 1,
},
};
@@ -447,7 +474,9 @@ static struct clk_regmap axg_mpll_prediv = {
.hw.init = &(struct clk_init_data){
.name = "mpll_prediv",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -480,7 +509,9 @@ static struct clk_regmap axg_mpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -493,7 +524,9 @@ static struct clk_regmap axg_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -527,7 +560,9 @@ static struct clk_regmap axg_mpll1_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -540,7 +575,9 @@ static struct clk_regmap axg_mpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -579,7 +616,9 @@ static struct clk_regmap axg_mpll2_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -592,7 +631,9 @@ static struct clk_regmap axg_mpll2 = {
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -626,7 +667,9 @@ static struct clk_regmap axg_mpll3_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll3_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -639,7 +682,9 @@ static struct clk_regmap axg_mpll3 = {
.hw.init = &(struct clk_init_data){
.name = "mpll3",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll3_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpll3_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -702,7 +747,9 @@ static struct clk_regmap axg_pcie_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -717,7 +764,9 @@ static struct clk_regmap axg_pcie_pll_od = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_od",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "pcie_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_pcie_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -733,7 +782,9 @@ static struct clk_regmap axg_pcie_pll = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "pcie_pll_od" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_pcie_pll_od.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -750,7 +801,7 @@ static struct clk_regmap axg_pcie_mux = {
.hw.init = &(struct clk_init_data){
.name = "pcie_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "pcie_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_pcie_pll.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -767,7 +818,7 @@ static struct clk_regmap axg_pcie_ref = {
.hw.init = &(struct clk_init_data){
.name = "pcie_ref",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "pcie_mux" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_pcie_mux.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -781,7 +832,7 @@ static struct clk_regmap axg_pcie_cml_en0 = {
.hw.init = &(struct clk_init_data) {
.name = "pcie_cml_en0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "pcie_ref" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_pcie_ref.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -796,16 +847,21 @@ static struct clk_regmap axg_pcie_cml_en1 = {
.hw.init = &(struct clk_init_data) {
.name = "pcie_cml_en1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "pcie_ref" },
+ .parent_hws = (const struct clk_hw *[]) { &axg_pcie_ref.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const char * const clk81_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
- "fclk_div3", "fclk_div5"
+static const struct clk_parent_data clk81_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &axg_fclk_div7.hw },
+ { .hw = &axg_mpll1.hw },
+ { .hw = &axg_mpll2.hw },
+ { .hw = &axg_fclk_div4.hw },
+ { .hw = &axg_fclk_div3.hw },
+ { .hw = &axg_fclk_div5.hw },
};
static struct clk_regmap axg_mpeg_clk_sel = {
@@ -818,8 +874,8 @@ static struct clk_regmap axg_mpeg_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = clk81_parent_names,
- .num_parents = ARRAY_SIZE(clk81_parent_names),
+ .parent_data = clk81_parent_data,
+ .num_parents = ARRAY_SIZE(clk81_parent_data),
},
};
@@ -832,7 +888,9 @@ static struct clk_regmap axg_mpeg_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpeg_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -846,15 +904,20 @@ static struct clk_regmap axg_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_mpeg_clk_div.hw
+ },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const char * const axg_sd_emmc_clk0_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
-
+static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &axg_fclk_div2.hw },
+ { .hw = &axg_fclk_div3.hw },
+ { .hw = &axg_fclk_div5.hw },
+ { .hw = &axg_fclk_div7.hw },
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
@@ -873,8 +936,8 @@ static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = axg_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
+ .parent_data = axg_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -889,7 +952,9 @@ static struct clk_regmap axg_sd_emmc_b_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_sd_emmc_b_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -903,7 +968,9 @@ static struct clk_regmap axg_sd_emmc_b_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_sd_emmc_b_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -919,8 +986,8 @@ static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = axg_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
+ .parent_data = axg_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -935,7 +1002,9 @@ static struct clk_regmap axg_sd_emmc_c_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_sd_emmc_c_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -949,7 +1018,9 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_sd_emmc_c_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -957,9 +1028,18 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
-static const char * const gen_clk_parent_names[] = {
- IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3",
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+static const struct clk_parent_data gen_clk_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &axg_hifi_pll.hw },
+ { .hw = &axg_mpll0.hw },
+ { .hw = &axg_mpll1.hw },
+ { .hw = &axg_mpll2.hw },
+ { .hw = &axg_mpll3.hw },
+ { .hw = &axg_fclk_div4.hw },
+ { .hw = &axg_fclk_div3.hw },
+ { .hw = &axg_fclk_div5.hw },
+ { .hw = &axg_fclk_div7.hw },
+ { .hw = &axg_gp0_pll.hw },
};
static struct clk_regmap axg_gen_clk_sel = {
@@ -978,8 +1058,8 @@ static struct clk_regmap axg_gen_clk_sel = {
* hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_names = gen_clk_parent_names,
- .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ .parent_data = gen_clk_parent_data,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_data),
},
};
@@ -992,7 +1072,9 @@ static struct clk_regmap axg_gen_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "gen_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gen_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_gen_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1006,12 +1088,17 @@ static struct clk_regmap axg_gen_clk = {
.hw.init = &(struct clk_init_data){
.name = "gen_clk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "gen_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_gen_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
+#define MESON_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &axg_clk81.hw)
+
/* Everything Else (EE) domain gates */
static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
new file mode 100644
index 000000000000..36976927fe82
--- /dev/null
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+
+#include "clk-regmap.h"
+#include "clk-cpu-dyndiv.h"
+
+static inline struct meson_clk_cpu_dyndiv_data *
+meson_clk_cpu_dyndiv_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_cpu_dyndiv_data *)clk->data;
+}
+
+static unsigned long meson_clk_cpu_dyndiv_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_cpu_dyndiv_data *data = meson_clk_cpu_dyndiv_data(clk);
+
+ return divider_recalc_rate(hw, prate,
+ meson_parm_read(clk->map, &data->div),
+ NULL, 0, data->div.width);
+}
+
+static long meson_clk_cpu_dyndiv_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_cpu_dyndiv_data *data = meson_clk_cpu_dyndiv_data(clk);
+
+ return divider_round_rate(hw, rate, prate, NULL, data->div.width, 0);
+}
+
+static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_cpu_dyndiv_data *data = meson_clk_cpu_dyndiv_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = divider_get_val(rate, parent_rate, NULL, data->div.width, 0);
+ if (ret < 0)
+ return ret;
+
+ val = (unsigned int)ret << data->div.shift;
+
+ /* Write the SYS_CPU_DYN_ENABLE bit before changing the divider */
+ meson_parm_write(clk->map, &data->dyn, 1);
+
+ /* Update the divider while removing the SYS_CPU_DYN_ENABLE bit */
+ return regmap_update_bits(clk->map, data->div.reg_off,
+ SETPMASK(data->div.width, data->div.shift) |
+ SETPMASK(data->dyn.width, data->dyn.shift),
+ val);
+};
+
+const struct clk_ops meson_clk_cpu_dyndiv_ops = {
+ .recalc_rate = meson_clk_cpu_dyndiv_recalc_rate,
+ .round_rate = meson_clk_cpu_dyndiv_round_rate,
+ .set_rate = meson_clk_cpu_dyndiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(meson_clk_cpu_dyndiv_ops);
+
+MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL v2");
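For reference, a minimal sketch (not part of this patch) of how a clock attaches these new ops, mirroring the g12a.c usage further down in the same commit; EXAMPLE_CPU_CLK_CNTL and example_premux are placeholder names, not real registers or clocks:

static struct clk_regmap example_cpu_dyn_div = {
	.data = &(struct meson_clk_cpu_dyndiv_data){
		.div = {	/* divider value field */
			.reg_off = EXAMPLE_CPU_CLK_CNTL,
			.shift = 4,
			.width = 6,
		},
		.dyn = {	/* "dynamic enable" bit toggled around the divider update */
			.reg_off = EXAMPLE_CPU_CLK_CNTL,
			.shift = 26,
			.width = 1,
		},
	},
	.hw.init = &(struct clk_init_data){
		.name = "example_cpu_dyn_div",
		.ops = &meson_clk_cpu_dyndiv_ops,
		.parent_hws = (const struct clk_hw *[]) { &example_premux.hw },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	},
};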
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.h b/drivers/clk/meson/clk-cpu-dyndiv.h
new file mode 100644
index 000000000000..f4908404792e
--- /dev/null
+++ b/drivers/clk/meson/clk-cpu-dyndiv.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_CLK_CPU_DYNDIV_H
+#define __MESON_CLK_CPU_DYNDIV_H
+
+#include <linux/clk-provider.h>
+#include "parm.h"
+
+struct meson_clk_cpu_dyndiv_data {
+ struct parm div;
+ struct parm dyn;
+};
+
+extern const struct clk_ops meson_clk_cpu_dyndiv_ops;
+
+#endif /* __MESON_CLK_CPU_DYNDIV_H */
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
deleted file mode 100644
index 086226e9dba6..000000000000
--- a/drivers/clk/meson/clk-input.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include "clk-input.h"
-
-static const struct clk_ops meson_clk_no_ops = {};
-
-struct clk_hw *meson_clk_hw_register_input(struct device *dev,
- const char *of_name,
- const char *clk_name,
- unsigned long flags)
-{
- struct clk *parent_clk = devm_clk_get(dev, of_name);
- struct clk_init_data init;
- const char *parent_name;
- struct clk_hw *hw;
- int ret;
-
- if (IS_ERR(parent_clk))
- return (struct clk_hw *)parent_clk;
-
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return ERR_PTR(-ENOMEM);
-
- parent_name = __clk_get_name(parent_clk);
- init.name = clk_name;
- init.ops = &meson_clk_no_ops;
- init.flags = flags;
- init.parent_names = &parent_name;
- init.num_parents = 1;
- hw->init = &init;
-
- ret = devm_clk_hw_register(dev, hw);
-
- return ret ? ERR_PTR(ret) : hw;
-}
-EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
-
-MODULE_DESCRIPTION("Amlogic clock input helper");
-MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h
deleted file mode 100644
index 4a541b9685a6..000000000000
--- a/drivers/clk/meson/clk-input.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __MESON_CLK_INPUT_H
-#define __MESON_CLK_INPUT_H
-
-#include <linux/clk-provider.h>
-
-struct device;
-
-struct clk_hw *meson_clk_hw_register_input(struct device *dev,
- const char *of_name,
- const char *clk_name,
- unsigned long flags);
-
-#endif /* __MESON_CLK_INPUT_H */
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index 1dd0abe3ba91..c4a39604cffd 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -111,7 +111,7 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
-#define __MESON_GATE(_name, _reg, _bit, _ops) \
+#define __MESON_PCLK(_name, _reg, _bit, _ops, _pname) \
struct clk_regmap _name = { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
@@ -120,15 +120,15 @@ struct clk_regmap _name = { \
.hw.init = &(struct clk_init_data) { \
.name = #_name, \
.ops = _ops, \
- .parent_names = (const char *[]){ "clk81" }, \
+ .parent_hws = (const struct clk_hw *[]) { _pname }, \
.num_parents = 1, \
.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
}, \
}
-#define MESON_GATE(_name, _reg, _bit) \
- __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops)
+#define MESON_PCLK(_name, _reg, _bit, _pname) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pname)
-#define MESON_GATE_RO(_name, _reg, _bit) \
- __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops)
+#define MESON_PCLK_RO(_name, _reg, _bit, _pname) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pname)
#endif /* __CLK_REGMAP_H */
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 1994e735396b..62499563e4f5 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -18,8 +18,6 @@
#include "clk-regmap.h"
#include "clk-dualdiv.h"
-#define IN_PREFIX "ao-in-"
-
/*
* AO Configuration Clock registers offsets
* Register offsets from the data sheet must be multiplied by 4.
@@ -51,7 +49,9 @@ static struct clk_regmap g12a_aoclk_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "g12a_ao_" #_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
+ .parent_data = &(const struct clk_parent_data) { \
+ .fw_name = "mpeg-clk", \
+ }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -81,7 +81,9 @@ static struct clk_regmap g12a_aoclk_cts_oscin = {
.hw.init = &(struct clk_init_data){
.name = "cts_oscin",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -106,7 +108,9 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_32k_by_oscin_pre",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_oscin" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_cts_oscin.hw
+ },
.num_parents = 1,
},
};
@@ -143,7 +147,9 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_32k_by_oscin_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_32k_by_oscin_pre.hw
+ },
.num_parents = 1,
},
};
@@ -158,8 +164,10 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_32k_by_oscin_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div",
- "g12a_ao_32k_by_oscin_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_32k_by_oscin_div.hw,
+ &g12a_aoclk_32k_by_oscin_pre.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -173,7 +181,9 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_32k_by_oscin",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_32k_by_oscin_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -189,7 +199,9 @@ static struct clk_regmap g12a_aoclk_cec_pre = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_cec_pre",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_oscin" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_cts_oscin.hw
+ },
.num_parents = 1,
},
};
@@ -226,7 +238,9 @@ static struct clk_regmap g12a_aoclk_cec_div = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_cec_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_names = (const char *[]){ "g12a_ao_cec_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_cec_pre.hw
+ },
.num_parents = 1,
},
};
@@ -241,8 +255,10 @@ static struct clk_regmap g12a_aoclk_cec_sel = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_cec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "g12a_ao_cec_div",
- "g12a_ao_cec_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_cec_div.hw,
+ &g12a_aoclk_cec_pre.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -256,7 +272,9 @@ static struct clk_regmap g12a_aoclk_cec = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_cec",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "g12a_ao_cec_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_cec_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -272,8 +290,10 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin",
- IN_PREFIX "ext_32k-0" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .hw = &g12a_aoclk_32k_by_oscin.hw },
+ { .fw_name = "ext-32k-0", },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -289,8 +309,10 @@ static struct clk_regmap g12a_aoclk_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
- "g12a_ao_cts_rtc_oscin"},
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "mpeg-clk", },
+ { .hw = &g12a_aoclk_cts_rtc_oscin.hw },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -305,8 +327,10 @@ static struct clk_regmap g12a_aoclk_saradc_mux = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "g12a_ao_clk81" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_aoclk_clk81.hw },
+ },
.num_parents = 2,
},
};
@@ -320,7 +344,9 @@ static struct clk_regmap g12a_aoclk_saradc_div = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_saradc_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "g12a_ao_saradc_mux" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_saradc_mux.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -334,7 +360,9 @@ static struct clk_regmap g12a_aoclk_saradc_gate = {
.hw.init = &(struct clk_init_data){
.name = "g12a_ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "g12a_ao_saradc_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_aoclk_saradc_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -417,12 +445,6 @@ static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = {
.num = NR_CLKS,
};
-static const struct meson_aoclk_input g12a_aoclk_inputs[] = {
- { .name = "xtal", .required = true },
- { .name = "mpeg-clk", .required = true },
- { .name = "ext-32k-0", .required = false },
-};
-
static const struct meson_aoclk_data g12a_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(g12a_aoclk_reset),
@@ -430,9 +452,6 @@ static const struct meson_aoclk_data g12a_aoclkc_data = {
.num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
.clks = g12a_aoclk_regmap,
.hw_data = &g12a_aoclk_onecell_data,
- .inputs = g12a_aoclk_inputs,
- .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs),
- .input_prefix = IN_PREFIX,
};
static const struct of_device_id g12a_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index db1c4ed9d54e..c3f0ffc3280d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -14,11 +14,12 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
-#include "clk-input.h"
#include "clk-mpll.h"
#include "clk-pll.h"
#include "clk-regmap.h"
+#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
#include "meson-eeclk.h"
#include "g12a.h"
@@ -61,7 +62,9 @@ static struct clk_regmap g12a_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -76,7 +79,9 @@ static struct clk_regmap g12a_fixed_pll = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
.num_parents = 1,
/*
* This clock won't ever change at runtime so
@@ -85,16 +90,9 @@ static struct clk_regmap g12a_fixed_pll = {
},
};
-/*
- * Internal sys pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_sys_init_regs[] = {
- { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 },
- { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 },
- { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 },
+static const struct pll_mult_range g12a_sys_pll_mult_range = {
+ .min = 128,
+ .max = 250,
};
static struct clk_regmap g12a_sys_pll_dco = {
@@ -124,14 +122,17 @@ static struct clk_regmap g12a_sys_pll_dco = {
.shift = 29,
.width = 1,
},
- .init_regs = g12a_sys_init_regs,
- .init_count = ARRAY_SIZE(g12a_sys_init_regs),
+ .range = &g12a_sys_pll_mult_range,
},
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
+ /* This clock feeds the CPU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -144,9 +145,12 @@ static struct clk_regmap g12a_sys_pll = {
},
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "sys_pll_dco" },
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sys_pll_dco.hw
+ },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -177,12 +181,17 @@ static struct clk_regmap g12b_sys1_pll_dco = {
.shift = 29,
.width = 1,
},
+ .range = &g12a_sys_pll_mult_range,
},
.hw.init = &(struct clk_init_data){
.name = "sys1_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
+ /* This clock feeds the CPU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -195,9 +204,12 @@ static struct clk_regmap g12b_sys1_pll = {
},
.hw.init = &(struct clk_init_data){
.name = "sys1_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "sys1_pll_dco" },
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_sys1_pll_dco.hw
+ },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -209,7 +221,7 @@ static struct clk_regmap g12a_sys_pll_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "sys_pll_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "sys_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_sys_pll.hw },
.num_parents = 1,
/*
* This clock is used to debug the sys_pll range
@@ -226,7 +238,9 @@ static struct clk_regmap g12b_sys1_pll_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "sys1_pll_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "sys1_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_sys1_pll.hw
+ },
.num_parents = 1,
/*
* This clock is used to debug the sys_pll range
@@ -241,7 +255,9 @@ static struct clk_fixed_factor g12a_sys_pll_div16 = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_div16",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "sys_pll_div16_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sys_pll_div16_en.hw
+ },
.num_parents = 1,
},
};
@@ -252,11 +268,75 @@ static struct clk_fixed_factor g12b_sys1_pll_div16 = {
.hw.init = &(struct clk_init_data){
.name = "sys1_pll_div16",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "sys1_pll_div16_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_sys1_pll_div16_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2_div.hw
+ },
.num_parents = 1,
},
};
+static struct clk_fixed_factor g12a_fclk_div3_div = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 20,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div3_div.hw
+ },
+ .num_parents = 1,
+ /*
+ * This clock is used by the resident firmware and is required
+ * by the platform to operate correctly.
+ * Until the following conditions are met, we need this clock to
+ * be marked as critical:
+ * a) Mark the clock used by a firmware resource, if possible
+ * b) CCF has a clock hand-off mechanism to make sure the
+ * clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
/* Datasheet names this field as "premux0" */
static struct clk_regmap g12a_cpu_clk_premux0 = {
.data = &(struct clk_regmap_mux_data){
@@ -266,26 +346,61 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "fclk_div2",
- "fclk_div3" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
.num_parents = 3,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_premux1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
+ .num_parents = 3,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
static struct clk_regmap g12a_cpu_clk_mux0_div = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .shift = 4,
- .width = 6,
+ .data = &(struct meson_clk_cpu_dyndiv_data){
+ .div = {
+ .reg_off = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 4,
+ .width = 6,
+ },
+ .dyn = {
+ .reg_off = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 26,
+ .width = 1,
+ },
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn0_div",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn0_sel" },
+ .ops = &meson_clk_cpu_dyndiv_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_premux0.hw
+ },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -298,27 +413,13 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn0",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn0_sel",
- "cpu_clk_dyn0_div" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_premux0.hw,
+ &g12a_cpu_clk_mux0_div.hw,
+ },
.num_parents = 2,
- },
-};
-
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12a_cpu_clk_premux1 = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .mask = 0x3,
- .shift = 16,
- },
- .hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "fclk_div2",
- "fclk_div3" },
- .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -332,7 +433,9 @@ static struct clk_regmap g12a_cpu_clk_mux1_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_premux1.hw
+ },
.num_parents = 1,
},
};
@@ -346,10 +449,14 @@ static struct clk_regmap g12a_cpu_clk_postmux1 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn1",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn1_sel",
- "cpu_clk_dyn1_div" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_premux1.hw,
+ &g12a_cpu_clk_mux1_div.hw,
+ },
.num_parents = 2,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -362,10 +469,13 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_dyn",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn0",
- "cpu_clk_dyn1" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_postmux0.hw,
+ &g12a_cpu_clk_postmux1.hw,
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -378,10 +488,13 @@ static struct clk_regmap g12a_cpu_clk = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn",
- "sys_pll" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_dyn.hw,
+ &g12a_sys_pll.hw,
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -394,10 +507,13 @@ static struct clk_regmap g12b_cpu_clk = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_dyn",
- "sys1_pll" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_dyn.hw,
+ &g12b_sys1_pll.hw
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -410,26 +526,38 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "fclk_div2",
- "fclk_div3" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
.num_parents = 3,
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
static struct clk_regmap g12b_cpub_clk_mux0_div = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_SYS_CPUB_CLK_CNTL,
- .shift = 4,
- .width = 6,
+ .data = &(struct meson_clk_cpu_dyndiv_data){
+ .div = {
+ .reg_off = HHI_SYS_CPUB_CLK_CNTL,
+ .shift = 4,
+ .width = 6,
+ },
+ .dyn = {
+ .reg_off = HHI_SYS_CPUB_CLK_CNTL,
+ .shift = 26,
+ .width = 1,
+ },
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn0_div",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn0_sel" },
+ .ops = &meson_clk_cpu_dyndiv_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_premux0.hw
+ },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -442,10 +570,13 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn0",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn0_sel",
- "cpub_clk_dyn0_div" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_premux0.hw,
+ &g12b_cpub_clk_mux0_div.hw
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -458,11 +589,15 @@ static struct clk_regmap g12b_cpub_clk_premux1 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "fclk_div2",
- "fclk_div3" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
.num_parents = 3,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -476,7 +611,9 @@ static struct clk_regmap g12b_cpub_clk_mux1_div = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_premux1.hw
+ },
.num_parents = 1,
},
};
@@ -490,10 +627,14 @@ static struct clk_regmap g12b_cpub_clk_postmux1 = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn1",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn1_sel",
- "cpub_clk_dyn1_div" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_premux1.hw,
+ &g12b_cpub_clk_mux1_div.hw
+ },
.num_parents = 2,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -506,10 +647,13 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_dyn",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn0",
- "cpub_clk_dyn1" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_postmux0.hw,
+ &g12b_cpub_clk_postmux1.hw
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -522,13 +666,227 @@ static struct clk_regmap g12b_cpub_clk = {
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_dyn",
- "sys_pll" },
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_dyn.hw,
+ &g12a_sys_pll.hw
+ },
.num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
},
};
+static int g12a_cpu_clk_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ if (event == POST_RATE_CHANGE || event == PRE_RATE_CHANGE) {
+ /* Wait for clock propagation before/after changing the mux */
+ udelay(100);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block g12a_cpu_clk_mux_nb = {
+ .notifier_call = g12a_cpu_clk_mux_notifier_cb,
+};
+
+struct g12a_cpu_clk_postmux_nb_data {
+ struct notifier_block nb;
+ struct clk_hw *xtal;
+ struct clk_hw *cpu_clk_dyn;
+ struct clk_hw *cpu_clk_postmux0;
+ struct clk_hw *cpu_clk_postmux1;
+ struct clk_hw *cpu_clk_premux1;
+};
+
+static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct g12a_cpu_clk_postmux_nb_data *nb_data =
+ container_of(nb, struct g12a_cpu_clk_postmux_nb_data, nb);
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ /*
+ * This notifier means cpu_clk_postmux0 clock will be changed
+ * to feed cpu_clk, this is the current path :
+ * cpu_clk
+ * \- cpu_clk_dyn
+ * \- cpu_clk_postmux0
+ * \- cpu_clk_muxX_div
+ * \- cpu_clk_premux0
+ * \- fclk_div3 or fclk_div2
+ * OR
+ * \- cpu_clk_premux0
+ * \- fclk_div3 or fclk_div2
+ */
+
+ /* Setup cpu_clk_premux1 to xtal */
+ clk_hw_set_parent(nb_data->cpu_clk_premux1,
+ nb_data->xtal);
+
+ /* Setup cpu_clk_postmux1 to bypass divider */
+ clk_hw_set_parent(nb_data->cpu_clk_postmux1,
+ nb_data->cpu_clk_premux1);
+
+ /* Switch to parking clk on cpu_clk_postmux1 */
+ clk_hw_set_parent(nb_data->cpu_clk_dyn,
+ nb_data->cpu_clk_postmux1);
+
+ /*
+ * Now, cpu_clk is 24MHz in the current path :
+ * cpu_clk
+ * \- cpu_clk_dyn
+ * \- cpu_clk_postmux1
+ * \- cpu_clk_premux1
+ * \- xtal
+ */
+
+ udelay(100);
+
+ return NOTIFY_OK;
+
+ case POST_RATE_CHANGE:
+ /*
+ * The cpu_clk_postmux0 has ben updated, now switch back
+ * cpu_clk_dyn to cpu_clk_postmux0 and take the changes
+ * in account.
+ */
+
+ /* Configure cpu_clk_dyn back to cpu_clk_postmux0 */
+ clk_hw_set_parent(nb_data->cpu_clk_dyn,
+ nb_data->cpu_clk_postmux0);
+
+ /*
+ * new path :
+ * cpu_clk
+ * \- cpu_clk_dyn
+ * \- cpu_clk_postmux0
+ * \- cpu_clk_muxX_div
+ * \- cpu_clk_premux0
+ * \- fclk_div3 or fclk_div2
+ * OR
+ * \- cpu_clk_premux0
+ * \- fclk_div3 or fclk_div2
+ */
+
+ udelay(100);
+
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct g12a_cpu_clk_postmux_nb_data g12a_cpu_clk_postmux0_nb_data = {
+ .cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
+ .cpu_clk_postmux0 = &g12a_cpu_clk_postmux0.hw,
+ .cpu_clk_postmux1 = &g12a_cpu_clk_postmux1.hw,
+ .cpu_clk_premux1 = &g12a_cpu_clk_premux1.hw,
+ .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+};
+
+static struct g12a_cpu_clk_postmux_nb_data g12b_cpub_clk_postmux0_nb_data = {
+ .cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
+ .cpu_clk_postmux0 = &g12b_cpub_clk_postmux0.hw,
+ .cpu_clk_postmux1 = &g12b_cpub_clk_postmux1.hw,
+ .cpu_clk_premux1 = &g12b_cpub_clk_premux1.hw,
+ .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+};
+
+struct g12a_sys_pll_nb_data {
+ struct notifier_block nb;
+ struct clk_hw *sys_pll;
+ struct clk_hw *cpu_clk;
+ struct clk_hw *cpu_clk_dyn;
+};
+
+static int g12a_sys_pll_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct g12a_sys_pll_nb_data *nb_data =
+ container_of(nb, struct g12a_sys_pll_nb_data, nb);
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ /*
+ * This notifier means sys_pll clock will be changed
+ * to feed cpu_clk, this is the current path :
+ * cpu_clk
+ * \- sys_pll
+ * \- sys_pll_dco
+ */
+
+ /* Configure cpu_clk to use cpu_clk_dyn */
+ clk_hw_set_parent(nb_data->cpu_clk,
+ nb_data->cpu_clk_dyn);
+
+ /*
+ * Now, cpu_clk uses the dyn path
+ * cpu_clk
+ * \- cpu_clk_dyn
+ * \- cpu_clk_dynX
+ * \- cpu_clk_dynX_sel
+ * \- cpu_clk_dynX_div
+ * \- xtal/fclk_div2/fclk_div3
+ * \- xtal/fclk_div2/fclk_div3
+ */
+
+ udelay(100);
+
+ return NOTIFY_OK;
+
+ case POST_RATE_CHANGE:
+ /*
+ * The sys_pll has been updated, now switch back cpu_clk to
+ * sys_pll
+ */
+
+ /* Configure cpu_clk to use sys_pll */
+ clk_hw_set_parent(nb_data->cpu_clk,
+ nb_data->sys_pll);
+
+ udelay(100);
+
+ /* new path :
+ * cpu_clk
+ * \- sys_pll
+ * \- sys_pll_dco
+ */
+
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct g12a_sys_pll_nb_data g12a_sys_pll_nb_data = {
+ .sys_pll = &g12a_sys_pll.hw,
+ .cpu_clk = &g12a_cpu_clk.hw,
+ .cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
+ .nb.notifier_call = g12a_sys_pll_notifier_cb,
+};
+
+/* G12B first CPU cluster uses sys1_pll */
+static struct g12a_sys_pll_nb_data g12b_cpu_clk_sys1_pll_nb_data = {
+ .sys_pll = &g12b_sys1_pll.hw,
+ .cpu_clk = &g12b_cpu_clk.hw,
+ .cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
+ .nb.notifier_call = g12a_sys_pll_notifier_cb,
+};
+
+/* G12B second CPU cluster uses sys_pll */
+static struct g12a_sys_pll_nb_data g12b_cpub_clk_sys_pll_nb_data = {
+ .sys_pll = &g12a_sys_pll.hw,
+ .cpu_clk = &g12b_cpub_clk.hw,
+ .cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
+ .nb.notifier_call = g12a_sys_pll_notifier_cb,
+};
+
static struct clk_regmap g12a_cpu_clk_div16_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
@@ -537,7 +895,9 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk.hw
+ },
.num_parents = 1,
/*
* This clock is used to debug the cpu_clk range
@@ -554,7 +914,9 @@ static struct clk_regmap g12b_cpub_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpub_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
/*
* This clock is used to debug the cpu_clk range
@@ -569,7 +931,9 @@ static struct clk_fixed_factor g12a_cpu_clk_div16 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div16",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk_div16_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_div16_en.hw
+ },
.num_parents = 1,
},
};
@@ -580,7 +944,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div16 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div16",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk_div16_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_div16_en.hw
+ },
.num_parents = 1,
},
};
@@ -595,7 +961,7 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.num_parents = 1,
},
};
@@ -608,7 +974,9 @@ static struct clk_regmap g12a_cpu_clk_apb = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_apb",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_apb_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_apb_div.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -627,7 +995,7 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.num_parents = 1,
},
};
@@ -640,7 +1008,9 @@ static struct clk_regmap g12a_cpu_clk_atb = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_atb",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_atb_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_atb_div.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -659,7 +1029,7 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.num_parents = 1,
},
};
@@ -672,7 +1042,9 @@ static struct clk_regmap g12a_cpu_clk_axi = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_axi",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_axi_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_axi_div.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -691,7 +1063,17 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * G12A and G12B have different cpu_clks (with
+ * different struct clk_hw). We fall back to the global
+ * naming string mechanism so cpu_clk_trace_div picks
+ * up the appropriate one.
+ */
+ .name = "cpu_clk",
+ .index = -1,
+ },
.num_parents = 1,
},
};
@@ -704,7 +1086,9 @@ static struct clk_regmap g12a_cpu_clk_trace = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_trace",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpu_clk_trace_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cpu_clk_trace_div.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -719,7 +1103,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -730,7 +1116,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div3 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div3",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -741,7 +1129,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -752,7 +1142,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div5",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -763,7 +1155,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div6 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -774,7 +1168,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div7",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -785,7 +1181,9 @@ static struct clk_fixed_factor g12b_cpub_clk_div8 = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_div8",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpub_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk.hw
+ },
.num_parents = 1,
},
};
@@ -801,13 +1199,15 @@ static struct clk_regmap g12b_cpub_clk_apb_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_apb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_div2",
- "cpub_clk_div3",
- "cpub_clk_div4",
- "cpub_clk_div5",
- "cpub_clk_div6",
- "cpub_clk_div7",
- "cpub_clk_div8" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw
+ },
.num_parents = 7,
},
};
@@ -821,7 +1221,9 @@ static struct clk_regmap g12b_cpub_clk_apb = {
.hw.init = &(struct clk_init_data) {
.name = "cpub_clk_apb",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_apb_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_apb_sel.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -840,13 +1242,15 @@ static struct clk_regmap g12b_cpub_clk_atb_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_atb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_div2",
- "cpub_clk_div3",
- "cpub_clk_div4",
- "cpub_clk_div5",
- "cpub_clk_div6",
- "cpub_clk_div7",
- "cpub_clk_div8" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw
+ },
.num_parents = 7,
},
};
@@ -860,7 +1264,9 @@ static struct clk_regmap g12b_cpub_clk_atb = {
.hw.init = &(struct clk_init_data) {
.name = "cpub_clk_atb",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_atb_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_atb_sel.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -879,13 +1285,15 @@ static struct clk_regmap g12b_cpub_clk_axi_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_axi_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_div2",
- "cpub_clk_div3",
- "cpub_clk_div4",
- "cpub_clk_div5",
- "cpub_clk_div6",
- "cpub_clk_div7",
- "cpub_clk_div8" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw
+ },
.num_parents = 7,
},
};
@@ -899,7 +1307,9 @@ static struct clk_regmap g12b_cpub_clk_axi = {
.hw.init = &(struct clk_init_data) {
.name = "cpub_clk_axi",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_axi_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_axi_sel.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -918,13 +1328,15 @@ static struct clk_regmap g12b_cpub_clk_trace_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_trace_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_div2",
- "cpub_clk_div3",
- "cpub_clk_div4",
- "cpub_clk_div5",
- "cpub_clk_div6",
- "cpub_clk_div7",
- "cpub_clk_div8" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw
+ },
.num_parents = 7,
},
};
@@ -938,7 +1350,9 @@ static struct clk_regmap g12b_cpub_clk_trace = {
.hw.init = &(struct clk_init_data) {
.name = "cpub_clk_trace",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cpub_clk_trace_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_cpub_clk_trace_sel.hw
+ },
.num_parents = 1,
/*
* This clock is set by the ROM monitor code,
@@ -1003,7 +1417,9 @@ static struct clk_regmap g12a_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -1019,7 +1435,9 @@ static struct clk_regmap g12a_gp0_pll = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_gp0_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1077,7 +1495,9 @@ static struct clk_regmap g12a_hifi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -1093,7 +1513,9 @@ static struct clk_regmap g12a_hifi_pll = {
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "hifi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hifi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1164,7 +1586,9 @@ static struct clk_regmap g12a_pcie_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
.ops = &meson_clk_pcie_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -1175,7 +1599,9 @@ static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "pcie_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1193,7 +1619,9 @@ static struct clk_regmap g12a_pcie_pll_od = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_od",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "pcie_pll_dco_div2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco_div2.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1205,7 +1633,9 @@ static struct clk_fixed_factor g12a_pcie_pll = {
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_pll",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "pcie_pll_od" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_od.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1247,7 +1677,9 @@ static struct clk_regmap g12a_hdmi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
/*
* Display directly handle hdmi pll registers ATM, we need
@@ -1267,7 +1699,9 @@ static struct clk_regmap g12a_hdmi_pll_od = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -1283,7 +1717,9 @@ static struct clk_regmap g12a_hdmi_pll_od2 = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od2",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -1299,77 +1735,21 @@ static struct clk_regmap g12a_hdmi_pll = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od2.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
};
-static struct clk_fixed_factor g12a_fclk_div2_div = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2_div",
- .ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div2 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
- .ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div2_div" },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div3_div = {
- .mult = 1,
- .div = 3,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div3_div",
- .ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div3 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 20,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
- .ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div3_div" },
- .num_parents = 1,
- /*
- * This clock is used by the resident firmware and is required
- * by the platform to operate correctly.
- * Until the following condition are met, we need this clock to
- * be marked as critical:
- * a) Mark the clock used by a firmware resource, if possible
- * b) CCF has a clock hand-off mechanism to make the sure the
- * clock stays on until the proper driver comes along
- */
- .flags = CLK_IS_CRITICAL,
- },
-};
-
static struct clk_fixed_factor g12a_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
.name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -1382,7 +1762,9 @@ static struct clk_regmap g12a_fclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div4_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div4_div.hw
+ },
.num_parents = 1,
},
};
@@ -1393,7 +1775,7 @@ static struct clk_fixed_factor g12a_fclk_div5_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -1406,7 +1788,9 @@ static struct clk_regmap g12a_fclk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div5_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div5_div.hw
+ },
.num_parents = 1,
},
};
@@ -1417,7 +1801,7 @@ static struct clk_fixed_factor g12a_fclk_div7_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -1430,7 +1814,9 @@ static struct clk_regmap g12a_fclk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div7_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div7_div.hw
+ },
.num_parents = 1,
},
};
@@ -1441,7 +1827,9 @@ static struct clk_fixed_factor g12a_fclk_div2p5_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2p5_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
.num_parents = 1,
},
};
@@ -1454,7 +1842,9 @@ static struct clk_regmap g12a_fclk_div2p5 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2p5",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div2p5_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2p5_div.hw
+ },
.num_parents = 1,
},
};
@@ -1465,7 +1855,9 @@ static struct clk_fixed_factor g12a_mpll_50m_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll_50m_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
.num_parents = 1,
},
};
@@ -1479,8 +1871,10 @@ static struct clk_regmap g12a_mpll_50m = {
.hw.init = &(struct clk_init_data){
.name = "mpll_50m",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal",
- "mpll_50m_div" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_mpll_50m_div.hw },
+ },
.num_parents = 2,
},
};
@@ -1491,7 +1885,9 @@ static struct clk_fixed_factor g12a_mpll_prediv = {
.hw.init = &(struct clk_init_data){
.name = "mpll_prediv",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
.num_parents = 1,
},
};
@@ -1529,7 +1925,9 @@ static struct clk_regmap g12a_mpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -1542,7 +1940,7 @@ static struct clk_regmap g12a_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll0_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_mpll0_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1581,7 +1979,9 @@ static struct clk_regmap g12a_mpll1_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -1594,7 +1994,7 @@ static struct clk_regmap g12a_mpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll1_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_mpll1_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1633,7 +2033,9 @@ static struct clk_regmap g12a_mpll2_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -1646,7 +2048,7 @@ static struct clk_regmap g12a_mpll2 = {
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll2_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_mpll2_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1685,7 +2087,9 @@ static struct clk_regmap g12a_mpll3_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll3_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -1698,16 +2102,21 @@ static struct clk_regmap g12a_mpll3 = {
.hw.init = &(struct clk_init_data){
.name = "mpll3",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll3_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_mpll3_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const char * const clk81_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
- "fclk_div3", "fclk_div5"
+static const struct clk_parent_data clk81_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div7.hw },
+ { .hw = &g12a_mpll1.hw },
+ { .hw = &g12a_mpll2.hw },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div5.hw },
};
static struct clk_regmap g12a_mpeg_clk_sel = {
@@ -1720,8 +2129,8 @@ static struct clk_regmap g12a_mpeg_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = clk81_parent_names,
- .num_parents = ARRAY_SIZE(clk81_parent_names),
+ .parent_data = clk81_parent_data,
+ .num_parents = ARRAY_SIZE(clk81_parent_data),
},
};
@@ -1734,7 +2143,9 @@ static struct clk_regmap g12a_mpeg_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpeg_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1748,15 +2159,20 @@ static struct clk_regmap g12a_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mpeg_clk_div.hw
+ },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const char * const g12a_sd_emmc_clk0_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
-
+static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div5.hw },
+ { .hw = &g12a_fclk_div7.hw },
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
@@ -1775,8 +2191,8 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .parent_data = g12a_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1790,7 +2206,9 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_a_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1804,7 +2222,9 @@ static struct clk_regmap g12a_sd_emmc_a_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_a_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_a_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1820,8 +2240,8 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .parent_data = g12a_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1835,7 +2255,9 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_b_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1849,7 +2271,9 @@ static struct clk_regmap g12a_sd_emmc_b_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_b_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1865,8 +2289,8 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names),
+ .parent_data = g12a_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1880,7 +2304,9 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_c_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1894,17 +2320,89 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_sd_emmc_c_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
+/* Video Clocks */
+
+static struct clk_regmap g12a_vid_pll_div = {
+ .data = &(struct meson_vid_pll_div_data){
+ .val = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 0,
+ .width = 15,
+ },
+ .sel = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 16,
+ .width = 2,
+ },
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll_div",
+ .ops = &meson_vid_pll_div_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_hdmi_pll.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct clk_hw *g12a_vid_pll_parent_hws[] = {
+ &g12a_vid_pll_div.hw,
+ &g12a_hdmi_pll.hw,
+};
+
+static struct clk_regmap g12a_vid_pll_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 18 selects from 2 possible parents:
+ * vid_pll_div or hdmi_pll
+ */
+ .parent_hws = g12a_vid_pll_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_vid_pll = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vid_pll_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
/* VPU Clock */
-static const char * const g12a_vpu_parent_names[] = {
- "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
- "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
+static const struct clk_hw *g12a_vpu_parent_hws[] = {
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div4.hw,
+ &g12a_fclk_div5.hw,
+ &g12a_fclk_div7.hw,
+ &g12a_mpll1.hw,
+ &g12a_vid_pll.hw,
+ &g12a_hifi_pll.hw,
+ &g12a_gp0_pll.hw,
};
static struct clk_regmap g12a_vpu_0_sel = {
@@ -1916,8 +2414,8 @@ static struct clk_regmap g12a_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vpu_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .parent_hws = g12a_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1931,7 +2429,7 @@ static struct clk_regmap g12a_vpu_0_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vpu_0_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1945,7 +2443,7 @@ static struct clk_regmap g12a_vpu_0 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_0_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vpu_0_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1960,8 +2458,8 @@ static struct clk_regmap g12a_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vpu_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_names),
+ .parent_hws = g12a_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1975,7 +2473,7 @@ static struct clk_regmap g12a_vpu_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vpu_1_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1989,7 +2487,7 @@ static struct clk_regmap g12a_vpu_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_1_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vpu_1_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2008,7 +2506,10 @@ static struct clk_regmap g12a_vpu = {
* bit 31 selects from 2 possible parents:
* vpu_0 or vpu_1
*/
- .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vpu_0.hw,
+ &g12a_vpu_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2016,9 +2517,14 @@ static struct clk_regmap g12a_vpu = {
/* VDEC clocks */
-static const char * const g12a_vdec_parent_names[] = {
- "fclk_div2p5", "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
- "hifi_pll", "gp0_pll",
+static const struct clk_hw *g12a_vdec_parent_hws[] = {
+ &g12a_fclk_div2p5.hw,
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div4.hw,
+ &g12a_fclk_div5.hw,
+ &g12a_fclk_div7.hw,
+ &g12a_hifi_pll.hw,
+ &g12a_gp0_pll.hw,
};
static struct clk_regmap g12a_vdec_1_sel = {
@@ -2031,8 +2537,8 @@ static struct clk_regmap g12a_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vdec_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .parent_hws = g12a_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2047,7 +2553,9 @@ static struct clk_regmap g12a_vdec_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2061,7 +2569,9 @@ static struct clk_regmap g12a_vdec_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2077,8 +2587,8 @@ static struct clk_regmap g12a_vdec_hevcf_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vdec_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .parent_hws = g12a_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2093,7 +2603,9 @@ static struct clk_regmap g12a_vdec_hevcf_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_hevcf_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_hevcf_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2107,7 +2619,9 @@ static struct clk_regmap g12a_vdec_hevcf = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_hevcf",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_hevcf_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_hevcf_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2123,8 +2637,8 @@ static struct clk_regmap g12a_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vdec_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .parent_hws = g12a_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2139,7 +2653,9 @@ static struct clk_regmap g12a_vdec_hevc_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_hevc_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2153,7 +2669,9 @@ static struct clk_regmap g12a_vdec_hevc = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_hevc",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vdec_hevc_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2161,9 +2679,15 @@ static struct clk_regmap g12a_vdec_hevc = {
/* VAPB Clock */
-static const char * const g12a_vapb_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
- "mpll1", "vid_pll", "mpll2", "fclk_div2p5",
+static const struct clk_hw *g12a_vapb_parent_hws[] = {
+ &g12a_fclk_div4.hw,
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div5.hw,
+ &g12a_fclk_div7.hw,
+ &g12a_mpll1.hw,
+ &g12a_vid_pll.hw,
+ &g12a_mpll2.hw,
+ &g12a_fclk_div2p5.hw,
};
static struct clk_regmap g12a_vapb_0_sel = {
@@ -2175,8 +2699,8 @@ static struct clk_regmap g12a_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vapb_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .parent_hws = g12a_vapb_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2190,7 +2714,9 @@ static struct clk_regmap g12a_vapb_0_div = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vapb_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vapb_0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2204,7 +2730,9 @@ static struct clk_regmap g12a_vapb_0 = {
.hw.init = &(struct clk_init_data) {
.name = "vapb_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vapb_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2219,8 +2747,8 @@ static struct clk_regmap g12a_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vapb_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_names),
+ .parent_hws = g12a_vapb_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2234,7 +2762,9 @@ static struct clk_regmap g12a_vapb_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vapb_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vapb_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2248,7 +2778,9 @@ static struct clk_regmap g12a_vapb_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vapb_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vapb_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2267,7 +2799,10 @@ static struct clk_regmap g12a_vapb_sel = {
* bit 31 selects from 2 possible parents:
* vapb_0 or vapb_1
*/
- .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vapb_0.hw,
+ &g12a_vapb_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2281,75 +2816,21 @@ static struct clk_regmap g12a_vapb = {
.hw.init = &(struct clk_init_data) {
.name = "vapb",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vapb_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-/* Video Clocks */
-
-static struct clk_regmap g12a_vid_pll_div = {
- .data = &(struct meson_vid_pll_div_data){
- .val = {
- .reg_off = HHI_VID_PLL_CLK_DIV,
- .shift = 0,
- .width = 15,
- },
- .sel = {
- .reg_off = HHI_VID_PLL_CLK_DIV,
- .shift = 16,
- .width = 2,
- },
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vid_pll_div",
- .ops = &meson_vid_pll_div_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div",
- "hdmi_pll" };
-
-static struct clk_regmap g12a_vid_pll_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_VID_PLL_CLK_DIV,
- .mask = 0x1,
- .shift = 18,
- },
- .hw.init = &(struct clk_init_data){
- .name = "vid_pll_sel",
- .ops = &clk_regmap_mux_ops,
- /*
- * bit 18 selects from 2 possible parents:
- * vid_pll_div or hdmi_pll
- */
- .parent_names = g12a_vid_pll_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-static struct clk_regmap g12a_vid_pll = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_PLL_CLK_DIV,
- .bit_idx = 19,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vid_pll",
- .ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vid_pll_sel" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
- },
-};
-
-static const char * const g12a_vclk_parent_names[] = {
- "vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4",
- "fclk_div5", "fclk_div7"
+static const struct clk_hw *g12a_vclk_parent_hws[] = {
+ &g12a_vid_pll.hw,
+ &g12a_gp0_pll.hw,
+ &g12a_hifi_pll.hw,
+ &g12a_mpll1.hw,
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div4.hw,
+ &g12a_fclk_div5.hw,
+ &g12a_fclk_div7.hw,
};
static struct clk_regmap g12a_vclk_sel = {
@@ -2361,8 +2842,8 @@ static struct clk_regmap g12a_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vclk_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .parent_hws = g12a_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2376,8 +2857,8 @@ static struct clk_regmap g12a_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_vclk_parent_names,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_names),
+ .parent_hws = g12a_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2390,7 +2871,7 @@ static struct clk_regmap g12a_vclk_input = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_input",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2404,7 +2885,7 @@ static struct clk_regmap g12a_vclk2_input = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_input",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2419,7 +2900,9 @@ static struct clk_regmap g12a_vclk_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vclk_input" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk_input.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -2434,7 +2917,9 @@ static struct clk_regmap g12a_vclk2_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vclk2_input" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk2_input.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -2448,7 +2933,7 @@ static struct clk_regmap g12a_vclk = {
.hw.init = &(struct clk_init_data) {
.name = "vclk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2462,7 +2947,7 @@ static struct clk_regmap g12a_vclk2 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2476,7 +2961,7 @@ static struct clk_regmap g12a_vclk_div1 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2490,7 +2975,7 @@ static struct clk_regmap g12a_vclk_div2_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div2_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2504,7 +2989,7 @@ static struct clk_regmap g12a_vclk_div4_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div4_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2518,7 +3003,7 @@ static struct clk_regmap g12a_vclk_div6_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div6_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2532,7 +3017,7 @@ static struct clk_regmap g12a_vclk_div12_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div12_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2546,7 +3031,7 @@ static struct clk_regmap g12a_vclk2_div1 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2560,7 +3045,7 @@ static struct clk_regmap g12a_vclk2_div2_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div2_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2574,7 +3059,7 @@ static struct clk_regmap g12a_vclk2_div4_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div4_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2588,7 +3073,7 @@ static struct clk_regmap g12a_vclk2_div6_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div6_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2602,7 +3087,7 @@ static struct clk_regmap g12a_vclk2_div12_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div12_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2614,7 +3099,9 @@ static struct clk_fixed_factor g12a_vclk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div2_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk_div2_en.hw
+ },
.num_parents = 1,
},
};
@@ -2625,7 +3112,9 @@ static struct clk_fixed_factor g12a_vclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div4_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk_div4_en.hw
+ },
.num_parents = 1,
},
};
@@ -2636,7 +3125,9 @@ static struct clk_fixed_factor g12a_vclk_div6 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div6_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk_div6_en.hw
+ },
.num_parents = 1,
},
};
@@ -2647,7 +3138,9 @@ static struct clk_fixed_factor g12a_vclk_div12 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div12_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk_div12_en.hw
+ },
.num_parents = 1,
},
};
@@ -2658,7 +3151,9 @@ static struct clk_fixed_factor g12a_vclk2_div2 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div2_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk2_div2_en.hw
+ },
.num_parents = 1,
},
};
@@ -2669,7 +3164,9 @@ static struct clk_fixed_factor g12a_vclk2_div4 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div4_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk2_div4_en.hw
+ },
.num_parents = 1,
},
};
@@ -2680,7 +3177,9 @@ static struct clk_fixed_factor g12a_vclk2_div6 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div6_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk2_div6_en.hw
+ },
.num_parents = 1,
},
};
@@ -2691,16 +3190,25 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div12_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_vclk2_div12_en.hw
+ },
.num_parents = 1,
},
};
static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const char * const g12a_cts_parent_names[] = {
- "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
- "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
- "vclk2_div6", "vclk2_div12"
+static const struct clk_hw *g12a_cts_parent_hws[] = {
+ &g12a_vclk_div1.hw,
+ &g12a_vclk_div2.hw,
+ &g12a_vclk_div4.hw,
+ &g12a_vclk_div6.hw,
+ &g12a_vclk_div12.hw,
+ &g12a_vclk2_div1.hw,
+ &g12a_vclk2_div2.hw,
+ &g12a_vclk2_div4.hw,
+ &g12a_vclk2_div6.hw,
+ &g12a_vclk2_div12.hw,
};
static struct clk_regmap g12a_cts_enci_sel = {
@@ -2713,8 +3221,8 @@ static struct clk_regmap g12a_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_cts_parent_names,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .parent_hws = g12a_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2729,8 +3237,8 @@ static struct clk_regmap g12a_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_cts_parent_names,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .parent_hws = g12a_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2745,18 +3253,25 @@ static struct clk_regmap g12a_cts_vdac_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_cts_parent_names,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_names),
+ .parent_hws = g12a_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const char * const g12a_cts_hdmi_tx_parent_names[] = {
- "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
- "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
- "vclk2_div6", "vclk2_div12"
+static const struct clk_hw *g12a_cts_hdmi_tx_parent_hws[] = {
+ &g12a_vclk_div1.hw,
+ &g12a_vclk_div2.hw,
+ &g12a_vclk_div4.hw,
+ &g12a_vclk_div6.hw,
+ &g12a_vclk_div12.hw,
+ &g12a_vclk2_div1.hw,
+ &g12a_vclk2_div2.hw,
+ &g12a_vclk2_div4.hw,
+ &g12a_vclk2_div6.hw,
+ &g12a_vclk2_div12.hw,
};
static struct clk_regmap g12a_hdmi_tx_sel = {
@@ -2769,8 +3284,8 @@ static struct clk_regmap g12a_hdmi_tx_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_cts_hdmi_tx_parent_names,
- .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names),
+ .parent_hws = g12a_cts_hdmi_tx_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2783,7 +3298,9 @@ static struct clk_regmap g12a_cts_enci = {
.hw.init = &(struct clk_init_data) {
.name = "cts_enci",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_enci_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cts_enci_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2797,7 +3314,9 @@ static struct clk_regmap g12a_cts_encp = {
.hw.init = &(struct clk_init_data) {
.name = "cts_encp",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_encp_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cts_encp_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2811,7 +3330,9 @@ static struct clk_regmap g12a_cts_vdac = {
.hw.init = &(struct clk_init_data) {
.name = "cts_vdac",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_vdac_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cts_vdac_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2825,7 +3346,9 @@ static struct clk_regmap g12a_hdmi_tx = {
.hw.init = &(struct clk_init_data) {
.name = "hdmi_tx",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "hdmi_tx_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_tx_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2833,8 +3356,11 @@ static struct clk_regmap g12a_hdmi_tx = {
/* HDMI Clocks */
-static const char * const g12a_hdmi_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+static const struct clk_parent_data g12a_hdmi_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div5.hw },
};
static struct clk_regmap g12a_hdmi_sel = {
@@ -2847,8 +3373,8 @@ static struct clk_regmap g12a_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_hdmi_parent_names,
- .num_parents = ARRAY_SIZE(g12a_hdmi_parent_names),
+ .parent_data = g12a_hdmi_parent_data,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parent_data),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2862,7 +3388,7 @@ static struct clk_regmap g12a_hdmi_div = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "hdmi_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_hdmi_sel.hw },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -2876,7 +3402,7 @@ static struct clk_regmap g12a_hdmi = {
.hw.init = &(struct clk_init_data) {
.name = "hdmi",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "hdmi_div" },
+ .parent_hws = (const struct clk_hw *[]) { &g12a_hdmi_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2886,10 +3412,15 @@ static struct clk_regmap g12a_hdmi = {
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
* muxed by a glitch-free switch.
*/
-
-static const char * const g12a_mali_0_1_parent_names[] = {
- IN_PREFIX "xtal", "gp0_pll", "hihi_pll", "fclk_div2p5",
- "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7"
+static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_gp0_pll.hw },
+ { .hw = &g12a_hifi_pll.hw },
+ { .hw = &g12a_fclk_div2p5.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div5.hw },
+ { .hw = &g12a_fclk_div7.hw },
};
static struct clk_regmap g12a_mali_0_sel = {
@@ -2901,7 +3432,7 @@ static struct clk_regmap g12a_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_mali_0_1_parent_names,
+ .parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2916,7 +3447,9 @@ static struct clk_regmap g12a_mali_0_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2930,7 +3463,9 @@ static struct clk_regmap g12a_mali_0 = {
.hw.init = &(struct clk_init_data){
.name = "mali_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2945,7 +3480,7 @@ static struct clk_regmap g12a_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_mali_0_1_parent_names,
+ .parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2960,7 +3495,9 @@ static struct clk_regmap g12a_mali_1_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -2974,14 +3511,17 @@ static struct clk_regmap g12a_mali_1 = {
.hw.init = &(struct clk_init_data){
.name = "mali_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const g12a_mali_parent_names[] = {
- "mali_0", "mali_1"
+static const struct clk_hw *g12a_mali_parent_hws[] = {
+ &g12a_mali_0.hw,
+ &g12a_mali_1.hw,
};
static struct clk_regmap g12a_mali = {
@@ -2993,7 +3533,7 @@ static struct clk_regmap g12a_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_names = g12a_mali_parent_names,
+ .parent_hws = g12a_mali_parent_hws,
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -3008,7 +3548,9 @@ static struct clk_regmap g12a_ts_div = {
.hw.init = &(struct clk_init_data){
.name = "ts_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -3021,11 +3563,19 @@ static struct clk_regmap g12a_ts = {
.hw.init = &(struct clk_init_data){
.name = "ts",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "ts_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_ts_div.hw
+ },
.num_parents = 1,
},
};
+#define MESON_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
+
+#define MESON_GATE_RO(_name, _reg, _bit) \
+ MESON_PCLK_RO(_name, _reg, _bit, &g12a_clk81.hw)
+
/* Everything Else (EE) domain gates */
static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
@@ -3792,28 +4342,210 @@ static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
-static const struct meson_eeclkc_data g12a_clkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
- .hw_onecell_data = &g12a_hw_onecell_data,
- .init_regs = g12a_init_regs,
- .init_count = ARRAY_SIZE(g12a_init_regs),
-};
-
-static const struct meson_eeclkc_data g12b_clkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
- .hw_onecell_data = &g12b_hw_onecell_data
+static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
+ struct clk_hw **hws)
+{
+ const char *notifier_clk_name;
+ struct clk *notifier_clk;
+ struct clk_hw *xtal;
+ int ret;
+
+ xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
+
+ /* Setup clock notifier for cpu_clk_postmux0 */
+ g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
+ notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_postmux0.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk,
+ &g12a_cpu_clk_postmux0_nb_data.nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for cpu_clk_dyn mux */
+ notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_dyn.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpu_clk_dyn notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int meson_g12b_dvfs_setup(struct platform_device *pdev)
+{
+ struct clk_hw **hws = g12b_hw_onecell_data.hws;
+ const char *notifier_clk_name;
+ struct clk *notifier_clk;
+ struct clk_hw *xtal;
+ int ret;
+
+ ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ if (ret)
+ return ret;
+
+ xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
+
+ /* Setup clock notifier for cpu_clk mux */
+ notifier_clk_name = clk_hw_get_name(&g12b_cpu_clk.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for sys1_pll */
+ notifier_clk_name = clk_hw_get_name(&g12b_sys1_pll.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk,
+ &g12b_cpu_clk_sys1_pll_nb_data.nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the sys1_pll notifier\n");
+ return ret;
+ }
+
+ /* Add notifiers for the second CPU cluster */
+
+ /* Setup clock notifier for cpub_clk_postmux0 */
+ g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
+ notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_postmux0.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk,
+ &g12b_cpub_clk_postmux0_nb_data.nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for cpub_clk_dyn mux */
+ notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_dyn.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpub_clk_dyn notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for cpub_clk mux */
+ notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpub_clk notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for sys_pll */
+ notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk,
+ &g12b_cpub_clk_sys_pll_nb_data.nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int meson_g12a_dvfs_setup(struct platform_device *pdev)
+{
+ struct clk_hw **hws = g12a_hw_onecell_data.hws;
+ const char *notifier_clk_name;
+ struct clk *notifier_clk;
+ int ret;
+
+ ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ if (ret)
+ return ret;
+
+ /* Setup clock notifier for cpu_clk mux */
+ notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ return ret;
+ }
+
+ /* Setup clock notifier for sys_pll */
+ notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &g12a_sys_pll_nb_data.nb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+struct meson_g12a_data {
+ const struct meson_eeclkc_data eeclkc_data;
+ int (*dvfs_setup)(struct platform_device *pdev);
+};
+
+static int meson_g12a_probe(struct platform_device *pdev)
+{
+ const struct meson_eeclkc_data *eeclkc_data;
+ const struct meson_g12a_data *g12a_data;
+ int ret;
+
+ eeclkc_data = of_device_get_match_data(&pdev->dev);
+ if (!eeclkc_data)
+ return -EINVAL;
+
+ ret = meson_eeclkc_probe(pdev);
+ if (ret)
+ return ret;
+
+ g12a_data = container_of(eeclkc_data, struct meson_g12a_data,
+ eeclkc_data);
+
+ if (g12a_data->dvfs_setup)
+ return g12a_data->dvfs_setup(pdev);
+
+ return 0;
+}
+
+static const struct meson_g12a_data g12a_clkc_data = {
+ .eeclkc_data = {
+ .regmap_clks = g12a_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+ .hw_onecell_data = &g12a_hw_onecell_data,
+ .init_regs = g12a_init_regs,
+ .init_count = ARRAY_SIZE(g12a_init_regs),
+ },
+ .dvfs_setup = meson_g12a_dvfs_setup,
+};
+
+static const struct meson_g12a_data g12b_clkc_data = {
+ .eeclkc_data = {
+ .regmap_clks = g12a_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+ .hw_onecell_data = &g12b_hw_onecell_data,
+ },
+ .dvfs_setup = meson_g12b_dvfs_setup,
};
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data },
- { .compatible = "amlogic,g12b-clkc", .data = &g12b_clkc_data },
+ {
+ .compatible = "amlogic,g12a-clkc",
+ .data = &g12a_clkc_data.eeclkc_data
+ },
+ {
+ .compatible = "amlogic,g12b-clkc",
+ .data = &g12b_clkc_data.eeclkc_data
+ },
{}
};
static struct platform_driver g12a_driver = {
- .probe = meson_eeclkc_probe,
+ .probe = meson_g12a_probe,
.driver = {
.name = "g12a-clkc",
.of_match_table = clkc_match_table,
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index c8aed31fbe17..559a34cfdfeb 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -216,7 +216,6 @@
#define CLKID_CPUB_CLK_DYN1_DIV 221
#define CLKID_CPUB_CLK_DYN1 222
#define CLKID_CPUB_CLK_DYN 223
-#define CLKID_CPUB_CLK 224
#define CLKID_CPUB_CLK_DIV16_EN 225
#define CLKID_CPUB_CLK_DIV16 226
#define CLKID_CPUB_CLK_DIV2 227
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 449f6ac189d8..e940861a396b 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -11,8 +11,6 @@
#include "clk-regmap.h"
#include "clk-dualdiv.h"
-#define IN_PREFIX "ao-in-"
-
/* AO Configuration Clock registers offsets */
#define AO_RTI_PWR_CNTL_REG1 0x0c
#define AO_RTI_PWR_CNTL_REG0 0x10
@@ -31,7 +29,9 @@ static struct clk_regmap _name##_ao = { \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
.ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \
+ .parent_data = &(const struct clk_parent_data) { \
+ .fw_name = "mpeg-clk", \
+ }, \
.num_parents = 1, \
.flags = CLK_IGNORE_UNUSED, \
}, \
@@ -52,7 +52,9 @@ static struct clk_regmap ao_cts_oscin = {
.hw.init = &(struct clk_init_data){
.name = "ao_cts_oscin",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -65,7 +67,7 @@ static struct clk_regmap ao_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "ao_cts_oscin" },
+ .parent_hws = (const struct clk_hw *[]) { &ao_cts_oscin.hw },
.num_parents = 1,
},
};
@@ -112,7 +114,7 @@ static struct clk_regmap ao_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_names = (const char *[]){ "ao_32k_pre" },
+ .parent_hws = (const struct clk_hw *[]) { &ao_32k_pre.hw },
.num_parents = 1,
},
};
@@ -127,8 +129,10 @@ static struct clk_regmap ao_32k_sel = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "ao_32k_div",
- "ao_32k_pre" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &ao_32k_div.hw,
+ &ao_32k_pre.hw
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -142,7 +146,7 @@ static struct clk_regmap ao_32k = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "ao_32k_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &ao_32k_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -159,10 +163,12 @@ static struct clk_regmap ao_cts_rtc_oscin = {
.hw.init = &(struct clk_init_data){
.name = "ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0",
- IN_PREFIX "ext-32k-1",
- IN_PREFIX "ext-32k-2",
- "ao_32k" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "ext-32k-0", },
+ { .fw_name = "ext-32k-1", },
+ { .fw_name = "ext-32k-2", },
+ { .hw = &ao_32k.hw },
+ },
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
@@ -178,8 +184,10 @@ static struct clk_regmap ao_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk",
- "ao_cts_rtc_oscin" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "mpeg-clk", },
+ { .hw = &ao_cts_rtc_oscin.hw },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -208,8 +216,10 @@ static struct clk_regmap ao_cts_cec = {
* Until CCF gets fixed, adding this fake parent that won't
* ever be registered should work around the problem
*/
- .parent_names = (const char *[]){ "fixme",
- "ao_cts_rtc_oscin" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .name = "fixme", .index = -1, },
+ { .hw = &ao_cts_rtc_oscin.hw },
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -261,14 +271,6 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = {
.num = NR_CLKS,
};
-static const struct meson_aoclk_input gxbb_aoclk_inputs[] = {
- { .name = "xtal", .required = true, },
- { .name = "mpeg-clk", .required = true, },
- {. name = "ext-32k-0", .required = false, },
- {. name = "ext-32k-1", .required = false, },
- {. name = "ext-32k-2", .required = false, },
-};
-
static const struct meson_aoclk_data gxbb_aoclkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
.num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
@@ -276,9 +278,6 @@ static const struct meson_aoclk_data gxbb_aoclkc_data = {
.num_clks = ARRAY_SIZE(gxbb_aoclk),
.clks = gxbb_aoclk,
.hw_data = &gxbb_aoclk_onecell_data,
- .inputs = gxbb_aoclk_inputs,
- .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs),
- .input_prefix = IN_PREFIX,
};
static const struct of_device_id gxbb_aoclkc_match_table[] = {
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index dab16d9b1af8..7cfb998eeb3e 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -10,15 +10,12 @@
#include <linux/platform_device.h>
#include "gxbb.h"
-#include "clk-input.h"
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
#include "meson-eeclk.h"
#include "vid-pll-div.h"
-#define IN_PREFIX "ee-in-"
-
static DEFINE_SPINLOCK(meson_clk_lock);
static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
@@ -121,7 +118,9 @@ static struct clk_regmap gxbb_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -136,7 +135,9 @@ static struct clk_regmap gxbb_fixed_pll = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fixed_pll_dco.hw
+ },
.num_parents = 1,
/*
* This clock won't ever change at runtime so
@@ -151,7 +152,9 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_pre_mult",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -192,7 +195,9 @@ static struct clk_regmap gxbb_hdmi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_pre_mult" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_hdmi_pll_pre_mult.hw
+ },
.num_parents = 1,
/*
* Display directly handle hdmi pll registers ATM, we need
@@ -244,7 +249,9 @@ static struct clk_regmap gxl_hdmi_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
/*
* Display directly handle hdmi pll registers ATM, we need
@@ -264,7 +271,9 @@ static struct clk_regmap gxbb_hdmi_pll_od = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -280,7 +289,9 @@ static struct clk_regmap gxbb_hdmi_pll_od2 = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od2",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_hdmi_pll_od.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -296,7 +307,9 @@ static struct clk_regmap gxbb_hdmi_pll = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_hdmi_pll_od2.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -312,7 +325,9 @@ static struct clk_regmap gxl_hdmi_pll_od = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxl_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -328,7 +343,9 @@ static struct clk_regmap gxl_hdmi_pll_od2 = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_od2",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxl_hdmi_pll_od.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -344,7 +361,9 @@ static struct clk_regmap gxl_hdmi_pll = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_od2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxl_hdmi_pll_od2.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -381,7 +400,9 @@ static struct clk_regmap gxbb_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -396,7 +417,9 @@ static struct clk_regmap gxbb_sys_pll = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "sys_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sys_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -442,7 +465,9 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -494,7 +519,9 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
.num_parents = 1,
},
};
@@ -509,7 +536,17 @@ static struct clk_regmap gxbb_gp0_pll = {
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gp0_pll_dco" },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * GXL and GXBB have different gp0_pll_dco (with
+ * different struct clk_hw). We fallback to the global
+ * naming string mechanism so gp0_pll picks up the
+ * appropriate one.
+ */
+ .name = "gp0_pll_dco",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -521,7 +558,9 @@ static struct clk_fixed_factor gxbb_fclk_div2_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -534,7 +573,9 @@ static struct clk_regmap gxbb_fclk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fclk_div2_div.hw
+ },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
},
@@ -546,7 +587,7 @@ static struct clk_fixed_factor gxbb_fclk_div3_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -559,7 +600,9 @@ static struct clk_regmap gxbb_fclk_div3 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div3_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fclk_div3_div.hw
+ },
.num_parents = 1,
/*
* FIXME:
@@ -582,7 +625,7 @@ static struct clk_fixed_factor gxbb_fclk_div4_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -595,7 +638,9 @@ static struct clk_regmap gxbb_fclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div4_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fclk_div4_div.hw
+ },
.num_parents = 1,
},
};
@@ -606,7 +651,7 @@ static struct clk_fixed_factor gxbb_fclk_div5_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -619,7 +664,9 @@ static struct clk_regmap gxbb_fclk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div5_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fclk_div5_div.hw
+ },
.num_parents = 1,
},
};
@@ -630,7 +677,7 @@ static struct clk_fixed_factor gxbb_fclk_div7_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -643,7 +690,9 @@ static struct clk_regmap gxbb_fclk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div7_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_fclk_div7_div.hw
+ },
.num_parents = 1,
},
};
@@ -657,7 +706,7 @@ static struct clk_regmap gxbb_mpll_prediv = {
.hw.init = &(struct clk_init_data){
.name = "mpll_prediv",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_fixed_pll.hw },
.num_parents = 1,
},
};
@@ -684,7 +733,9 @@ static struct clk_regmap gxbb_mpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -697,7 +748,7 @@ static struct clk_regmap gxbb_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll0_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -725,7 +776,9 @@ static struct clk_regmap gxbb_mpll1_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -738,7 +791,7 @@ static struct clk_regmap gxbb_mpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll1_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll1_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -766,7 +819,9 @@ static struct clk_regmap gxbb_mpll2_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -779,16 +834,21 @@ static struct clk_regmap gxbb_mpll2 = {
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll2_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll2_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const char * const clk81_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
- "fclk_div3", "fclk_div5"
+static const struct clk_parent_data clk81_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_fclk_div7.hw },
+ { .hw = &gxbb_mpll1.hw },
+ { .hw = &gxbb_mpll2.hw },
+ { .hw = &gxbb_fclk_div4.hw },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
};
static struct clk_regmap gxbb_mpeg_clk_sel = {
@@ -806,8 +866,8 @@ static struct clk_regmap gxbb_mpeg_clk_sel = {
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_names = clk81_parent_names,
- .num_parents = ARRAY_SIZE(clk81_parent_names),
+ .parent_data = clk81_parent_data,
+ .num_parents = ARRAY_SIZE(clk81_parent_data),
},
};
@@ -820,7 +880,9 @@ static struct clk_regmap gxbb_mpeg_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpeg_clk_sel.hw
+ },
.num_parents = 1,
},
};
@@ -834,7 +896,9 @@ static struct clk_regmap gxbb_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpeg_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
},
@@ -850,7 +914,10 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = {
.name = "sar_adc_clk_sel",
.ops = &clk_regmap_mux_ops,
/* NOTE: The datasheet doesn't list the parents for bit 10 */
- .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_clk81.hw },
+ },
.num_parents = 2,
},
};
@@ -864,7 +931,9 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sar_adc_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sar_adc_clk_sel.hw
+ },
.num_parents = 1,
},
};
@@ -877,7 +946,9 @@ static struct clk_regmap gxbb_sar_adc_clk = {
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sar_adc_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sar_adc_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -888,9 +959,15 @@ static struct clk_regmap gxbb_sar_adc_clk = {
* muxed by a glitch-free switch.
*/
-static const char * const gxbb_mali_0_1_parent_names[] = {
- IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
- "fclk_div4", "fclk_div3", "fclk_div5"
+static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_gp0_pll.hw },
+ { .hw = &gxbb_mpll2.hw },
+ { .hw = &gxbb_mpll1.hw },
+ { .hw = &gxbb_fclk_div7.hw },
+ { .hw = &gxbb_fclk_div4.hw },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
};
static struct clk_regmap gxbb_mali_0_sel = {
@@ -907,7 +984,7 @@ static struct clk_regmap gxbb_mali_0_sel = {
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_names = gxbb_mali_0_1_parent_names,
+ .parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -922,7 +999,9 @@ static struct clk_regmap gxbb_mali_0_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -936,7 +1015,9 @@ static struct clk_regmap gxbb_mali_0 = {
.hw.init = &(struct clk_init_data){
.name = "mali_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -956,7 +1037,7 @@ static struct clk_regmap gxbb_mali_1_sel = {
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_names = gxbb_mali_0_1_parent_names,
+ .parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -971,7 +1052,9 @@ static struct clk_regmap gxbb_mali_1_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -985,14 +1068,17 @@ static struct clk_regmap gxbb_mali_1 = {
.hw.init = &(struct clk_init_data){
.name = "mali_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const gxbb_mali_parent_names[] = {
- "mali_0", "mali_1"
+static const struct clk_hw *gxbb_mali_parent_hws[] = {
+ &gxbb_mali_0.hw,
+ &gxbb_mali_1.hw,
};
static struct clk_regmap gxbb_mali = {
@@ -1004,7 +1090,7 @@ static struct clk_regmap gxbb_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_mali_parent_names,
+ .parent_hws = gxbb_mali_parent_hws,
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1021,7 +1107,11 @@ static struct clk_regmap gxbb_cts_amclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+ },
.num_parents = 3,
},
};
@@ -1036,7 +1126,9 @@ static struct clk_regmap gxbb_cts_amclk_div = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "cts_amclk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_amclk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1050,7 +1142,9 @@ static struct clk_regmap gxbb_cts_amclk = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_amclk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_amclk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1067,7 +1161,11 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+ },
.num_parents = 3,
},
};
@@ -1082,7 +1180,9 @@ static struct clk_regmap gxbb_cts_mclk_i958_div = {
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "cts_mclk_i958_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_mclk_i958_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1096,7 +1196,9 @@ static struct clk_regmap gxbb_cts_mclk_i958 = {
.hw.init = &(struct clk_init_data){
.name = "cts_mclk_i958",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_mclk_i958_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_mclk_i958_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1111,7 +1213,10 @@ static struct clk_regmap gxbb_cts_i958 = {
.hw.init = &(struct clk_init_data){
.name = "cts_i958",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cts_amclk", "cts_mclk_i958" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_amclk.hw,
+ &gxbb_cts_mclk_i958.hw
+ },
.num_parents = 2,
/*
*The parent is specific to origin of the audio data. Let the
@@ -1121,6 +1226,33 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
+static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+ { .fw_name = "xtal", },
+ /*
+ * FIXME: This clock is provided by the ao clock controller but the
+ * clock is not yet part of the binding of this controller, so string
+ * name must be use to set this parent.
+ */
+ { .name = "cts_slow_oscin", .index = -1 },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
+};
+
+static struct clk_regmap gxbb_32k_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "32k_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = gxbb_32k_clk_parent_data,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap gxbb_32k_clk_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_32K_CLK_CNTL,
@@ -1130,7 +1262,9 @@ static struct clk_regmap gxbb_32k_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "32k_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "32k_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_32k_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
},
@@ -1144,34 +1278,20 @@ static struct clk_regmap gxbb_32k_clk = {
.hw.init = &(struct clk_init_data){
.name = "32k_clk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "32k_clk_div" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const char * const gxbb_32k_clk_parent_names[] = {
- IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
-};
-
-static struct clk_regmap gxbb_32k_clk_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_32K_CLK_CNTL,
- .mask = 0x3,
- .shift = 16,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_32k_clk_div.hw
},
- .hw.init = &(struct clk_init_data){
- .name = "32k_clk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_32k_clk_parent_names,
- .num_parents = 4,
+ .num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
-
+static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_fclk_div2.hw },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
+ { .hw = &gxbb_fclk_div7.hw },
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
@@ -1190,8 +1310,8 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .parent_data = gxbb_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1206,7 +1326,9 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_a_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1220,7 +1342,9 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_a_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_a_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1236,8 +1360,8 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .parent_data = gxbb_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1252,7 +1376,9 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_b_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1266,7 +1392,9 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_b_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1282,8 +1410,8 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_sd_emmc_clk0_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
+ .parent_data = gxbb_sd_emmc_clk0_parent_data,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1298,7 +1426,9 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_div = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_c_clk0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1312,7 +1442,9 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_sd_emmc_c_clk0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1320,8 +1452,11 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const char * const gxbb_vpu_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+static const struct clk_hw *gxbb_vpu_parent_hws[] = {
+ &gxbb_fclk_div4.hw,
+ &gxbb_fclk_div3.hw,
+ &gxbb_fclk_div5.hw,
+ &gxbb_fclk_div7.hw,
};
static struct clk_regmap gxbb_vpu_0_sel = {
@@ -1337,8 +1472,8 @@ static struct clk_regmap gxbb_vpu_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_names = gxbb_vpu_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .parent_hws = gxbb_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1352,7 +1487,7 @@ static struct clk_regmap gxbb_vpu_0_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vpu_0_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1366,7 +1501,7 @@ static struct clk_regmap gxbb_vpu_0 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_0_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vpu_0_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1385,8 +1520,8 @@ static struct clk_regmap gxbb_vpu_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_names = gxbb_vpu_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .parent_hws = gxbb_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1400,7 +1535,7 @@ static struct clk_regmap gxbb_vpu_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vpu_1_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1414,7 +1549,7 @@ static struct clk_regmap gxbb_vpu_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_1_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vpu_1_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1433,7 +1568,10 @@ static struct clk_regmap gxbb_vpu = {
* bit 31 selects from 2 possible parents:
* vpu_0 or vpu_1
*/
- .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vpu_0.hw,
+ &gxbb_vpu_1.hw
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1441,8 +1579,11 @@ static struct clk_regmap gxbb_vpu = {
/* VAPB Clock */
-static const char * const gxbb_vapb_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+static const struct clk_hw *gxbb_vapb_parent_hws[] = {
+ &gxbb_fclk_div4.hw,
+ &gxbb_fclk_div3.hw,
+ &gxbb_fclk_div5.hw,
+ &gxbb_fclk_div7.hw,
};
static struct clk_regmap gxbb_vapb_0_sel = {
@@ -1458,8 +1599,8 @@ static struct clk_regmap gxbb_vapb_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_names = gxbb_vapb_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .parent_hws = gxbb_vapb_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1473,7 +1614,9 @@ static struct clk_regmap gxbb_vapb_0_div = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vapb_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vapb_0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1487,7 +1630,9 @@ static struct clk_regmap gxbb_vapb_0 = {
.hw.init = &(struct clk_init_data) {
.name = "vapb_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vapb_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1506,8 +1651,8 @@ static struct clk_regmap gxbb_vapb_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_names = gxbb_vapb_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .parent_hws = gxbb_vapb_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1521,7 +1666,9 @@ static struct clk_regmap gxbb_vapb_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vapb_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vapb_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1535,7 +1682,9 @@ static struct clk_regmap gxbb_vapb_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vapb_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vapb_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1554,7 +1703,10 @@ static struct clk_regmap gxbb_vapb_sel = {
* bit 31 selects from 2 possible parents:
* vapb_0 or vapb_1
*/
- .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vapb_0.hw,
+ &gxbb_vapb_1.hw
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1568,7 +1720,7 @@ static struct clk_regmap gxbb_vapb = {
.hw.init = &(struct clk_init_data) {
.name = "vapb",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vapb_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vapb_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1592,13 +1744,33 @@ static struct clk_regmap gxbb_vid_pll_div = {
.hw.init = &(struct clk_init_data) {
.name = "vid_pll_div",
.ops = &meson_vid_pll_div_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll" },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * GXL and GXBB have different hdmi_plls (with
+ * different struct clk_hw). We fallback to the global
+ * naming string mechanism so vid_pll_div picks up the
+ * appropriate one.
+ */
+ .name = "hdmi_pll",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
};
-static const char * const gxbb_vid_pll_parent_names[] = { "vid_pll_div", "hdmi_pll" };
+static const struct clk_parent_data gxbb_vid_pll_parent_data[] = {
+ { .hw = &gxbb_vid_pll_div.hw },
+ /*
+ * Note:
+ * GXL and GXBB have different hdmi_plls (with
+ * different struct clk_hw). We fallback to the global
+ * naming string mechanism so vid_pll_div picks up the
+ * appropriate one.
+ */
+ { .name = "hdmi_pll", .index = -1 },
+};
static struct clk_regmap gxbb_vid_pll_sel = {
.data = &(struct clk_regmap_mux_data){
@@ -1613,8 +1785,8 @@ static struct clk_regmap gxbb_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_names = gxbb_vid_pll_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_names),
+ .parent_data = gxbb_vid_pll_parent_data,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_data),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1627,15 +1799,22 @@ static struct clk_regmap gxbb_vid_pll = {
.hw.init = &(struct clk_init_data) {
.name = "vid_pll",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vid_pll_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vid_pll_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static const char * const gxbb_vclk_parent_names[] = {
- "vid_pll", "fclk_div4", "fclk_div3", "fclk_div5", "vid_pll",
- "fclk_div7", "mpll1",
+static const struct clk_hw *gxbb_vclk_parent_hws[] = {
+ &gxbb_vid_pll.hw,
+ &gxbb_fclk_div4.hw,
+ &gxbb_fclk_div3.hw,
+ &gxbb_fclk_div5.hw,
+ &gxbb_vid_pll.hw,
+ &gxbb_fclk_div7.hw,
+ &gxbb_mpll1.hw,
};
static struct clk_regmap gxbb_vclk_sel = {
@@ -1652,8 +1831,8 @@ static struct clk_regmap gxbb_vclk_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_names = gxbb_vclk_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_names),
+ .parent_hws = gxbb_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1672,8 +1851,8 @@ static struct clk_regmap gxbb_vclk2_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_names = gxbb_vclk_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_names),
+ .parent_hws = gxbb_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1686,7 +1865,7 @@ static struct clk_regmap gxbb_vclk_input = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_input",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1700,7 +1879,7 @@ static struct clk_regmap gxbb_vclk2_input = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_input",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1715,7 +1894,9 @@ static struct clk_regmap gxbb_vclk_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vclk_input" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk_input.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -1730,7 +1911,9 @@ static struct clk_regmap gxbb_vclk2_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vclk2_input" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk2_input.hw
+ },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -1744,7 +1927,7 @@ static struct clk_regmap gxbb_vclk = {
.hw.init = &(struct clk_init_data) {
.name = "vclk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1758,7 +1941,7 @@ static struct clk_regmap gxbb_vclk2 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1772,7 +1955,7 @@ static struct clk_regmap gxbb_vclk_div1 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1786,7 +1969,7 @@ static struct clk_regmap gxbb_vclk_div2_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div2_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1800,7 +1983,7 @@ static struct clk_regmap gxbb_vclk_div4_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div4_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1814,7 +1997,7 @@ static struct clk_regmap gxbb_vclk_div6_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div6_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1828,7 +2011,7 @@ static struct clk_regmap gxbb_vclk_div12_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk_div12_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1842,7 +2025,7 @@ static struct clk_regmap gxbb_vclk2_div1 = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1856,7 +2039,7 @@ static struct clk_regmap gxbb_vclk2_div2_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div2_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1870,7 +2053,7 @@ static struct clk_regmap gxbb_vclk2_div4_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div4_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1884,7 +2067,7 @@ static struct clk_regmap gxbb_vclk2_div6_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div6_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1898,7 +2081,7 @@ static struct clk_regmap gxbb_vclk2_div12_en = {
.hw.init = &(struct clk_init_data) {
.name = "vclk2_div12_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vclk2" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_vclk2.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -1910,7 +2093,9 @@ static struct clk_fixed_factor gxbb_vclk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div2_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk_div2_en.hw
+ },
.num_parents = 1,
},
};
@@ -1921,7 +2106,9 @@ static struct clk_fixed_factor gxbb_vclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div4_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk_div4_en.hw
+ },
.num_parents = 1,
},
};
@@ -1932,7 +2119,9 @@ static struct clk_fixed_factor gxbb_vclk_div6 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div6_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk_div6_en.hw
+ },
.num_parents = 1,
},
};
@@ -1943,7 +2132,9 @@ static struct clk_fixed_factor gxbb_vclk_div12 = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_div12_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk_div12_en.hw
+ },
.num_parents = 1,
},
};
@@ -1954,7 +2145,9 @@ static struct clk_fixed_factor gxbb_vclk2_div2 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div2_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk2_div2_en.hw
+ },
.num_parents = 1,
},
};
@@ -1965,7 +2158,9 @@ static struct clk_fixed_factor gxbb_vclk2_div4 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div4_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk2_div4_en.hw
+ },
.num_parents = 1,
},
};
@@ -1976,7 +2171,9 @@ static struct clk_fixed_factor gxbb_vclk2_div6 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div6_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk2_div6_en.hw
+ },
.num_parents = 1,
},
};
@@ -1987,16 +2184,25 @@ static struct clk_fixed_factor gxbb_vclk2_div12 = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_div12_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vclk2_div12_en.hw
+ },
.num_parents = 1,
},
};
static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const char * const gxbb_cts_parent_names[] = {
- "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
- "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
- "vclk2_div6", "vclk2_div12"
+static const struct clk_hw *gxbb_cts_parent_hws[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
};
static struct clk_regmap gxbb_cts_enci_sel = {
@@ -2009,8 +2215,8 @@ static struct clk_regmap gxbb_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_cts_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .parent_hws = gxbb_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2025,8 +2231,8 @@ static struct clk_regmap gxbb_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_cts_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .parent_hws = gxbb_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2041,18 +2247,25 @@ static struct clk_regmap gxbb_cts_vdac_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_cts_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .parent_hws = gxbb_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const char * const gxbb_cts_hdmi_tx_parent_names[] = {
- "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
- "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
- "vclk2_div6", "vclk2_div12"
+static const struct clk_hw *gxbb_cts_hdmi_tx_parent_hws[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
};
static struct clk_regmap gxbb_hdmi_tx_sel = {
@@ -2071,8 +2284,8 @@ static struct clk_regmap gxbb_hdmi_tx_sel = {
* vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
* cts_tcon
*/
- .parent_names = gxbb_cts_hdmi_tx_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_names),
+ .parent_hws = gxbb_cts_hdmi_tx_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_hws),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2085,7 +2298,9 @@ static struct clk_regmap gxbb_cts_enci = {
.hw.init = &(struct clk_init_data) {
.name = "cts_enci",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_enci_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_enci_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2099,7 +2314,9 @@ static struct clk_regmap gxbb_cts_encp = {
.hw.init = &(struct clk_init_data) {
.name = "cts_encp",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_encp_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_encp_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2113,7 +2330,9 @@ static struct clk_regmap gxbb_cts_vdac = {
.hw.init = &(struct clk_init_data) {
.name = "cts_vdac",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_vdac_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_cts_vdac_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2127,7 +2346,9 @@ static struct clk_regmap gxbb_hdmi_tx = {
.hw.init = &(struct clk_init_data) {
.name = "hdmi_tx",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "hdmi_tx_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_hdmi_tx_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2135,8 +2356,11 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
-static const char * const gxbb_hdmi_parent_names[] = {
- IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+static const struct clk_parent_data gxbb_hdmi_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_fclk_div4.hw },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
};
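The hdmi mux above describes its parents with struct clk_parent_data, mixing the two main ways a provider can name a parent: .fw_name resolves the clock through this controller's own DT "clocks"/"clock-names" properties (here "xtal"), while .hw points directly at a clk_hw registered by the same driver. A standalone sketch with hypothetical clock names (only the clk_parent_data field semantics come from <linux/clk-provider.h>):

#include <linux/clk-provider.h>

/* Hypothetical root clock provided by the same driver. */
static struct clk_fixed_rate example_osc = {
	.fixed_rate = 24000000,
	.hw.init = &(struct clk_init_data) {
		.name = "example_osc",
		.ops = &clk_fixed_rate_ops,
	},
};

static const struct clk_parent_data example_mux_parents[] = {
	/* resolved via the consumer node: clocks = <...>; clock-names = "xtal"; */
	{ .fw_name = "xtal" },
	/* resolved by pointer, no string or firmware lookup at all */
	{ .hw = &example_osc.hw },
};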
static struct clk_regmap gxbb_hdmi_sel = {
@@ -2149,8 +2373,8 @@ static struct clk_regmap gxbb_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_hdmi_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_names),
+ .parent_data = gxbb_hdmi_parent_data,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_data),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2164,7 +2388,7 @@ static struct clk_regmap gxbb_hdmi_div = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "hdmi_sel" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_hdmi_sel.hw },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -2178,7 +2402,7 @@ static struct clk_regmap gxbb_hdmi = {
.hw.init = &(struct clk_init_data) {
.name = "hdmi",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "hdmi_div" },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_hdmi_div.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
@@ -2186,8 +2410,11 @@ static struct clk_regmap gxbb_hdmi = {
/* VDEC clocks */
-static const char * const gxbb_vdec_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+static const struct clk_hw *gxbb_vdec_parent_hws[] = {
+ &gxbb_fclk_div4.hw,
+ &gxbb_fclk_div3.hw,
+ &gxbb_fclk_div5.hw,
+ &gxbb_fclk_div7.hw,
};
static struct clk_regmap gxbb_vdec_1_sel = {
@@ -2200,8 +2427,8 @@ static struct clk_regmap gxbb_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_vdec_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_names),
+ .parent_hws = gxbb_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2216,7 +2443,9 @@ static struct clk_regmap gxbb_vdec_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vdec_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2230,7 +2459,9 @@ static struct clk_regmap gxbb_vdec_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vdec_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2246,8 +2477,8 @@ static struct clk_regmap gxbb_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = gxbb_vdec_parent_names,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_names),
+ .parent_hws = gxbb_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2262,7 +2493,9 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vdec_hevc_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2276,7 +2509,9 @@ static struct clk_regmap gxbb_vdec_hevc = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_hevc",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_vdec_hevc_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2284,9 +2519,18 @@ static struct clk_regmap gxbb_vdec_hevc = {
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
-static const char * const gen_clk_parent_names[] = {
- IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2",
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll",
+static const struct clk_parent_data gen_clk_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &gxbb_vdec_1.hw },
+ { .hw = &gxbb_vdec_hevc.hw },
+ { .hw = &gxbb_mpll0.hw },
+ { .hw = &gxbb_mpll1.hw },
+ { .hw = &gxbb_mpll2.hw },
+ { .hw = &gxbb_fclk_div4.hw },
+ { .hw = &gxbb_fclk_div3.hw },
+ { .hw = &gxbb_fclk_div5.hw },
+ { .hw = &gxbb_fclk_div7.hw },
+ { .hw = &gxbb_gp0_pll.hw },
};
static struct clk_regmap gxbb_gen_clk_sel = {
@@ -2305,8 +2549,8 @@ static struct clk_regmap gxbb_gen_clk_sel = {
* vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_names = gen_clk_parent_names,
- .num_parents = ARRAY_SIZE(gen_clk_parent_names),
+ .parent_data = gen_clk_parent_data,
+ .num_parents = ARRAY_SIZE(gen_clk_parent_data),
},
};
@@ -2319,7 +2563,9 @@ static struct clk_regmap gxbb_gen_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "gen_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gen_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_gen_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2333,12 +2579,17 @@ static struct clk_regmap gxbb_gen_clk = {
.hw.init = &(struct clk_init_data){
.name = "gen_clk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "gen_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_gen_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
+#define MESON_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &gxbb_clk81.hw)
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
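With this change MESON_GATE stops naming its parent with the global string "clk81" and instead wraps MESON_PCLK with a hard-coded &gxbb_clk81.hw pointer. Below is a minimal sketch of what such a pclk-gate macro typically expands to; EXAMPLE_PCLK is an illustrative name and the exact in-tree MESON_PCLK definition (flags, field layout) may differ.

/* Illustrative only -- not the in-tree MESON_PCLK macro. */
#define EXAMPLE_PCLK(_name, _reg, _bit, _phw)				\
struct clk_regmap _name = {						\
	.data = &(struct clk_regmap_gate_data) {			\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		/* parent handed over as a clk_hw pointer, not a string */ \
		.parent_hws = (const struct clk_hw *[]) { (_phw) },	\
		.num_parents = 1,					\
		.flags = CLK_IGNORE_UNUSED,				\
	},								\
}

/* usage, mirroring the gates below: */
/* static EXAMPLE_PCLK(example_ddr, HHI_GCLK_MPEG0, 0, &gxbb_clk81.hw); */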
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index b67951909e04..bf8bea675d24 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include "meson-aoclk.h"
-#include "clk-input.h"
-
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -33,37 +31,6 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
.reset = meson_aoclk_do_reset,
};
-static int meson_aoclkc_register_inputs(struct device *dev,
- struct meson_aoclk_data *data)
-{
- struct clk_hw *hw;
- char *str;
- int i;
-
- for (i = 0; i < data->num_inputs; i++) {
- const struct meson_aoclk_input *in = &data->inputs[i];
-
- str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix,
- in->name);
- if (!str)
- return -ENOMEM;
-
- hw = meson_clk_hw_register_input(dev, in->name, str, 0);
- kfree(str);
-
- if (IS_ERR(hw)) {
- if (!in->required && PTR_ERR(hw) == -ENOENT)
- continue;
- else if (PTR_ERR(hw) != -EPROBE_DEFER)
- dev_err(dev, "failed to register input %s\n",
- in->name);
- return PTR_ERR(hw);
- }
- }
-
- return 0;
-}
-
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
@@ -86,10 +53,6 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- ret = meson_aoclkc_register_inputs(dev, data);
- if (ret)
- return ret;
-
/* Reset Controller */
rstc->data = data;
rstc->regmap = regmap;
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index 999cde3868f7..605b43855a69 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -18,20 +18,12 @@
#include "clk-regmap.h"
-struct meson_aoclk_input {
- const char *name;
- bool required;
-};
-
struct meson_aoclk_data {
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
const int num_clks;
struct clk_regmap **clks;
- const int num_inputs;
- const struct meson_aoclk_input *inputs;
- const char *input_prefix;
const struct clk_hw_onecell_data *hw_data;
};
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 6ba2094be257..a7cb1e7aedc4 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -10,7 +10,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include "clk-input.h"
#include "clk-regmap.h"
#include "meson-eeclk.h"
@@ -18,7 +17,6 @@ int meson_eeclkc_probe(struct platform_device *pdev)
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
- struct clk_hw *input;
struct regmap *map;
int ret, i;
@@ -37,14 +35,6 @@ int meson_eeclkc_probe(struct platform_device *pdev)
if (data->init_count)
regmap_multi_reg_write(map, data->init_regs, data->init_count);
- input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
- if (IS_ERR(input)) {
- ret = PTR_ERR(input);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get input clock");
- return ret;
- }
-
/* Populate regmap for the regmap backed clocks */
for (i = 0; i < data->regmap_clk_num; i++)
data->regmap_clks[i]->map = map;
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
index 9ab5d6fa7ccb..77316207bde1 100644
--- a/drivers/clk/meson/meson-eeclk.h
+++ b/drivers/clk/meson/meson-eeclk.h
@@ -10,8 +10,6 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
-#define IN_PREFIX "ee-in-"
-
struct platform_device;
struct meson_eeclkc_data {
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 537219fa573e..67e6691e080c 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -97,7 +97,9 @@ static struct clk_regmap meson8b_fixed_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw
+ },
.num_parents = 1,
},
};
@@ -112,7 +114,9 @@ static struct clk_regmap meson8b_fixed_pll = {
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll_dco.hw
+ },
.num_parents = 1,
/*
* This clock won't ever change at runtime so
@@ -158,7 +162,9 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
/* sometimes also called "HPLL" or "HPLL PLL" */
.name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw
+ },
.num_parents = 1,
},
};
@@ -173,7 +179,9 @@ static struct clk_regmap meson8b_hdmi_pll_lvds_out = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_lvds_out",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -189,7 +197,9 @@ static struct clk_regmap meson8b_hdmi_pll_hdmi_out = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_hdmi_out",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -227,7 +237,9 @@ static struct clk_regmap meson8b_sys_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw
+ },
.num_parents = 1,
},
};
@@ -242,7 +254,9 @@ static struct clk_regmap meson8b_sys_pll = {
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "sys_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_sys_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -254,7 +268,9 @@ static struct clk_fixed_factor meson8b_fclk_div2_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -267,7 +283,9 @@ static struct clk_regmap meson8b_fclk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div2_div.hw
+ },
.num_parents = 1,
/*
* FIXME: Ethernet with a RGMII PHYs is not working if
@@ -285,7 +303,9 @@ static struct clk_fixed_factor meson8b_fclk_div3_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -298,7 +318,9 @@ static struct clk_regmap meson8b_fclk_div3 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div3_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div3_div.hw
+ },
.num_parents = 1,
},
};
@@ -309,7 +331,9 @@ static struct clk_fixed_factor meson8b_fclk_div4_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -322,7 +346,9 @@ static struct clk_regmap meson8b_fclk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div4",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div4_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div4_div.hw
+ },
.num_parents = 1,
},
};
@@ -333,7 +359,9 @@ static struct clk_fixed_factor meson8b_fclk_div5_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -346,7 +374,9 @@ static struct clk_regmap meson8b_fclk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div5_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div5_div.hw
+ },
.num_parents = 1,
},
};
@@ -357,7 +387,9 @@ static struct clk_fixed_factor meson8b_fclk_div7_div = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -370,7 +402,9 @@ static struct clk_regmap meson8b_fclk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "fclk_div7_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div7_div.hw
+ },
.num_parents = 1,
},
};
@@ -384,7 +418,9 @@ static struct clk_regmap meson8b_mpll_prediv = {
.hw.init = &(struct clk_init_data){
.name = "mpll_prediv",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fixed_pll.hw
+ },
.num_parents = 1,
},
};
@@ -416,7 +452,9 @@ static struct clk_regmap meson8b_mpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -429,7 +467,9 @@ static struct clk_regmap meson8b_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -457,7 +497,9 @@ static struct clk_regmap meson8b_mpll1_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -470,7 +512,9 @@ static struct clk_regmap meson8b_mpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -498,7 +542,9 @@ static struct clk_regmap meson8b_mpll2_div = {
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "mpll_prediv" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll_prediv.hw
+ },
.num_parents = 1,
},
};
@@ -511,7 +557,9 @@ static struct clk_regmap meson8b_mpll2 = {
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpll2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpll2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -533,8 +581,11 @@ static struct clk_regmap meson8b_mpeg_clk_sel = {
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
- "fclk_div5" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div5.hw,
+ },
.num_parents = 3,
},
};
@@ -548,7 +599,9 @@ static struct clk_regmap meson8b_mpeg_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpeg_clk_sel.hw
+ },
.num_parents = 1,
},
};
@@ -561,7 +614,9 @@ static struct clk_regmap meson8b_clk81 = {
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mpeg_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
},
@@ -576,7 +631,10 @@ static struct clk_regmap meson8b_cpu_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "cpu_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "xtal", "sys_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw,
+ &meson8b_sys_pll.hw,
+ },
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT),
@@ -589,7 +647,9 @@ static struct clk_fixed_factor meson8b_cpu_in_div2 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_in_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -601,7 +661,9 @@ static struct clk_fixed_factor meson8b_cpu_in_div3 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_in_div3",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -630,7 +692,9 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "cpu_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -649,13 +713,15 @@ static struct clk_regmap meson8b_cpu_scale_out_sel = {
.ops = &clk_regmap_mux_ops,
/*
* NOTE: We are skipping the parent with value 0x2 (which is
- * "cpu_in_div3") because it results in a duty cycle of 33%
- * which makes the system unstable and can result in a lockup
- * of the whole system.
+ * meson8b_cpu_in_div3) because it results in a duty cycle of
+ * 33% which makes the system unstable and can result in a
+ * lockup of the whole system.
*/
- .parent_names = (const char *[]) { "cpu_in_sel",
- "cpu_in_div2",
- "cpu_scale_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_in_sel.hw,
+ &meson8b_cpu_in_div2.hw,
+ &meson8b_cpu_scale_div.hw,
+ },
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
@@ -670,8 +736,10 @@ static struct clk_regmap meson8b_cpu_clk = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "xtal",
- "cpu_scale_out_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw,
+ &meson8b_cpu_scale_out_sel.hw,
+ },
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT |
@@ -690,8 +758,13 @@ static struct clk_regmap meson8b_nand_clk_sel = {
.name = "nand_clk_sel",
.ops = &clk_regmap_mux_ops,
/* FIXME all other parents are unknown: */
- .parent_names = (const char *[]){ "fclk_div4", "fclk_div3",
- "fclk_div5", "fclk_div7", "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8b_fclk_div7.hw,
+ &meson8b_xtal.hw,
+ },
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
},
@@ -707,7 +780,9 @@ static struct clk_regmap meson8b_nand_clk_div = {
.hw.init = &(struct clk_init_data){
.name = "nand_clk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "nand_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_nand_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -721,7 +796,9 @@ static struct clk_regmap meson8b_nand_clk_gate = {
.hw.init = &(struct clk_init_data){
.name = "nand_clk_gate",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "nand_clk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_nand_clk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -733,7 +810,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div2 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -744,7 +823,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div3 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div3",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -755,7 +836,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div4 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -766,7 +849,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div5 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div5",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -777,7 +862,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div6 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -788,7 +875,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div7 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div7",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -799,7 +888,9 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_div8",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "cpu_clk" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk.hw
+ },
.num_parents = 1,
},
};
@@ -815,13 +906,15 @@ static struct clk_regmap meson8b_apb_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "apb_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cpu_clk_div2",
- "cpu_clk_div3",
- "cpu_clk_div4",
- "cpu_clk_div5",
- "cpu_clk_div6",
- "cpu_clk_div7",
- "cpu_clk_div8", },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+ },
.num_parents = 7,
},
};
@@ -835,7 +928,9 @@ static struct clk_regmap meson8b_apb_clk_gate = {
.hw.init = &(struct clk_init_data){
.name = "apb_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "apb_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_apb_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -850,13 +945,15 @@ static struct clk_regmap meson8b_periph_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "periph_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cpu_clk_div2",
- "cpu_clk_div3",
- "cpu_clk_div4",
- "cpu_clk_div5",
- "cpu_clk_div6",
- "cpu_clk_div7",
- "cpu_clk_div8", },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+ },
.num_parents = 7,
},
};
@@ -870,7 +967,9 @@ static struct clk_regmap meson8b_periph_clk_gate = {
.hw.init = &(struct clk_init_data){
.name = "periph_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "periph_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_periph_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -887,13 +986,15 @@ static struct clk_regmap meson8b_axi_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "axi_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cpu_clk_div2",
- "cpu_clk_div3",
- "cpu_clk_div4",
- "cpu_clk_div5",
- "cpu_clk_div6",
- "cpu_clk_div7",
- "cpu_clk_div8", },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+ },
.num_parents = 7,
},
};
@@ -907,7 +1008,9 @@ static struct clk_regmap meson8b_axi_clk_gate = {
.hw.init = &(struct clk_init_data){
.name = "axi_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "axi_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_axi_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -922,13 +1025,15 @@ static struct clk_regmap meson8b_l2_dram_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "l2_dram_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cpu_clk_div2",
- "cpu_clk_div3",
- "cpu_clk_div4",
- "cpu_clk_div5",
- "cpu_clk_div6",
- "cpu_clk_div7",
- "cpu_clk_div8", },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+ },
.num_parents = 7,
},
};
@@ -942,7 +1047,9 @@ static struct clk_regmap meson8b_l2_dram_clk_gate = {
.hw.init = &(struct clk_init_data){
.name = "l2_dram_clk_dis",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "l2_dram_clk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_l2_dram_clk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -963,7 +1070,9 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
* Meson8b: hdmi_pll_dco
* Meson8m2: vid2_pll
*/
- .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -977,7 +1086,9 @@ static struct clk_regmap meson8b_vid_pll_in_en = {
.hw.init = &(struct clk_init_data){
.name = "vid_pll_in_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vid_pll_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vid_pll_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -992,7 +1103,9 @@ static struct clk_regmap meson8b_vid_pll_pre_div = {
.hw.init = &(struct clk_init_data){
.name = "vid_pll_pre_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "vid_pll_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vid_pll_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1007,7 +1120,9 @@ static struct clk_regmap meson8b_vid_pll_post_div = {
.hw.init = &(struct clk_init_data){
.name = "vid_pll_post_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "vid_pll_pre_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vid_pll_pre_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1023,8 +1138,10 @@ static struct clk_regmap meson8b_vid_pll = {
.name = "vid_pll",
.ops = &clk_regmap_mux_ro_ops,
/* TODO: parent 0x2 is vid_pll_pre_div_mult7_div2 */
- .parent_names = (const char *[]){ "vid_pll_pre_div",
- "vid_pll_post_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vid_pll_pre_div.hw,
+ &meson8b_vid_pll_post_div.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1039,15 +1156,22 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
.hw.init = &(struct clk_init_data){
.name = "vid_pll_final_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "vid_pll" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vid_pll.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const meson8b_vclk_mux_parents[] = {
- "vid_pll_final_div", "fclk_div4", "fclk_div3", "fclk_div5",
- "vid_pll_final_div", "fclk_div7", "mpll1"
+static const struct clk_hw *meson8b_vclk_mux_parent_hws[] = {
+ &meson8b_vid_pll_final_div.hw,
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8b_vid_pll_final_div.hw,
+ &meson8b_fclk_div7.hw,
+ &meson8b_mpll1.hw,
};
static struct clk_regmap meson8b_vclk_in_sel = {
@@ -1059,8 +1183,8 @@ static struct clk_regmap meson8b_vclk_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parents),
+ .parent_hws = meson8b_vclk_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1073,7 +1197,9 @@ static struct clk_regmap meson8b_vclk_in_en = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1087,7 +1213,9 @@ static struct clk_regmap meson8b_vclk_div1_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div1_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1099,7 +1227,9 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1113,7 +1243,9 @@ static struct clk_regmap meson8b_vclk_div2_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div2_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_div2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_div2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1125,7 +1257,9 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1139,7 +1273,9 @@ static struct clk_regmap meson8b_vclk_div4_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div4_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_div4" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_div4_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1151,7 +1287,9 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1165,7 +1303,9 @@ static struct clk_regmap meson8b_vclk_div6_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div6_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_div6" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_div6_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1177,7 +1317,9 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1191,7 +1333,9 @@ static struct clk_regmap meson8b_vclk_div12_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk_div12_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk_div12" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk_div12_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1206,8 +1350,8 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parents),
+ .parent_hws = meson8b_vclk_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1220,7 +1364,9 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_in_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_in_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1234,7 +1380,9 @@ static struct clk_regmap meson8b_vclk2_div1_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div1_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_clk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1246,7 +1394,9 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_clk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1260,7 +1410,9 @@ static struct clk_regmap meson8b_vclk2_div2_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_div2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_div2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1272,7 +1424,9 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_clk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1286,7 +1440,9 @@ static struct clk_regmap meson8b_vclk2_div4_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_div4" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_div4_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1298,7 +1454,9 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_clk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1312,7 +1470,9 @@ static struct clk_regmap meson8b_vclk2_div6_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_div6" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_div6_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1324,7 +1484,9 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12",
.ops = &clk_fixed_factor_ops,
- .parent_names = (const char *[]){ "vclk2_in_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_clk_in_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
@@ -1338,15 +1500,20 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12_en",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "vclk2_div12" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vclk2_div12_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const meson8b_vclk_enc_mux_parents[] = {
- "vclk_div1_en", "vclk_div2_en", "vclk_div4_en", "vclk_div6_en",
- "vclk_div12_en",
+static const struct clk_hw *meson8b_vclk_enc_mux_parent_hws[] = {
+ &meson8b_vclk_div1_gate.hw,
+ &meson8b_vclk_div2_div_gate.hw,
+ &meson8b_vclk_div4_div_gate.hw,
+ &meson8b_vclk_div6_div_gate.hw,
+ &meson8b_vclk_div12_div_gate.hw,
};
static struct clk_regmap meson8b_cts_enct_sel = {
@@ -1358,8 +1525,8 @@ static struct clk_regmap meson8b_cts_enct_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .parent_hws = meson8b_vclk_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1372,7 +1539,9 @@ static struct clk_regmap meson8b_cts_enct = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cts_enct_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_enct_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1387,8 +1556,8 @@ static struct clk_regmap meson8b_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .parent_hws = meson8b_vclk_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1401,7 +1570,9 @@ static struct clk_regmap meson8b_cts_encp = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cts_encp_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_encp_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1416,8 +1587,8 @@ static struct clk_regmap meson8b_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .parent_hws = meson8b_vclk_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1430,7 +1601,9 @@ static struct clk_regmap meson8b_cts_enci = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cts_enci_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_enci_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1445,8 +1618,8 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .parent_hws = meson8b_vclk_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1459,15 +1632,20 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "hdmi_tx_pixel_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_tx_pixel_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const meson8b_vclk2_enc_mux_parents[] = {
- "vclk2_div1_en", "vclk2_div2_en", "vclk2_div4_en", "vclk2_div6_en",
- "vclk2_div12_en",
+static const struct clk_hw *meson8b_vclk2_enc_mux_parent_hws[] = {
+ &meson8b_vclk2_div1_gate.hw,
+ &meson8b_vclk2_div2_div_gate.hw,
+ &meson8b_vclk2_div4_div_gate.hw,
+ &meson8b_vclk2_div6_div_gate.hw,
+ &meson8b_vclk2_div12_div_gate.hw,
};
static struct clk_regmap meson8b_cts_encl_sel = {
@@ -1479,8 +1657,8 @@ static struct clk_regmap meson8b_cts_encl_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk2_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parents),
+ .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1493,7 +1671,9 @@ static struct clk_regmap meson8b_cts_encl = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cts_encl_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_encl_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1508,8 +1688,8 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_names = meson8b_vclk2_enc_mux_parents,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parents),
+ .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1522,7 +1702,9 @@ static struct clk_regmap meson8b_cts_vdac0 = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "cts_vdac0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_vdac0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1539,7 +1721,9 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
.name = "hdmi_sys_sel",
.ops = &clk_regmap_mux_ro_ops,
/* FIXME: all other parents are unknown */
- .parent_names = (const char *[]){ "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1554,7 +1738,9 @@ static struct clk_regmap meson8b_hdmi_sys_div = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sys_div",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "hdmi_sys_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_sys_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1568,7 +1754,9 @@ static struct clk_regmap meson8b_hdmi_sys = {
.hw.init = &(struct clk_init_data) {
.name = "hdmi_sys",
.ops = &clk_regmap_gate_ro_ops,
- .parent_names = (const char *[]){ "hdmi_sys_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_sys_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1579,9 +1767,14 @@ static struct clk_regmap meson8b_hdmi_sys = {
* muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only
* has mali_0 and no glitch-free mux.
*/
-static const char * const meson8b_mali_0_1_parent_names[] = {
- "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3",
- "fclk_div5"
+static const struct clk_hw *meson8b_mali_0_1_parent_hws[] = {
+ &meson8b_xtal.hw,
+ &meson8b_mpll2.hw,
+ &meson8b_mpll1.hw,
+ &meson8b_fclk_div7.hw,
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
};
static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
@@ -1596,8 +1789,8 @@ static struct clk_regmap meson8b_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_mali_0_1_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ .parent_hws = meson8b_mali_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1617,7 +1810,9 @@ static struct clk_regmap meson8b_mali_0_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_0_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mali_0_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1631,7 +1826,9 @@ static struct clk_regmap meson8b_mali_0 = {
.hw.init = &(struct clk_init_data){
.name = "mali_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mali_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1647,8 +1844,8 @@ static struct clk_regmap meson8b_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_mali_0_1_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names),
+ .parent_hws = meson8b_mali_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_hws),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1668,7 +1865,9 @@ static struct clk_regmap meson8b_mali_1_div = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "mali_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mali_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1682,7 +1881,9 @@ static struct clk_regmap meson8b_mali_1 = {
.hw.init = &(struct clk_init_data){
.name = "mali_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "mali_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mali_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1697,7 +1898,10 @@ static struct clk_regmap meson8b_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "mali_0", "mali_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_mali_0.hw,
+ &meson8b_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1740,7 +1944,9 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
.hw.init = &(struct clk_init_data){
.name = "gp_pll_dco",
.ops = &meson_clk_pll_ops,
- .parent_names = (const char *[]){ "xtal" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_xtal.hw
+ },
.num_parents = 1,
},
};
@@ -1755,18 +1961,26 @@ static struct clk_regmap meson8m2_gp_pll = {
.hw.init = &(struct clk_init_data){
.name = "gp_pll",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "gp_pll_dco" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8m2_gp_pll_dco.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const char * const meson8b_vpu_0_1_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+static const struct clk_hw *meson8b_vpu_0_1_parent_hws[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8b_fclk_div7.hw,
};
-static const char * const mmeson8m2_vpu_0_1_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "gp_pll"
+static const struct clk_hw *mmeson8m2_vpu_0_1_parent_hws[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8m2_gp_pll.hw,
};
static struct clk_regmap meson8b_vpu_0_sel = {
@@ -1778,8 +1992,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
+ .parent_hws = meson8b_vpu_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1793,8 +2007,8 @@ static struct clk_regmap meson8m2_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8m2_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names),
+ .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1808,7 +2022,17 @@ static struct clk_regmap meson8b_vpu_0_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_0_sel" },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * meson8b and meson8m2 have different vpu_0_sels (with
+ * different struct clk_hw). We fall back to the global
+ * naming string mechanism so vpu_0_div picks up the
+ * appropriate one.
+ */
+ .name = "vpu_0_sel",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
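vpu_0_div cannot use a clk_hw pointer because meson8b and meson8m2 provide different vpu_0_sel clocks, and it cannot use .fw_name because the parent does not come from DT; the .name / .index = -1 form makes the framework skip both and fall back to a global lookup of the legacy string name. A rough, hypothetical sketch of that resolution order (helper names are made up; the logic paraphrases the common clock framework and is not the in-tree code):

#include <linux/clk-provider.h>

/* hypothetical stand-ins for the framework's internal lookups */
const struct clk_hw *example_dt_lookup(const struct clk_parent_data *pd);
const struct clk_hw *example_global_name_lookup(const char *name);

static const struct clk_hw *
example_resolve_parent(const struct clk_parent_data *pd)
{
	if (pd->hw)				/* 1: direct clk_hw pointer */
		return pd->hw;
	if (pd->fw_name || pd->index >= 0)	/* 2: DT clocks/clock-names */
		return example_dt_lookup(pd);
	return example_global_name_lookup(pd->name); /* 3: legacy global name */
}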
@@ -1822,7 +2046,9 @@ static struct clk_regmap meson8b_vpu_0 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_0_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vpu_0_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1837,8 +2063,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
+ .parent_hws = meson8b_vpu_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1852,8 +2078,8 @@ static struct clk_regmap meson8m2_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = mmeson8m2_vpu_0_1_parent_names,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_names),
+ .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
+ .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1867,7 +2093,17 @@ static struct clk_regmap meson8b_vpu_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vpu_1_sel" },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * meson8b and meson8m2 have different vpu_1_sels (with
+ * different struct clk_hw). We fall back to the global
+ * naming string mechanism so vpu_1_div picks up the
+ * appropriate one.
+ */
+ .name = "vpu_1_sel",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1881,7 +2117,9 @@ static struct clk_regmap meson8b_vpu_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vpu_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vpu_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1896,14 +2134,22 @@ static struct clk_regmap meson8b_vpu = {
.hw.init = &(struct clk_init_data){
.name = "vpu",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vpu_0.hw,
+ &meson8b_vpu_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static const char * const meson8b_vdec_parent_names[] = {
- "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "mpll2", "mpll1"
+static const struct clk_hw *meson8b_vdec_parent_hws[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8b_fclk_div7.hw,
+ &meson8b_mpll2.hw,
+ &meson8b_mpll1.hw,
};
static struct clk_regmap meson8b_vdec_1_sel = {
@@ -1916,8 +2162,8 @@ static struct clk_regmap meson8b_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vdec_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .parent_hws = meson8b_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1932,7 +2178,9 @@ static struct clk_regmap meson8b_vdec_1_1_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_1_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1946,7 +2194,9 @@ static struct clk_regmap meson8b_vdec_1_1 = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_1_1",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_1_1_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_1_1_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1962,7 +2212,9 @@ static struct clk_regmap meson8b_vdec_1_2_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_2_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_1_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_1_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1976,7 +2228,9 @@ static struct clk_regmap meson8b_vdec_1_2 = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_1_2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_1_2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_1_2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1992,7 +2246,10 @@ static struct clk_regmap meson8b_vdec_1 = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "vdec_1_1", "vdec_1_2" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_1_1.hw,
+ &meson8b_vdec_1_2.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2008,8 +2265,8 @@ static struct clk_regmap meson8b_vdec_hcodec_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vdec_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .parent_hws = meson8b_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2024,7 +2281,9 @@ static struct clk_regmap meson8b_vdec_hcodec_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_hcodec_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_hcodec_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2038,7 +2297,9 @@ static struct clk_regmap meson8b_vdec_hcodec = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_hcodec",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_hcodec_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_hcodec_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2054,8 +2315,8 @@ static struct clk_regmap meson8b_vdec_2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vdec_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .parent_hws = meson8b_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2070,7 +2331,9 @@ static struct clk_regmap meson8b_vdec_2_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_2_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_2_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2084,7 +2347,9 @@ static struct clk_regmap meson8b_vdec_2 = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_2",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_2_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_2_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2100,8 +2365,8 @@ static struct clk_regmap meson8b_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_vdec_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .parent_hws = meson8b_vdec_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2116,7 +2381,9 @@ static struct clk_regmap meson8b_vdec_hevc_div = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_hevc_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2130,7 +2397,9 @@ static struct clk_regmap meson8b_vdec_hevc_en = {
.hw.init = &(struct clk_init_data) {
.name = "vdec_hevc_en",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_hevc_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2147,15 +2416,19 @@ static struct clk_regmap meson8b_vdec_hevc = {
.name = "vdec_hevc",
.ops = &clk_regmap_mux_ops,
/* TODO: The second parent is currently unknown */
- .parent_names = (const char *[]){ "vdec_hevc_en" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_vdec_hevc_en.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const char * const meson8b_cts_amclk_parent_names[] = {
- "mpll0", "mpll1", "mpll2"
+static const struct clk_hw *meson8b_cts_amclk_parent_hws[] = {
+ &meson8b_mpll0.hw,
+ &meson8b_mpll1.hw,
+ &meson8b_mpll2.hw
};
static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 };
@@ -2171,8 +2444,8 @@ static struct clk_regmap meson8b_cts_amclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_cts_amclk_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_names),
+ .parent_hws = meson8b_cts_amclk_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_hws),
},
};
@@ -2186,7 +2459,9 @@ static struct clk_regmap meson8b_cts_amclk_div = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "cts_amclk_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_amclk_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2200,15 +2475,19 @@ static struct clk_regmap meson8b_cts_amclk = {
.hw.init = &(struct clk_init_data){
.name = "cts_amclk",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_amclk_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_amclk_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const char * const meson8b_cts_mclk_i958_parent_names[] = {
- "mpll0", "mpll1", "mpll2"
+static const struct clk_hw *meson8b_cts_mclk_i958_parent_hws[] = {
+ &meson8b_mpll0.hw,
+ &meson8b_mpll1.hw,
+ &meson8b_mpll2.hw
};
static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 };
@@ -2224,8 +2503,8 @@ static struct clk_regmap meson8b_cts_mclk_i958_sel = {
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_names = meson8b_cts_mclk_i958_parent_names,
- .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_names),
+ .parent_hws = meson8b_cts_mclk_i958_parent_hws,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_hws),
},
};
@@ -2239,7 +2518,9 @@ static struct clk_regmap meson8b_cts_mclk_i958_div = {
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
.ops = &clk_regmap_divider_ops,
- .parent_names = (const char *[]){ "cts_mclk_i958_sel" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_mclk_i958_sel.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2253,7 +2534,9 @@ static struct clk_regmap meson8b_cts_mclk_i958 = {
.hw.init = &(struct clk_init_data){
.name = "cts_mclk_i958",
.ops = &clk_regmap_gate_ops,
- .parent_names = (const char *[]){ "cts_mclk_i958_div" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_mclk_i958_div.hw
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2268,8 +2551,10 @@ static struct clk_regmap meson8b_cts_i958 = {
.hw.init = &(struct clk_init_data){
.name = "cts_i958",
.ops = &clk_regmap_mux_ops,
- .parent_names = (const char *[]){ "cts_amclk",
- "cts_mclk_i958" },
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_cts_amclk.hw,
+ &meson8b_cts_mclk_i958.hw
+ },
.num_parents = 2,
/*
* The parent is specific to origin of the audio data. Let the
@@ -2279,6 +2564,9 @@ static struct clk_regmap meson8b_cts_i958 = {
},
};
+#define MESON_GATE(_name, _reg, _bit) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_clk81.hw)
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
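All of the meson8b hunks above apply the same conversion: each .parent_names string table becomes either a .parent_hws array of &<clock>.hw pointers (when the parent is defined in the same driver) or a struct clk_parent_data entry with .index = -1 (when, as for "vpu_0_sel"/"vpu_1_sel", the parent still has to be resolved through the global clock name space). A minimal sketch of the pointer-based form, assuming the driver's existing includes and clk_regmap helpers; "my_div" and "my_gate" are made-up names, not part of the patch:

	/* illustrative only: parent referenced by clk_hw instead of by name */
	static struct clk_regmap my_div;	/* some divider in the same driver */

	static struct clk_regmap my_gate = {
		/* .data = regmap gate register/bit description, omitted here */
		.hw.init = &(struct clk_init_data){
			.name = "my_gate",
			.ops = &clk_regmap_gate_ops,
			.parent_hws = (const struct clk_hw *[]) {
				&my_div.hw
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
		},
	};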
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c3fd632af119..a32bfaeb7e61 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -12,23 +12,13 @@
#include <linux/platform_device.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#define CLK_RPMH_ARC_EN_OFFSET 0
#define CLK_RPMH_VRM_EN_OFFSET 4
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHIFT 29
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_SHIFT 0
-
-#define BCM_TCS_CMD(valid, vote) \
- (BCM_TCS_CMD_COMMIT_MASK | \
- ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
- ((vote & BCM_TCS_CMD_VOTE_MASK) \
- << BCM_TCS_CMD_VOTE_SHIFT))
-
/**
* struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM)
* @unit: divisor used to convert Hz value to an RPMh msg
@@ -269,7 +259,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
}
cmd.addr = c->res_addr;
- cmd.data = BCM_TCS_CMD(enable, cmd_state);
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
if (ret) {
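The locally defined BCM_TCS_CMD() helper is dropped in favour of the shared definition that now comes in through <soc/qcom/tcs.h>, which is why the call site gains two extra arguments. Read purely from that call site (the argument names below are my interpretation, not quoted from the header), the command word is built from a commit flag, a valid flag and two vote fields:

	/* illustrative reading of the new call site only */
	cmd.addr = c->res_addr;
	cmd.data = BCM_TCS_CMD(1,		/* commit */
			       enable,		/* valid */
			       0,		/* first vote field */
			       cmd_state);	/* second vote field */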
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
return 0;
}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
#include "clk-exynos5-subcmu.h"
static struct samsung_clk_provider *ctx;
-static const struct exynos5_subcmu_info *cmu;
+static const struct exynos5_subcmu_info **cmu;
static int nr_cmus;
static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
* when OF-core populates all device-tree nodes.
*/
void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
- const struct exynos5_subcmu_info *_cmu)
+ const struct exynos5_subcmu_info **_cmu)
{
ctx = _ctx;
cmu = _cmu;
nr_cmus = _nr_cmus;
for (; _nr_cmus--; _cmu++) {
- exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
- _cmu->nr_gate_clks);
- exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
- _cmu->nr_suspend_regs);
+ exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
+ (*_cmu)->nr_gate_clks);
+ exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
+ (*_cmu)->nr_suspend_regs);
}
}
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
if (of_property_read_string(np, "label", &name) < 0)
continue;
for (i = 0; i < nr_cmus; i++)
- if (strcmp(cmu[i].pd_name, name) == 0)
+ if (strcmp(cmu[i]->pd_name, name) == 0)
exynos5_clk_register_subcmu(&pdev->dev,
- &cmu[i], np);
+ cmu[i], np);
}
return 0;
}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
};
void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
- const struct exynos5_subcmu_info *cmu);
+ const struct exynos5_subcmu_info **cmu);
#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
.pd_name = "DISP1",
};
+static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
+ &exynos5250_disp_subcmu,
+};
+
static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
ARRAY_SIZE(exynos5250_clk_regs));
- exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
+ exynos5250_subcmus);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
- GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
- SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+ /* Maudio Block */
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
/* GSCL Block */
DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
- /* MSCL Block */
- DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
-
/* PSGEN */
DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
- /* Maudio Block */
- GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
- GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
- GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
- GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
-
/* FSYS Block */
GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
- /* MSCL Block */
- GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
- GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
- GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
- GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
- GATE_IP_MSCL, 8, 0, 0),
- GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
- GATE_IP_MSCL, 9, 0, 0),
- GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
- GATE_IP_MSCL, 10, 0, 0),
-
/* ISP */
GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
{ DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
};
-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
- {
- .div_clks = exynos5x_disp_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
- .gate_clks = exynos5x_disp_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
- .suspend_regs = exynos5x_disp_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
- .pd_name = "DISP",
- }, {
- .div_clks = exynos5x_gsc_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
- .gate_clks = exynos5x_gsc_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
- .suspend_regs = exynos5x_gsc_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
- .pd_name = "GSC",
- }, {
- .div_clks = exynos5x_mfc_div_clks,
- .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
- .gate_clks = exynos5x_mfc_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
- .suspend_regs = exynos5x_mfc_suspend_regs,
- .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
- .pd_name = "MFC",
- },
+static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
+ /* MSCL Block */
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+ GATE_IP_MSCL, 8, 0, 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+ GATE_IP_MSCL, 9, 0, 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+ GATE_IP_MSCL, 10, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
+ DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
+ { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
+ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
+ { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
+};
+
+static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
+ { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
+};
+
+static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
+ .div_clks = exynos5x_disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+ .gate_clks = exynos5x_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+ .suspend_regs = exynos5x_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+ .pd_name = "DISP",
+};
+
+static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
+ .div_clks = exynos5x_gsc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+ .gate_clks = exynos5x_gsc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+ .suspend_regs = exynos5x_gsc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+ .pd_name = "GSC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
+ .div_clks = exynos5x_mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+ .gate_clks = exynos5x_mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+ .suspend_regs = exynos5x_mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+ .pd_name = "MFC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
+ .div_clks = exynos5x_mscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
+ .gate_clks = exynos5x_mscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
+ .suspend_regs = exynos5x_mscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
+ .pd_name = "MSC",
+};
+
+static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
+ .gate_clks = exynos5800_mau_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
+ .suspend_regs = exynos5800_mau_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
+ .pd_name = "MAU",
+};
+
+static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
+ &exynos5x_disp_subcmu,
+ &exynos5x_gsc_subcmu,
+ &exynos5x_mfc_subcmu,
+ &exynos5x_mscl_subcmu,
+};
+
+static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
+ &exynos5x_disp_subcmu,
+ &exynos5x_gsc_subcmu,
+ &exynos5x_mfc_subcmu,
+ &exynos5x_mscl_subcmu,
+ &exynos5800_mau_subcmu,
};
static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
samsung_clk_extended_sleep_init(reg_base,
exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
- if (soc == EXYNOS5800)
+
+ if (soc == EXYNOS5800) {
samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
ARRAY_SIZE(exynos5800_clk_regs));
- exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
- exynos5x_subcmus);
+
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
+ exynos5800_subcmus);
+ } else {
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+ exynos5x_subcmus);
+ }
samsung_clk_of_add_provider(np, ctx);
}
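With exynos5_subcmus_init() now taking an array of pointers, each SoC variant lists exactly the sub-CMU descriptors it has, and the common descriptors are shared between the exynos5420 and exynos5800 tables instead of the MAU handling being special-cased. A short usage sketch of the new interface; my_soc_subcmus is a made-up list name:

	/* illustrative only: per-SoC list built from shared descriptors */
	static const struct exynos5_subcmu_info *my_soc_subcmus[] = {
		&exynos5x_disp_subcmu,
		&exynos5x_mfc_subcmu,
	};

	exynos5_subcmus_init(ctx, ARRAY_SIZE(my_soc_subcmus), my_soc_subcmus);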
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
if (socfpgaclk->fixed_div) {
div = socfpgaclk->fixed_div;
} else {
- if (!socfpgaclk->bypass_reg)
+ if (socfpgaclk->hw.reg)
div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
}
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
tristate "Clock support for Spreadtrum SoCs"
depends on ARCH_SPRD || COMPILE_TEST
default ARCH_SPRD
+ select REGMAP_MMIO
if SPRD_COMMON_CLK
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5e9317dc3d39..a642c23b2fba 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -429,7 +429,7 @@ config ATMEL_ST
config ATMEL_TCB_CLKSRC
bool "Atmel TC Block timer driver" if COMPILE_TEST
- depends on HAS_IOMEM
+ depends on ARM && HAS_IOMEM
select TIMER_OF if OF
help
Support for Timer Counter Blocks on Atmel SoCs.
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 8e12b11e81b0..9039df4f90e2 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -291,10 +291,8 @@ static int em_sti_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
/* map memory, let base point to the STI instance */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ba2c79e6a0ee..2317d4e3daaf 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
+static u64 hv_sched_clock_offset __ro_after_init;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -212,19 +213,17 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
struct clocksource *hyperv_cs;
EXPORT_SYMBOL_GPL(hyperv_cs);
-#ifdef CONFIG_HYPERV_TSCPAGE
-
-static struct ms_hyperv_tsc_page *tsc_pg;
+static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
- return tsc_pg;
+ return &tsc_pg;
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
{
- u64 current_tick = hv_read_tsc_page(tsc_pg);
+ u64 current_tick = hv_read_tsc_page(&tsc_pg);
if (current_tick == U64_MAX)
hv_get_time_ref_count(current_tick);
@@ -232,9 +231,9 @@ static u64 notrace read_hv_sched_clock_tsc(void)
return current_tick;
}
-static u64 read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_sched_clock_tsc(void)
{
- return read_hv_sched_clock_tsc();
+ return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_tsc = {
@@ -244,9 +243,8 @@ static struct clocksource hyperv_cs_tsc = {
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-#endif
-static u64 notrace read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_clock_msr(struct clocksource *arg)
{
u64 current_tick;
/*
@@ -258,9 +256,9 @@ static u64 notrace read_hv_sched_clock_msr(void)
return current_tick;
}
-static u64 read_hv_clock_msr(struct clocksource *arg)
+static u64 read_hv_sched_clock_msr(void)
{
- return read_hv_sched_clock_msr();
+ return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_msr = {
@@ -271,7 +269,6 @@ static struct clocksource hyperv_cs_msr = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-#ifdef CONFIG_HYPERV_TSCPAGE
static bool __init hv_init_tsc_clocksource(void)
{
u64 tsc_msr;
@@ -280,12 +277,8 @@ static bool __init hv_init_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return false;
- tsc_pg = vmalloc(PAGE_SIZE);
- if (!tsc_pg)
- return false;
-
hyperv_cs = &hyperv_cs_tsc;
- phys_addr = page_to_phys(vmalloc_to_page(tsc_pg));
+ phys_addr = virt_to_phys(&tsc_pg);
/*
* The Hyper-V TLFS specifies to preserve the value of reserved
@@ -302,17 +295,11 @@ static bool __init hv_init_tsc_clocksource(void)
hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
- sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+ hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_setup_sched_clock(read_hv_sched_clock_tsc);
+
return true;
}
-#else
-static bool __init hv_init_tsc_clocksource(void)
-{
- return false;
-}
-#endif
-
void __init hv_init_clocksource(void)
{
@@ -333,7 +320,7 @@ void __init hv_init_clocksource(void)
hyperv_cs = &hyperv_cs_msr;
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
- /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
- sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+ hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
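Both sched clock callbacks now return the clocksource reading minus hv_sched_clock_offset, which is sampled right after the clocksource is registered, so the scheduler clock starts near zero at boot. The ordering used by both init paths, with cs and read_hv_sched_clock_xxx standing in for the concrete clocksource (TSC-page or MSR based) and its callback:

	/* illustrative: registration order used above */
	clocksource_register_hz(cs, NSEC_PER_SEC / 100);
	hv_sched_clock_offset = cs->read(cs);	/* "time zero" for sched_clock */
	hv_setup_sched_clock(read_hv_sched_clock_xxx);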
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 61d5f3b539ce..37c39b901bb1 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -221,7 +221,7 @@ static int __init ostm_init(struct device_node *np)
}
rate = clk_get_rate(ostm_clk);
- ostm->ticks_per_jiffy = (rate + HZ / 2) / HZ;
+ ostm->ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
/*
* First probed device will be used as system clocksource. Any
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 55d3e03f2cd4..ef773db080e9 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -776,11 +776,8 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
int ret;
irq = platform_get_irq(ch->cmt->pdev, ch->index);
- if (irq < 0) {
- dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
- ch->index);
+ if (irq < 0)
return irq;
- }
ret = request_irq(irq, sh_cmt_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
@@ -921,13 +918,25 @@ static const struct platform_device_id sh_cmt_id_table[] = {
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
- { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
+ {
+ /* deprecated, preserved for backward compatibility */
+ .compatible = "renesas,cmt-48",
+ .data = &sh_cmt_info[SH_CMT_48BIT]
+ },
{
/* deprecated, preserved for backward compatibility */
.compatible = "renesas,cmt-48-gen2",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
{
+ .compatible = "renesas,r8a7740-cmt1",
+ .data = &sh_cmt_info[SH_CMT_48BIT]
+ },
+ {
+ .compatible = "renesas,sh73a0-cmt1",
+ .data = &sh_cmt_info[SH_CMT_48BIT]
+ },
+ {
.compatible = "renesas,rcar-gen2-cmt0",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 49f1c805fc95..8c4f3753b36e 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -462,11 +462,8 @@ static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
ch->base = tmu->mapbase + 8 + ch->index * 12;
ch->irq = platform_get_irq(tmu->pdev, index);
- if (ch->irq < 0) {
- dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
- ch->index);
+ if (ch->irq < 0)
return ch->irq;
- }
ch->cs_enabled = false;
ch->enable_count = 0;
diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
index 6ed31f9def7e..7427b07495a8 100644
--- a/drivers/clocksource/timer-atmel-tcb.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -125,6 +126,18 @@ static u64 notrace tc_sched_clock_read32(void)
return tc_get_cycles32(&clksrc);
}
+static struct delay_timer tc_delay_timer;
+
+static unsigned long tc_delay_timer_read(void)
+{
+ return tc_get_cycles(&clksrc);
+}
+
+static unsigned long notrace tc_delay_timer_read32(void)
+{
+ return tc_get_cycles32(&clksrc);
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -432,6 +445,7 @@ static int __init tcb_clksrc_init(struct device_node *node)
/* setup ony channel 0 */
tcb_setup_single_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read32;
+ tc_delay_timer.read_current_timer = tc_delay_timer_read32;
} else {
/* we have three clocks no matter what the
* underlying platform supports.
@@ -444,6 +458,7 @@ static int __init tcb_clksrc_init(struct device_node *node)
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(&tc, best_divisor_idx);
tc_sched_clock = tc_sched_clock_read;
+ tc_delay_timer.read_current_timer = tc_delay_timer_read;
}
/* and away we go! */
@@ -458,6 +473,9 @@ static int __init tcb_clksrc_init(struct device_node *node)
sched_clock_register(tc_sched_clock, 32, divided_rate);
+ tc_delay_timer.freq = divided_rate;
+ register_current_timer_delay(&tc_delay_timer);
+
return 0;
err_unregister_clksrc:
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index fd7d68066efb..b7c80a368a1b 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -20,6 +20,8 @@
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
+#define SYS_CTR_CLK_DIV 0x3
+
static void __iomem *sys_ctr_base;
static u32 cmpcr;
@@ -134,6 +136,9 @@ static int __init sysctr_timer_init(struct device_node *np)
if (ret)
return ret;
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
cmpcr &= ~SYS_CTR_EN;
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
index 8a30da7f083b..9780ffd8010e 100644
--- a/drivers/clocksource/timer-npcm7xx.c
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -32,7 +32,7 @@
#define NPCM7XX_Tx_INTEN BIT(29)
#define NPCM7XX_Tx_COUNTEN BIT(30)
#define NPCM7XX_Tx_ONESHOT 0x0
-#define NPCM7XX_Tx_OPER GENMASK(27, 3)
+#define NPCM7XX_Tx_OPER GENMASK(28, 27)
#define NPCM7XX_Tx_MIN_PRESCALE 0x1
#define NPCM7XX_Tx_TDR_MASK_BITS 24
#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
@@ -84,8 +84,6 @@ static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
-
- val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val |= NPCM7XX_START_ONESHOT_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
@@ -97,12 +95,11 @@ static int npcm7xx_timer_periodic(struct clock_event_device *evt)
struct timer_of *to = to_timer_of(evt);
u32 val;
+ writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
+
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
-
- writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
val |= NPCM7XX_START_PERIODIC_Tx;
-
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 80542289fae7..d8c2bd4391d0 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -113,8 +113,10 @@ static __init int timer_of_clk_init(struct device_node *np,
of_clk->clk = of_clk->name ? of_clk_get_by_name(np, of_clk->name) :
of_clk_get(np, of_clk->index);
if (IS_ERR(of_clk->clk)) {
- pr_err("Failed to get clock for %pOF\n", np);
- return PTR_ERR(of_clk->clk);
+ ret = PTR_ERR(of_clk->clk);
+ if (ret != -EPROBE_DEFER)
+ pr_err("Failed to get clock for %pOF\n", np);
+ goto out;
}
ret = clk_prepare_enable(of_clk->clk);
diff --git a/drivers/clocksource/timer-probe.c b/drivers/clocksource/timer-probe.c
index dda1946e84dd..ee9574da53c0 100644
--- a/drivers/clocksource/timer-probe.c
+++ b/drivers/clocksource/timer-probe.c
@@ -29,7 +29,9 @@ void __init timer_probe(void)
ret = init_func_ret(np);
if (ret) {
- pr_err("Failed to initialize '%pOF': %d\n", np, ret);
+ if (ret != -EPROBE_DEFER)
+ pr_err("Failed to initialize '%pOF': %d\n", np,
+ ret);
continue;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 5e6038fbf115..470c7ef02ea4 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
+ *
+ * All RISC-V systems have a timer attached to every hart. These timers can be
+ * read from the "time" and "timeh" CSRs, and can use the SBI to setup
+ * events.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -12,19 +16,6 @@
#include <asm/smp.h>
#include <asm/sbi.h>
-/*
- * All RISC-V systems have a timer attached to every hart. These timers can be
- * read by the 'rdcycle' pseudo instruction, and can use the SBI to setup
- * events. In order to abstract the architecture-specific timer reading and
- * setting functions away from the clock event insertion code, we provide
- * function pointers to the clockevent subsystem that perform two basic
- * operations: rdtime() reads the timer on the current CPU, and
- * next_event(delta) sets the next timer event to 'delta' cycles in the future.
- * As the timers are inherently a per-cpu resource, these callbacks perform
- * operations on the current hart. There is guaranteed to be exactly one timer
- * per hart on all RISC-V systems.
- */
-
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
@@ -55,7 +46,7 @@ static u64 riscv_sched_clock(void)
return get_cycles64();
}
-static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
+static struct clocksource riscv_clocksource = {
.name = "riscv_clocksource",
.rating = 300,
.mask = CLOCKSOURCE_MASK(64),
@@ -92,7 +83,6 @@ void riscv_timer_interrupt(void)
static int __init riscv_timer_init_dt(struct device_node *n)
{
int cpuid, hartid, error;
- struct clocksource *cs;
hartid = riscv_of_processor_hartid(n);
if (hartid < 0) {
@@ -112,8 +102,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
__func__, cpuid, hartid);
- cs = per_cpu_ptr(&riscv_clocksource, cpuid);
- error = clocksource_register_hz(cs, riscv_timebase);
+ error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
if (error) {
pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
error, cpuid);
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 65f38f6ca714..0ba8155b8287 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -219,5 +219,9 @@ static int __init sun4i_timer_init(struct device_node *node)
}
TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
+TIMER_OF_DECLARE(sun8i_a23, "allwinner,sun8i-a23-timer",
+ sun4i_timer_init);
+TIMER_OF_DECLARE(sun8i_v3s, "allwinner,sun8i-v3s-timer",
+ sun4i_timer_init);
TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
sun4i_timer_init);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 23553ed6b548..2d22d6bf52f2 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -248,16 +248,12 @@ static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
return 0;
}
-static struct cn_dev cdev = {
- .input = cn_rx_skb,
-};
-
static int cn_init(void)
{
struct cn_dev *dev = &cdev;
struct netlink_kernel_cfg cfg = {
.groups = CN_NETLINK_USERS + 0xf,
- .input = dev->input,
+ .input = cn_rx_skb,
};
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 68a9b7393457..4046aa9f9234 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -100,16 +100,18 @@ static void ftm_quaddec_init(struct ftm_quaddec *ftm)
ftm_set_write_protection(ftm);
}
-static void ftm_quaddec_disable(struct ftm_quaddec *ftm)
+static void ftm_quaddec_disable(void *ftm)
{
- ftm_clear_write_protection(ftm);
- ftm_write(ftm, FTM_MODE, 0);
- ftm_write(ftm, FTM_QDCTRL, 0);
+ struct ftm_quaddec *ftm_qua = ftm;
+
+ ftm_clear_write_protection(ftm_qua);
+ ftm_write(ftm_qua, FTM_MODE, 0);
+ ftm_write(ftm_qua, FTM_QDCTRL, 0);
/*
* This is enough to disable the counter. No clock has been
* selected by writing to FTM_SC in init()
*/
- ftm_set_write_protection(ftm);
+ ftm_set_write_protection(ftm_qua);
}
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
@@ -317,20 +319,13 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
ftm_quaddec_init(ftm);
- ret = counter_register(&ftm->counter);
+ ret = devm_add_action_or_reset(&pdev->dev, ftm_quaddec_disable, ftm);
if (ret)
- ftm_quaddec_disable(ftm);
-
- return ret;
-}
+ return ret;
-static int ftm_quaddec_remove(struct platform_device *pdev)
-{
- struct ftm_quaddec *ftm = platform_get_drvdata(pdev);
-
- counter_unregister(&ftm->counter);
-
- ftm_quaddec_disable(ftm);
+ ret = devm_counter_register(&pdev->dev, &ftm->counter);
+ if (ret)
+ return ret;
return 0;
}
@@ -346,7 +341,6 @@ static struct platform_driver ftm_quaddec_driver = {
.of_match_table = ftm_quaddec_match,
},
.probe = ftm_quaddec_probe,
- .remove = ftm_quaddec_remove,
};
module_platform_driver(ftm_quaddec_driver);
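The probe path now relies entirely on device-managed teardown: devm_add_action_or_reset() queues ftm_quaddec_disable() to run when the device is unbound (and calls it immediately if the queuing itself fails), and devm_counter_register() takes care of unregistering the counter, so the .remove callback can be deleted. The general pattern, with my_disable() and st as placeholder names:

	/* illustrative devm cleanup pattern */
	static void my_disable(void *data)
	{
		struct my_state *st = data;

		/* undo whatever probe set up for st */
	}

	...
	ret = devm_add_action_or_reset(&pdev->dev, my_disable, st);
	if (ret)
		return ret;	/* my_disable() has already been called */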
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 56c31a78c692..a905796f7f85 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,6 +19,18 @@ config ACPI_CPPC_CPUFREQ
If in doubt, say N.
+config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
+ tristate "Allwinner nvmem based SUN50I CPUFreq driver"
+ depends on ARCH_SUNXI
+ depends on NVMEM_SUNXI_SID
+ select PM_OPP
+ help
+ This adds the nvmem based CPUFreq driver for Allwinner
+ h6 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun50i-cpufreq-nvmem.
+
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
depends on ARCH_MVEBU && CPUFREQ_DT
@@ -120,8 +132,8 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
-config ARM_QCOM_CPUFREQ_KRYO
- tristate "Qualcomm Kryo based CPUFreq"
+config ARM_QCOM_CPUFREQ_NVMEM
+ tristate "Qualcomm nvmem based CPUFreq"
depends on ARM64
depends on QCOM_QFPROM
depends on QCOM_SMEM
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 5a6c70d26c98..9a9f5ccd13d9 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -64,7 +64,7 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
-obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
@@ -80,6 +80,7 @@ obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
+obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 988ebc326bdb..39e34f5066d3 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
nb_cpus = num_possible_cpus();
freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+ if (!freq_tables)
+ return -ENOMEM;
cpumask_copy(&cpus, cpu_possible_mask);
/*
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 03dc4244ab00..bca8d1f47fd2 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -101,12 +101,15 @@ static const struct of_device_id whitelist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blacklist[] __initconst = {
+ { .compatible = "allwinner,sun50i-h6", },
+
{ .compatible = "calxeda,highbank", },
{ .compatible = "calxeda,ecx-2000", },
{ .compatible = "fsl,imx7d", },
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
+ { .compatible = "fsl,imx8mn", },
{ .compatible = "marvell,armadaxp", },
@@ -117,12 +120,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt817x", },
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
+ { .compatible = "mediatek,mt8183", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+ { .compatible = "qcom,qcs404", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c52d6fa32aac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1266,7 +1266,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
DEV_PM_QOS_MAX_FREQUENCY);
dev_pm_qos_remove_notifier(dev, &policy->nb_min,
DEV_PM_QOS_MIN_FREQUENCY);
- dev_pm_qos_remove_request(policy->max_freq_req);
+
+ if (policy->max_freq_req) {
+ /*
+ * CPUFREQ_CREATE_POLICY notification is sent only after
+ * successfully adding max_freq_req request.
+ */
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+ dev_pm_qos_remove_request(policy->max_freq_req);
+ }
+
dev_pm_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
@@ -1391,6 +1401,9 @@ static int cpufreq_online(unsigned int cpu)
ret);
goto out_destroy_policy;
}
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
}
if (cpufreq_driver->get && has_target()) {
@@ -1807,8 +1820,8 @@ void cpufreq_suspend(void)
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
- pr_err("%s: Failed to suspend driver: %p\n", __func__,
- policy);
+ pr_err("%s: Failed to suspend driver: %s\n", __func__,
+ cpufreq_driver->name);
}
suspend:
@@ -2140,7 +2153,7 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret = -EINVAL;
+ int ret;
down_write(&policy->rwsem);
@@ -2347,15 +2360,13 @@ EXPORT_SYMBOL(cpufreq_get_policy);
* @policy: Policy object to modify.
* @new_policy: New policy data.
*
- * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
- * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
- * the driver's ->verify() callback again and run the notifiers for it again
- * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
- * of @new_policy to @policy and either invoke the driver's ->setpolicy()
- * callback (if present) or carry out a governor update for @policy. That is,
- * run the current governor's ->limits() callback (if the governor field in
- * @new_policy points to the same object as the one in @policy) or replace the
- * governor for @policy with the new one stored in @new_policy.
+ * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
+ * min and max parameters of @new_policy to @policy and either invoke the
+ * driver's ->setpolicy() callback (if present) or carry out a governor update
+ * for @policy. That is, run the current governor's ->limits() callback (if the
+ * governor field in @new_policy points to the same object as the one in
+ * @policy) or replace the governor for @policy with the new one stored in
+ * @new_policy.
*
* The cpuinfo part of @policy is not updated by this function.
*/
@@ -2383,26 +2394,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
if (ret)
return ret;
- /*
- * The notifier-chain shall be removed once all the users of
- * CPUFREQ_ADJUST are moved to use the QoS framework.
- */
- /* adjust if necessary - all reasons */
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_ADJUST, new_policy);
-
- /*
- * verify the cpu speed can be set within this limit, which might be
- * different to the first one
- */
- ret = cpufreq_driver->verify(new_policy);
- if (ret)
- return ret;
-
- /* notification of the new policy */
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_NOTIFY, new_policy);
-
policy->min = new_policy->min;
policy->max = new_policy->max;
trace_cpu_frequency_limits(policy);
@@ -2528,7 +2519,7 @@ static int cpufreq_boost_set_sw(int state)
}
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
- if (ret)
+ if (ret < 0)
break;
}
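The CPUFREQ_ADJUST/CPUFREQ_NOTIFY round trips are gone (min/max adjustments are expected to go through the PM QoS framework instead), and the policy notifier chain now signals CPUFREQ_CREATE_POLICY only after the max-frequency QoS request has been added successfully, with CPUFREQ_REMOVE_POLICY sent before that request is dropped. A hedged sketch of a consumer of those two events, using only the existing cpufreq notifier API; my_policy_cb and my_policy_nb are made-up names:

	/* illustrative policy notifier */
	static int my_policy_cb(struct notifier_block *nb,
				unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (event == CPUFREQ_CREATE_POLICY)
			pr_info("policy created for CPU%u\n", policy->cpu);
		else if (event == CPUFREQ_REMOVE_POLICY)
			pr_info("policy removed for CPU%u\n", policy->cpu);

		return NOTIFY_OK;
	}

	static struct notifier_block my_policy_nb = {
		.notifier_call = my_policy_cb,
	};

	/* registered once, e.g. from module init */
	cpufreq_register_notifier(&my_policy_nb, CPUFREQ_POLICY_NOTIFIER);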
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 4f85f3112784..35db14cf3102 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -16,6 +16,7 @@
#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
+#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
@@ -34,7 +35,12 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
- speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+ if (of_machine_is_compatible("fsl,imx8mn"))
+ speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
+ >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+ else
+ speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
+ >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc27d4c59dca..9f02de9a1b47 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
+#include <linux/pm_qos.h>
#include <trace/events/power.h>
#include <asm/div64.h>
@@ -1085,6 +1086,47 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
return count;
}
+static struct cpufreq_driver intel_pstate;
+
+static void update_qos_request(enum dev_pm_qos_req_type type)
+{
+ int max_state, turbo_max, freq, i, perf_pct;
+ struct dev_pm_qos_request *req;
+ struct cpufreq_policy *policy;
+
+ for_each_possible_cpu(i) {
+ struct cpudata *cpu = all_cpu_data[i];
+
+ policy = cpufreq_cpu_get(i);
+ if (!policy)
+ continue;
+
+ req = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
+ if (!req)
+ continue;
+
+ if (hwp_active)
+ intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
+ else
+ turbo_max = cpu->pstate.turbo_pstate;
+
+ if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+ perf_pct = global.min_perf_pct;
+ } else {
+ req++;
+ perf_pct = global.max_perf_pct;
+ }
+
+ freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
+ freq *= cpu->pstate.scaling;
+
+ if (dev_pm_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", i);
+ }
+}
+
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
@@ -1108,7 +1150,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_update_policies();
+ else
+ update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1139,7 +1184,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_update_policies();
+ else
+ update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1867,22 +1915,22 @@ static const struct pstate_funcs knl_funcs = {
(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
+ ICPU(INTEL_FAM6_HASWELL, core_funcs),
+ ICPU(INTEL_FAM6_BROADWELL, core_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs),
- ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs),
+ ICPU(INTEL_FAM6_HASWELL_L, core_funcs),
+ ICPU(INTEL_FAM6_HASWELL_G, core_funcs),
+ ICPU(INTEL_FAM6_BROADWELL_G, core_funcs),
ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
+ ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
@@ -1893,20 +1941,20 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
+ ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
- ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs),
+ ICPU(INTEL_FAM6_KABYLAKE, core_funcs),
{}
};
static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
- ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
{}
};
@@ -2332,8 +2380,16 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int ret = __intel_pstate_cpu_init(policy);
+ int max_state, turbo_max, min_freq, max_freq, ret;
+ struct dev_pm_qos_request *req;
+ struct cpudata *cpu;
+ struct device *dev;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+ ret = __intel_pstate_cpu_init(policy);
if (ret)
return ret;
@@ -2342,7 +2398,63 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* This reflects the intel_pstate_get_cpu_pstates() setting. */
policy->cur = policy->cpuinfo.min_freq;
+ req = kcalloc(2, sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto pstate_exit;
+ }
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (hwp_active)
+ intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
+ else
+ turbo_max = cpu->pstate.turbo_pstate;
+
+ min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
+ min_freq *= cpu->pstate.scaling;
+ max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+ max_freq *= cpu->pstate.scaling;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
+ min_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
+ goto free_req;
+ }
+
+ ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
+ max_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
+ goto remove_min_req;
+ }
+
+ policy->driver_data = req;
+
return 0;
+
+remove_min_req:
+ dev_pm_qos_remove_request(req);
+free_req:
+ kfree(req);
+pstate_exit:
+ intel_pstate_exit_perf_limits(policy);
+
+ return ret;
+}
+
+static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct dev_pm_qos_request *req;
+
+ req = policy->driver_data;
+
+ dev_pm_qos_remove_request(req + 1);
+ dev_pm_qos_remove_request(req);
+ kfree(req);
+
+ return intel_pstate_cpu_exit(policy);
}
static struct cpufreq_driver intel_cpufreq = {
@@ -2351,7 +2463,7 @@ static struct cpufreq_driver intel_cpufreq = {
.target = intel_cpufreq_target,
.fast_switch = intel_cpufreq_fast_switch,
.init = intel_cpufreq_cpu_init,
- .exit = intel_pstate_cpu_exit,
+ .exit = intel_cpufreq_cpu_exit,
.stop_cpu = intel_cpufreq_stop_cpu,
.update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
@@ -2624,7 +2736,7 @@ static inline void intel_pstate_request_control_from_smm(void) {}
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
- ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
ICPU_HWP(X86_MODEL_ANY, 0),
{}
};
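
The new intel_cpufreq_cpu_init() above seeds a pair of device PM QoS requests (min and max frequency) from the global performance percentages. Below is a minimal standalone sketch of just that arithmetic — plain user-space C with made-up turbo/scaling numbers, not kernel code — showing how the percentage limits are rounded up to a P-state count and converted to kHz before being handed to dev_pm_qos_add_request().

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int turbo_max = 36;      /* highest turbo P-state (illustrative) */
	int scaling = 100000;    /* kHz per P-state step on this CPU */
	int min_perf_pct = 25;   /* global.min_perf_pct */
	int max_perf_pct = 100;  /* global.max_perf_pct */

	int min_freq = DIV_ROUND_UP(turbo_max * min_perf_pct, 100) * scaling;
	int max_freq = DIV_ROUND_UP(turbo_max * max_perf_pct, 100) * scaling;

	/* these two values seed the DEV_PM_QOS_MIN/MAX_FREQUENCY requests */
	printf("min request: %d kHz, max request: %d kHz\n", min_freq, max_freq);
	return 0;
}
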
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f14f3a85f2f7..0c98dd08273d 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -338,7 +338,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
goto out_free_resources;
}
- proc_reg = regulator_get_exclusive(cpu_dev, "proc");
+ proc_reg = regulator_get_optional(cpu_dev, "proc");
if (IS_ERR(proc_reg)) {
if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
pr_warn("proc regulator for cpu%d not ready, retry.\n",
@@ -535,6 +535,8 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt817x", },
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
+ { .compatible = "mediatek,mt8183", },
+ { .compatible = "mediatek,mt8516", },
{ }
};
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 93f39a1d4c3d..c66f566a854c 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
+ if (!cpu)
+ goto out;
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
of_node_put(cpu);
- if (!cpu)
+ if (!max_freqp) {
+ err = -EINVAL;
goto out;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn)
@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
- if (!max_freqp) {
- err = -EINVAL;
- goto out_unmap_sdcpwr;
- }
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
@@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
return 0;
-out_unmap_sdcpwr:
- iounmap(sdcpwr_mapbase);
-
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index b83f36febf03..c58abb4cca3a 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -110,6 +110,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
#endif
policy->freq_table = cbe_freqs;
+ cbe_cpufreq_pmi_policy_init(policy);
+ return 0;
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cbe_cpufreq_pmi_policy_exit(policy);
return 0;
}
@@ -129,6 +136,7 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
+ .exit = cbe_cpufreq_cpu_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
@@ -139,15 +147,24 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
static int __init cbe_cpufreq_init(void)
{
+ int ret;
+
if (!machine_is(cell))
return -ENODEV;
- return cpufreq_register_driver(&cbe_cpufreq_driver);
+ cbe_cpufreq_pmi_init();
+
+ ret = cpufreq_register_driver(&cbe_cpufreq_driver);
+ if (ret)
+ cbe_cpufreq_pmi_exit();
+
+ return ret;
}
static void __exit cbe_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cbe_cpufreq_driver);
+ cbe_cpufreq_pmi_exit();
}
module_init(cbe_cpufreq_init);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index 9d973519d669..00cd8633b0d9 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -20,6 +20,14 @@ int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
extern bool cbe_cpufreq_has_pmi;
+void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_init(void);
+void cbe_cpufreq_pmi_exit(void);
#else
#define cbe_cpufreq_has_pmi (0)
+static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_init(void) {}
+static inline void cbe_cpufreq_pmi_exit(void) {}
#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 97c8ee4614b7..bc9dd30395c4 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -12,6 +12,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of_platform.h>
+#include <linux/pm_qos.h>
#include <asm/processor.h>
#include <asm/prom.h>
@@ -24,8 +25,6 @@
#include "ppc_cbe_cpufreq.h"
-static u8 pmi_slow_mode_limit[MAX_CBE];
-
bool cbe_cpufreq_has_pmi = false;
EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
@@ -65,64 +64,89 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
{
+ struct cpufreq_policy *policy;
+ struct dev_pm_qos_request *req;
u8 node, slow_mode;
+ int cpu, ret;
BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
node = pmi_msg.data1;
slow_mode = pmi_msg.data2;
- pmi_slow_mode_limit[node] = slow_mode;
+ cpu = cbe_node_to_cpu(node);
pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-}
-
-static int pmi_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
- u8 node;
-
- /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
- * policy events?)
- */
- node = cbe_cpu_to_node(policy->cpu);
-
- pr_debug("got notified, event=%lu, node=%u\n", event, node);
- if (pmi_slow_mode_limit[node] != 0) {
- pr_debug("limiting node %d to slow mode %d\n",
- node, pmi_slow_mode_limit[node]);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_warn("cpufreq policy not found cpu%d\n", cpu);
+ return;
+ }
- cpufreq_verify_within_limits(policy, 0,
+ req = policy->driver_data;
- cbe_freqs[pmi_slow_mode_limit[node]].frequency);
- }
+ ret = dev_pm_qos_update_request(req,
+ policy->freq_table[slow_mode].frequency);
+ if (ret < 0)
+ pr_warn("Failed to update freq constraint: %d\n", ret);
+ else
+ pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
- return 0;
+ cpufreq_cpu_put(policy);
}
-static struct notifier_block pmi_notifier_block = {
- .notifier_call = pmi_notifier,
-};
-
static struct pmi_handler cbe_pmi_handler = {
.type = PMI_TYPE_FREQ_CHANGE,
.handle_pmi_message = cbe_cpufreq_handle_pmi,
};
+void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!cbe_cpufreq_has_pmi)
+ return;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return;
+
+ ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ policy->freq_table[0].frequency);
+ if (ret < 0) {
+ pr_err("Failed to add freq constraint (%d)\n", ret);
+ kfree(req);
+ return;
+ }
+ policy->driver_data = req;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-static int __init cbe_cpufreq_pmi_init(void)
+void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
{
- cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
+ struct dev_pm_qos_request *req = policy->driver_data;
- if (!cbe_cpufreq_has_pmi)
- return -ENODEV;
+ if (cbe_cpufreq_has_pmi) {
+ dev_pm_qos_remove_request(req);
+ kfree(req);
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
- cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+void cbe_cpufreq_pmi_init(void)
+{
+ if (!pmi_register_handler(&cbe_pmi_handler))
+ cbe_cpufreq_has_pmi = true;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
- return 0;
+void cbe_cpufreq_pmi_exit(void)
+{
+ pmi_unregister_handler(&cbe_pmi_handler);
+ cbe_cpufreq_has_pmi = false;
}
-device_initcall(cbe_cpufreq_pmi_init);
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
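
The reworked PMI path above replaces the cpufreq policy notifier with a per-policy maximum-frequency QoS request kept in policy->driver_data. The toy model below (simplified user-space types and helper names, not the dev_pm_qos API) sketches that lifecycle: the request starts at the fastest table entry, and the PMI handler simply rewrites it with the reported slow mode.

#include <stdio.h>
#include <stdlib.h>

struct qos_request { unsigned int max_khz; };

struct policy {
	unsigned int freq_table[4];	/* index 0 = fastest mode */
	struct qos_request *driver_data;
};

/* policy init: pin the constraint to the fastest entry until PMI says otherwise */
static void pmi_policy_init(struct policy *p)
{
	p->driver_data = malloc(sizeof(*p->driver_data));
	if (!p->driver_data)
		return;
	p->driver_data->max_khz = p->freq_table[0];
}

/* PMI "frequency change" message: clamp the policy to the reported slow mode */
static void handle_pmi(struct policy *p, unsigned int slow_mode)
{
	if (p->driver_data)
		p->driver_data->max_khz = p->freq_table[slow_mode];
}

int main(void)
{
	struct policy p = { .freq_table = { 3200000, 2400000, 1600000, 800000 } };

	pmi_policy_init(&p);
	handle_pmi(&p, 2);	/* firmware limits the node to slow mode 2 */
	if (p.driver_data)
		printf("max constraint now %u kHz\n", p.driver_data->max_khz);
	free(p.driver_data);
	return 0;
}
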
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 4b0b50403901..a9ae2f84a4ef 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -20,6 +20,7 @@
#define LUT_VOLT GENMASK(11, 0)
#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
+#define LUT_TURBO_IND 1
/* Register offsets */
#define REG_ENABLE 0x0
@@ -34,9 +35,12 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
void __iomem *perf_state_reg = policy->driver_data;
+ unsigned long freq = policy->freq_table[index].frequency;
writel_relaxed(index, perf_state_reg);
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
return 0;
}
@@ -63,6 +67,7 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
{
void __iomem *perf_state_reg = policy->driver_data;
int index;
+ unsigned long freq;
index = policy->cached_resolved_idx;
if (index < 0)
@@ -70,16 +75,19 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
writel_relaxed(index, perf_state_reg);
- return policy->freq_table[index].frequency;
+ freq = policy->freq_table[index].frequency;
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+
+ return freq;
}
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
struct cpufreq_policy *policy,
void __iomem *base)
{
- u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+ u32 data, src, lval, i, core_count, prev_freq = 0, freq;
u32 volt;
- unsigned int max_cores = cpumask_weight(policy->cpus);
struct cpufreq_frequency_table *table;
table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
@@ -102,12 +110,12 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
else
freq = cpu_hw_rate / 1000;
- if (freq != prev_freq && core_count == max_cores) {
+ if (freq != prev_freq && core_count != LUT_TURBO_IND) {
table[i].frequency = freq;
dev_pm_opp_add(cpu_dev, freq * 1000, volt);
dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
freq, core_count);
- } else {
+ } else if (core_count == LUT_TURBO_IND) {
table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -115,14 +123,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
* Two of the same frequencies with the same core counts means
* end of table
*/
- if (i > 0 && prev_freq == freq && prev_cc == core_count) {
+ if (i > 0 && prev_freq == freq) {
struct cpufreq_frequency_table *prev = &table[i - 1];
/*
* Only treat the last frequency that might be a boost
* as the boost frequency
*/
- if (prev_cc != max_cores) {
+ if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
prev->frequency = prev_freq;
prev->flags = CPUFREQ_BOOST_FREQ;
dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
@@ -131,7 +139,6 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
break;
}
- prev_cc = core_count;
prev_freq = freq;
}
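
The LUT change above stops comparing core counts against the number of CPUs and instead treats core_count == LUT_TURBO_IND (1) as the marker for a boost row. This standalone sketch (illustrative table data, plain C, names chosen for the example) walks the same decision: a turbo row is first left invalid, and only when the next row repeats its frequency is it promoted to the single boost entry that terminates the table.

#include <stdio.h>

#define LUT_TURBO_IND	1
#define INVALID		0u

struct row { unsigned int freq; unsigned int core_count; };

int main(void)
{
	/* illustrative LUT: last real row is a turbo entry, then the LUT repeats */
	struct row lut[] = {
		{ 1100000, 4 }, { 1400000, 4 }, { 1800000, LUT_TURBO_IND },
		{ 1800000, 4 },
	};
	unsigned int table[8] = { 0 }, prev_freq = 0;
	int i, boost = -1;

	for (i = 0; i < 4; i++) {
		unsigned int freq = lut[i].freq;

		if (freq != prev_freq && lut[i].core_count != LUT_TURBO_IND)
			table[i] = freq;	/* regular OPP */
		else if (lut[i].core_count == LUT_TURBO_IND)
			table[i] = INVALID;	/* possible boost, decided later */

		/* two identical frequencies in a row terminate the table */
		if (i > 0 && prev_freq == freq) {
			if (table[i - 1] == INVALID) {	/* promote turbo row to boost */
				table[i - 1] = prev_freq;
				boost = i - 1;
			}
			break;
		}
		prev_freq = freq;
	}

	printf("boost index: %d (%u kHz)\n", boost, boost >= 0 ? table[boost] : 0u);
	return 0;
}
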
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
deleted file mode 100644
index dd64dcf89c74..000000000000
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
- */
-
-/*
- * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
- * the CPU frequency subset and voltage value of each OPP varies
- * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
- * defines the voltage and frequency value based on the msm-id in SMEM
- * and speedbin blown in the efuse combination.
- * The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
- * to provide the OPP framework with required information.
- * This is used to determine the voltage and frequency value for each OPP of
- * operating-points-v2 table when it is parsed by the OPP framework.
- */
-
-#include <linux/cpu.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/soc/qcom/smem.h>
-
-#define MSM_ID_SMEM 137
-
-enum _msm_id {
- MSM8996V3 = 0xF6ul,
- APQ8096V3 = 0x123ul,
- MSM8996SG = 0x131ul,
- APQ8096SG = 0x138ul,
-};
-
-enum _msm8996_version {
- MSM8996_V3,
- MSM8996_SG,
- NUM_OF_MSM8996_VERSIONS,
-};
-
-static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
-
-static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
-{
- size_t len;
- u32 *msm_id;
- enum _msm8996_version version;
-
- msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
- if (IS_ERR(msm_id))
- return NUM_OF_MSM8996_VERSIONS;
-
- /* The first 4 bytes are format, next to them is the actual msm-id */
- msm_id++;
-
- switch ((enum _msm_id)*msm_id) {
- case MSM8996V3:
- case APQ8096V3:
- version = MSM8996_V3;
- break;
- case MSM8996SG:
- case APQ8096SG:
- version = MSM8996_SG;
- break;
- default:
- version = NUM_OF_MSM8996_VERSIONS;
- }
-
- return version;
-}
-
-static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
-{
- struct opp_table **opp_tables;
- enum _msm8996_version msm8996_version;
- struct nvmem_cell *speedbin_nvmem;
- struct device_node *np;
- struct device *cpu_dev;
- unsigned cpu;
- u8 *speedbin;
- u32 versions;
- size_t len;
- int ret;
-
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev)
- return -ENODEV;
-
- msm8996_version = qcom_cpufreq_kryo_get_msm_id();
- if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
- dev_err(cpu_dev, "Not Snapdragon 820/821!");
- return -ENODEV;
- }
-
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
- if (!np)
- return -ENOENT;
-
- ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
- if (!ret) {
- of_node_put(np);
- return -ENOENT;
- }
-
- speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- of_node_put(np);
- if (IS_ERR(speedbin_nvmem)) {
- if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
- dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
- PTR_ERR(speedbin_nvmem));
- return PTR_ERR(speedbin_nvmem);
- }
-
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
- nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
-
- switch (msm8996_version) {
- case MSM8996_V3:
- versions = 1 << (unsigned int)(*speedbin);
- break;
- case MSM8996_SG:
- versions = 1 << ((unsigned int)(*speedbin) + 4);
- break;
- default:
- BUG();
- break;
- }
- kfree(speedbin);
-
- opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
- if (!opp_tables)
- return -ENOMEM;
-
- for_each_possible_cpu(cpu) {
- cpu_dev = get_cpu_device(cpu);
- if (NULL == cpu_dev) {
- ret = -ENODEV;
- goto free_opp;
- }
-
- opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev,
- &versions, 1);
- if (IS_ERR(opp_tables[cpu])) {
- ret = PTR_ERR(opp_tables[cpu]);
- dev_err(cpu_dev, "Failed to set supported hardware\n");
- goto free_opp;
- }
- }
-
- cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
- NULL, 0);
- if (!IS_ERR(cpufreq_dt_pdev)) {
- platform_set_drvdata(pdev, opp_tables);
- return 0;
- }
-
- ret = PTR_ERR(cpufreq_dt_pdev);
- dev_err(cpu_dev, "Failed to register platform device\n");
-
-free_opp:
- for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(opp_tables[cpu]))
- break;
- dev_pm_opp_put_supported_hw(opp_tables[cpu]);
- }
- kfree(opp_tables);
-
- return ret;
-}
-
-static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
-{
- struct opp_table **opp_tables = platform_get_drvdata(pdev);
- unsigned int cpu;
-
- platform_device_unregister(cpufreq_dt_pdev);
-
- for_each_possible_cpu(cpu)
- dev_pm_opp_put_supported_hw(opp_tables[cpu]);
-
- kfree(opp_tables);
-
- return 0;
-}
-
-static struct platform_driver qcom_cpufreq_kryo_driver = {
- .probe = qcom_cpufreq_kryo_probe,
- .remove = qcom_cpufreq_kryo_remove,
- .driver = {
- .name = "qcom-cpufreq-kryo",
- },
-};
-
-static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
- { .compatible = "qcom,apq8096", },
- { .compatible = "qcom,msm8996", },
- {}
-};
-
-/*
- * Since the driver depends on smem and nvmem drivers, which may
- * return EPROBE_DEFER, all the real activity is done in the probe,
- * which may be defered as well. The init here is only registering
- * the driver and the platform device.
- */
-static int __init qcom_cpufreq_kryo_init(void)
-{
- struct device_node *np = of_find_node_by_path("/");
- const struct of_device_id *match;
- int ret;
-
- if (!np)
- return -ENODEV;
-
- match = of_match_node(qcom_cpufreq_kryo_match_list, np);
- of_node_put(np);
- if (!match)
- return -ENODEV;
-
- ret = platform_driver_register(&qcom_cpufreq_kryo_driver);
- if (unlikely(ret < 0))
- return ret;
-
- kryo_cpufreq_pdev = platform_device_register_simple(
- "qcom-cpufreq-kryo", -1, NULL, 0);
- ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
- if (0 == ret)
- return 0;
-
- platform_driver_unregister(&qcom_cpufreq_kryo_driver);
- return ret;
-}
-module_init(qcom_cpufreq_kryo_init);
-
-static void __exit qcom_cpufreq_kryo_exit(void)
-{
- platform_device_unregister(kryo_cpufreq_pdev);
- platform_driver_unregister(&qcom_cpufreq_kryo_driver);
-}
-module_exit(qcom_cpufreq_kryo_exit);
-
-MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
new file mode 100644
index 000000000000..f0d2d5035413
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * In Certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
+ * the CPU frequency subset and voltage value of each OPP varies
+ * based on the silicon variant in use. Qualcomm Process Voltage Scaling Tables
+ * defines the voltage and frequency value based on the msm-id in SMEM
+ * and speedbin blown in the efuse combination.
+ * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
+ * to provide the OPP framework with required information.
+ * This is used to determine the voltage and frequency value for each OPP of
+ * operating-points-v2 table when it is parsed by the OPP framework.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+#define MSM_ID_SMEM 137
+
+enum _msm_id {
+ MSM8996V3 = 0xF6ul,
+ APQ8096V3 = 0x123ul,
+ MSM8996SG = 0x131ul,
+ APQ8096SG = 0x138ul,
+};
+
+enum _msm8996_version {
+ MSM8996_V3,
+ MSM8996_SG,
+ NUM_OF_MSM8996_VERSIONS,
+};
+
+struct qcom_cpufreq_drv;
+
+struct qcom_cpufreq_match_data {
+ int (*get_version)(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ struct qcom_cpufreq_drv *drv);
+ const char **genpd_names;
+};
+
+struct qcom_cpufreq_drv {
+ struct opp_table **opp_tables;
+ struct opp_table **genpd_opp_tables;
+ u32 versions;
+ const struct qcom_cpufreq_match_data *data;
+};
+
+static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+
+static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
+{
+ size_t len;
+ u32 *msm_id;
+ enum _msm8996_version version;
+
+ msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
+ if (IS_ERR(msm_id))
+ return NUM_OF_MSM8996_VERSIONS;
+
+ /* The first 4 bytes are format, next to them is the actual msm-id */
+ msm_id++;
+
+ switch ((enum _msm_id)*msm_id) {
+ case MSM8996V3:
+ case APQ8096V3:
+ version = MSM8996_V3;
+ break;
+ case MSM8996SG:
+ case APQ8096SG:
+ version = MSM8996_SG;
+ break;
+ default:
+ version = NUM_OF_MSM8996_VERSIONS;
+ }
+
+ return version;
+}
+
+static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ struct qcom_cpufreq_drv *drv)
+{
+ size_t len;
+ u8 *speedbin;
+ enum _msm8996_version msm8996_version;
+
+ msm8996_version = qcom_cpufreq_get_msm_id();
+ if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
+ dev_err(cpu_dev, "Not Snapdragon 820/821!");
+ return -ENODEV;
+ }
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (msm8996_version) {
+ case MSM8996_V3:
+ drv->versions = 1 << (unsigned int)(*speedbin);
+ break;
+ case MSM8996_SG:
+ drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ kfree(speedbin);
+ return 0;
+}
+
+static const struct qcom_cpufreq_match_data match_data_kryo = {
+ .get_version = qcom_cpufreq_kryo_name_version,
+};
+
+static const char *qcs404_genpd_names[] = { "cpr", NULL };
+
+static const struct qcom_cpufreq_match_data match_data_qcs404 = {
+ .genpd_names = qcs404_genpd_names,
+};
+
+static int qcom_cpufreq_probe(struct platform_device *pdev)
+{
+ struct qcom_cpufreq_drv *drv;
+ struct nvmem_cell *speedbin_nvmem;
+ struct device_node *np;
+ struct device *cpu_dev;
+ unsigned cpu;
+ const struct of_device_id *match;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return -ENOENT;
+
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ if (!ret) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ match = pdev->dev.platform_data;
+ drv->data = match->data;
+ if (!drv->data) {
+ ret = -ENODEV;
+ goto free_drv;
+ }
+
+ if (drv->data->get_version) {
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ if (IS_ERR(speedbin_nvmem)) {
+ if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
+ dev_err(cpu_dev,
+ "Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
+ ret = PTR_ERR(speedbin_nvmem);
+ goto free_drv;
+ }
+
+ ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv);
+ if (ret) {
+ nvmem_cell_put(speedbin_nvmem);
+ goto free_drv;
+ }
+ nvmem_cell_put(speedbin_nvmem);
+ }
+ of_node_put(np);
+
+ drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
+ GFP_KERNEL);
+ if (!drv->opp_tables) {
+ ret = -ENOMEM;
+ goto free_drv;
+ }
+
+ drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
+ sizeof(*drv->genpd_opp_tables),
+ GFP_KERNEL);
+ if (!drv->genpd_opp_tables) {
+ ret = -ENOMEM;
+ goto free_opp;
+ }
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (NULL == cpu_dev) {
+ ret = -ENODEV;
+ goto free_genpd_opp;
+ }
+
+ if (drv->data->get_version) {
+ drv->opp_tables[cpu] =
+ dev_pm_opp_set_supported_hw(cpu_dev,
+ &drv->versions, 1);
+ if (IS_ERR(drv->opp_tables[cpu])) {
+ ret = PTR_ERR(drv->opp_tables[cpu]);
+ dev_err(cpu_dev,
+ "Failed to set supported hardware\n");
+ goto free_genpd_opp;
+ }
+ }
+
+ if (drv->data->genpd_names) {
+ drv->genpd_opp_tables[cpu] =
+ dev_pm_opp_attach_genpd(cpu_dev,
+ drv->data->genpd_names,
+ NULL);
+ if (IS_ERR(drv->genpd_opp_tables[cpu])) {
+ ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cpu_dev,
+ "Could not attach to pm_domain: %d\n",
+ ret);
+ goto free_genpd_opp;
+ }
+ }
+ }
+
+ cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, drv);
+ return 0;
+ }
+
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ dev_err(cpu_dev, "Failed to register platform device\n");
+
+free_genpd_opp:
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
+ break;
+ dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
+ }
+ kfree(drv->genpd_opp_tables);
+free_opp:
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
+ break;
+ dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ }
+ kfree(drv->opp_tables);
+free_drv:
+ kfree(drv);
+
+ return ret;
+}
+
+static int qcom_cpufreq_remove(struct platform_device *pdev)
+{
+ struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
+ platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu) {
+ if (drv->opp_tables[cpu])
+ dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ if (drv->genpd_opp_tables[cpu])
+ dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
+ }
+
+ kfree(drv->opp_tables);
+ kfree(drv->genpd_opp_tables);
+ kfree(drv);
+
+ return 0;
+}
+
+static struct platform_driver qcom_cpufreq_driver = {
+ .probe = qcom_cpufreq_probe,
+ .remove = qcom_cpufreq_remove,
+ .driver = {
+ .name = "qcom-cpufreq-nvmem",
+ },
+};
+
+static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
+ { .compatible = "qcom,apq8096", .data = &match_data_kryo },
+ { .compatible = "qcom,msm8996", .data = &match_data_kryo },
+ { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+ {},
+};
+
+/*
+ * Since the driver depends on smem and nvmem drivers, which may
+ * return EPROBE_DEFER, all the real activity is done in the probe,
+ * which may be deferred as well. The init here is only registering
+ * the driver and the platform device.
+ */
+static int __init qcom_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(qcom_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&qcom_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (0 == ret)
+ return 0;
+
+ platform_driver_unregister(&qcom_cpufreq_driver);
+ return ret;
+}
+module_init(qcom_cpufreq_init);
+
+static void __exit qcom_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&qcom_cpufreq_driver);
+}
+module_exit(qcom_cpufreq_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver");
+MODULE_LICENSE("GPL v2");
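
The nvmem driver above generalises the old kryo driver: the matched of_device_id is passed to probe as platform_data, and its data selects an optional get_version() hook and/or genpd names. The speedbin handling itself is unchanged; the short sketch below (illustrative values and helper name, plain C) shows how a fuse value becomes the supported-hardware version bitmask handed to dev_pm_opp_set_supported_hw().

#include <stdio.h>

enum msm8996_version { MSM8996_V3, MSM8996_SG };

static unsigned int speedbin_to_versions(enum msm8996_version v, unsigned char speedbin)
{
	if (v == MSM8996_V3)
		return 1u << speedbin;		/* V3 bins use bits 0..3 */
	return 1u << (speedbin + 4);		/* SG bins live in bits 4..7 */
}

int main(void)
{
	/* e.g. an SG part fused as speed bin 1 advertises version bit 5 */
	printf("versions mask: 0x%x\n", speedbin_to_versions(MSM8996_SG, 1));
	return 0;
}
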
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
new file mode 100644
index 000000000000..eca32e443716
--- /dev/null
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner CPUFreq nvmem based driver
+ *
+ * The sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
+ * provide the OPP framework with required information.
+ *
+ * Copyright (C) 2019 Yangtao Li <tiny.windzz@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define MAX_NAME_LEN 7
+
+#define NVMEM_MASK 0x7
+#define NVMEM_SHIFT 5
+
+static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+
+/**
+ * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * @versions: Set to the value parsed from efuse
+ *
+ * Returns 0 on success.
+ */
+static int sun50i_cpufreq_get_efuse(u32 *versions)
+{
+ struct nvmem_cell *speedbin_nvmem;
+ struct device_node *np;
+ struct device *cpu_dev;
+ u32 *speedbin, efuse_value;
+ size_t len;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return -ENOENT;
+
+ ret = of_device_is_compatible(np,
+ "allwinner,sun50i-h6-operating-points");
+ if (!ret) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ of_node_put(np);
+ if (IS_ERR(speedbin_nvmem)) {
+ if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER)
+ pr_err("Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
+ return PTR_ERR(speedbin_nvmem);
+ }
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ nvmem_cell_put(speedbin_nvmem);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+ switch (efuse_value) {
+ case 0b0001:
+ *versions = 1;
+ break;
+ case 0b0011:
+ *versions = 2;
+ break;
+ default:
+ /*
+ * For other situations, we treat it as bin0.
+ * The bin0 voltage/frequency table can be run on any good CPU.
+ */
+ *versions = 0;
+ break;
+ }
+
+ kfree(speedbin);
+ return 0;
+};
+
+static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
+{
+ struct opp_table **opp_tables;
+ char name[MAX_NAME_LEN];
+ unsigned int cpu;
+ u32 speed = 0;
+ int ret;
+
+ opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables),
+ GFP_KERNEL);
+ if (!opp_tables)
+ return -ENOMEM;
+
+ ret = sun50i_cpufreq_get_efuse(&speed);
+ if (ret)
+ return ret;
+
+ snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ ret = -ENODEV;
+ goto free_opp;
+ }
+
+ opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (IS_ERR(opp_tables[cpu])) {
+ ret = PTR_ERR(opp_tables[cpu]);
+ pr_err("Failed to set prop name\n");
+ goto free_opp;
+ }
+ }
+
+ cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, opp_tables);
+ return 0;
+ }
+
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ pr_err("Failed to register platform device\n");
+
+free_opp:
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(opp_tables[cpu]))
+ break;
+ dev_pm_opp_put_prop_name(opp_tables[cpu]);
+ }
+ kfree(opp_tables);
+
+ return ret;
+}
+
+static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
+{
+ struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
+ platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tables[cpu]);
+
+ kfree(opp_tables);
+
+ return 0;
+}
+
+static struct platform_driver sun50i_cpufreq_driver = {
+ .probe = sun50i_cpufreq_nvmem_probe,
+ .remove = sun50i_cpufreq_nvmem_remove,
+ .driver = {
+ .name = "sun50i-cpufreq-nvmem",
+ },
+};
+
+static const struct of_device_id sun50i_cpufreq_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6" },
+ {}
+};
+
+static const struct of_device_id *sun50i_cpufreq_match_node(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(sun50i_cpufreq_match_list, np);
+ of_node_put(np);
+
+ return match;
+}
+
+/*
+ * Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
+ * all the real activity is done in the probe, which may be deferred as well.
+ * The init here is only registering the driver and the platform device.
+ */
+static int __init sun50i_cpufreq_init(void)
+{
+ const struct of_device_id *match;
+ int ret;
+
+ match = sun50i_cpufreq_match_node();
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&sun50i_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ sun50i_cpufreq_pdev =
+ platform_device_register_simple("sun50i-cpufreq-nvmem",
+ -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(sun50i_cpufreq_pdev);
+ if (ret == 0)
+ return 0;
+
+ platform_driver_unregister(&sun50i_cpufreq_driver);
+ return ret;
+}
+module_init(sun50i_cpufreq_init);
+
+static void __exit sun50i_cpufreq_exit(void)
+{
+ platform_device_unregister(sun50i_cpufreq_pdev);
+ platform_driver_unregister(&sun50i_cpufreq_driver);
+}
+module_exit(sun50i_cpufreq_exit);
+
+MODULE_DESCRIPTION("Sun50i-h6 cpufreq driver");
+MODULE_LICENSE("GPL v2");
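
For the Allwinner H6 the speed bin is derived from three bits of the efuse word and then turned into an OPP property-name suffix. Below is a standalone sketch of that decode with a made-up raw efuse value; the switch mirrors the sun50i_cpufreq_get_efuse() mapping above, and the resulting name is what the driver passes to dev_pm_opp_set_prop_name().

#include <stdio.h>

#define NVMEM_MASK  0x7
#define NVMEM_SHIFT 5

int main(void)
{
	unsigned int efuse_word = 0x60;	/* example raw value read from nvmem */
	unsigned int bin = (efuse_word >> NVMEM_SHIFT) & NVMEM_MASK;
	unsigned int speed;
	char name[8];

	switch (bin) {
	case 0x1: speed = 1; break;
	case 0x3: speed = 2; break;
	default:  speed = 0; break;	/* unknown fuses are treated as bin 0 */
	}

	/* the driver uses this as the OPP property-name suffix, e.g. "speed2" */
	snprintf(name, sizeof(name), "speed%u", speed);
	printf("%s\n", name);
	return 0;
}
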
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 2ad1ae17932d..aeaa883a8c9d 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -77,6 +77,7 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
case DRA7_EFUSE_HAS_ALL_MPU_OPP:
case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
+ /* Fall through */
case DRA7_EFUSE_HAS_OD_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
}
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index a4ac31e4a58c..88727b7c0d59 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -33,6 +33,17 @@ config CPU_IDLE_GOV_TEO
Some workloads benefit from using it and it generally should be safe
to use. Say Y here if you are not happy with the alternatives.
+config CPU_IDLE_GOV_HALTPOLL
+ bool "Haltpoll governor (for virtualized systems)"
+ depends on KVM_GUEST
+ help
+ This governor implements haltpoll idle state selection, to be
+ used in conjunction with the haltpoll cpuidle driver, allowing
+ for polling for a certain amount of time before entering idle
+ state.
+
+ Some virtualized workloads benefit from using it.
+
config DT_IDLE_STATES
bool
@@ -51,6 +62,15 @@ depends on PPC
source "drivers/cpuidle/Kconfig.powerpc"
endmenu
+config HALTPOLL_CPUIDLE
+ tristate "Halt poll cpuidle driver"
+ depends on X86 && KVM_GUEST
+ default y
+ help
+ This option enables the halt poll cpuidle driver, which allows polling
+ before halting in the guest (more efficient than polling in the
+ host via halt_poll_ns for some scenarios).
+
endif
config ARCH_NEEDS_CPU_IDLE_COUPLED
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 48cb3d4bb7d1..d8530475493c 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -13,6 +13,16 @@ config ARM_CPUIDLE
initialized by calling the CPU operations init idle hook
provided by architecture code.
+config ARM_PSCI_CPUIDLE
+ bool "PSCI CPU idle Driver"
+ depends on ARM_PSCI_FW
+ select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this to enable the PSCI firmware based CPUidle driver for ARM.
+ It provides an idle driver that is capable of detecting and
+ managing idle states through the PSCI firmware interface.
+
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9d7176cee3d3..ee70d5cc5b99 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,6 +7,7 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
+obj-$(CONFIG_HALTPOLL_CPUIDLE) += cpuidle-haltpoll.o
##################################################################################
# ARM SoC drivers
@@ -20,6 +21,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 5bcd82c35dcf..9e5156d39627 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
-#include <linux/topology.h>
#include <asm/cpuidle.h>
@@ -106,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu)
ret = arm_cpuidle_init(cpu);
/*
- * Allow the initialization to continue for other CPUs, if the reported
- * failure is a HW misconfiguration/breakage (-ENXIO).
+ * Allow the initialization to continue for other CPUs, if the
+ * reported failure is a HW misconfiguration/breakage (-ENXIO).
+ *
+ * Some platforms do not support idle operations
+ * (arm_cpuidle_init() returning -EOPNOTSUPP); we should
+ * not flag this case as an error, since it is a valid
+ * configuration.
*/
if (ret) {
- pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ if (ret != -EOPNOTSUPP)
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
ret = ret == -ENXIO ? 0 : ret;
goto out_kfree_drv;
}
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
new file mode 100644
index 000000000000..932390b028f1
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cpuidle driver for haltpoll governor.
+ *
+ * Copyright 2019 Red Hat, Inc. and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Authors: Marcelo Tosatti <mtosatti@redhat.com>
+ */
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <linux/sched/idle.h>
+#include <linux/kvm_para.h>
+#include <linux/cpuidle_haltpoll.h>
+
+static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
+static enum cpuhp_state haltpoll_hp_state;
+
+static int default_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ if (current_clr_polling_and_test()) {
+ local_irq_enable();
+ return index;
+ }
+ default_idle();
+ return index;
+}
+
+static struct cpuidle_driver haltpoll_driver = {
+ .name = "haltpoll",
+ .governor = "haltpoll",
+ .states = {
+ { /* entry 0 is for polling */ },
+ {
+ .enter = default_enter_idle,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = -1,
+ .name = "haltpoll idle",
+ .desc = "default architecture idle",
+ },
+ },
+ .safe_state_index = 0,
+ .state_count = 2,
+};
+
+static int haltpoll_cpu_online(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
+ if (!dev->registered) {
+ dev->cpu = cpu;
+ if (cpuidle_register_device(dev)) {
+ pr_notice("cpuidle_register_device %d failed!\n", cpu);
+ return -EIO;
+ }
+ arch_haltpoll_enable(cpu);
+ }
+
+ return 0;
+}
+
+static int haltpoll_cpu_offline(unsigned int cpu)
+{
+ struct cpuidle_device *dev;
+
+ dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
+ if (dev->registered) {
+ arch_haltpoll_disable(cpu);
+ cpuidle_unregister_device(dev);
+ }
+
+ return 0;
+}
+
+static void haltpoll_uninit(void)
+{
+ if (haltpoll_hp_state)
+ cpuhp_remove_state(haltpoll_hp_state);
+ cpuidle_unregister_driver(&haltpoll_driver);
+
+ free_percpu(haltpoll_cpuidle_devices);
+ haltpoll_cpuidle_devices = NULL;
+}
+
+static int __init haltpoll_init(void)
+{
+ int ret;
+ struct cpuidle_driver *drv = &haltpoll_driver;
+
+ cpuidle_poll_state_init(drv);
+
+ if (!kvm_para_available() ||
+ !kvm_para_has_hint(KVM_HINTS_REALTIME))
+ return -ENODEV;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret < 0)
+ return ret;
+
+ haltpoll_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (haltpoll_cpuidle_devices == NULL) {
+ cpuidle_unregister_driver(drv);
+ return -ENOMEM;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpuidle/haltpoll:online",
+ haltpoll_cpu_online, haltpoll_cpu_offline);
+ if (ret < 0) {
+ haltpoll_uninit();
+ } else {
+ haltpoll_hp_state = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void __exit haltpoll_exit(void)
+{
+ haltpoll_uninit();
+}
+
+module_init(haltpoll_init);
+module_exit(haltpoll_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marcelo Tosatti <mtosatti@redhat.com>");
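
The driver above registers one cpuidle device per online CPU through a dynamic cpuhp state, enabling the arch haltpoll hooks only while a device is registered. The toy model below (plain arrays instead of per-CPU data and the cpuhp API; printf stands in for the real calls named in the comments) sketches that online/offline lifecycle.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

struct dev { bool registered; };
static struct dev devices[NR_CPUS];

static int cpu_online_cb(unsigned int cpu)
{
	struct dev *dev = &devices[cpu];

	if (!dev->registered) {
		dev->registered = true;				/* cpuidle_register_device() */
		printf("cpu%u: haltpoll enabled\n", cpu);	/* arch_haltpoll_enable() */
	}
	return 0;
}

static int cpu_offline_cb(unsigned int cpu)
{
	struct dev *dev = &devices[cpu];

	if (dev->registered) {
		printf("cpu%u: haltpoll disabled\n", cpu);	/* arch_haltpoll_disable() */
		dev->registered = false;			/* cpuidle_unregister_device() */
	}
	return 0;
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_online_cb(cpu);
	cpu_offline_cb(2);	/* hotplug one CPU out and back in */
	cpu_online_cb(2);
	return 0;
}
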
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
new file mode 100644
index 000000000000..f3c1a2396f98
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PSCI CPU idle driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+
+#include <asm/cpuidle.h>
+
+#include "dt_idle_states.h"
+
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ u32 *state = __this_cpu_read(psci_power_state);
+
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ idx, state[idx - 1]);
+}
+
+static struct cpuidle_driver psci_idle_driver __initdata = {
+ .name = "psci_idle",
+ .owner = THIS_MODULE,
+ /*
+ * PSCI idle states rely on architectural WFI to
+ * be represented as state index 0.
+ */
+ .states[0] = {
+ .enter = psci_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ }
+};
+
+static const struct of_device_id psci_idle_state_match[] __initconst = {
+ { .compatible = "arm,idle-state",
+ .data = psci_enter_idle_state },
+ { },
+};
+
+static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!psci_power_state_is_valid(*state)) {
+ pr_warn("Invalid PSCI power state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+ int i, ret = 0, count = 0;
+ u32 *psci_states;
+ struct device_node *state_node;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+ of_node_put(state_node);
+
+ if (ret)
+ goto free_mem;
+
+ pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
+ }
+
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
+static __init int psci_cpu_init_idle(unsigned int cpu)
+{
+ struct device_node *cpu_node;
+ int ret;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static int __init psci_idle_init_cpu(int cpu)
+{
+ struct cpuidle_driver *drv;
+ struct device_node *cpu_node;
+ const char *enable_method;
+ int ret = 0;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
+
+ /*
+ * Check whether the enable-method for the cpu is PSCI, fail
+ * if it is not.
+ */
+ enable_method = of_get_property(cpu_node, "enable-method", NULL);
+ if (!enable_method || (strcmp(enable_method, "psci")))
+ ret = -ENODEV;
+
+ of_node_put(cpu_node);
+ if (ret)
+ return ret;
+
+ drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1, since
+ * by default idle state 0 is the quiescent state reached
+ * by the cpu by executing the wfi instruction.
+ *
+ * If no DT idle states are detected (ret == 0) let the driver
+ * initialization fail accordingly since there is no reason to
+ * initialize the idle driver if only wfi is supported, the
+ * default architectural back-end already executes wfi
+ * on idle entry.
+ */
+ ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Initialize PSCI idle states.
+ */
+ ret = psci_cpu_init_idle(cpu);
+ if (ret) {
+ pr_err("CPU %d failed to PSCI idle\n", cpu);
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret)
+ goto out_kfree_drv;
+
+ return 0;
+
+out_kfree_drv:
+ kfree(drv);
+ return ret;
+}
+
+/*
+ * psci_idle_init - Initializes PSCI cpuidle driver
+ *
+ * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * to register cpuidle driver then rollback to cancel all CPUs
+ * registration.
+ */
+static int __init psci_idle_init(void)
+{
+ int cpu, ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(cpu) {
+ ret = psci_idle_init_cpu(cpu);
+ if (ret)
+ goto out_fail;
+ }
+
+ return 0;
+
+out_fail:
+ while (--cpu >= 0) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
+ cpuidle_unregister(drv);
+ kfree(drv);
+ }
+
+ return ret;
+}
+device_initcall(psci_idle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0f4b7c45df3e..0895b988fa92 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -362,6 +362,36 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
}
/**
+ * cpuidle_poll_time - return the amount of time to poll for;
+ * governors can override dev->poll_limit_ns if necessary
+ *
+ * @drv: the cpuidle driver tied with the cpu
+ * @dev: the cpuidle device
+ *
+ */
+u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ int i;
+ u64 limit_ns;
+
+ if (dev->poll_limit_ns)
+ return dev->poll_limit_ns;
+
+ limit_ns = TICK_NSEC;
+ for (i = 1; i < drv->state_count; i++) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ }
+
+ dev->poll_limit_ns = limit_ns;
+
+ return dev->poll_limit_ns;
+}
+
+/**
* cpuidle_install_idle_handler - installs the cpuidle idle loop handler
*/
void cpuidle_install_idle_handler(void)
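
cpuidle_poll_time() added above gives the polling state a per-device time budget: one tick by default, otherwise the target residency (converted to ns) of the deepest state still enabled, cached in dev->poll_limit_ns so that governors can override it. A standalone sketch of that computation with made-up state data and an illustrative tick length:

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define TICK_NSEC     4000000ULL	/* 250 Hz tick, illustrative */

struct state { unsigned int target_residency_us; int disabled; };

static unsigned long long poll_time(const struct state *s, int count,
				    unsigned long long *cached)
{
	unsigned long long limit = TICK_NSEC;
	int i;

	if (*cached)
		return *cached;		/* already computed for this device */

	for (i = 1; i < count; i++) {
		if (s[i].disabled)
			continue;
		limit = (unsigned long long)s[i].target_residency_us * NSEC_PER_USEC;
	}

	*cached = limit;
	return limit;
}

int main(void)
{
	struct state states[] = { { 0, 0 }, { 20, 0 }, { 600, 1 } };
	unsigned long long cached = 0;

	printf("poll limit: %llu ns\n", poll_time(states, 3, &cached));
	return 0;
}
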
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index d6613101af92..9f336af17fa6 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -9,6 +9,7 @@
/* For internal use only */
extern char param_governor[];
extern struct cpuidle_governor *cpuidle_curr_governor;
+extern struct cpuidle_governor *cpuidle_prev_governor;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
@@ -22,6 +23,7 @@ extern void cpuidle_install_idle_handler(void);
extern void cpuidle_uninstall_idle_handler(void);
/* governors */
+extern struct cpuidle_governor *cpuidle_find_governor(const char *str);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index dc32f34e68d9..80c1a830d991 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -254,12 +254,25 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
+ struct cpuidle_governor *gov;
int ret;
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_driver(drv);
spin_unlock(&cpuidle_driver_lock);
+ if (!ret && !strlen(param_governor) && drv->governor &&
+ (cpuidle_get_driver() == drv)) {
+ mutex_lock(&cpuidle_lock);
+ gov = cpuidle_find_governor(drv->governor);
+ if (gov) {
+ cpuidle_prev_governor = cpuidle_curr_governor;
+ if (cpuidle_switch_governor(gov) < 0)
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
@@ -274,9 +287,21 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
+ bool enabled = (cpuidle_get_driver() == drv);
+
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_driver(drv);
spin_unlock(&cpuidle_driver_lock);
+
+ if (!enabled)
+ return;
+
+ mutex_lock(&cpuidle_lock);
+ if (cpuidle_prev_governor) {
+ if (!cpuidle_switch_governor(cpuidle_prev_governor))
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 2e3e14192bee..e9801f26c732 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -20,14 +20,15 @@ char param_governor[CPUIDLE_NAME_LEN];
LIST_HEAD(cpuidle_governors);
struct cpuidle_governor *cpuidle_curr_governor;
+struct cpuidle_governor *cpuidle_prev_governor;
/**
- * __cpuidle_find_governor - finds a governor of the specified name
+ * cpuidle_find_governor - finds a governor of the specified name
* @str: the name
*
* Must be called with cpuidle_lock acquired.
*/
-static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
+struct cpuidle_governor *cpuidle_find_governor(const char *str)
{
struct cpuidle_governor *gov;
@@ -87,7 +88,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
return -ENODEV;
mutex_lock(&cpuidle_lock);
- if (__cpuidle_find_governor(gov->name) == NULL) {
+ if (cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 42f44cc610dd..63abb5393a4d 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
+obj-$(CONFIG_CPU_IDLE_GOV_HALTPOLL) += haltpoll.o
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
new file mode 100644
index 000000000000..7a703d2e0064
--- /dev/null
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * haltpoll.c - haltpoll idle governor
+ *
+ * Copyright 2019 Red Hat, Inc. and/or its affiliates.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Authors: Marcelo Tosatti <mtosatti@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/kvm_para.h>
+
+static unsigned int guest_halt_poll_ns __read_mostly = 200000;
+module_param(guest_halt_poll_ns, uint, 0644);
+
+/* division factor to shrink halt_poll_ns */
+static unsigned int guest_halt_poll_shrink __read_mostly = 2;
+module_param(guest_halt_poll_shrink, uint, 0644);
+
+/* multiplication factor to grow per-cpu poll_limit_ns */
+static unsigned int guest_halt_poll_grow __read_mostly = 2;
+module_param(guest_halt_poll_grow, uint, 0644);
+
+/* value in us to start growing per-cpu halt_poll_ns */
+static unsigned int guest_halt_poll_grow_start __read_mostly = 50000;
+module_param(guest_halt_poll_grow_start, uint, 0644);
+
+/* allow shrinking guest halt poll */
+static bool guest_halt_poll_allow_shrink __read_mostly = true;
+module_param(guest_halt_poll_allow_shrink, bool, 0644);
+
+/**
+ * haltpoll_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
+ * @dev: the CPU
+ * @stop_tick: indication on whether or not to stop the tick
+ */
+static int haltpoll_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev,
+ bool *stop_tick)
+{
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+
+ if (!drv->state_count || latency_req == 0) {
+ *stop_tick = false;
+ return 0;
+ }
+
+ if (dev->poll_limit_ns == 0)
+ return 1;
+
+ /* Last state was poll? */
+ if (dev->last_state_idx == 0) {
+ /* Halt if no event occurred on poll window */
+ if (dev->poll_time_limit == true)
+ return 1;
+
+ *stop_tick = false;
+ /* Otherwise, poll again */
+ return 0;
+ }
+
+ *stop_tick = false;
+ /* Last state was halt: poll */
+ return 0;
+}
+
+static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+{
+ unsigned int val;
+ u64 block_ns = block_us*NSEC_PER_USEC;
+
+ /* Grow poll_limit_ns if
+ * poll_limit_ns < block_ns <= guest_halt_poll_ns
+ */
+ if (block_ns > dev->poll_limit_ns && block_ns <= guest_halt_poll_ns) {
+ val = dev->poll_limit_ns * guest_halt_poll_grow;
+
+ if (val < guest_halt_poll_grow_start)
+ val = guest_halt_poll_grow_start;
+ if (val > guest_halt_poll_ns)
+ val = guest_halt_poll_ns;
+
+ dev->poll_limit_ns = val;
+ } else if (block_ns > guest_halt_poll_ns &&
+ guest_halt_poll_allow_shrink) {
+ unsigned int shrink = guest_halt_poll_shrink;
+
+ val = dev->poll_limit_ns;
+ if (shrink == 0)
+ val = 0;
+ else
+ val /= shrink;
+ dev->poll_limit_ns = val;
+ }
+}
+
+/**
+ * haltpoll_reflect - update variables and update poll time
+ * @dev: the CPU
+ * @index: the index of actual entered state
+ */
+static void haltpoll_reflect(struct cpuidle_device *dev, int index)
+{
+ dev->last_state_idx = index;
+
+ if (index != 0)
+ adjust_poll_limit(dev, dev->last_residency);
+}
+
+/**
+ * haltpoll_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
+ * @dev: the CPU
+ */
+static int haltpoll_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ dev->poll_limit_ns = 0;
+
+ return 0;
+}
+
+static struct cpuidle_governor haltpoll_governor = {
+ .name = "haltpoll",
+ .rating = 9,
+ .enable = haltpoll_enable_device,
+ .select = haltpoll_select,
+ .reflect = haltpoll_reflect,
+};
+
+static int __init init_haltpoll(void)
+{
+ if (kvm_para_available())
+ return cpuidle_register_governor(&haltpoll_governor);
+
+ return 0;
+}
+
+postcore_initcall(init_haltpoll);
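
The governor's core heuristic is adjust_poll_limit(): the per-CPU poll window grows geometrically while observed blocking times stay within guest_halt_poll_ns and shrinks once they exceed it. Below is a standalone model of that arithmetic using the default module parameters (the three sample wakeups are made up); it is a sketch only, not the kernel code, and it omits the allow_shrink knob, which defaults to true.

#include <stdio.h>

static unsigned int guest_halt_poll_ns = 200000;
static unsigned int grow = 2, shrink = 2, grow_start = 50000;

static unsigned int adjust(unsigned int limit_ns, unsigned long long block_ns)
{
	/* grow while the halt still fits inside the global ceiling */
	if (block_ns > limit_ns && block_ns <= guest_halt_poll_ns) {
		unsigned int val = limit_ns * grow;

		if (val < grow_start)
			val = grow_start;
		if (val > guest_halt_poll_ns)
			val = guest_halt_poll_ns;
		return val;
	}
	/* shrink once a halt overshoots the ceiling */
	if (block_ns > guest_halt_poll_ns)
		return shrink ? limit_ns / shrink : 0;
	return limit_ns;
}

int main(void)
{
	unsigned int limit = 0;

	limit = adjust(limit, 30000);	/* short block: jump to grow_start  */
	limit = adjust(limit, 120000);	/* still under ceiling: double      */
	limit = adjust(limit, 5000000);	/* long halt: shrink the poll window */
	printf("final poll limit: %u ns\n", limit);
	return 0;
}
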
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index f0dddc66af26..428eeb832fe7 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -38,7 +38,6 @@ struct ladder_device_state {
struct ladder_device {
struct ladder_device_state states[CPUIDLE_STATE_MAX];
- int last_state_idx;
};
static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
@@ -49,12 +48,13 @@ static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
* @old_idx: the current state index
* @new_idx: the new target state index
*/
-static inline void ladder_do_selection(struct ladder_device *ldev,
+static inline void ladder_do_selection(struct cpuidle_device *dev,
+ struct ladder_device *ldev,
int old_idx, int new_idx)
{
ldev->states[old_idx].stats.promotion_count = 0;
ldev->states[old_idx].stats.demotion_count = 0;
- ldev->last_state_idx = new_idx;
+ dev->last_state_idx = new_idx;
}
/**
@@ -68,13 +68,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
- int last_residency, last_idx = ldev->last_state_idx;
+ int last_residency, last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = cpuidle_governor_latency_req(dev->cpu);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
- ladder_do_selection(ldev, last_idx, 0);
+ ladder_do_selection(dev, ldev, last_idx, 0);
return 0;
}
@@ -91,7 +91,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
- ladder_do_selection(ldev, last_idx, last_idx + 1);
+ ladder_do_selection(dev, ldev, last_idx, last_idx + 1);
return last_idx + 1;
}
}
@@ -107,7 +107,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
if (drv->states[i].exit_latency <= latency_req)
break;
}
- ladder_do_selection(ldev, last_idx, i);
+ ladder_do_selection(dev, ldev, last_idx, i);
return i;
}
@@ -116,7 +116,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
- ladder_do_selection(ldev, last_idx, last_idx - 1);
+ ladder_do_selection(dev, ldev, last_idx, last_idx - 1);
return last_idx - 1;
}
}
@@ -139,7 +139,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = first_idx;
+ dev->last_state_idx = first_idx;
for (i = first_idx; i < drv->state_count; i++) {
state = &drv->states[i];
@@ -167,9 +167,8 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
*/
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
- struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
if (index > 0)
- ldev->last_state_idx = index;
+ dev->last_state_idx = index;
}
static struct cpuidle_governor ladder_governor = {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e9a28c7846d6..e5a5d0c8d66b 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -117,7 +117,6 @@
*/
struct menu_device {
- int last_state_idx;
int needs_update;
int tick_wakeup;
@@ -302,9 +301,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
!drv->states[0].disabled && !dev->states_usage[0].disable)) {
/*
* In this case state[0] will be used no matter what, so return
- * it right away and keep the tick running.
+ * it right away and keep the tick running if state[0] is a
+ * polling one.
*/
- *stop_tick = false;
+ *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
return 0;
}
@@ -395,16 +395,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return idx;
}
- if (s->exit_latency > latency_req) {
- /*
- * If we break out of the loop for latency reasons, use
- * the target residency of the selected state as the
- * expected idle duration so that the tick is retained
- * as long as that target residency is low enough.
- */
- predicted_us = drv->states[idx].target_residency;
+ if (s->exit_latency > latency_req)
break;
- }
+
idx = i;
}
@@ -455,7 +448,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- data->last_state_idx = index;
+ dev->last_state_idx = index;
data->needs_update = 1;
data->tick_wakeup = tick_nohz_idle_got_tick();
}
@@ -468,7 +461,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- int last_idx = data->last_state_idx;
+ int last_idx = dev->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
unsigned int new_factor;
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 7d05efdbd3c6..b5a0e498f798 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -96,7 +96,6 @@ struct teo_idle_state {
* @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @states: Idle states data corresponding to this CPU.
- * @last_state: Idle state entered by the CPU last time.
* @interval_idx: Index of the most recent saved idle interval.
* @intervals: Saved idle duration values.
*/
@@ -104,7 +103,6 @@ struct teo_cpu {
u64 time_span_ns;
u64 sleep_length_ns;
struct teo_idle_state states[CPUIDLE_STATE_MAX];
- int last_state;
int interval_idx;
unsigned int intervals[INTERVALS];
};
@@ -125,12 +123,15 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
/*
- * One of the safety nets has triggered or this was a timer
- * wakeup (or equivalent).
+ * One of the safety nets has triggered or the wakeup was close
+ * enough to the closest timer event expected at the idle state
+ * selection time to be discarded.
*/
- measured_us = sleep_length_us;
+ measured_us = UINT_MAX;
} else {
- unsigned int lat = drv->states[cpu_data->last_state].exit_latency;
+ unsigned int lat;
+
+ lat = drv->states[dev->last_state_idx].exit_latency;
measured_us = ktime_to_us(cpu_data->time_span_ns);
/*
@@ -189,15 +190,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
/*
- * If the total time span between idle state selection and the "reflect"
- * callback is greater than or equal to the sleep length determined at
- * the idle state selection time, the wakeup is likely to be due to a
- * timer event.
- */
- if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns)
- measured_us = UINT_MAX;
-
- /*
* Save idle duration values corresponding to non-timer wakeups for
* pattern detection.
*/
@@ -242,12 +234,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
int latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int duration_us, count;
- int max_early_idx, idx, i;
+ int max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick;
- if (cpu_data->last_state >= 0) {
+ if (dev->last_state_idx >= 0) {
teo_update(drv, dev);
- cpu_data->last_state = -1;
+ dev->last_state_idx = -1;
}
cpu_data->time_span_ns = local_clock();
@@ -257,6 +249,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
count = 0;
max_early_idx = -1;
+ constraint_idx = drv->state_count;
idx = -1;
for (i = 0; i < drv->state_count; i++) {
@@ -286,16 +279,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (s->target_residency > duration_us)
break;
- if (s->exit_latency > latency_req) {
- /*
- * If we break out of the loop for latency reasons, use
- * the target residency of the selected state as the
- * expected idle duration to avoid stopping the tick
- * as long as that target residency is low enough.
- */
- duration_us = drv->states[idx].target_residency;
- goto refine;
- }
+ if (s->exit_latency > latency_req && constraint_idx > i)
+ constraint_idx = i;
idx = i;
@@ -321,7 +306,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
duration_us = drv->states[idx].target_residency;
}
-refine:
+ /*
+ * If there is a latency constraint, it may be necessary to use a
+ * shallower idle state than the one selected so far.
+ */
+ if (constraint_idx < idx)
+ idx = constraint_idx;
+
if (idx < 0) {
idx = 0; /* No states enabled. Must use 0. */
} else if (idx > 0) {
@@ -331,13 +322,12 @@ refine:
/*
* Count and sum the most recent idle duration values less than
- * the target residency of the state selected so far, find the
- * max.
+ * the current expected idle duration value.
*/
for (i = 0; i < INTERVALS; i++) {
unsigned int val = cpu_data->intervals[i];
- if (val >= drv->states[idx].target_residency)
+ if (val >= duration_us)
continue;
count++;
@@ -356,8 +346,10 @@ refine:
* would be too shallow.
*/
if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
- idx = teo_find_shallower_state(drv, dev, idx, avg_us);
duration_us = avg_us;
+ if (drv->states[idx].target_residency > avg_us)
+ idx = teo_find_shallower_state(drv, dev,
+ idx, avg_us);
}
}
}
@@ -394,7 +386,7 @@ static void teo_reflect(struct cpuidle_device *dev, int state)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- cpu_data->last_state = state;
+ dev->last_state_idx = state;
/*
* If the wakeup was not "natural", but triggered by one of the safety
* nets, assume that the CPU might have been idle for the entire sleep
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 02b9315a9e96..c8fa5f41dfc4 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -20,16 +20,9 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
- u64 limit = TICK_NSEC;
- int i;
+ u64 limit;
- for (i = 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
- continue;
-
- limit = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
- break;
- }
+ limit = cpuidle_poll_time(drv, dev);
while (!need_resched()) {
cpu_relax();
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index eb20adb5de23..2bb2683b493c 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -334,6 +334,7 @@ struct cpuidle_state_kobj {
struct cpuidle_state_usage *state_usage;
struct completion kobj_unregister;
struct kobject kobj;
+ struct cpuidle_device *device;
};
#ifdef CONFIG_SUSPEND
@@ -391,6 +392,7 @@ static inline void cpuidle_remove_s2idle_attr_group(struct cpuidle_state_kobj *k
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
+#define kobj_to_device(k) (kobj_to_state_obj(k)->device)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
@@ -414,10 +416,14 @@ static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr,
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
+ struct cpuidle_device *dev = kobj_to_device(kobj);
if (cattr->store)
ret = cattr->store(state, state_usage, buf, size);
+ /* reset poll time cache */
+ dev->poll_limit_ns = 0;
+
return ret;
}
@@ -468,6 +474,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
}
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
+ kobj->device = device;
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
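Taken together, the poll_state.c and sysfs.c hunks cache the polling limit per device and invalidate that cache from sysfs: poll_idle() now asks cpuidle_poll_time() for its limit, and the store callback zeroes dev->poll_limit_ns whenever a state attribute is written. The body of cpuidle_poll_time() is not shown in this diff, so the sketch below only assumes it follows a compute-once-then-cache shape; sketch_dev and sketch_poll_time() are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

struct sketch_dev {
        uint64_t poll_limit_ns;         /* 0 means "not computed yet" */
        int state_disabled[4];
};

/* stand-in for cpuidle_poll_time(): compute once, then reuse the cache */
static uint64_t sketch_poll_time(struct sketch_dev *dev)
{
        uint64_t limit = 4000000;       /* hypothetical TICK_NSEC-style fallback */
        int i;

        if (dev->poll_limit_ns)
                return dev->poll_limit_ns;

        for (i = 1; i < 4; i++) {
                if (dev->state_disabled[i])
                        continue;
                limit = (uint64_t)(i * 100) * 1000;     /* fake target residency */
                break;
        }

        dev->poll_limit_ns = limit;
        return limit;
}

int main(void)
{
        struct sketch_dev dev = { 0 };

        printf("limit %llu ns\n", (unsigned long long)sketch_poll_time(&dev));
        dev.state_disabled[1] = 1;
        dev.poll_limit_ns = 0;          /* what the sysfs store hunk does */
        printf("limit %llu ns\n", (unsigned long long)sketch_poll_time(&dev));
        return 0;
}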
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 83271d944a96..1fb622f2a87d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -145,6 +145,26 @@ config CRYPTO_SHA512_S390
It is available as of z10.
+config CRYPTO_SHA3_256_S390
+ tristate "SHA3_224 and SHA3_256 digest algorithm"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ This is the s390 hardware accelerated implementation of the
+ SHA3_256 secure hash standard.
+
+ It is available as of z14.
+
+config CRYPTO_SHA3_512_S390
+ tristate "SHA3_384 and SHA3_512 digest algorithm"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ This is the s390 hardware accelerated implementation of the
+ SHA3_512 secure hash standard.
+
+ It is available as of z14.
+
config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
depends on S390
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index f9fec2ddf56a..94c1ad7eeddf 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
+ switch (authsize) {
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ case 12:
+ case 8:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
return 0;
}
@@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
rctx->cmd.u.aes.type = ctx->u.aes.type;
rctx->cmd.u.aes.mode = ctx->u.aes.mode;
rctx->cmd.u.aes.action = encrypt;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index e58d69d4dd43..73acf0fdb793 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -555,6 +555,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
unsigned long flags;
unsigned int i;
+ /* If there's no device there's nothing to do */
+ if (!ccp)
+ return 0;
+
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 1;
@@ -579,6 +583,10 @@ int ccp_dev_resume(struct sp_device *sp)
unsigned long flags;
unsigned int i;
+ /* If there's no device there's nothing to do */
+ if (!ccp)
+ return 0;
+
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 0;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 591584a95028..c8da8eb160da 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -620,6 +620,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
unsigned long long *final;
unsigned int dm_offset;
+ unsigned int authsize;
unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
@@ -641,6 +642,21 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (!aes->key) /* Gotta have a key SGL */
return -EINVAL;
+ /* Zero defaults to 16 bytes, the maximum size */
+ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
+ switch (authsize) {
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ case 12:
+ case 8:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* First, decompose the source buffer into AAD & PT,
* and the destination buffer into AAD, CT & tag, or
* the input into CT & tag.
@@ -655,7 +671,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
} else {
/* Input length for decryption includes tag */
- ilen = aes->src_len - AES_BLOCK_SIZE;
+ ilen = aes->src_len - authsize;
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}
@@ -764,8 +780,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
while (src.sg_wa.bytes_left) {
ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
if (!src.sg_wa.bytes_left) {
- unsigned int nbytes = aes->src_len
- % AES_BLOCK_SIZE;
+ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
op.eom = 1;
@@ -837,19 +852,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
if (ret)
goto e_tag;
ret = crypto_memneq(tag.address, final_wa.address,
- AES_BLOCK_SIZE) ? -EBADMSG : 0;
+ authsize) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
@@ -857,11 +872,11 @@ e_tag:
ccp_dm_free(&final_wa);
e_dst:
- if (aes->src_len && !in_place)
+ if (ilen > 0 && !in_place)
ccp_free_data(&dst, cmd_q);
e_src:
- if (aes->src_len)
+ if (ilen > 0)
ccp_free_data(&src, cmd_q);
e_aad:
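The three ccp hunks above add GCM tag-length handling: setauthsize() and ccp_run_aes_gcm_cmd() accept only the tag sizes GCM allows (16 down to 12, 8 and 4 bytes), and on decryption the tag is no longer assumed to be a full AES block, so the ciphertext length becomes src_len - authsize. A standalone sketch of that validation and length arithmetic, with an illustrative src_len:

#include <stddef.h>
#include <stdio.h>

/* Valid GCM tag lengths accepted by the hunks above: 12..16, 8 and 4 bytes. */
static int gcm_authsize_ok(unsigned int authsize)
{
        switch (authsize) {
        case 16: case 15: case 14: case 13: case 12:
        case 8:
        case 4:
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        size_t src_len = 64 + 16;       /* hypothetical ciphertext plus a 16-byte tag */
        unsigned int authsize = 16;

        if (!gcm_authsize_ok(authsize))
                return 1;

        /* On decryption only src_len - authsize bytes are ciphertext. */
        printf("ciphertext bytes: %zu\n", src_len - authsize);
        return 0;
}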
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index ba98a4e3ad33..defe1d438710 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -93,15 +93,28 @@ config ARM_EXYNOS_BUS_DEVFREQ
This does not yet operate with optimal voltages.
config ARM_TEGRA_DEVFREQ
- tristate "Tegra DEVFREQ Driver"
- depends on ARCH_TEGRA_124_SOC
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ tristate "NVIDIA Tegra30/114/124/210 DEVFREQ Driver"
+ depends on ARCH_TEGRA_3x_SOC || ARCH_TEGRA_114_SOC || \
+ ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
+ ARCH_TEGRA_210_SOC || \
+ COMPILE_TEST
select PM_OPP
help
This adds the DEVFREQ driver for the Tegra family of SoCs.
It reads ACTMON counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
+config ARM_TEGRA20_DEVFREQ
+ tristate "NVIDIA Tegra20 DEVFREQ Driver"
+ depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
+ depends on COMMON_CLK
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_OPP
+ help
+ This adds the DEVFREQ driver for the Tegra20 family of SoCs.
+ It reads Memory Controller counters and adjusts the operating
+ frequencies and voltages with OPP support.
+
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
depends on ARCH_ROCKCHIP
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 32b8d4d3f12c..338ae8440db6 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -10,7 +10,8 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o
-obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
+obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra30-devfreq.o
+obj-$(CONFIG_ARM_TEGRA20_DEVFREQ) += tegra20-devfreq.o
# DEVFREQ Event Drivers
obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ab22bf8a12d6..446490c9d635 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
- return ERR_PTR(err);
+ return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
governor = find_devfreq_governor(name);
}
@@ -402,7 +402,7 @@ static void devfreq_monitor(struct work_struct *work)
* devfreq_monitor_start() - Start load monitoring of devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function for starting devfreq device load monitoing. By
+ * Helper function for starting devfreq device load monitoring. By
* default delayed work based monitoring is supported. Function
* to be called from governor in response to DEVFREQ_GOV_START
* event when device is added to devfreq framework.
@@ -420,7 +420,7 @@ EXPORT_SYMBOL(devfreq_monitor_start);
* devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function to stop devfreq device load monitoing. Function
+ * Helper function to stop devfreq device load monitoring. Function
* to be called from governor in response to DEVFREQ_GOV_STOP
* event when device is removed from devfreq framework.
*/
@@ -434,7 +434,7 @@ EXPORT_SYMBOL(devfreq_monitor_stop);
* devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function to suspend devfreq device load monitoing. Function
+ * Helper function to suspend devfreq device load monitoring. Function
* to be called from governor in response to DEVFREQ_GOV_SUSPEND
* event or when polling interval is set to zero.
*
@@ -461,7 +461,7 @@ EXPORT_SYMBOL(devfreq_monitor_suspend);
* devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function to resume devfreq device load monitoing. Function
+ * Helper function to resume devfreq device load monitoring. Function
* to be called from governor in response to DEVFREQ_GOV_RESUME
* event or when polling interval is set to non-zero.
*/
@@ -867,7 +867,7 @@ EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
/**
* devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
- * @dev: the device to add devfreq feature.
+ * @dev: the device from which to remove devfreq feature.
* @devfreq: the devfreq instance to be removed
*/
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
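The try_then_request_governor() fix above guards ERR_PTR(): it must only ever be fed a negative errno, so a positive err is mapped to -EINVAL instead of being cast into a bogus "pointer". A user-space sketch of the idea; err_ptr()/ptr_err() below are local stand-ins for the kernel ERR_PTR()/PTR_ERR() helpers, not the real macros:

#include <errno.h>
#include <stdio.h>

/* user-space stand-ins for the kernel ERR_PTR()/PTR_ERR() helpers */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }

static void *lookup(int err)
{
        /* The devfreq hunk: a positive err must not reach err_ptr(). */
        if (err)
                return (err < 0) ? err_ptr(err) : err_ptr(-EINVAL);
        return NULL;
}

int main(void)
{
        printf("%ld\n", ptr_err(lookup(-ENODEV)));   /* prints -ENODEV (-19 on Linux) */
        printf("%ld\n", ptr_err(lookup(2)));         /* normalized to -EINVAL */
        return 0;
}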
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 3ee3dd5653aa..87b42055e6bc 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
@@ -20,6 +21,11 @@
#include "exynos-ppmu.h"
+enum exynos_ppmu_type {
+ EXYNOS_TYPE_PPMU,
+ EXYNOS_TYPE_PPMU_V2,
+};
+
struct exynos_ppmu_data {
struct clk *clk;
};
@@ -33,6 +39,7 @@ struct exynos_ppmu {
struct regmap *regmap;
struct exynos_ppmu_data ppmu;
+ enum exynos_ppmu_type ppmu_type;
};
#define PPMU_EVENT(name) \
@@ -86,6 +93,12 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(d1-cpu),
PPMU_EVENT(d1-general),
PPMU_EVENT(d1-rt),
+
+ /* For Exynos5422 SoC */
+ PPMU_EVENT(dmc0_0),
+ PPMU_EVENT(dmc0_1),
+ PPMU_EVENT(dmc1_0),
+ PPMU_EVENT(dmc1_1),
};
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
@@ -151,9 +164,9 @@ static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
if (ret < 0)
return ret;
- /* Set the event of Read/Write data count */
+ /* Set the event of proper data type monitoring */
ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
- PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT);
+ edev->desc->event_type);
if (ret < 0)
return ret;
@@ -365,23 +378,11 @@ static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
if (ret < 0)
return ret;
- /* Set the event of Read/Write data count */
- switch (id) {
- case PPMU_PMNCNT0:
- case PPMU_PMNCNT1:
- case PPMU_PMNCNT2:
- ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
- PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT);
- if (ret < 0)
- return ret;
- break;
- case PPMU_PMNCNT3:
- ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
- PPMU_V2_EVT3_RW_DATA_CNT);
- if (ret < 0)
- return ret;
- break;
- }
+ /* Set the event of proper data type monitoring */
+ ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
+ edev->desc->event_type);
+ if (ret < 0)
+ return ret;
/* Reset cycle counter/performance counter and enable PPMU */
ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
@@ -480,31 +481,24 @@ static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
static const struct of_device_id exynos_ppmu_id_match[] = {
{
.compatible = "samsung,exynos-ppmu",
- .data = (void *)&exynos_ppmu_ops,
+ .data = (void *)EXYNOS_TYPE_PPMU,
}, {
.compatible = "samsung,exynos-ppmu-v2",
- .data = (void *)&exynos_ppmu_v2_ops,
+ .data = (void *)EXYNOS_TYPE_PPMU_V2,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
-static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
-{
- const struct of_device_id *match;
-
- match = of_match_node(exynos_ppmu_id_match, np);
- return (struct devfreq_event_ops *)match->data;
-}
-
static int of_get_devfreq_events(struct device_node *np,
struct exynos_ppmu *info)
{
struct devfreq_event_desc *desc;
- struct devfreq_event_ops *event_ops;
struct device *dev = info->dev;
struct device_node *events_np, *node;
int i, j, count;
+ const struct of_device_id *of_id;
+ int ret;
events_np = of_get_child_by_name(np, "events");
if (!events_np) {
@@ -512,7 +506,6 @@ static int of_get_devfreq_events(struct device_node *np,
"failed to get child node of devfreq-event devices\n");
return -EINVAL;
}
- event_ops = exynos_bus_get_ops(np);
count = of_get_child_count(events_np);
desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
@@ -520,6 +513,12 @@ static int of_get_devfreq_events(struct device_node *np,
return -ENOMEM;
info->num_events = count;
+ of_id = of_match_device(exynos_ppmu_id_match, dev);
+ if (of_id)
+ info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
+ else
+ return -EINVAL;
+
j = 0;
for_each_child_of_node(events_np, node) {
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
@@ -537,10 +536,51 @@ static int of_get_devfreq_events(struct device_node *np,
continue;
}
- desc[j].ops = event_ops;
+ switch (info->ppmu_type) {
+ case EXYNOS_TYPE_PPMU:
+ desc[j].ops = &exynos_ppmu_ops;
+ break;
+ case EXYNOS_TYPE_PPMU_V2:
+ desc[j].ops = &exynos_ppmu_v2_ops;
+ break;
+ }
+
desc[j].driver_data = info;
of_property_read_string(node, "event-name", &desc[j].name);
+ ret = of_property_read_u32(node, "event-data-type",
+ &desc[j].event_type);
+ if (ret) {
+ /* Set the event of proper data type counting.
+ * Check if the data type has been defined in DT,
+ * use default if not.
+ */
+ if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
+ struct devfreq_event_dev edev;
+ int id;
+ /* Not all registers take the same value for
+ * read+write data count.
+ */
+ edev.desc = &desc[j];
+ id = exynos_ppmu_find_ppmu_id(&edev);
+
+ switch (id) {
+ case PPMU_PMNCNT0:
+ case PPMU_PMNCNT1:
+ case PPMU_PMNCNT2:
+ desc[j].event_type = PPMU_V2_RO_DATA_CNT
+ | PPMU_V2_WO_DATA_CNT;
+ break;
+ case PPMU_PMNCNT3:
+ desc[j].event_type =
+ PPMU_V2_EVT3_RW_DATA_CNT;
+ break;
+ }
+ } else {
+ desc[j].event_type = PPMU_RO_DATA_CNT |
+ PPMU_WO_DATA_CNT;
+ }
+ }
j++;
}
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index d9f377912c10..c832673273a2 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#define DEFAULT_SATURATION_RATIO 40
-#define DEFAULT_VOLTAGE_TOLERANCE 2
struct exynos_bus {
struct device *dev;
@@ -34,9 +33,8 @@ struct exynos_bus {
unsigned long curr_freq;
- struct regulator *regulator;
+ struct opp_table *opp_table;
struct clk *clk;
- unsigned int voltage_tolerance;
unsigned int ratio;
};
@@ -90,62 +88,29 @@ static int exynos_bus_get_event(struct exynos_bus *bus,
}
/*
- * Must necessary function for devfreq simple-ondemand governor
+ * devfreq function for both simple-ondemand and passive governor
*/
static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
struct dev_pm_opp *new_opp;
- unsigned long old_freq, new_freq, new_volt, tol;
int ret = 0;
- /* Get new opp-bus instance according to new bus clock */
+ /* Get correct frequency for bus. */
new_opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(new_opp)) {
dev_err(dev, "failed to get recommended opp instance\n");
return PTR_ERR(new_opp);
}
- new_freq = dev_pm_opp_get_freq(new_opp);
- new_volt = dev_pm_opp_get_voltage(new_opp);
dev_pm_opp_put(new_opp);
- old_freq = bus->curr_freq;
-
- if (old_freq == new_freq)
- return 0;
- tol = new_volt * bus->voltage_tolerance / 100;
-
/* Change voltage and frequency according to new OPP level */
mutex_lock(&bus->lock);
+ ret = dev_pm_opp_set_rate(dev, *freq);
+ if (!ret)
+ bus->curr_freq = *freq;
- if (old_freq < new_freq) {
- ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
- if (ret < 0) {
- dev_err(bus->dev, "failed to set voltage\n");
- goto out;
- }
- }
-
- ret = clk_set_rate(bus->clk, new_freq);
- if (ret < 0) {
- dev_err(dev, "failed to change clock of bus\n");
- clk_set_rate(bus->clk, old_freq);
- goto out;
- }
-
- if (old_freq > new_freq) {
- ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
- if (ret < 0) {
- dev_err(bus->dev, "failed to set voltage\n");
- goto out;
- }
- }
- bus->curr_freq = new_freq;
-
- dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
- old_freq, new_freq, clk_get_rate(bus->clk));
-out:
mutex_unlock(&bus->lock);
return ret;
@@ -191,57 +156,12 @@ static void exynos_bus_exit(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to disable the devfreq-event devices\n");
- if (bus->regulator)
- regulator_disable(bus->regulator);
-
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
-}
-
-/*
- * Must necessary function for devfreq passive governor
- */
-static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
- u32 flags)
-{
- struct exynos_bus *bus = dev_get_drvdata(dev);
- struct dev_pm_opp *new_opp;
- unsigned long old_freq, new_freq;
- int ret = 0;
-
- /* Get new opp-bus instance according to new bus clock */
- new_opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(new_opp)) {
- dev_err(dev, "failed to get recommended opp instance\n");
- return PTR_ERR(new_opp);
+ if (bus->opp_table) {
+ dev_pm_opp_put_regulators(bus->opp_table);
+ bus->opp_table = NULL;
}
-
- new_freq = dev_pm_opp_get_freq(new_opp);
- dev_pm_opp_put(new_opp);
-
- old_freq = bus->curr_freq;
-
- if (old_freq == new_freq)
- return 0;
-
- /* Change the frequency according to new OPP level */
- mutex_lock(&bus->lock);
-
- ret = clk_set_rate(bus->clk, new_freq);
- if (ret < 0) {
- dev_err(dev, "failed to set the clock of bus\n");
- goto out;
- }
-
- *freq = new_freq;
- bus->curr_freq = new_freq;
-
- dev_dbg(dev, "Set the frequency of bus (%luHz -> %luHz, %luHz)\n",
- old_freq, new_freq, clk_get_rate(bus->clk));
-out:
- mutex_unlock(&bus->lock);
-
- return ret;
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -256,21 +176,19 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
struct exynos_bus *bus)
{
struct device *dev = bus->dev;
+ struct opp_table *opp_table;
+ const char *vdd = "vdd";
int i, ret, count, size;
- /* Get the regulator to provide each bus with the power */
- bus->regulator = devm_regulator_get(dev, "vdd");
- if (IS_ERR(bus->regulator)) {
- dev_err(dev, "failed to get VDD regulator\n");
- return PTR_ERR(bus->regulator);
- }
-
- ret = regulator_enable(bus->regulator);
- if (ret < 0) {
- dev_err(dev, "failed to enable VDD regulator\n");
+ opp_table = dev_pm_opp_set_regulators(dev, &vdd, 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ dev_err(dev, "failed to set regulators %d\n", ret);
return ret;
}
+ bus->opp_table = opp_table;
+
/*
* Get the devfreq-event devices to get the current utilization of
* buses. This raw data will be used in devfreq ondemand governor.
@@ -311,14 +229,11 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
bus->ratio = DEFAULT_SATURATION_RATIO;
- if (of_property_read_u32(np, "exynos,voltage-tolerance",
- &bus->voltage_tolerance))
- bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
-
return 0;
err_regulator:
- regulator_disable(bus->regulator);
+ dev_pm_opp_put_regulators(bus->opp_table);
+ bus->opp_table = NULL;
return ret;
}
@@ -383,6 +298,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
struct exynos_bus *bus;
int ret, max_state;
unsigned long min_freq, max_freq;
+ bool passive = false;
if (!np) {
dev_err(dev, "failed to find devicetree node\n");
@@ -396,27 +312,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
bus->dev = &pdev->dev;
platform_set_drvdata(pdev, bus);
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- return ret;
-
profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!profile)
+ return -ENOMEM;
node = of_parse_phandle(dev->of_node, "devfreq", 0);
if (node) {
of_node_put(node);
- goto passive;
+ passive = true;
} else {
ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
}
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
- goto err;
+ goto err_reg;
+
+ if (passive)
+ goto passive;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -468,7 +384,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
goto out;
passive:
/* Initialize the struct profile and governor data for passive device */
- profile->target = exynos_bus_passive_target;
+ profile->target = exynos_bus_target;
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
@@ -507,6 +423,11 @@ out:
err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
+err_reg:
+ if (!passive) {
+ dev_pm_opp_put_regulators(bus->opp_table);
+ bus->opp_table = NULL;
+ }
return ret;
}
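The exynos-bus conversion above drops the hand-rolled regulator_set_voltage_tol()/clk_set_rate() sequencing and registers the "vdd" supply with the OPP core, so dev_pm_opp_set_rate() applies the usual DVFS ordering rule: raise the voltage before raising the clock, lower it only after lowering the clock. A simplified model of that rule, assuming set_voltage()/set_clock() as stand-ins for what the OPP helper does internally:

#include <stdio.h>

static void set_voltage(unsigned long uv) { printf("volt  -> %lu uV\n", uv); }
static void set_clock(unsigned long hz)   { printf("clock -> %lu Hz\n", hz); }

static void opp_like_set_rate(unsigned long old_hz, unsigned long new_hz,
                              unsigned long new_uv)
{
        if (new_hz > old_hz) {          /* scaling up: voltage first */
                set_voltage(new_uv);
                set_clock(new_hz);
        } else {                        /* scaling down: clock first */
                set_clock(new_hz);
                set_voltage(new_uv);
        }
}

int main(void)
{
        opp_like_set_rate(100000000, 200000000, 1000000);
        opp_like_set_rate(200000000, 100000000, 900000);
        return 0;
}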
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 58308948b863..be6eeab9c814 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -149,7 +149,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
static int devfreq_passive_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
- struct device *dev = devfreq->dev.parent;
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent = (struct devfreq *)p_data->parent;
@@ -165,12 +164,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
p_data->this = devfreq;
nb->notifier_call = devfreq_passive_notifier_call;
- ret = devm_devfreq_register_notifier(dev, parent, nb,
+ ret = devfreq_register_notifier(parent, nb,
DEVFREQ_TRANSITION_NOTIFIER);
break;
case DEVFREQ_GOV_STOP:
- devm_devfreq_unregister_notifier(dev, parent, nb,
- DEVFREQ_TRANSITION_NOTIFIER);
+ WARN_ON(devfreq_unregister_notifier(parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER));
break;
default:
break;
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 682465fa57e1..2e65d7279d79 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -351,7 +351,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
/*
* Get dram timing and pass it to arm trust firmware,
- * the dram drvier in arm trust firmware will get these
+ * the dram driver in arm trust firmware will get these
* timing and to do dram initial.
*/
if (!of_get_ddr_timings(&data->timing, np)) {
diff --git a/drivers/devfreq/tegra20-devfreq.c b/drivers/devfreq/tegra20-devfreq.c
new file mode 100644
index 000000000000..ff82bac9ee4e
--- /dev/null
+++ b/drivers/devfreq/tegra20-devfreq.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVIDIA Tegra20 devfreq driver
+ *
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/mc.h>
+
+#include "governor.h"
+
+#define MC_STAT_CONTROL 0x90
+#define MC_STAT_EMC_CLOCK_LIMIT 0xa0
+#define MC_STAT_EMC_CLOCKS 0xa4
+#define MC_STAT_EMC_CONTROL 0xa8
+#define MC_STAT_EMC_COUNT 0xb8
+
+#define EMC_GATHER_CLEAR (1 << 8)
+#define EMC_GATHER_ENABLE (3 << 8)
+
+struct tegra_devfreq {
+ struct devfreq *devfreq;
+ struct clk *emc_clock;
+ void __iomem *regs;
+};
+
+static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+ struct devfreq *devfreq = tegra->devfreq;
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ err = clk_set_min_rate(tegra->emc_clock, rate);
+ if (err)
+ return err;
+
+ err = clk_set_rate(tegra->emc_clock, 0);
+ if (err)
+ goto restore_min_rate;
+
+ return 0;
+
+restore_min_rate:
+ clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
+
+ return err;
+}
+
+static int tegra_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+
+ /*
+ * EMC_COUNT returns number of memory events, that number is lower
+ * than the number of clocks. Conversion ratio of 1/8 results in a
+ * bit higher bandwidth than actually needed, it is good enough for
+ * the time being because drivers don't support requesting minimum
+ * needed memory bandwidth yet.
+ *
+ * TODO: adjust the ratio value once relevant drivers will support
+ * memory bandwidth management.
+ */
+ stat->busy_time = readl_relaxed(tegra->regs + MC_STAT_EMC_COUNT);
+ stat->total_time = readl_relaxed(tegra->regs + MC_STAT_EMC_CLOCKS) / 8;
+ stat->current_frequency = clk_get_rate(tegra->emc_clock);
+
+ writel_relaxed(EMC_GATHER_CLEAR, tegra->regs + MC_STAT_CONTROL);
+ writel_relaxed(EMC_GATHER_ENABLE, tegra->regs + MC_STAT_CONTROL);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+ .polling_ms = 500,
+ .target = tegra_devfreq_target,
+ .get_dev_status = tegra_devfreq_get_dev_status,
+};
+
+static struct tegra_mc *tegra_get_memory_controller(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+
+ np = of_find_compatible_node(NULL, NULL, "nvidia,tegra20-mc-gart");
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return mc;
+}
+
+static int tegra_devfreq_probe(struct platform_device *pdev)
+{
+ struct tegra_devfreq *tegra;
+ struct tegra_mc *mc;
+ unsigned long max_rate;
+ unsigned long rate;
+ int err;
+
+ mc = tegra_get_memory_controller();
+ if (IS_ERR(mc)) {
+ err = PTR_ERR(mc);
+ dev_err(&pdev->dev, "failed to get memory controller: %d\n",
+ err);
+ return err;
+ }
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ /* EMC is a system-critical clock that is always enabled */
+ tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(tegra->emc_clock)) {
+ err = PTR_ERR(tegra->emc_clock);
+ dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
+ return err;
+ }
+
+ tegra->regs = mc->regs;
+
+ max_rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+
+ for (rate = 0; rate <= max_rate; rate++) {
+ rate = clk_round_rate(tegra->emc_clock, rate);
+
+ err = dev_pm_opp_add(&pdev->dev, rate, 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add opp: %d\n", err);
+ goto remove_opps;
+ }
+ }
+
+ /*
+ * Reset statistic gathers state, select global bandwidth for the
+ * statistics collection mode and set clocks counter saturation
+ * limit to maximum.
+ */
+ writel_relaxed(0x00000000, tegra->regs + MC_STAT_CONTROL);
+ writel_relaxed(0x00000000, tegra->regs + MC_STAT_EMC_CONTROL);
+ writel_relaxed(0xffffffff, tegra->regs + MC_STAT_EMC_CLOCK_LIMIT);
+
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(tegra->devfreq)) {
+ err = PTR_ERR(tegra->devfreq);
+ goto remove_opps;
+ }
+
+ return 0;
+
+remove_opps:
+ dev_pm_opp_remove_all_dynamic(&pdev->dev);
+
+ return err;
+}
+
+static int tegra_devfreq_remove(struct platform_device *pdev)
+{
+ struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+
+ devfreq_remove_device(tegra->devfreq);
+ dev_pm_opp_remove_all_dynamic(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver tegra_devfreq_driver = {
+ .probe = tegra_devfreq_probe,
+ .remove = tegra_devfreq_remove,
+ .driver = {
+ .name = "tegra20-devfreq",
+ },
+};
+module_platform_driver(tegra_devfreq_driver);
+
+MODULE_ALIAS("platform:tegra20-devfreq");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 devfreq driver");
+MODULE_LICENSE("GPL v2");
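tegra_devfreq_target() above uses the clk_set_min_rate() plus clk_set_rate(clk, 0) pair: the driver publishes a floor for the shared EMC clock and then lets the clock core settle at whatever rate satisfies all published floors. A toy model of that aggregation, assuming the floors[]/settle() names, which are not part of the kernel clk API:

#include <stdio.h>

static unsigned long floors[2];

/* what clk_set_rate(clk, 0) effectively lets the core pick: the highest floor */
static unsigned long settle(void)
{
        unsigned long rate = 0;
        int i;

        for (i = 0; i < 2; i++)
                if (floors[i] > rate)
                        rate = floors[i];
        return rate;
}

int main(void)
{
        floors[0] = 204000000;  /* devfreq's request */
        floors[1] = 300000000;  /* hypothetical other EMC user */
        printf("EMC settles at %lu Hz\n", settle());
        return 0;
}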
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 35c38aad8b4f..a6ba75f4106d 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -132,7 +132,6 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
struct tegra_devfreq_device {
const struct tegra_devfreq_device_config *config;
void __iomem *regs;
- spinlock_t lock;
/* Average event count sampled in the last interrupt */
u32 avg_count;
@@ -160,6 +159,8 @@ struct tegra_devfreq {
struct notifier_block rate_change_nb;
struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
+
+ int irq;
};
struct tegra_actmon_emc_ratio {
@@ -179,23 +180,23 @@ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
- return readl(tegra->regs + offset);
+ return readl_relaxed(tegra->regs + offset);
}
static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
- writel(val, tegra->regs + offset);
+ writel_relaxed(val, tegra->regs + offset);
}
static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
- return readl(dev->regs + offset);
+ return readl_relaxed(dev->regs + offset);
}
static void device_writel(struct tegra_devfreq_device *dev, u32 val,
u32 offset)
{
- writel(val, dev->regs + offset);
+ writel_relaxed(val, dev->regs + offset);
}
static unsigned long do_percent(unsigned long val, unsigned int pct)
@@ -231,18 +232,14 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
static void actmon_write_barrier(struct tegra_devfreq *tegra)
{
/* ensure the update has reached the ACTMON */
- wmb();
- actmon_readl(tegra, ACTMON_GLB_STATUS);
+ readl(tegra->regs + ACTMON_GLB_STATUS);
}
static void actmon_isr_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- unsigned long flags;
u32 intr_status, dev_ctrl;
- spin_lock_irqsave(&dev->lock, flags);
-
dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
tegra_devfreq_update_avg_wmark(tegra, dev);
@@ -291,26 +288,6 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
actmon_write_barrier(tegra);
-
- spin_unlock_irqrestore(&dev->lock, flags);
-}
-
-static irqreturn_t actmon_isr(int irq, void *data)
-{
- struct tegra_devfreq *tegra = data;
- bool handled = false;
- unsigned int i;
- u32 val;
-
- val = actmon_readl(tegra, ACTMON_GLB_STATUS);
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- if (val & tegra->devices[i].config->irq_mask) {
- actmon_isr_device(tegra, tegra->devices + i);
- handled = true;
- }
- }
-
- return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
@@ -337,15 +314,12 @@ static void actmon_update_target(struct tegra_devfreq *tegra,
unsigned long cpu_freq = 0;
unsigned long static_cpu_emc_freq = 0;
unsigned int avg_sustain_coef;
- unsigned long flags;
if (dev->config->avg_dependency_threshold) {
cpu_freq = cpufreq_get(0);
static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
}
- spin_lock_irqsave(&dev->lock, flags);
-
dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
@@ -353,19 +327,31 @@ static void actmon_update_target(struct tegra_devfreq *tegra,
if (dev->avg_count >= dev->config->avg_dependency_threshold)
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
-
- spin_unlock_irqrestore(&dev->lock, flags);
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
{
struct tegra_devfreq *tegra = data;
+ bool handled = false;
+ unsigned int i;
+ u32 val;
mutex_lock(&tegra->devfreq->lock);
- update_devfreq(tegra->devfreq);
+
+ val = actmon_readl(tegra, ACTMON_GLB_STATUS);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ if (val & tegra->devices[i].config->irq_mask) {
+ actmon_isr_device(tegra, tegra->devices + i);
+ handled = true;
+ }
+ }
+
+ if (handled)
+ update_devfreq(tegra->devfreq);
+
mutex_unlock(&tegra->devfreq->lock);
- return IRQ_HANDLED;
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
@@ -375,7 +361,6 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
unsigned int i;
- unsigned long flags;
if (action != POST_RATE_CHANGE)
return NOTIFY_OK;
@@ -387,9 +372,7 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
dev = &tegra->devices[i];
- spin_lock_irqsave(&dev->lock, flags);
tegra_devfreq_update_wmark(tegra, dev);
- spin_unlock_irqrestore(&dev->lock, flags);
}
actmon_write_barrier(tegra);
@@ -397,48 +380,6 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
-{
- struct tegra_devfreq_device *dev;
- u32 val;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- dev = &tegra->devices[i];
-
- val = device_readl(dev, ACTMON_DEV_CTRL);
- val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
- val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
-
- device_writel(dev, val, ACTMON_DEV_CTRL);
- }
-
- actmon_write_barrier(tegra);
-}
-
-static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
-{
- struct tegra_devfreq_device *dev;
- u32 val;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- dev = &tegra->devices[i];
-
- val = device_readl(dev, ACTMON_DEV_CTRL);
- val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
- val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
-
- device_writel(dev, val, ACTMON_DEV_CTRL);
- }
-
- actmon_write_barrier(tegra);
-}
-
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
@@ -462,34 +403,80 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
+ val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_ENB;
device_writel(dev, val, ACTMON_DEV_CTRL);
+}
+
+static void tegra_actmon_start(struct tegra_devfreq *tegra)
+{
+ unsigned int i;
+
+ disable_irq(tegra->irq);
+
+ actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+ ACTMON_GLB_PERIOD_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
+ tegra_actmon_configure_device(tegra, &tegra->devices[i]);
+
+ actmon_write_barrier(tegra);
+
+ enable_irq(tegra->irq);
+}
+
+static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+{
+ unsigned int i;
+
+ disable_irq(tegra->irq);
+
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+ device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
+ device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
+ ACTMON_DEV_INTR_STATUS);
+ }
actmon_write_barrier(tegra);
+
+ enable_irq(tegra->irq);
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+ struct devfreq *devfreq = tegra->devfreq;
struct dev_pm_opp *opp;
- unsigned long rate = *freq * KHZ;
+ unsigned long rate;
+ int err;
- opp = devfreq_recommended_opp(dev, &rate, flags);
+ opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp)) {
- dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+ dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
return PTR_ERR(opp);
}
rate = dev_pm_opp_get_freq(opp);
dev_pm_opp_put(opp);
- clk_set_min_rate(tegra->emc_clock, rate);
- clk_set_rate(tegra->emc_clock, 0);
+ err = clk_set_min_rate(tegra->emc_clock, rate);
+ if (err)
+ return err;
- *freq = rate;
+ err = clk_set_rate(tegra->emc_clock, 0);
+ if (err)
+ goto restore_min_rate;
return 0;
+
+restore_min_rate:
+ clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
+
+ return err;
}
static int tegra_devfreq_get_dev_status(struct device *dev,
@@ -497,13 +484,15 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
{
struct tegra_devfreq *tegra = dev_get_drvdata(dev);
struct tegra_devfreq_device *actmon_dev;
+ unsigned long cur_freq;
- stat->current_frequency = tegra->cur_freq;
+ cur_freq = READ_ONCE(tegra->cur_freq);
/* To be used by the tegra governor */
stat->private_data = tegra;
/* The below are to be used by the other governors */
+ stat->current_frequency = cur_freq * KHZ;
actmon_dev = &tegra->devices[MCALL];
@@ -514,7 +503,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
/* Number of cycles in a sampling period */
- stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+ stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
stat->busy_time = min(stat->busy_time, stat->total_time);
@@ -553,7 +542,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
target_freq = max(target_freq, dev->target_freq);
}
- *freq = target_freq;
+ *freq = target_freq * KHZ;
return 0;
}
@@ -566,22 +555,22 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
switch (event) {
case DEVFREQ_GOV_START:
devfreq_monitor_start(devfreq);
- tegra_actmon_enable_interrupts(tegra);
+ tegra_actmon_start(tegra);
break;
case DEVFREQ_GOV_STOP:
- tegra_actmon_disable_interrupts(tegra);
+ tegra_actmon_stop(tegra);
devfreq_monitor_stop(devfreq);
break;
case DEVFREQ_GOV_SUSPEND:
- tegra_actmon_disable_interrupts(tegra);
+ tegra_actmon_stop(tegra);
devfreq_monitor_suspend(devfreq);
break;
case DEVFREQ_GOV_RESUME:
devfreq_monitor_resume(devfreq);
- tegra_actmon_enable_interrupts(tegra);
+ tegra_actmon_start(tegra);
break;
}
@@ -592,25 +581,22 @@ static struct devfreq_governor tegra_devfreq_governor = {
.name = "tegra_actmon",
.get_target_freq = tegra_governor_get_target,
.event_handler = tegra_governor_event_handler,
+ .immutable = true,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
{
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
- struct resource *res;
unsigned int i;
unsigned long rate;
- int irq;
int err;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
@@ -632,13 +618,10 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return PTR_ERR(tegra->emc_clock);
}
- clk_set_rate(tegra->emc_clock, ULONG_MAX);
-
- tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
- err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to register rate change notifier\n");
+ tegra->irq = platform_get_irq(pdev, 0);
+ if (tegra->irq < 0) {
+ err = tegra->irq;
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
return err;
}
@@ -656,73 +639,94 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
- actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
- ACTMON_GLB_PERIOD_CTRL);
-
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
dev->config = actmon_device_configs + i;
dev->regs = tegra->regs + dev->config->offset;
- spin_lock_init(&dev->lock);
-
- tegra_actmon_configure_device(tegra, dev);
}
for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
rate = clk_round_rate(tegra->emc_clock, rate);
- dev_pm_opp_add(&pdev->dev, rate, 0);
- }
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
- return irq;
+ err = dev_pm_opp_add(&pdev->dev, rate, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
+ goto remove_opps;
+ }
}
platform_set_drvdata(pdev, tegra);
- err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
- actmon_thread_isr, IRQF_SHARED,
- "tegra-devfreq", tegra);
+ tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
+ err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
if (err) {
- dev_err(&pdev->dev, "Interrupt request failed\n");
- return err;
+ dev_err(&pdev->dev,
+ "Failed to register rate change notifier\n");
+ goto remove_opps;
+ }
+
+ err = devfreq_add_governor(&tegra_devfreq_governor);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
+ goto unreg_notifier;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
- tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
- &tegra_devfreq_profile,
- "tegra_actmon",
- NULL);
+ tegra->devfreq = devfreq_add_device(&pdev->dev,
+ &tegra_devfreq_profile,
+ "tegra_actmon",
+ NULL);
+ if (IS_ERR(tegra->devfreq)) {
+ err = PTR_ERR(tegra->devfreq);
+ goto remove_governor;
+ }
+
+ err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
+ actmon_thread_isr, IRQF_ONESHOT,
+ "tegra-devfreq", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
+ goto remove_devfreq;
+ }
return 0;
+
+remove_devfreq:
+ devfreq_remove_device(tegra->devfreq);
+
+remove_governor:
+ devfreq_remove_governor(&tegra_devfreq_governor);
+
+unreg_notifier:
+ clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+remove_opps:
+ dev_pm_opp_remove_all_dynamic(&pdev->dev);
+
+ reset_control_reset(tegra->reset);
+ clk_disable_unprepare(tegra->clock);
+
+ return err;
}
static int tegra_devfreq_remove(struct platform_device *pdev)
{
struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
- u32 val;
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
- val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
- val &= ~ACTMON_DEV_CTRL_ENB;
- device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
- }
-
- actmon_write_barrier(tegra);
-
- devm_free_irq(&pdev->dev, irq, tegra);
+ devfreq_remove_device(tegra->devfreq);
+ devfreq_remove_governor(&tegra_devfreq_governor);
clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+ dev_pm_opp_remove_all_dynamic(&pdev->dev);
+ reset_control_reset(tegra->reset);
clk_disable_unprepare(tegra->clock);
return 0;
}
static const struct of_device_id tegra_devfreq_of_match[] = {
+ { .compatible = "nvidia,tegra30-actmon" },
{ .compatible = "nvidia,tegra124-actmon" },
{ },
};
@@ -737,36 +741,7 @@ static struct platform_driver tegra_devfreq_driver = {
.of_match_table = tegra_devfreq_of_match,
},
};
-
-static int __init tegra_devfreq_init(void)
-{
- int ret = 0;
-
- ret = devfreq_add_governor(&tegra_devfreq_governor);
- if (ret) {
- pr_err("%s: failed to add governor: %d\n", __func__, ret);
- return ret;
- }
-
- ret = platform_driver_register(&tegra_devfreq_driver);
- if (ret)
- devfreq_remove_governor(&tegra_devfreq_governor);
-
- return ret;
-}
-module_init(tegra_devfreq_init)
-
-static void __exit tegra_devfreq_exit(void)
-{
- int ret = 0;
-
- platform_driver_unregister(&tegra_devfreq_driver);
-
- ret = devfreq_remove_governor(&tegra_devfreq_governor);
- if (ret)
- pr_err("%s: failed to remove governor: %d\n", __func__, ret);
-}
-module_exit(tegra_devfreq_exit)
+module_platform_driver(tegra_devfreq_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 03fa0c58cef3..7c511e3db4c8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -294,8 +294,8 @@ config INTEL_IOATDMA
If unsure, say N.
config INTEL_IOP_ADMA
- tristate "Intel IOP ADMA support"
- depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ tristate "Intel IOP32x ADMA support"
+ depends on ARCH_IOP32X || COMPILE_TEST
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 30243f5c0710..8a05db3343d3 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -82,6 +83,12 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
return 0;
+ /* Set up DMA mask based on value from CSRT */
+ ret = dma_coerce_mask_and_coherent(&adev->dev,
+ DMA_BIT_MASK(si->dma_address_width));
+ if (ret)
+ return 0;
+
adma->base_request_line = si->base_request_line;
adma->end_request_line = si->base_request_line +
si->num_handshake_signals - 1;
@@ -140,7 +147,7 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
* @dev: struct device of DMA controller
* @acpi_dma_xlate: translation function which converts a dma specifier
* into a dma_chan structure
- * @data pointer to controller specific data to be used by
+ * @data: pointer to controller specific data to be used by
* translation function
*
* Allocated memory should be freed with appropriate acpi_dma_controller_free()
@@ -224,7 +231,7 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
* devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
* @dev: device that is registering this DMA controller
* @acpi_dma_xlate: translation function
- * @data pointer to controller specific data
+ * @data: pointer to controller specific data
*
* Managed acpi_dma_controller_register(). DMA controller registered by this
* function are automatically freed on driver detach. See
@@ -257,6 +264,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
/**
* devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ * @dev: device that is unregistering as DMA controller
*
* Unregister a DMA controller registered with
* devm_acpi_dma_controller_register(). Normally this function will not need to
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 8101ff2f05c1..e4c593f48575 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -37,10 +37,19 @@
#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8
+/**
+ * struct bcm2835_dmadev - BCM2835 DMA controller
+ * @ddev: DMA device
+ * @base: base address of register map
+ * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
+ * @zero_page: bus address of zero page (to detect transactions copying from
+ * zero page and avoid accessing memory if so)
+ */
struct bcm2835_dmadev {
struct dma_device ddev;
void __iomem *base;
struct device_dma_parameters dma_parms;
+ dma_addr_t zero_page;
};
struct bcm2835_dma_cb {
@@ -687,11 +696,12 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
+ struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_desc *d;
dma_addr_t src, dst;
u32 info = BCM2835_DMA_WAIT_RESP;
- u32 extra = BCM2835_DMA_INT_EN;
+ u32 extra = 0;
size_t max_len = bcm2835_dma_max_frame_length(c);
size_t frames;
@@ -707,6 +717,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
return NULL;
}
+ if (flags & DMA_PREP_INTERRUPT)
+ extra |= BCM2835_DMA_INT_EN;
+ else
+ period_len = buf_len;
+
/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
@@ -732,6 +747,10 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
dst = c->cfg.dst_addr;
src = buf_addr;
info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+
+ /* non-lite channels can write zeroes w/o accessing memory */
+ if (buf_addr == od->zero_page && !c->is_lite_channel)
+ info |= BCM2835_DMA_S_IGNORE;
}
/* calculate number of frames */
@@ -778,7 +797,10 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
/* stop DMA activity */
if (c->desc) {
- vchan_terminate_vdesc(&c->desc->vd);
+ if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
+ vchan_terminate_vdesc(&c->desc->vd);
+ else
+ vchan_vdesc_fini(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
@@ -831,6 +853,9 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od)
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
}
+
+ dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static const struct of_device_id bcm2835_dma_of_match[] = {
@@ -871,8 +896,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
+ }
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -907,11 +934,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
BIT(DMA_MEM_TO_MEM);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ od->ddev.descriptor_reuse = true;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
platform_set_drvdata(pdev, od);
+ od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
+ PAGE_SIZE, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
+ dev_err(&pdev->dev, "Failed to map zero page\n");
+ return -ENOMEM;
+ }
+
/* Request DMA channel mask from device tree */
if (of_property_read_u32(pdev->dev.of_node,
"brcm,dma-channel-mask",
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7fe9309a876b..cafb1cc065bb 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -92,6 +92,7 @@
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
+#define JZ_SOC_DATA_BREAK_LINKS BIT(4)
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
@@ -355,6 +356,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
void *context)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
struct jz4780_dma_desc *desc;
unsigned int i;
int err;
@@ -375,7 +377,8 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
- if (i != (sg_len - 1)) {
+ if (i != (sg_len - 1) &&
+ !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
			/* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
@@ -664,6 +667,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
struct jz4780_dma_chan *jzchan)
{
+ const unsigned int soc_flags = jzdma->soc_data->flags;
+ struct jz4780_dma_desc *desc = jzchan->desc;
uint32_t dcs;
bool ack = true;
@@ -691,8 +696,11 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
jz4780_dma_begin(jzchan);
} else if (dcs & JZ_DMA_DCS_TT) {
- vchan_cookie_complete(&jzchan->desc->vdesc);
- jzchan->desc = NULL;
+ if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
+ (jzchan->curr_hwdesc + 1 == desc->count)) {
+ vchan_cookie_complete(&desc->vdesc);
+ jzchan->desc = NULL;
+ }
jz4780_dma_begin(jzchan);
} else {
@@ -878,10 +886,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get IRQ: %d\n", ret);
+ if (ret < 0)
return ret;
- }
jzdma->irq = ret;
@@ -992,6 +998,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
+ .flags = JZ_SOC_DATA_BREAK_LINKS,
};
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 3d22ae8dca72..a2cadfa2e6d7 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -72,6 +72,10 @@ static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
@@ -110,6 +114,7 @@ struct dmatest_params {
bool norandom;
int alignment;
unsigned int transfer_size;
+ bool polled;
};
/**
@@ -651,7 +656,10 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ if (params->polled)
+ flags = DMA_CTRL_ACK;
+ else
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
while (!kthread_should_stop()
@@ -780,8 +788,10 @@ static int dmatest_func(void *data)
}
done->done = false;
- tx->callback = dmatest_callback;
- tx->callback_param = done;
+ if (!params->polled) {
+ tx->callback = dmatest_callback;
+ tx->callback_param = done;
+ }
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -790,12 +800,22 @@ static int dmatest_func(void *data)
msleep(100);
goto error_unmap_continue;
}
- dma_async_issue_pending(chan);
- wait_event_freezable_timeout(thread->done_wait, done->done,
- msecs_to_jiffies(params->timeout));
+ if (params->polled) {
+ status = dma_sync_wait(chan, cookie);
+ dmaengine_terminate_sync(chan);
+ if (status == DMA_COMPLETE)
+ done->done = true;
+ } else {
+ dma_async_issue_pending(chan);
+
+ wait_event_freezable_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
- status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ status = dma_async_is_tx_complete(chan, cookie, NULL,
+ NULL);
+ }
if (!done->done) {
result("test timed out", total_tests, src->off, dst->off,
@@ -1065,6 +1085,7 @@ static void add_threaded_test(struct dmatest_info *info)
params->norandom = norandom;
params->alignment = alignment;
params->transfer_size = transfer_size;
+ params->polled = polled;
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
struct dw_edma_region {
phys_addr_t paddr;
- dma_addr_t vaddr;
+ void __iomem *vaddr;
size_t sz;
};
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
- dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+ dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
- dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+ dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
- &dw->rg_region.vaddr, &dw->rg_region.paddr);
+ dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- &dw->ll_region.vaddr, &dw->ll_region.paddr);
+ dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- &dw->dt_region.vaddr, &dw->dt_region.paddr);
+ dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+ return dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_v0_lli *lli;
- struct dw_edma_v0_llp *llp;
+ struct dw_edma_v0_lli __iomem *lli;
+ struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
- u64 sar, dar, addr;
int j;
- lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+ lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
- sar = cpu_to_le64(child->sar);
- SET_LL(&lli[i].sar_low, lower_32_bits(sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+ SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+ SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
/* DAR - low, high */
- dar = cpu_to_le64(child->dar);
- SET_LL(&lli[i].dar_low, lower_32_bits(dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+ SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+ SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
i++;
}
- llp = (struct dw_edma_v0_llp *)&lli[i];
+ llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
- addr = cpu_to_le64(chunk->ll_region.paddr);
- SET_LL(&llp->llp_low, lower_32_bits(addr));
- SET_LL(&llp->llp_high, upper_32_bits(addr));
+ SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
- u64 llp;
dw_edma_v0_core_write_chunk(chunk);
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
- llp = cpu_to_le64(chunk->ll_region.paddr);
- SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
- SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+ SET_CH(dw, chan->dir, chan->id, llp_low,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH(dw, chan->dir, chan->id, llp_high,
+ upper_32_bits(chunk->ll_region.paddr));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
- ((dma_addr_t *)&regs->name)
+ ((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
@@ -40,36 +40,37 @@
static struct dentry *base_dir;
static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
static struct {
- void *start;
- void *end;
+ void __iomem *start;
+ void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
- char name[24];
+ const char *name;
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
+ void __iomem *reg = (void __force __iomem *)data;
if (dw->mode == EDMA_MODE_LEGACY &&
- data >= (void *)&regs->type.legacy.ch) {
- void *ptr = (void *)&regs->type.legacy.ch;
+ reg >= (void __iomem *)&regs->type.legacy.ch) {
+ void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= data && data < lim[0][ch].end) {
- ptr += (data - lim[0][ch].start);
+ if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+ ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= data && data < lim[1][ch].end) {
- ptr += (data - lim[1][ch].start);
+ if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+ ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
@@ -86,7 +87,7 @@ legacy_sel_wr:
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
- *val = readl(data);
+ *val = readl(reg);
}
return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
{
int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw)
return;
- regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+ regs = dw->rg_region.vaddr;
if (!regs)
return;
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 63ed895c09aa..b6f06699e91a 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
-dw_dmac-objs := platform.o
+dw_dmac-y := platform.o
+dw_dmac-$(CONFIG_ACPI) += acpi.o
+dw_dmac-$(CONFIG_OF) += of.o
obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-objs := pci.o
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
new file mode 100644
index 000000000000..f6e8d55b4f6e
--- /dev/null
+++ b/drivers/dma/dw/acpi.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013,2019 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+#include "internal.h"
+
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct acpi_dma_spec *dma_spec = param;
+ struct dw_dma_slave slave = {
+ .dma_dev = dma_spec->dev,
+ .src_id = dma_spec->slave_id,
+ .dst_id = dma_spec->slave_id,
+ .m_master = 0,
+ .p_master = 1,
+ };
+
+ return dw_dma_filter(chan, &slave);
+}
+
+void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ if (!has_acpi_companion(dev))
+ return;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
+}
+
+void dw_dma_acpi_controller_free(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+
+ if (!has_acpi_companion(dev))
+ return;
+
+ acpi_dma_controller_free(dev);
+}
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 1dd7a4e6dd23..2e1c52eefdeb 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -23,4 +23,55 @@ int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
+#ifdef CONFIG_ACPI
+void dw_dma_acpi_controller_register(struct dw_dma *dw);
+void dw_dma_acpi_controller_free(struct dw_dma *dw);
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
+struct platform_device;
+
+#ifdef CONFIG_OF
+struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev);
+void dw_dma_of_controller_register(struct dw_dma *dw);
+void dw_dma_of_controller_free(struct dw_dma *dw);
+#else
+static inline struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+static inline void dw_dma_of_controller_register(struct dw_dma *dw) {}
+static inline void dw_dma_of_controller_free(struct dw_dma *dw) {}
+#endif
+
+struct dw_dma_chip_pdata {
+ const struct dw_dma_platform_data *pdata;
+ int (*probe)(struct dw_dma_chip *chip);
+ int (*remove)(struct dw_dma_chip *chip);
+ struct dw_dma_chip *chip;
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
+ .probe = dw_dma_probe,
+ .remove = dw_dma_remove,
+};
+
+static const struct dw_dma_platform_data idma32_pdata = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+ .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
+ .pdata = &idma32_pdata,
+ .probe = idma32_dma_probe,
+ .remove = idma32_dma_remove,
+};
+
#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
new file mode 100644
index 000000000000..9e27831dee32
--- /dev/null
+++ b/drivers/dma/dw/of.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
+ */
+
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_slave slave = {
+ .dma_dev = dw->dma.dev,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ slave.src_id = dma_spec->args[0];
+ slave.dst_id = dma_spec->args[0];
+ slave.m_master = dma_spec->args[1];
+ slave.p_master = dma_spec->args[2];
+
+ if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.m_master >= dw->pdata->nr_masters ||
+ slave.p_master >= dw->pdata->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_filter, &slave);
+}
+
+struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
+ u32 nr_masters;
+ u32 nr_channels;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(np, "dma-masters", &nr_masters))
+ return NULL;
+ if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &nr_channels))
+ return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->nr_masters = nr_masters;
+ pdata->nr_channels = nr_channels;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+ } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
+ pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
+ }
+
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
+ if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
+ if (tmp > CHAN_PROTCTL_MASK)
+ return NULL;
+ pdata->protctl = tmp;
+ }
+
+ return pdata;
+}
+
+void dw_dma_of_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ int ret;
+
+ if (!dev->of_node)
+ return;
+
+ ret = of_dma_controller_register(dev->of_node, dw_dma_of_xlate, dw);
+ if (ret)
+ dev_err(dev, "could not register of_dma_controller\n");
+}
+
+void dw_dma_of_controller_free(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+
+ if (!dev->of_node)
+ return;
+
+ of_dma_controller_free(dev->of_node);
+}
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 8de87b15a988..cf6e8ec4c0ff 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -12,38 +12,10 @@
#include "internal.h"
-struct dw_dma_pci_data {
- const struct dw_dma_platform_data *pdata;
- int (*probe)(struct dw_dma_chip *chip);
- int (*remove)(struct dw_dma_chip *chip);
- struct dw_dma_chip *chip;
-};
-
-static const struct dw_dma_pci_data dw_pci_data = {
- .probe = dw_dma_probe,
- .remove = dw_dma_remove,
-};
-
-static const struct dw_dma_platform_data idma32_pdata = {
- .nr_channels = 8,
- .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
- .chan_priority = CHAN_PRIORITY_ASCENDING,
- .block_size = 131071,
- .nr_masters = 1,
- .data_width = {4},
- .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
-};
-
-static const struct dw_dma_pci_data idma32_pci_data = {
- .pdata = &idma32_pdata,
- .probe = idma32_dma_probe,
- .remove = idma32_dma_remove,
-};
-
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
- struct dw_dma_pci_data *data;
+ const struct dw_dma_chip_pdata *drv_data = (void *)pid->driver_data;
+ struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
int ret;
@@ -95,7 +67,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
static void dw_pci_remove(struct pci_dev *pdev)
{
- struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = pci_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
int ret;
@@ -108,7 +80,7 @@ static void dw_pci_remove(struct pci_dev *pdev)
static int dw_pci_suspend_late(struct device *dev)
{
- struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_disable(chip);
@@ -116,7 +88,7 @@ static int dw_pci_suspend_late(struct device *dev)
static int dw_pci_resume_early(struct device *dev)
{
- struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_enable(chip);
@@ -130,29 +102,29 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_dma_chip_pdata },
/* BayTrail */
- { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
- { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_dma_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Merrifield */
- { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
+ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_chip_pdata },
/* Braswell */
- { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
- { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_dma_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
- /* Elkhart Lake iDMA 32-bit (OSE DMA) */
- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
+ /* Elkhart Lake iDMA 32-bit (PSE DMA) */
+ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
/* Haswell */
- { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Broadwell */
- { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_dma_chip_pdata },
{ }
};
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 382dfd9e9600..c90c798e5ec3 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -17,163 +17,28 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
#include "internal.h"
#define DRV_NAME "dw_dmac"
-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_slave slave = {
- .dma_dev = dw->dma.dev,
- };
- dma_cap_mask_t cap;
-
- if (dma_spec->args_count != 3)
- return NULL;
-
- slave.src_id = dma_spec->args[0];
- slave.dst_id = dma_spec->args[0];
- slave.m_master = dma_spec->args[1];
- slave.p_master = dma_spec->args[2];
-
- if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
- slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
- slave.m_master >= dw->pdata->nr_masters ||
- slave.p_master >= dw->pdata->nr_masters))
- return NULL;
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- /* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_filter, &slave);
-}
-
-#ifdef CONFIG_ACPI
-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
-{
- struct acpi_dma_spec *dma_spec = param;
- struct dw_dma_slave slave = {
- .dma_dev = dma_spec->dev,
- .src_id = dma_spec->slave_id,
- .dst_id = dma_spec->slave_id,
- .m_master = 0,
- .p_master = 1,
- };
-
- return dw_dma_filter(chan, &slave);
-}
-
-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
-{
- struct device *dev = dw->dma.dev;
- struct acpi_dma_filter_info *info;
- int ret;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- dma_cap_zero(info->dma_cap);
- dma_cap_set(DMA_SLAVE, info->dma_cap);
- info->filter_fn = dw_dma_acpi_filter;
-
- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
- info);
- if (ret)
- dev_err(dev, "could not register acpi_dma_controller\n");
-}
-#else /* !CONFIG_ACPI */
-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
-#endif /* !CONFIG_ACPI */
-
-#ifdef CONFIG_OF
-static struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
- u32 nr_masters;
- u32 nr_channels;
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
- if (of_property_read_u32(np, "dma-masters", &nr_masters))
- return NULL;
- if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
- return NULL;
-
- if (of_property_read_u32(np, "dma-channels", &nr_channels))
- return NULL;
- if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return NULL;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- pdata->nr_masters = nr_masters;
- pdata->nr_channels = nr_channels;
-
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
-
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
-
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
- } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
- }
-
- if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = mb[tmp];
- } else {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = 1;
- }
-
- if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
- if (tmp > CHAN_PROTCTL_MASK)
- return NULL;
- pdata->protctl = tmp;
- }
-
- return pdata;
-}
-#else
-static inline struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
static int dw_probe(struct platform_device *pdev)
{
+ const struct dw_dma_chip_pdata *match;
+ struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
- struct resource *mem;
- const struct dw_dma_platform_data *pdata;
int err;
+ match = device_get_match_data(dev);
+ if (!match)
+ return -ENODEV;
+
+ data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -182,8 +47,7 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
@@ -191,13 +55,16 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
- pdata = dev_get_platdata(dev);
- if (!pdata)
- pdata = dw_dma_parse_dt(pdev);
+ if (!data->pdata)
+ data->pdata = dev_get_platdata(dev);
+ if (!data->pdata)
+ data->pdata = dw_dma_parse_dt(pdev);
chip->dev = dev;
chip->id = pdev->id;
- chip->pdata = pdata;
+ chip->pdata = data->pdata;
+
+ data->chip = chip;
chip->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(chip->clk))
@@ -208,22 +75,15 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- err = dw_dma_probe(chip);
+ err = data->probe(chip);
if (err)
goto err_dw_dma_probe;
- platform_set_drvdata(pdev, chip);
+ platform_set_drvdata(pdev, data);
- if (pdev->dev.of_node) {
- err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_of_xlate, chip->dw);
- if (err)
- dev_err(&pdev->dev,
- "could not register of_dma_controller\n");
- }
+ dw_dma_of_controller_register(chip->dw);
- if (ACPI_HANDLE(&pdev->dev))
- dw_dma_acpi_controller_register(chip->dw);
+ dw_dma_acpi_controller_register(chip->dw);
return 0;
@@ -235,12 +95,18 @@ err_dw_dma_probe:
static int dw_remove(struct platform_device *pdev)
{
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = data->chip;
+ int ret;
+
+ dw_dma_acpi_controller_free(chip->dw);
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
+ dw_dma_of_controller_free(chip->dw);
+
+ ret = data->remove(chip);
+ if (ret)
+ dev_warn(chip->dev, "can't remove device properly: %d\n", ret);
- dw_dma_remove(chip);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
@@ -249,7 +115,8 @@ static int dw_remove(struct platform_device *pdev)
static void dw_shutdown(struct platform_device *pdev)
{
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = data->chip;
/*
* We have to call do_dw_dma_disable() to stop any ongoing transfer. On
@@ -269,7 +136,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,dma-spear1340" },
+ { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
@@ -277,9 +144,15 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
- { "INTL9C60", 0 },
- { "80862286", 0 },
- { "808622C0", 0 },
+ { "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
+ { "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
+ { "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
+
+ /* Elkhart Lake iDMA 32-bit (PSE DMA) */
+ { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
+
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
@@ -289,7 +162,8 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
static int dw_suspend_late(struct device *dev)
{
- struct dw_dma_chip *chip = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
+ struct dw_dma_chip *chip = data->chip;
do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
@@ -299,7 +173,8 @@ static int dw_suspend_late(struct device *dev)
static int dw_resume_early(struct device *dev)
{
- struct dw_dma_chip *chip = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
+ struct dw_dma_chip *chip = data->chip;
int ret;
ret = clk_prepare_enable(chip->clk);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 44d92c34dec3..b1a7ca91701a 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -90,8 +90,21 @@ static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
iowrite8(val8, addr + off);
}
+static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
+ u32 off, u32 slot, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = EDMAMUX_CHCFG_ENBL << 24 | slot;
+ else
+ val = EDMAMUX_CHCFG_DIS;
+
+ iowrite32(val, addr + off * 4);
+}
+
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
- unsigned int slot, bool enable)
+ unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
@@ -103,7 +116,10 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
+ if (fsl_chan->edma->drvdata->version == v3)
+ mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
+ else
+ mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 4e175560292c..5eaa2902ed39 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -125,6 +125,7 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
+ char chan_name[16];
};
struct fsl_edma_desc {
@@ -139,11 +140,13 @@ struct fsl_edma_desc {
enum edma_version {
v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch Coldfire */
+ v3, /* 32ch, i.mx7ulp */
};
struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
+ bool has_dmaclk;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -153,6 +156,7 @@ struct fsl_edma_engine {
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
+ struct clk *dmaclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index fcbad6ae954a..b626c06ac2e0 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -20,6 +20,13 @@
#include "fsl-edma-common.h"
+static void fsl_edma_synchronize(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ vchan_synchronize(&fsl_chan->vchan);
+}
+
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -125,16 +132,12 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
int ret;
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
- if (fsl_edma->txirq < 0) {
- dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+ if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
- }
fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
- if (fsl_edma->errirq < 0) {
- dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+ if (fsl_edma->errirq < 0)
return fsl_edma->errirq;
- }
if (fsl_edma->txirq == fsl_edma->errirq) {
ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
@@ -162,6 +165,49 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int
+fsl_edma2_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int i, ret, irq;
+ int count;
+
+ count = platform_irq_count(pdev);
+ dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
+ if (count <= 2) {
+ dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
+ return -EINVAL;
+ }
+ /*
+	 * 16 independent channel interrupts + 1 error interrupt on i.mx7ulp.
+	 * 2 channels share one interrupt, for example, ch0/ch16, ch1/ch17...
+	 * For now, simply request the irq without IRQF_SHARED flag, since 16
+	 * channels are enough on i.mx7ulp, whose M4 domain owns some peripherals.
+ */
+ for (i = 0; i < count; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return -ENXIO;
+
+ sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
+
+ /* The last IRQ is for eDMA err */
+ if (i == count - 1)
+ ret = devm_request_irq(&pdev->dev, irq,
+ fsl_edma_err_handler,
+ 0, "eDMA2-ERR", fsl_edma);
+ else
+ ret = devm_request_irq(&pdev->dev, irq,
+ fsl_edma_tx_handler, 0,
+ fsl_edma->chans[i].chan_name,
+ fsl_edma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void fsl_edma_irq_exit(
struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -187,8 +233,16 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata imx7ulp_data = {
+ .version = v3,
+ .dmamuxs = 1,
+ .has_dmaclk = true,
+ .setup_irq = fsl_edma2_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -236,6 +290,20 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
+ if (drvdata->has_dmaclk) {
+ fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+ if (IS_ERR(fsl_edma->dmaclk)) {
+ dev_err(&pdev->dev, "Missing DMA block clock.\n");
+ return PTR_ERR(fsl_edma->dmaclk);
+ }
+
+ ret = clk_prepare_enable(fsl_edma->dmaclk);
+ if (ret) {
+ dev_err(&pdev->dev, "DMA clk block failed.\n");
+ return ret;
+ }
+ }
+
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
@@ -302,6 +370,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+ fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 8e341c0c13bc..06664fbd2d91 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -758,10 +758,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
- if (fsl_qdma->error_irq < 0) {
- dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+ if (fsl_qdma->error_irq < 0)
return fsl_qdma->error_irq;
- }
ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
fsl_qdma_error_handler, 0,
@@ -776,11 +774,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->queue_irq[i] =
platform_get_irq_byname(pdev, irq_name);
- if (fsl_qdma->queue_irq[i] < 0) {
- dev_err(&pdev->dev,
- "Can't get qdma queue %d irq.\n", i);
+ if (fsl_qdma->queue_irq[i] < 0)
return fsl_qdma->queue_irq[i];
- }
ret = devm_request_irq(&pdev->dev,
fsl_qdma->queue_irq[i],
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ /* Fall through */
case FSL_DMA_IP_83XX:
chan->toggle_ext_start = fsl_chan_toggle_ext_start;
chan->set_src_loop_size = fsl_chan_set_src_loop_size;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 00a089e24150..5c0fb3134825 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -556,6 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
* We fall-through here intentionally, since a 2D transfer is
* similar to MEMCPY just adding the 2D slot configuration.
*/
+ /* Fall through */
case IMXDMA_DESC_MEMCPY:
imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a01f4b5d793c..9ba74ab7e912 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1886,10 +1886,6 @@ static int sdma_init(struct sdma_engine *sdma)
sdma->context_phys = ccb_phys +
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
- /* Zero-out the CCB structures array just allocated */
- memset(sdma->channel_control, 0,
- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
-
/* disable all channels */
for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 70fd8454d002..be61c32a876f 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -286,8 +286,7 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return NULL;
dca = alloc_dca_provider(&ioat_dca_ops,
- sizeof(*ioatdca)
- + (sizeof(struct ioat_dca_slot) * slots));
+ struct_size(ioatdca, req_slots, slots));
if (!dca)
return NULL;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6c0143670d9..a3f942a6a946 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -16,13 +16,13 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/prefetch.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>
-#include <mach/adma.h>
-
+#include "iop-adma.h"
#include "dmaengine.h"
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
@@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
- "this_desc: %#x next_desc: %#x ack: %d\n",
+ "this_desc: %pad next_desc: %#llx ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
- iter->async_tx.phys, iop_desc_get_next_desc(iter),
+ &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
prefetch(_iter);
prefetch(&_iter->async_tx);
@@ -306,9 +306,9 @@ retry:
int i;
dev_dbg(iop_chan->device->common.dev,
"allocated slot: %d "
- "(desc %p phys: %#x) slots_per_op %d\n",
+ "(desc %p phys: %#llx) slots_per_op %d\n",
iter->idx, iter->hw_desc,
- iter->async_tx.phys, slots_per_op);
+ (u64)iter->async_tx.phys, slots_per_op);
/* pre-ack all but the last descriptor */
if (num_slots != slots_per_op)
@@ -364,13 +364,11 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
struct iop_adma_desc_slot *grp_start, *old_chain_tail;
int slot_cnt;
- int slots_per_op;
dma_cookie_t cookie;
dma_addr_t next_dma;
grp_start = sw_desc->group_head;
slot_cnt = grp_start->slot_cnt;
- slots_per_op = grp_start->slots_per_op;
spin_lock_bh(&iop_chan->lock);
cookie = dma_cookie_assign(tx);
@@ -516,7 +514,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
return NULL;
BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
__func__, len);
spin_lock_bh(&iop_chan->lock);
@@ -549,7 +547,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
@@ -582,7 +580,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
if (unlikely(!len))
return NULL;
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
@@ -620,7 +618,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
if (dmaf_p_disabled_continue(flags))
@@ -683,7 +681,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
return NULL;
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h
new file mode 100644
index 000000000000..c499c9578f00
--- /dev/null
+++ b/drivers/dma/iop-adma.h
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006, Intel Corporation.
+ */
+#ifndef _ADMA_H
+#define _ADMA_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/platform_data/dma-iop32x.h>
+
+/* Memory copy units */
+#define DMA_CCR(chan) (chan->mmr_base + 0x0)
+#define DMA_CSR(chan) (chan->mmr_base + 0x4)
+#define DMA_DAR(chan) (chan->mmr_base + 0xc)
+#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
+#define DMA_PADR(chan) (chan->mmr_base + 0x14)
+#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
+#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
+#define DMA_BCR(chan) (chan->mmr_base + 0x20)
+#define DMA_DCR(chan) (chan->mmr_base + 0x24)
+
+/* Application accelerator unit */
+#define AAU_ACR(chan) (chan->mmr_base + 0x0)
+#define AAU_ASR(chan) (chan->mmr_base + 0x4)
+#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
+#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
+#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
+#define AAU_DAR(chan) (chan->mmr_base + 0x20)
+#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
+#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
+#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
+#define AAU_EDCR0_IDX 8
+#define AAU_EDCR1_IDX 17
+#define AAU_EDCR2_IDX 26
+
+struct iop3xx_aau_desc_ctrl {
+ unsigned int int_en:1;
+ unsigned int blk1_cmd_ctrl:3;
+ unsigned int blk2_cmd_ctrl:3;
+ unsigned int blk3_cmd_ctrl:3;
+ unsigned int blk4_cmd_ctrl:3;
+ unsigned int blk5_cmd_ctrl:3;
+ unsigned int blk6_cmd_ctrl:3;
+ unsigned int blk7_cmd_ctrl:3;
+ unsigned int blk8_cmd_ctrl:3;
+ unsigned int blk_ctrl:2;
+ unsigned int dual_xor_en:1;
+ unsigned int tx_complete:1;
+ unsigned int zero_result_err:1;
+ unsigned int zero_result_en:1;
+ unsigned int dest_write_en:1;
+};
+
+struct iop3xx_aau_e_desc_ctrl {
+ unsigned int reserved:1;
+ unsigned int blk1_cmd_ctrl:3;
+ unsigned int blk2_cmd_ctrl:3;
+ unsigned int blk3_cmd_ctrl:3;
+ unsigned int blk4_cmd_ctrl:3;
+ unsigned int blk5_cmd_ctrl:3;
+ unsigned int blk6_cmd_ctrl:3;
+ unsigned int blk7_cmd_ctrl:3;
+ unsigned int blk8_cmd_ctrl:3;
+ unsigned int reserved2:7;
+};
+
+struct iop3xx_dma_desc_ctrl {
+ unsigned int pci_transaction:4;
+ unsigned int int_en:1;
+ unsigned int dac_cycle_en:1;
+ unsigned int mem_to_mem_en:1;
+ unsigned int crc_data_tx_en:1;
+ unsigned int crc_gen_en:1;
+ unsigned int crc_seed_dis:1;
+ unsigned int reserved:21;
+ unsigned int crc_tx_complete:1;
+};
+
+struct iop3xx_desc_dma {
+ u32 next_desc;
+ union {
+ u32 pci_src_addr;
+ u32 pci_dest_addr;
+ u32 src_addr;
+ };
+ union {
+ u32 upper_pci_src_addr;
+ u32 upper_pci_dest_addr;
+ };
+ union {
+ u32 local_pci_src_addr;
+ u32 local_pci_dest_addr;
+ u32 dest_addr;
+ };
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_dma_desc_ctrl desc_ctrl_field;
+ };
+ u32 crc_addr;
+};
+
+struct iop3xx_desc_aau {
+ u32 next_desc;
+ u32 src[4];
+ u32 dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ union {
+ u32 src_addr;
+ u32 e_desc_ctrl;
+ struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+ } src_edc[31];
+};
+
+struct iop3xx_aau_gfmr {
+ unsigned int gfmr1:8;
+ unsigned int gfmr2:8;
+ unsigned int gfmr3:8;
+ unsigned int gfmr4:8;
+};
+
+struct iop3xx_desc_pq_xor {
+ u32 next_desc;
+ u32 src[3];
+ union {
+ u32 data_mult1;
+ struct iop3xx_aau_gfmr data_mult1_field;
+ };
+ u32 dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ union {
+ u32 src_addr;
+ u32 e_desc_ctrl;
+ struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+ u32 data_multiplier;
+ struct iop3xx_aau_gfmr data_mult_field;
+ u32 reserved;
+ } src_edc_gfmr[19];
+};
+
+struct iop3xx_desc_dual_xor {
+ u32 next_desc;
+ u32 src0_addr;
+ u32 src1_addr;
+ u32 h_src_addr;
+ u32 d_src_addr;
+ u32 h_dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ u32 d_dest_addr;
+};
+
+union iop3xx_desc {
+ struct iop3xx_desc_aau *aau;
+ struct iop3xx_desc_dma *dma;
+ struct iop3xx_desc_pq_xor *pq_xor;
+ struct iop3xx_desc_dual_xor *dual_xor;
+ void *ptr;
+};
+
+/* No support for p+q operations */
+static inline int
+iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr, unsigned char coef)
+{
+ BUG();
+}
+
+static inline int
+iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+ BUG();
+}
+
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+ dma_addr_t *src)
+{
+ BUG();
+}
+
+static inline int iop_adma_get_max_xor(void)
+{
+ return 32;
+}
+
+static inline int iop_adma_get_max_pq(void)
+{
+ BUG();
+ return 0;
+}
+
+static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
+{
+ int id = chan->device->id;
+
+ switch (id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return __raw_readl(DMA_DAR(chan));
+ case AAU_ID:
+ return __raw_readl(AAU_ADAR(chan));
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+ u32 next_desc_addr)
+{
+ int id = chan->device->id;
+
+ switch (id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ __raw_writel(next_desc_addr, DMA_NDAR(chan));
+ break;
+ case AAU_ID:
+ __raw_writel(next_desc_addr, AAU_ANDAR(chan));
+ break;
+ }
+
+}
+
+#define IOP_ADMA_STATUS_BUSY (1 << 10)
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
+
+static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
+}
+
+static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
+ int num_slots)
+{
+ /* num_slots will only ever be 1, 2, 4, or 8 */
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+ *slots_per_op = 1;
+ return 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+ *slots_per_op = 1;
+ return 1;
+}
+
+static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ static const char slot_count_table[] = {
+ 1, 1, 1, 1, /* 01 - 04 */
+ 2, 2, 2, 2, /* 05 - 08 */
+ 4, 4, 4, 4, /* 09 - 12 */
+ 4, 4, 4, 4, /* 13 - 16 */
+ 8, 8, 8, 8, /* 17 - 20 */
+ 8, 8, 8, 8, /* 21 - 24 */
+ 8, 8, 8, 8, /* 25 - 28 */
+ 8, 8, 8, 8, /* 29 - 32 */
+ };
+ *slots_per_op = slot_count_table[src_cnt - 1];
+ return *slots_per_op;
+}
+
+static inline int
+iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return iop_chan_memcpy_slot_count(0, slots_per_op);
+ case AAU_ID:
+ return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+ if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
+ return slot_cnt;
+
+ len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+ while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+ len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+ slot_cnt += *slots_per_op;
+ }
+
+ slot_cnt += *slots_per_op;
+
+ return slot_cnt;
+}
+
+/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
+ * descriptors
+ */
+static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+ if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
+ return slot_cnt;
+
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ slot_cnt += *slots_per_op;
+ }
+
+ slot_cnt += *slots_per_op;
+
+ return slot_cnt;
+}
+
+static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return hw_desc.dma->byte_count;
+ case AAU_ID:
+ return hw_desc.aau->byte_count;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/* translate the src_idx to a descriptor word index */
+static inline int __desc_idx(int src_idx)
+{
+ static const int desc_idx_table[] = { 0, 0, 0, 0,
+ 0, 1, 2, 3,
+ 5, 6, 7, 8,
+ 9, 10, 11, 12,
+ 14, 15, 16, 17,
+ 18, 19, 20, 21,
+ 23, 24, 25, 26,
+ 27, 28, 29, 30,
+ };
+
+ return desc_idx_table[src_idx];
+}
+
+static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ int src_idx)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return hw_desc.dma->src_addr;
+ case AAU_ID:
+ break;
+ default:
+ BUG();
+ }
+
+ if (src_idx < 4)
+ return hw_desc.aau->src[src_idx];
+ else
+ return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
+}
+
+static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
+ int src_idx, dma_addr_t addr)
+{
+ if (src_idx < 4)
+ hw_desc->src[src_idx] = addr;
+ else
+ hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
+}
+
+static inline void
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+ struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_dma_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.mem_to_mem_en = 1;
+ u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+ hw_desc->upper_pci_src_addr = 0;
+ hw_desc->crc_addr = 0;
+}
+
+static inline void
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
+ u_desc_ctrl.field.dest_write_en = 1;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline u32
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+ unsigned long flags)
+{
+ int i, shift;
+ u32 edcr;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ switch (src_cnt) {
+ case 25 ... 32:
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ edcr = 0;
+ shift = 1;
+ for (i = 24; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
+ src_cnt = 24;
+ /* fall through */
+ case 17 ... 24:
+ if (!u_desc_ctrl.field.blk_ctrl) {
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ }
+ edcr = 0;
+ shift = 1;
+ for (i = 16; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
+ src_cnt = 16;
+ /* fall through */
+ case 9 ... 16:
+ if (!u_desc_ctrl.field.blk_ctrl)
+ u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+ edcr = 0;
+ shift = 1;
+ for (i = 8; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
+ src_cnt = 8;
+ /* fall through */
+ case 2 ... 8:
+ shift = 1;
+ for (i = 0; i < src_cnt; i++) {
+ u_desc_ctrl.value |= (1 << shift);
+ shift += 3;
+ }
+
+ if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+ u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+ }
+
+ u_desc_ctrl.field.dest_write_en = 1;
+ u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+
+ return u_desc_ctrl.value;
+}
+
+static inline void
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
+}
+
+/* return the number of operations */
+static inline int
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+ int i, j;
+
+ hw_desc = desc->hw_desc;
+
+ for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, j++) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
+ u_desc_ctrl.field.dest_write_en = 0;
+ u_desc_ctrl.field.zero_result_en = 1;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ iter->desc_ctrl = u_desc_ctrl.value;
+
+ /* for the subsequent descriptors preserve the store queue
+ * and chain them together
+ */
+ if (i) {
+ prev_hw_desc =
+ iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
+ prev_hw_desc->next_desc =
+ (u32) (desc->async_tx.phys + (i << 5));
+ }
+ }
+
+ return j;
+}
+
+static inline void
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ switch (src_cnt) {
+ case 25 ... 32:
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 17 ... 24:
+ if (!u_desc_ctrl.field.blk_ctrl) {
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ }
+ hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 9 ... 16:
+ if (!u_desc_ctrl.field.blk_ctrl)
+ u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 1 ... 8:
+ if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+ u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+ }
+
+ u_desc_ctrl.field.dest_write_en = 0;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ u32 byte_count)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ hw_desc.dma->byte_count = byte_count;
+ break;
+ case AAU_ID:
+ hw_desc.aau->byte_count = byte_count;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void
+iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ iop_desc_init_memcpy(desc, 1);
+ hw_desc.dma->byte_count = 0;
+ hw_desc.dma->dest_addr = 0;
+ hw_desc.dma->src_addr = 0;
+ break;
+ case AAU_ID:
+ iop_desc_init_null_xor(desc, 2, 1);
+ hw_desc.aau->byte_count = 0;
+ hw_desc.aau->dest_addr = 0;
+ hw_desc.aau->src[0] = 0;
+ hw_desc.aau->src[1] = 0;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void
+iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+ int slots_per_op = desc->slots_per_op;
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int i = 0;
+
+ if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ hw_desc->byte_count = len;
+ } else {
+ do {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ i += slots_per_op;
+ } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
+
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iter->byte_count = len;
+ }
+}
+
+static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ dma_addr_t addr)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ hw_desc.dma->dest_addr = addr;
+ break;
+ case AAU_ID:
+ hw_desc.aau->dest_addr = addr;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
+ dma_addr_t addr)
+{
+ struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+ hw_desc->src_addr = addr;
+}
+
+static inline void
+iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr)
+{
+
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ int i;
+
+ for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+ }
+}
+
+static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
+ int src_idx, dma_addr_t addr)
+{
+
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ int i;
+
+ for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+ }
+}
+
+static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
+ u32 next_desc_addr)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ iop_paranoia(hw_desc.dma->next_desc);
+ hw_desc.dma->next_desc = next_desc_addr;
+}
+
+static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+ return hw_desc.dma->next_desc;
+}
+
+static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+ hw_desc.dma->next_desc = 0;
+}
+
+static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
+ u32 val)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ hw_desc->src[0] = val;
+}
+
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
+
+ iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
+ return desc_ctrl.zero_result_err << SUM_CHECK_P;
+}
+
+static inline void iop_chan_append(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl;
+
+ dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+ dma_chan_ctrl |= 0x2;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
+{
+ return __raw_readl(DMA_CSR(chan));
+}
+
+static inline void iop_chan_disable(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+ dma_chan_ctrl &= ~1;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_chan_enable(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+
+ dma_chan_ctrl |= 1;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ status &= (1 << 9);
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ status &= (1 << 8);
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
+ break;
+ case AAU_ID:
+ status &= (1 << 5);
+ break;
+ default:
+ BUG();
+ }
+
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline int
+iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return test_bit(5, &status);
+}
+
+static inline int
+iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(2, &status);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(3, &status);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(1, &status);
+ default:
+ return 0;
+ }
+}
+#endif /* _ADMA_H */
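
The slot accounting added above (the iop3xx_aau_xor_slot_count() table plus the chunking loop in iop_chan_xor_slot_count()) boils down to "slots_per_op from the source-count table, times the number of hardware-max-sized chunks". A standalone, user-space sketch of that arithmetic for checking; XOR_MAX_BYTE_COUNT mirrors IOP_ADMA_XOR_MAX_BYTE_COUNT and xor_slot_count() is a renamed stand-in, not part of the patch:

	#include <stdio.h>
	#include <stddef.h>

	#define XOR_MAX_BYTE_COUNT	(16 * 1024 * 1024)

	/* same shape as the driver's table: slots needed per operation */
	static const char slot_count_table[] = {
		1, 1, 1, 1,			/* 1 - 4 sources */
		2, 2, 2, 2,			/* 5 - 8 */
		4, 4, 4, 4, 4, 4, 4, 4,		/* 9 - 16 */
		8, 8, 8, 8, 8, 8, 8, 8,		/* 17 - 24 */
		8, 8, 8, 8, 8, 8, 8, 8,		/* 25 - 32 */
	};

	static int xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
	{
		int slot_cnt;

		*slots_per_op = slot_count_table[src_cnt - 1];
		slot_cnt = *slots_per_op;

		/* one extra group of slots per additional hardware-max chunk */
		while (len > XOR_MAX_BYTE_COUNT) {
			len -= XOR_MAX_BYTE_COUNT;
			slot_cnt += *slots_per_op;
		}
		return slot_cnt;
	}

	int main(void)
	{
		int spo;

		/* 40 MiB XOR of 5 sources: 3 chunks, 2 slots each -> 6 */
		printf("%d\n", xor_slot_count((size_t)40 << 20, 5, &spo));
		return 0;
	}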
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 546995c20876..f40051d6aecb 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -547,10 +547,8 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
vchan_init(&c->vc, &mtkd->ddev);
rc = platform_get_irq(pdev, i);
- if (rc < 0) {
- dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
+ if (rc < 0)
goto err_no_dma;
- }
c->irq = rc;
}
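
The mtk-uart-apdma hunk above follows a pattern repeated throughout this series: platform_get_irq() prints its own error message on failure, so callers only propagate the negative return value. A hedged sketch of the resulting probe-time idiom (demo_probe is an illustrative name, not a function from the patch):

	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;	/* the core already logged the failure */

		/* request the interrupt and finish setting up the device ... */
		return 0;
	}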
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index fa5dab481203..e3850f04f676 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -33,7 +33,6 @@
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
-#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
@@ -50,7 +49,6 @@
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
-#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
@@ -261,16 +259,15 @@ void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
/* Configure threshold of number of descriptors, and enable timer */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
- reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
- reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
+ reg |= MV_XOR_V2_DONE_IMSG_THRD;
reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
/* Configure Timer Threshold */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
- reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
- MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
- reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+ reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
+ reg |= MV_XOR_V2_TIMER_THRD;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 1163af2ba4a3..6cce9ef61b29 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1922,9 +1922,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
if (ret) {
dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n",
__func__, __LINE__);
- dma_free_coherent(pl330->ddma.dev,
+ dma_free_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
+ pl330->mcode_cpu, pl330->mcode_bus,
+ DMA_ATTR_PRIVILEGED);
return ret;
}
@@ -2003,9 +2004,9 @@ static void pl330_del(struct pl330_dmac *pl330)
/* Free DMAC resources */
dmac_free_threads(pl330);
- dma_free_coherent(pl330->ddma.dev,
+ dma_free_attrs(pl330->ddma.dev,
pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
- pl330->mcode_bus);
+ pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}
/* forward declaration */
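
The pl330 hunks switch the microcode buffer from dma_free_coherent() to dma_free_attrs() so that the free carries the same DMA_ATTR_PRIVILEGED attribute as the allocation. A minimal sketch of that pairing rule, with made-up mc_buf_* names:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *mc_buf_alloc(struct device *dev, size_t size, dma_addr_t *bus)
	{
		/* privileged mapping: intended for a privileged DMA master only */
		return dma_alloc_attrs(dev, size, bus, GFP_KERNEL,
				       DMA_ATTR_PRIVILEGED);
	}

	static void mc_buf_free(struct device *dev, size_t size, void *cpu,
				dma_addr_t bus)
	{
		/* attributes must match the ones used at allocation time */
		dma_free_attrs(dev, size, cpu, bus, DMA_ATTR_PRIVILEGED);
	}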
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 5bf8b145c427..bb4471e84e48 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -749,7 +749,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->tre_ring)
return NULL;
- memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
lldev->nr_tres = nr_tres;
@@ -769,7 +768,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->evre_ring)
return NULL;
- memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
/* the EVRE ring has to be EVRE_SIZE aligned */
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 3022d66e7a33..806ca02c52d7 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -183,7 +183,6 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "irq resources not found\n");
rc = irq;
goto out;
}
@@ -388,7 +387,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
- of_node_get(child);
new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child, true);
/*
@@ -396,9 +394,14 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
* platforms with or without MSI support.
*/
of_msi_configure(&new_pdev->dev, child);
- of_node_put(child);
}
+
+ kfree(res);
+
+ return ret;
+
out:
+ of_node_put(child);
kfree(res);
return ret;
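
The hidma_mgmt hunk drops the per-iteration get/put because the child iterator already holds a reference to the current node; an explicit of_node_put() is only needed when the loop is left early. A simplified outline of that shape (demo_* names and the create helper are hypothetical):

	#include <linux/of.h>

	/* hypothetical stand-in for creating a platform device per child */
	static int demo_create_child(struct device_node *child);

	static int demo_populate(struct device_node *np)
	{
		struct device_node *child;
		int ret = 0;

		for_each_available_child_of_node(np, child) {
			ret = demo_create_child(child);
			if (ret) {
				/* drop the reference held by the iterator */
				of_node_put(child);
				break;
			}
		}

		return ret;
	}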
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index ad30f3d2c7f6..43da8eeb18ef 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1237,11 +1237,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
phy->host = s3cdma;
phy->irq = platform_get_irq(pdev, i);
- if (phy->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
- i, phy->irq);
+ if (phy->irq < 0)
continue;
- }
ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
0, pdev->name, phy);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 9c41a4e42575..3993ab65c62c 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
* @iomem: remapped I/O memory base
* @n_channels: number of available channels
* @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
* @modules: bitmask of client modules in use
*/
struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
+ unsigned int channels_mask;
DECLARE_BITMAP(modules, 256);
};
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
for (i = 0; i < dmac->n_channels; ++i) {
struct rcar_dmac_chan *chan = &dmac->channels[i];
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1744,10 +1749,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
- if (rchan->irq < 0) {
- dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ if (rchan->irq < 0)
return -ENODEV;
- }
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
@@ -1776,6 +1779,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return 0;
}
+#define RCAR_DMAC_MAX_CHANNELS 32
+
static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -1787,12 +1792,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return ret;
}
- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ /* The hardware and driver don't support more than 32 bits in CHCLR */
+ if (dmac->n_channels <= 0 ||
+ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
dev_err(dev, "invalid number of channels %u\n",
dmac->n_channels);
return -EINVAL;
}
+ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+
return 0;
}
@@ -1802,7 +1811,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
- unsigned int channels_offset = 0;
struct dma_device *engine;
struct rcar_dmac *dmac;
struct resource *mem;
@@ -1831,10 +1839,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
* level we can't disable it selectively, so ignore channel 0 for now if
* the device is part of an IOMMU group.
*/
- if (device_iommu_mapped(&pdev->dev)) {
- dmac->n_channels--;
- channels_offset = 1;
- }
+ if (device_iommu_mapped(&pdev->dev))
+ dmac->channels_mask &= ~BIT(0);
dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1898,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);
for (i = 0; i < dmac->n_channels; ++i) {
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
- i + channels_offset);
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
goto error;
}
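
The rcar-dmac change replaces the old "shift all channels down by one" workaround with a channel bitmask: build GENMASK(n_channels - 1, 0), clear bit 0 when the DMAC sits behind an IOMMU, and skip cleared channels everywhere. A condensed sketch of that bookkeeping with illustrative demo_* names:

	#include <linux/bits.h>
	#include <linux/types.h>

	struct demo_dmac {
		unsigned int n_channels;
		u32 channels_mask;
	};

	static void demo_init_mask(struct demo_dmac *dmac, bool behind_iommu)
	{
		dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);

		/* channel 0 is skipped when the device is IOMMU-mapped */
		if (behind_iommu)
			dmac->channels_mask &= ~BIT(0);
	}

	static void demo_for_each_channel(struct demo_dmac *dmac)
	{
		unsigned int i;

		for (i = 0; i < dmac->n_channels; i++) {
			if (!(dmac->channels_mask & BIT(i)))
				continue;
			/* probe / reset / stop channel i here */
		}
	}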
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 17063aaf51bc..b218a013c260 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -717,10 +717,8 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
- if (uchan->irq < 0) {
- dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ if (uchan->irq < 0)
return -ENODEV;
- }
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index baac476c8622..525dc7338fe3 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct dma_slave_config *slave_cfg = &schan->slave_cfg;
dma_addr_t src = 0, dst = 0;
+ dma_addr_t start_src = 0, start_dst = 0;
struct sprd_dma_desc *sdesc;
struct scatterlist *sg;
u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = sg_dma_address(sg);
}
+ if (!i) {
+ start_src = src;
+ start_dst = dst;
+ }
+
/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}
- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
- dir, flags, slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
+ start_dst, len, dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index a3ee0f6bb664..67087dbe2f9f 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -771,10 +771,8 @@ static int st_fdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdev);
fdev->irq = platform_get_irq(pdev, 0);
- if (fdev->irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq resource\n");
+ if (fdev->irq < 0)
return -EINVAL;
- }
ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
dev_name(&pdev->dev), fdev);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index ef4d109e7189..5989b0893521 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -243,12 +243,6 @@ static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
writel_relaxed(val, dmadev->base + reg);
}
-static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
-{
- return kzalloc(sizeof(struct stm32_dma_desc) +
- sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
-}
-
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
enum dma_slave_buswidth width)
{
@@ -853,7 +847,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
return NULL;
}
- desc = stm32_dma_alloc_desc(sg_len);
+ desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -954,7 +948,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
num_periods = buf_len / period_len;
- desc = stm32_dma_alloc_desc(num_periods);
+ desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -989,7 +983,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
int i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
- desc = stm32_dma_alloc_desc(num_sgs);
+ desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -1366,12 +1360,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
ret = platform_get_irq(pdev, i);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "No irq resource for chan %d\n", i);
+ if (ret < 0)
goto err_unregister;
- }
chan->irq = ret;
ret = devm_request_irq(&pdev->dev, chan->irq,
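
The stm32-dma hunks replace the open-coded "header size plus N times element size" allocation with struct_size(), which sizes a structure ending in a flexible array member and checks for overflow. A minimal sketch of the pattern; the demo_* structure layout is illustrative, not the driver's real one:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_sg_req {
		u32 len;
		/* ... per-segment registers ... */
	};

	struct demo_desc {
		u32 num_sgs;
		struct demo_sg_req sg_req[];	/* flexible array member */
	};

	static struct demo_desc *demo_alloc_desc(u32 num_sgs)
	{
		struct demo_desc *desc;

		/* header plus num_sgs trailing entries, overflow-checked */
		desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
		if (desc)
			desc->num_sgs = num_sgs;

		return desc;
	}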
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index b552949da14b..3c89bd39e096 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -185,8 +185,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
- count = device_property_read_u32_array(&pdev->dev, "dma-masters",
- NULL, 0);
+ count = device_property_count_u32(&pdev->dev, "dma-masters");
if (count < 0) {
dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
return -ENODEV;
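
device_property_count_u32(), used above in place of the NULL-buffer read, simply reports how many u32 elements a named property holds, or a negative error. A short usage sketch mirroring the dmamux hunk; the demo_ wrapper name is illustrative:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/property.h>

	static int demo_count_masters(struct device *dev)
	{
		int count = device_property_count_u32(dev, "dma-masters");

		if (count < 0) {
			dev_err(dev, "Can't get DMA master(s) node\n");
			return -ENODEV;
		}

		return count;
	}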
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..5838311cf990 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
goto exit;
}
@@ -1555,8 +1555,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
nr_requests);
}
- count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
- NULL, 0);
+ count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
if (count < 0)
count = 0;
@@ -1638,10 +1637,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
dmadev->irq = platform_get_irq(pdev, 0);
- if (dmadev->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (dmadev->irq < 0)
return dmadev->irq;
- }
ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
0, dev_name(&pdev->dev), dmadev);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1f80568b2613..e397a50058c8 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1132,10 +1132,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index ed5b68dcfe50..06cd7f867f7c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1251,10 +1251,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
return PTR_ERR(sdc->base);
sdc->irq = platform_get_irq(pdev, 0);
- if (sdc->irq < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ if (sdc->irq < 0)
return sdc->irq;
- }
sdc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdc->clk)) {
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 79e9593815f1..3a45079d11ec 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -152,6 +152,7 @@ struct tegra_dma_sg_req {
bool last_sg;
struct list_head node;
struct tegra_dma_desc *dma_desc;
+ unsigned int words_xferred;
};
/*
@@ -496,6 +497,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
+ nsg_req->words_xferred = 0;
tegra_dma_resume(tdc);
}
@@ -511,6 +513,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
typeof(*sg_req), node);
tegra_dma_start(tdc, sg_req);
sg_req->configured = true;
+ sg_req->words_xferred = 0;
tdc->busy = true;
}
@@ -638,6 +641,8 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
dma_desc->cb_count++;
+ sgreq->words_xferred = 0;
+
/* If not last req then put at end of pending list */
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
@@ -797,6 +802,65 @@ skip_dma_stop:
return 0;
}
+static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req)
+{
+ unsigned long status, wcount = 0;
+
+ if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
+ return 0;
+
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+
+ if (!tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = status;
+
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
+ return sg_req->req_len;
+
+ wcount = get_current_xferred_count(tdc, sg_req, wcount);
+
+ if (!wcount) {
+ /*
+ * If wcount wasn't ever polled for this SG before, then
+ * simply assume that transfer hasn't started yet.
+ *
+ * Otherwise it's the end of the transfer.
+ *
+ * The alternative would be to poll the status register
+ * until the EOC bit is set or wcount goes up, because EOC is
+ * set only after the last burst completes, at which point the
+ * counter is 4 bytes short of the actual transfer size. In
+ * cyclic mode the counter value wraps around before EOC is
+ * set(!), so we can't easily distinguish the start of a
+ * transfer from its end.
+ */
+ if (sg_req->words_xferred)
+ wcount = sg_req->req_len - 4;
+
+ } else if (wcount < sg_req->words_xferred) {
+ /*
+ * This case will never happen for a non-cyclic transfer.
+ *
+ * For a cyclic transfer, although it is possible for the
+ * next transfer to have already started (resetting the word
+ * count), this case should still not happen because we should
+ * have detected that the EOC bit is set and hence the transfer
+ * was completed.
+ */
+ WARN_ON_ONCE(1);
+
+ wcount = sg_req->req_len - 4;
+ } else {
+ sg_req->words_xferred = wcount;
+ }
+
+ return wcount;
+}
+
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
@@ -806,6 +870,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
enum dma_status ret;
unsigned long flags;
unsigned int residual;
+ unsigned int bytes = 0;
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -825,6 +890,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
dma_desc = sg_req->dma_desc;
if (dma_desc->txd.cookie == cookie) {
+ bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
ret = dma_desc->dma_status;
goto found;
}
@@ -836,7 +902,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
found:
if (dma_desc && txstate) {
residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
+ ((dma_desc->bytes_transferred + bytes) %
dma_desc->bytes_requested);
dma_set_residue(txstate, residual);
}
@@ -1441,12 +1507,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- /*
- * XXX The hardware appears to support
- * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
- * only used by this driver during tegra_dma_terminate_all()
- */
- tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
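
With the hunks above, the residue reported by tegra_dma_tx_status() is the requested size minus everything transferred so far, where "so far" now includes the in-flight word count of the current segment; the modulo keeps cyclic transfers in range. A plain restatement of that arithmetic with assumed variable names:

	static unsigned int demo_residue(unsigned int bytes_requested,
					 unsigned int bytes_transferred,
					 unsigned int bytes_in_flight)
	{
		/* completed segments plus the partial one, wrapped for cyclic */
		return bytes_requested -
		       ((bytes_transferred + bytes_in_flight) % bytes_requested);
	}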
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..5f8adf5c1f20 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -42,12 +42,8 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
-#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24)
-#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16)
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16)
#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
@@ -64,14 +60,10 @@
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
-#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
- TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
- TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
-#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
- TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
- TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
@@ -712,7 +704,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
return chan;
}
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +736,7 @@ clk_disable:
return 0;
}
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index ad2f0a4cd6a4..f255056696ee 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
- if (ret)
+ if (ret) {
+ kfree(rsv_events);
return ret;
+ }
for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..ba7c4f07fcd6 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -133,6 +133,17 @@
#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+/*
+ * 64-bit array registers are split into two 32-bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * bit 5 of the channel number selects the array index (0/1)
+ * bits 0-4 (0x1f) give the bit offset within the register
+ */
+#define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
+
/* PaRAM slots are laid out like this */
struct edmacc_param {
u32 opt;
@@ -169,6 +180,7 @@ struct edma_desc {
struct list_head node;
enum dma_transfer_direction direction;
int cyclic;
+ bool polled;
int absync;
int pset_nr;
struct edma_chan *echan;
@@ -412,12 +424,6 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
-static inline void edma_set_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- set_bit(offset + (len - 1), p);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -441,15 +447,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (enable) {
- edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
- BIT(channel & 0x1f));
- edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
} else {
- edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
}
}
@@ -587,26 +592,26 @@ static void edma_start(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (!echan->hw_triggered) {
/* EDMA channels without event association */
- dev_dbg(ecc->dev, "ESR%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ESR, j));
- edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
} else {
/* EDMA channel with event association */
- dev_dbg(ecc->dev, "ER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ER, j));
+ dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ER, idx));
/* Clear any pending event or error */
- edma_write_array(ecc, EDMA_ECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_shadow0_write_array(ecc, SH_EESR, j, mask);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
}
}
@@ -614,19 +619,19 @@ static void edma_stop(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_EECR, j, mask);
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* clear possibly pending completion interrupt */
- edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
/* REVISIT: consider guarding against inappropriate event
* chaining by overwriting with dummy_paramset.
@@ -640,45 +645,49 @@ static void edma_stop(struct edma_chan *echan)
static void edma_pause(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EECR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EESR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
static void edma_trigger_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
- dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
- edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
}
static void edma_clean_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+ edma_read_array(ecc, EDMA_EMR, idx));
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
/* Clear the corresponding EMR bits */
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
@@ -708,7 +717,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
int channel = EDMA_CHAN_SLOT(echan->ch_num);
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
/* ensure no events are pending */
edma_stop(echan);
@@ -1011,6 +1021,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
src_cidx = cidx;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = src_addr;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
@@ -1211,8 +1222,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
edesc->pset[0].param.opt |= ITCCHEN;
if (nslots == 1) {
- /* Enable transfer complete interrupt */
- edesc->pset[0].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[0].param.opt |= TCINTEN;
} else {
/* Enable transfer complete chaining for the first slot */
edesc->pset[0].param.opt |= TCCHEN;
@@ -1239,9 +1251,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
edesc->pset[1].param.opt |= ITCCHEN;
- edesc->pset[1].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[1].param.opt |= TCINTEN;
}
+ if (!(tx_flags & DMA_PREP_INTERRUPT))
+ edesc->polled = true;
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -1721,7 +1738,11 @@ static u32 edma_residue(struct edma_desc *edesc)
int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
struct edma_chan *echan = edesc->echan;
struct edma_pset *pset = edesc->pset;
- dma_addr_t done, pos;
+ dma_addr_t done, pos, pos_old;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
+ int event_reg;
int i;
/*
@@ -1734,16 +1755,20 @@ static u32 edma_residue(struct edma_desc *edesc)
* "pos" may represent a transfer request that is still being
* processed by the EDMACC or EDMATC. We will busy wait until
* any one of the situations occurs:
- * 1. the DMA hardware is idle
- * 2. a new transfer request is setup
+ * 1. the event is no longer pending for the channel
+ * 2. the position gets updated
* 3. we hit the loop limit
*/
- while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
- /* check if a new transfer request is setup */
- if (edma_get_position(echan->ecc,
- echan->slot[0], dst) != pos) {
+ if (is_slave_direction(edesc->direction))
+ event_reg = SH_ER;
+ else
+ event_reg = SH_ESR;
+
+ pos_old = pos;
+ while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
+ pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+ if (pos != pos_old)
break;
- }
if (!--loop_count) {
dev_dbg_ratelimited(echan->vchan.chan.device->dev,
@@ -1769,6 +1794,12 @@ static u32 edma_residue(struct edma_desc *edesc)
}
/*
+ * If the position is 0, then the EDMA has loaded the closing dummy
+ * slot and the transfer is completed
+ */
+ if (!pos)
+ return 0;
+ /*
* For SG operation we catch up with the last processed
* status.
*/
@@ -1796,19 +1827,46 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct edma_chan *echan = to_edma_chan(chan);
- struct virt_dma_desc *vdesc;
+ struct dma_tx_state txstate_tmp;
enum dma_status ret;
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
+
+ if (ret == DMA_COMPLETE)
return ret;
+ /* Provide a dummy dma_tx_state for completion checking */
+ if (!txstate)
+ txstate = &txstate_tmp;
+
spin_lock_irqsave(&echan->vchan.lock, flags);
- if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
txstate->residue = edma_residue(echan->edesc);
- else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
- txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ } else {
+ struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
+ cookie);
+
+ if (vdesc)
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ else
+ txstate->residue = 0;
+ }
+
+ /*
+ * Mark the cookie completed if the residue is 0 for non-cyclic
+ * transfers
+ */
+ if (ret != DMA_COMPLETE && !txstate->residue &&
+ echan->edesc && echan->edesc->polled &&
+ echan->edesc->vdesc.tx.cookie == cookie) {
+ edma_stop(echan);
+ vchan_cookie_complete(&echan->edesc->vdesc);
+ echan->edesc = NULL;
+ edma_execute(echan);
+ ret = DMA_COMPLETE;
+ }
+
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
@@ -2185,11 +2243,13 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
}
#endif
+static bool edma_filter_fn(struct dma_chan *chan, void *param);
+
static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off, ln;
+ int i, off;
const s16 (*rsv_slots)[2];
const s16 (*xbar_chans)[2];
int irq;
@@ -2273,21 +2333,22 @@ static int edma_probe(struct platform_device *pdev)
ecc->default_queue = info->default_queue;
- for (i = 0; i < ecc->num_slots; i++)
- edma_write_slot(ecc, i, &dummy_paramset);
-
if (info->rsv) {
/* Set the reserved slots in inuse list */
rsv_slots = info->rsv->rsv_slots;
if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++) {
- off = rsv_slots[i][0];
- ln = rsv_slots[i][1];
- edma_set_bits(off, ln, ecc->slot_inuse);
- }
+ for (i = 0; rsv_slots[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
+ rsv_slots[i][1]);
}
}
+ for (i = 0; i < ecc->num_slots; i++) {
+ /* Reset only unused - not reserved - paRAM slots */
+ if (!test_bit(i, ecc->slot_inuse))
+ edma_write_slot(ecc, i, &dummy_paramset);
+ }
+
/* Clear the xbar mapped channels in unused list */
xbar_chans = info->xbar_chans;
if (xbar_chans) {
@@ -2366,11 +2427,10 @@ static int edma_probe(struct platform_device *pdev)
edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
queue_priority_mapping[i][1]);
- for (i = 0; i < ecc->num_region; i++) {
- edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
- edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
- edma_write_array(ecc, EDMA_QRAE, i, 0x0);
- }
+ edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
+ edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
+ edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
+
ecc->info = info;
/* Init the dma device and channels */
@@ -2482,8 +2542,9 @@ static int edma_pm_resume(struct device *dev)
for (i = 0; i < ecc->num_channels; i++) {
if (echan[i].alloced) {
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
- BIT(i & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0,
+ EDMA_REG_ARRAY_INDEX(i),
+ EDMA_CHANNEL_BIT(i));
edma_setup_interrupt(&echan[i], true);
@@ -2524,7 +2585,7 @@ static struct platform_driver edma_tptc_driver = {
},
};
-bool edma_filter_fn(struct dma_chan *chan, void *param)
+static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
bool match = false;
@@ -2539,7 +2600,6 @@ bool edma_filter_fn(struct dma_chan *chan, void *param)
}
return match;
}
-EXPORT_SYMBOL(edma_filter_fn);
static int edma_init(void)
{
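
The EDMA_REG_ARRAY_INDEX()/EDMA_CHANNEL_BIT() helpers introduced above encode the same ">> 5" and "& 0x1f" arithmetic the driver used to open-code at every call site. A standalone, user-space check of the mapping with shortened names:

	#include <stdio.h>

	#define REG_ARRAY_INDEX(ch)	((ch) >> 5)	/* 0 for events 0-31, 1 for 32-63 */
	#define CHANNEL_BIT(ch)		(1u << ((ch) & 0x1f))

	int main(void)
	{
		/* channel 37 lives in register 1, bit 5 (mask 0x00000020) */
		printf("idx=%d bit=0x%08x\n", REG_ARRAY_INDEX(37), CHANNEL_BIT(37));
		return 0;
	}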
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..6b6ba238b81a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -91,6 +91,7 @@ struct omap_desc {
bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
+ bool polled;
int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
int16_t ei; /* for double indexing */
@@ -202,6 +203,7 @@ static const unsigned es_bytes[] = {
[CSDP_DATA_TYPE_32] = 4,
};
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
.filter_fn = omap_dma_filter_fn,
};
@@ -812,31 +814,22 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- struct virt_dma_desc *vd;
enum dma_status ret;
unsigned long flags;
+ struct omap_desc *d = NULL;
ret = dma_cookie_status(chan, cookie, txstate);
-
- if (!c->paused && c->running) {
- uint32_t ccr = omap_dma_chan_read(c, CCR);
- /*
- * The channel is no longer active, set the return value
- * accordingly
- */
- if (!(ccr & CCR_ENABLE))
- ret = DMA_COMPLETE;
- }
-
- if (ret == DMA_COMPLETE || !txstate)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
- vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
- txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
- } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
- struct omap_desc *d = c->desc;
+ if (c->desc && c->desc->vd.tx.cookie == cookie)
+ d = c->desc;
+
+ if (!txstate)
+ goto out;
+
+ if (d) {
dma_addr_t pos;
if (d->dir == DMA_MEM_TO_DEV)
@@ -848,10 +841,31 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
txstate->residue = omap_dma_desc_size_pos(d, pos);
} else {
- txstate->residue = 0;
+ struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
+
+ if (vd)
+ txstate->residue = omap_dma_desc_size(
+ to_omap_dma_desc(&vd->tx));
+ else
+ txstate->residue = 0;
}
- if (ret == DMA_IN_PROGRESS && c->paused)
+
+out:
+ if (ret == DMA_IN_PROGRESS && c->paused) {
ret = DMA_PAUSED;
+ } else if (d && d->polled && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active; set the return value
+ * accordingly and mark the descriptor as completed
+ */
+ if (!(ccr & CCR_ENABLE)) {
+ ret = DMA_COMPLETE;
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -1178,7 +1192,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ d->cicr |= CICR_FRAME_IE;
+ else
+ d->polled = true;
d->csdp = data_type;
@@ -1234,7 +1251,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (src_icg) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
d->ei = 1;
- d->fi = src_icg;
+ d->fi = src_icg + 1;
} else if (xt->src_inc) {
d->ccr |= CCR_SRC_AMODE_POSTINC;
d->fi = 0;
@@ -1249,7 +1266,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (dst_icg) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
sg->ei = 1;
- sg->fi = dst_icg;
+ sg->fi = dst_icg + 1;
} else if (xt->dst_inc) {
d->ccr |= CCR_DST_AMODE_POSTINC;
sg->fi = 0;
@@ -1540,8 +1557,10 @@ static int omap_dma_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
- if (rc)
+ if (rc) {
+ omap_dma_free(od);
return rc;
+ }
}
if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
@@ -1637,7 +1656,7 @@ static struct platform_driver omap_dma_driver = {
},
};
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &omap_dma_driver.driver) {
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -1651,7 +1670,6 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
return false;
}
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
static int omap_dma_init(void)
{
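
Both the edma and omap-dma memcpy paths above now honour DMA_PREP_INTERRUPT: when the client does not ask for a completion interrupt, the descriptor is marked polled and tx_status() detects completion instead. A bare-bones sketch of that decision; the DEMO_* flag and opt field are illustrative stand-ins for TCINTEN/CICR_FRAME_IE:

	#include <linux/bits.h>
	#include <linux/dmaengine.h>
	#include <linux/types.h>

	struct demo_desc {
		bool polled;
		u32 opt;
	};

	#define DEMO_COMPLETION_IRQ	BIT(20)	/* stand-in completion-IRQ enable bit */

	static void demo_prep_memcpy(struct demo_desc *d, unsigned long tx_flags)
	{
		if (tx_flags & DMA_PREP_INTERRUPT)
			d->opt |= DEMO_COMPLETION_IRQ;	/* fire an IRQ when done */
		else
			d->polled = true;	/* caller will poll tx_status() */
	}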
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index ec65a7430dc4..fde54687856b 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -354,11 +354,8 @@ static int uniphier_mdmac_chan_init(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq(pdev, chan_id);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n",
- chan_id);
+ if (irq < 0)
return irq;
- }
irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
chan_id);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 957c269ce1fd..cd60fa6d6750 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1678,20 +1678,16 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
/* Get DMA error interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get Error IRQ\n");
+ if (irq <= 0)
return -ENXIO;
- }
pdma->err_irq = irq;
/* Get DMA Rx ring descriptor interrupts for all DMA channels */
for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
irq = platform_get_irq(pdev, i);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
+ if (irq <= 0)
return -ENXIO;
- }
pdma->chan[i - 1].rx_irq = irq;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 200c04ce5b0e..2a2603bfb918 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -510,4 +510,11 @@ config EDAC_ASPEED
First, ECC must be configured in the bootloader. Then, this driver
will expose error counters via the EDAC kernel framework.
+config EDAC_BLUEFIELD
+ tristate "Mellanox BlueField Memory ECC"
+ depends on ARM64 && ((MELLANOX_PLATFORM && ACPI) || COMPILE_TEST)
+ help
+ Support for error detection and correction on the
+ Mellanox BlueField SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 165ca65e1a3a..d265ff9311f0 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
+obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index c2e693e34d43..fbda4b876afd 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -222,7 +222,6 @@ static unsigned long get_total_mem(void)
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -1170,6 +1169,24 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
return 0;
}
+/*********************** SDRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+
+static const struct edac_device_prv_data s10_sdramecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_S10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_S10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_S10_ECC_EN,
+ .ecc_en_ofst = ALTR_S10_ECC_CTRL_SDRAM_OFST,
+ .ce_set_mask = ALTR_S10_ECC_TSERRA,
+ .ue_set_mask = ALTR_S10_ECC_TDERRA,
+ .set_err_ofst = ALTR_S10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+#endif /* CONFIG_EDAC_ALTERA_SDRAM */
+
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
@@ -1759,6 +1776,9 @@ static const struct of_device_id altr_edac_a10_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_SDMMC
{ .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
#endif
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ { .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data },
+#endif
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
@@ -1866,6 +1886,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
+ unsigned long bits;
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1875,7 +1896,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+ bits = irq_status;
+ for_each_set_bit(bit, &bits, 32) {
irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
if (irq)
generic_handle_irq(irq);
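The hunk above copies the 32-bit irq_status into a local unsigned long before walking it, because for_each_set_bit() operates on an unsigned long array and casting the address of a u32 can read past the variable on 64-bit kernels. A minimal sketch of the same idiom, with read_status() and handle_bit() as hypothetical helpers:

	u32 status = read_status();	/* hypothetical 32-bit register read */
	unsigned long bits = status;	/* widen before iterating */
	int bit;

	for_each_set_bit(bit, &bits, 32)
		handle_bit(bit);	/* hypothetical per-bit handler */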
@@ -1889,6 +1911,10 @@ static int validate_parent_available(struct device_node *np)
struct device_node *parent;
int ret = 0;
+ /* SDRAM must be present for Linux (implied parent) */
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ return 0;
+
/* Ensure parent device is enabled if parent node exists */
parent = of_parse_phandle(np, "altr,ecc-parent", 0);
if (parent && !of_device_is_available(parent))
@@ -1898,6 +1924,22 @@ static int validate_parent_available(struct device_node *np)
return ret;
}
+static int get_s10_sdram_edac_resource(struct device_node *np,
+ struct resource *res)
+{
+ struct device_node *parent;
+ int ret;
+
+ parent = of_parse_phandle(np, "altr,sdr-syscon", 0);
+ if (!parent)
+ return -ENODEV;
+
+ ret = of_address_to_resource(parent, 0, res);
+ of_node_put(parent);
+
+ return ret;
+}
+
static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
struct device_node *np)
{
@@ -1925,7 +1967,11 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
return -ENOMEM;
- rc = of_address_to_resource(np, 0, &res);
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ rc = get_s10_sdram_edac_resource(np, &res);
+ else
+ rc = of_address_to_resource(np, 0, &res);
+
if (rc < 0) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: no resource address\n", ecc_name);
@@ -2231,13 +2277,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ of_device_is_compatible(child, "altr,sdram-edac-s10") ||
+#endif
of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
- else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
- (of_device_is_compatible(child, "altr,sdram-edac-s10")))
+ else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 55654cc4bcdf..3727e72c8c2e 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -289,6 +289,29 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_ECC_CTRL_SDRAM_OFST 0x00
+#define ALTR_S10_ECC_EN BIT(0)
+
+#define ALTR_S10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_S10_ECC_ERRINTENS_OFST 0x14
+#define ALTR_S10_ECC_ERRINTENR_OFST 0x18
+#define ALTR_S10_ECC_SERRINTEN BIT(0)
+
+#define ALTR_S10_ECC_INTMODE_OFST 0x1C
+#define ALTR_S10_ECC_INTMODE BIT(0)
+
+#define ALTR_S10_ECC_INTSTAT_OFST 0x20
+#define ALTR_S10_ECC_SERRPENA BIT(0)
+#define ALTR_S10_ECC_DERRPENA BIT(8)
+#define ALTR_S10_ECC_ERRPENA_MASK (ALTR_S10_ECC_SERRPENA | \
+ ALTR_S10_ECC_DERRPENA)
+
+#define ALTR_S10_ECC_INTTEST_OFST 0x24
+#define ALTR_S10_ECC_TSERRA BIT(0)
+#define ALTR_S10_ECC_TDERRA BIT(8)
+#define ALTR_S10_ECC_TSERRB BIT(16)
+#define ALTR_S10_ECC_TDERRB BIT(24)
+
#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
@@ -300,7 +323,7 @@ struct altr_sdram_mc_data {
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
-#define S10_DBE_IRQ_MASK 0x3FE
+#define S10_DBE_IRQ_MASK 0x3FFFE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 873437be86d9..c1d4536ae466 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -788,51 +788,45 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
-/*
- * The Address Mask should be a contiguous set of bits in the non-interleaved
- * case. So to check for CS interleaving, find the most- and least-significant
- * bits of the mask, generate a contiguous bitmask, and compare the two.
- */
-static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+#define CS_EVEN_PRIMARY BIT(0)
+#define CS_ODD_PRIMARY BIT(1)
+#define CS_EVEN_SECONDARY BIT(2)
+#define CS_ODD_SECONDARY BIT(3)
+
+#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
+#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
+
+static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
- u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
- u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
- u32 test_mask = GENMASK(msb, lsb);
+ int cs_mode = 0;
- edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+ if (csrow_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_PRIMARY;
- return mask ^ test_mask;
+ if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_PRIMARY;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_SECONDARY;
+
+ return cs_mode;
}
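To illustrate the encoding: for a given DIMM slot, a single-rank DIMM with only the even chip select enabled yields cs_mode = CS_EVEN_PRIMARY; a conventional dual-rank DIMM with both chip selects enabled yields CS_EVEN_PRIMARY | CS_ODD_PRIMARY; and when the secondary odd chip select is also enabled, the asymmetric dual-rank case, CS_ODD_SECONDARY is OR-ed in as well.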
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
- int dimm, size0, size1, cs0, cs1;
+ int dimm, size0, size1, cs0, cs1, cs_mode;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
- for (dimm = 0; dimm < 4; dimm++) {
- size0 = 0;
+ for (dimm = 0; dimm < 2; dimm++) {
cs0 = dimm * 2;
-
- if (csrow_enabled(cs0, ctrl, pvt))
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
-
- size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt)) {
- /*
- * CS interleaving is only supported if both CSes have
- * the same amount of memory. Because they are
- * interleaved, it will look like both CSes have the
- * full amount of memory. Save the size for both as
- * half the amount we found on CS0, if interleaved.
- */
- if (f17_cs_interleaved(pvt, ctrl, cs1))
- size1 = size0 = (size0 >> 1);
- else
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
- }
+ cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
+
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -942,89 +936,119 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
+ } else if (pvt->fam >= 0x17) {
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 4;
+ pvt->csels[umc].m_cnt = 2;
+ }
+
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
+static void read_umc_base_mask(struct amd64_pvt *pvt)
+{
+ u32 umc_base_reg, umc_base_reg_sec;
+ u32 umc_mask_reg, umc_mask_reg_sec;
+ u32 base_reg, base_reg_sec;
+ u32 mask_reg, mask_reg_sec;
+ u32 *base, *base_sec;
+ u32 *mask, *mask_sec;
+ int cs, umc;
+
+ for_each_umc(umc) {
+ umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+ umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
+
+ for_each_chip_select(cs, umc, pvt) {
+ base = &pvt->csels[umc].csbases[cs];
+ base_sec = &pvt->csels[umc].csbases_sec[cs];
+
+ base_reg = umc_base_reg + (cs * 4);
+ base_reg_sec = umc_base_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+ edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base_sec, base_reg_sec);
+ }
+
+ umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+ umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+
+ for_each_chip_select_mask(cs, umc, pvt) {
+ mask = &pvt->csels[umc].csmasks[cs];
+ mask_sec = &pvt->csels[umc].csmasks_sec[cs];
+
+ mask_reg = umc_mask_reg + (cs * 4);
+ mask_reg_sec = umc_mask_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+ edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask_sec, mask_reg_sec);
+ }
+ }
+}
+
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+ int cs;
prep_chip_selects(pvt);
- if (pvt->umc) {
- base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
- base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
- mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
- mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
- } else {
- base_reg0 = DCSB0;
- base_reg1 = DCSB1;
- mask_reg0 = DCSM0;
- mask_reg1 = DCSM1;
- }
+ if (pvt->umc)
+ return read_umc_base_mask(pvt);
for_each_chip_select(cs, 0, pvt) {
- int reg0 = base_reg0 + (cs * 4);
- int reg1 = base_reg1 + (cs * 4);
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
- cs, *base0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
- if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
- cs, *base1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
-
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = mask_reg0 + (cs * 4);
- int reg1 = mask_reg1 + (cs * 4);
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
- cs, *mask0, reg0);
-
- if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
- cs, *mask1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
}
@@ -1556,18 +1580,58 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
-static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+ u32 addr_mask_orig, addr_mask_deinterleaved;
+ u32 msb, weight, num_zero_bits;
+ int dimm, size = 0;
- /* Each mask is used for every two base addresses. */
- u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+ /* No Chip Selects are enabled. */
+ if (!cs_mode)
+ return size;
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+ /* Requested size of an even CS but none are enabled. */
+ if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+ return size;
- edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+ /* Requested size of an odd CS but none are enabled. */
+ if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+ return size;
+
+ /*
+ * There is one mask per DIMM, and two Chip Selects per DIMM.
+ * CS0 and CS1 -> DIMM0
+ * CS2 and CS3 -> DIMM1
+ */
+ dimm = csrow_nr >> 1;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+ else
+ addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ */
+ msb = fls(addr_mask_orig) - 1;
+ weight = hweight_long(addr_mask_orig);
+ num_zero_bits = msb - weight;
+
+ /* Take the number of zero bits off from the top of the mask. */
+ addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
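A worked example of the arithmetic above, with hypothetical mask values: for a non-interleaved mask of 0xFFFE, fls() gives msb = 15 and hweight_long() gives 15, so num_zero_bits = 0, the deinterleaved mask stays GENMASK(15, 1) = 0xFFFE, and the size is (0xFFFE >> 2) + 1 = 0x4000 kB, i.e. 16 MB. Clearing one interleave bit, e.g. 0xFEFE, drops the weight to 14, the deinterleaved mask becomes GENMASK(14, 1) = 0x7FFE, and the size halves to 8 MB, as expected for an interleaved chip select.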
@@ -2232,7 +2296,7 @@ static struct amd64_family_type family_types[] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
@@ -2241,7 +2305,7 @@ static struct amd64_family_type family_types[] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
@@ -2250,7 +2314,16 @@ static struct amd64_family_type family_types[] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
+ [F17_M70H_CPUS] = {
+ .ctl_name = "F17h_M70h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
};
@@ -2537,13 +2610,6 @@ static void decode_umc_error(int node_id, struct mce *m)
err.channel = find_umc_channel(m);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
- err.err_code = ERR_NORM_ADDR;
- goto log_error;
- }
-
- error_address_to_page_and_offset(sys_addr, &err);
-
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
@@ -2560,6 +2626,13 @@ static void decode_umc_error(int node_id, struct mce *m)
err.csrow = m->synd & 0x7;
+ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
+
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
@@ -2809,10 +2882,12 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
- if (!pvt->umc)
+ if (!pvt->umc) {
csrow_nr >>= 1;
-
- cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ } else {
+ cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
+ }
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
@@ -2824,6 +2899,49 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
return nr_pages;
}
+static int init_csrows_df(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
+ enum dev_type dev_type = DEV_UNKNOWN;
+ struct dimm_info *dimm;
+ int empty = 1;
+ u8 umc, cs;
+
+ if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+ edac_mode = EDAC_S16ECD16ED;
+ dev_type = DEV_X16;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+ edac_mode = EDAC_S8ECD8ED;
+ dev_type = DEV_X8;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+ edac_mode = EDAC_S4ECD4ED;
+ dev_type = DEV_X4;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+ edac_mode = EDAC_SECDED;
+ }
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ empty = 0;
+ dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
+ }
+ }
+
+ return empty;
+}
+
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
@@ -2838,15 +2956,16 @@ static int init_csrows(struct mem_ctl_info *mci)
int nr_pages = 0;
u32 val;
- if (!pvt->umc) {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (pvt->umc)
+ return init_csrows_df(mci);
- pvt->nbcfg = val;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- }
+ pvt->nbcfg = val;
+
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2883,13 +3002,7 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
- if (pvt->umc) {
- if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
- edac_mode = EDAC_S4ECD4ED;
- else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
-
- } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
@@ -3137,12 +3250,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
- u8 i, ecc_en = 1, cpk_en = 1;
+ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
@@ -3150,8 +3266,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (cpk_en)
+ if (!cpk_en)
+ return;
+
+ if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ else if (dev_x16)
+ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+ else
+ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}
@@ -3241,6 +3364,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
+ fam_type = &family_types[F17_M70H_CPUS];
+ pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8f66472f7adc..8c3cda81e619 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,6 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
+#define NUM_CONTROLLERS 8
#define ON true
#define OFF false
@@ -119,6 +120,8 @@
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
/*
* Function 1 - Address Map
@@ -168,7 +171,8 @@
#define DCSM0 0x60
#define DCSM1 0x160
-#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)
#define DRAM_CONTROL 0x78
@@ -258,7 +262,9 @@
/* UMC CH register offsets */
#define UMCCH_BASE_ADDR 0x0
+#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
+#define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_CFG 0x30
#define UMCCH_DIMM_CFG 0x80
#define UMCCH_UMC_CFG 0x100
@@ -285,6 +291,7 @@ enum amd_families {
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
+ F17_M70H_CPUS,
NUM_FAMILIES,
};
@@ -311,9 +318,11 @@ struct dram_range {
/* A DCT chip selects collection */
struct chip_select {
u32 csbases[NUM_CHIPSELECTS];
+ u32 csbases_sec[NUM_CHIPSELECTS];
u8 b_cnt;
u32 csmasks[NUM_CHIPSELECTS];
+ u32 csmasks_sec[NUM_CHIPSELECTS];
u8 m_cnt;
};
@@ -351,8 +360,8 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* one for each DCT */
- struct chip_select csels[2];
+ /* one for each DCT/UMC */
+ struct chip_select csels[NUM_CONTROLLERS];
/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
struct dram_range ranges[DRAM_RANGES];
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
new file mode 100644
index 000000000000..e4736eb37bfb
--- /dev/null
+++ b/drivers/edac/bluefield_edac.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bluefield-specific EDAC driver.
+ *
+ * Copyright (c) 2019 Mellanox Technologies.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define DRIVER_NAME "bluefield-edac"
+
+/*
+ * Mellanox BlueField EMI (External Memory Interface) register definitions.
+ */
+
+#define MLXBF_ECC_CNT 0x340
+#define MLXBF_ECC_CNT__SERR_CNT GENMASK(15, 0)
+#define MLXBF_ECC_CNT__DERR_CNT GENMASK(31, 16)
+
+#define MLXBF_ECC_ERR 0x348
+#define MLXBF_ECC_ERR__SECC BIT(0)
+#define MLXBF_ECC_ERR__DECC BIT(16)
+
+#define MLXBF_ECC_LATCH_SEL 0x354
+#define MLXBF_ECC_LATCH_SEL__START BIT(24)
+
+#define MLXBF_ERR_ADDR_0 0x358
+
+#define MLXBF_ERR_ADDR_1 0x37c
+
+#define MLXBF_SYNDROM 0x35c
+#define MLXBF_SYNDROM__DERR BIT(0)
+#define MLXBF_SYNDROM__SERR BIT(1)
+#define MLXBF_SYNDROM__SYN GENMASK(25, 16)
+
+#define MLXBF_ADD_INFO 0x364
+#define MLXBF_ADD_INFO__ERR_PRANK GENMASK(9, 8)
+
+#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
+#define MLXBF_EDAC_ERROR_GRAIN 8
+
+/*
+ * Request MLNX_SIP_GET_DIMM_INFO
+ *
+ * Retrieve information about DIMM on a certain slot.
+ *
+ * Call register usage:
+ * a0: MLNX_SIP_GET_DIMM_INFO
+ * a1: (Memory controller index) << 16 | (Dimm index in memory controller)
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: MLXBF_DIMM_INFO defined below describing the DIMM.
+ * a1-3: not used.
+ */
+#define MLNX_SIP_GET_DIMM_INFO 0x82000008
+
+/* Format for the SMC response about the memory information */
+#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
+#define MLXBF_DIMM_INFO__IS_RDIMM BIT(16)
+#define MLXBF_DIMM_INFO__IS_LRDIMM BIT(17)
+#define MLXBF_DIMM_INFO__IS_NVDIMM BIT(18)
+#define MLXBF_DIMM_INFO__RANKS GENMASK_ULL(23, 21)
+#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
+
+struct bluefield_edac_priv {
+ int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
+ void __iomem *emi_base;
+ int dimm_per_mc;
+};
+
+static u64 smc_call1(u64 smc_op, u64 smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
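Following the calling convention documented above, the memory controller and slot indices are packed into a1 and the result fields are pulled out of a0 with FIELD_GET(); a minimal illustration with hypothetical indices:

	/* Query DIMM 1 on memory controller 0 (indices are examples only). */
	u64 info = smc_call1(MLNX_SIP_GET_DIMM_INFO, (0 << 16) | 1);
	u64 size_gb = FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, info);
	u64 ranks = FIELD_GET(MLXBF_DIMM_INFO__RANKS, info);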
+
+/*
+ * Gather the ECC information from the External Memory Interface registers
+ * and report it to the edac handler.
+ */
+static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
+ int error_cnt,
+ int is_single_ecc)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 dram_additional_info, err_prank, edea0, edea1;
+ u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
+ enum hw_event_mc_err_type ecc_type;
+ u64 ecc_dimm_addr;
+ int ecc_dimm;
+
+ ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
+ HW_EVENT_ERR_UNCORRECTED;
+
+ /*
+ * Tell the External Memory Interface to populate the relevant
+ * registers with information about the last ECC error occurrence.
+ */
+ ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
+ writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+
+ /*
+ * Verify that the ECC info reported in the registers matches the
+ * type we were asked to report. If not, just report the
+ * error without the detailed information.
+ */
+ dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+ serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
+ derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
+ syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
+
+ if ((is_single_ecc && !serr) || (!is_single_ecc && !derr)) {
+ edac_mc_handle_error(ecc_type, mci, error_cnt, 0, 0, 0,
+ 0, 0, -1, mci->ctl_name, "");
+ return;
+ }
+
+ dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+ err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
+
+ ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
+
+ edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
+ edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+
+ ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
+
+ edac_mc_handle_error(ecc_type, mci, error_cnt,
+ PFN_DOWN(ecc_dimm_addr),
+ offset_in_page(ecc_dimm_addr),
+ syndrom, ecc_dimm, 0, 0, mci->ctl_name, "");
+}
+
+static void bluefield_edac_check(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+
+ /*
+ * The memory controller might not be initialized by the firmware
+ * when there isn't memory, which may lead to bad register readings.
+ */
+ if (mci->edac_cap == EDAC_FLAG_NONE)
+ return;
+
+ ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+ single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
+ double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
+
+ if (single_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__SECC;
+
+ bluefield_gather_report_ecc(mci, single_error_count, 1);
+ }
+
+ if (double_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__DECC;
+
+ bluefield_gather_report_ecc(mci, double_error_count, 0);
+ }
+
+ /* Write to clear reported errors. */
+ if (ecc_count)
+ writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+}
+
+/* Initialize the DIMMs information for the given memory controller. */
+static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ int mem_ctrl_idx = mci->mc_idx;
+ struct dimm_info *dimm;
+ u64 smc_info, smc_arg;
+ int is_empty = 1, i;
+
+ for (i = 0; i < priv->dimm_per_mc; i++) {
+ dimm = mci->dimms[i];
+
+ smc_arg = mem_ctrl_idx << 16 | i;
+ smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+
+ if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
+ dimm->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ is_empty = 0;
+
+ dimm->edac_mode = EDAC_SECDED;
+
+ if (FIELD_GET(MLXBF_DIMM_INFO__IS_NVDIMM, smc_info))
+ dimm->mtype = MEM_NVDIMM;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_LRDIMM, smc_info))
+ dimm->mtype = MEM_LRDDR4;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_RDIMM, smc_info))
+ dimm->mtype = MEM_RDDR4;
+ else
+ dimm->mtype = MEM_DDR4;
+
+ dimm->nr_pages =
+ FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info) *
+ (SZ_1G / PAGE_SIZE);
+ dimm->grain = MLXBF_EDAC_ERROR_GRAIN;
+
+ /* Mem controller for BlueField only supports x4, x8 and x16 */
+ switch (FIELD_GET(MLXBF_DIMM_INFO__PACKAGE_X, smc_info)) {
+ case 4:
+ dimm->dtype = DEV_X4;
+ break;
+ case 8:
+ dimm->dtype = DEV_X8;
+ break;
+ case 16:
+ dimm->dtype = DEV_X16;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ }
+
+ priv->dimm_ranks[i] =
+ FIELD_GET(MLXBF_DIMM_INFO__RANKS, smc_info);
+ }
+
+ if (is_empty)
+ mci->edac_cap = EDAC_FLAG_NONE;
+ else
+ mci->edac_cap = EDAC_FLAG_SECDED;
+}
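As a worked example of the nr_pages computation above: with 4 KiB pages, SZ_1G / PAGE_SIZE is 262144, so a DIMM the firmware reports as 8 GB is registered with 8 * 262144 = 2097152 pages.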
+
+static int bluefield_edac_mc_probe(struct platform_device *pdev)
+{
+ struct bluefield_edac_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci;
+ struct resource *emi_res;
+ unsigned int mc_idx, dimm_count;
+ int rc, ret;
+
+ /* Read the MSS (Memory SubSystem) index from ACPI table. */
+ if (device_property_read_u32(dev, "mss_number", &mc_idx)) {
+ dev_warn(dev, "bf_edac: MSS number unknown\n");
+ return -EINVAL;
+ }
+
+ /* Read the DIMMs per MC from ACPI table. */
+ if (device_property_read_u32(dev, "dimm_per_mc", &dimm_count)) {
+ dev_warn(dev, "bf_edac: DIMMs per MC unknown\n");
+ return -EINVAL;
+ }
+
+ if (dimm_count > MLXBF_EDAC_MAX_DIMM_PER_MC) {
+ dev_warn(dev, "bf_edac: DIMMs per MC not valid\n");
+ return -EINVAL;
+ }
+
+ emi_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!emi_res)
+ return -EINVAL;
+
+ layers[0].type = EDAC_MC_LAYER_SLOT;
+ layers[0].size = dimm_count;
+ layers[0].is_virt_csrow = true;
+
+ mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));
+ if (!mci)
+ return -ENOMEM;
+
+ priv = mci->pvt_info;
+
+ priv->dimm_per_mc = dimm_count;
+ priv->emi_base = devm_ioremap_resource(dev, emi_res);
+ if (IS_ERR(priv->emi_base)) {
+ dev_err(dev, "failed to map EMI IO resource\n");
+ ret = PTR_ERR(priv->emi_base);
+ goto err;
+ }
+
+ mci->pdev = dev;
+ mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+ MEM_FLAG_LRDDR4 | MEM_FLAG_NVDIMM;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = DRIVER_NAME;
+ mci->ctl_name = "BlueField_Memory_Controller";
+ mci->dev_name = dev_name(dev);
+ mci->edac_check = bluefield_edac_check;
+
+ /* Initialize mci with the actual populated DIMM information. */
+ bluefield_edac_init_dimms(mci);
+
+ platform_set_drvdata(pdev, mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(dev, "failed to register with EDAC core\n");
+ ret = rc;
+ goto err;
+ }
+
+ /* Only POLL mode supported so far. */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+
+ return ret;
+}
+
+static int bluefield_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
+ {"MLNXBF08", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, bluefield_mc_acpi_ids);
+
+static struct platform_driver bluefield_edac_mc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = bluefield_mc_acpi_ids,
+ },
+ .probe = bluefield_edac_mc_probe,
+ .remove = bluefield_edac_mc_remove,
+};
+
+module_platform_driver(bluefield_edac_mc_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField memory edac driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 64922c8fa7e3..e6fd079783bd 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -114,8 +114,8 @@ static const struct kernel_param_ops edac_report_ops = {
module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
-unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
- unsigned len)
+unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned int len)
{
struct mem_ctl_info *mci = dimm->mci;
int i, n, count = 0;
@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(edac_mem_types);
* At return, the pointer 'p' will be incremented to be used on a next call
* to this function.
*/
-void *edac_align_ptr(void **p, unsigned size, int n_elems)
+void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
- unsigned align, r;
+ unsigned int align, r;
void *ptr = *p;
*p += size * n_elems;
@@ -275,38 +275,37 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
static void _edac_mc_free(struct mem_ctl_info *mci)
{
- int i, chn, row;
struct csrow_info *csr;
- const unsigned int tot_dimms = mci->tot_dimms;
- const unsigned int tot_channels = mci->num_cschannel;
- const unsigned int tot_csrows = mci->nr_csrows;
+ int i, chn, row;
if (mci->dimms) {
- for (i = 0; i < tot_dimms; i++)
+ for (i = 0; i < mci->tot_dimms; i++)
kfree(mci->dimms[i]);
kfree(mci->dimms);
}
+
if (mci->csrows) {
- for (row = 0; row < tot_csrows; row++) {
+ for (row = 0; row < mci->nr_csrows; row++) {
csr = mci->csrows[row];
- if (csr) {
- if (csr->channels) {
- for (chn = 0; chn < tot_channels; chn++)
- kfree(csr->channels[chn]);
- kfree(csr->channels);
- }
- kfree(csr);
+ if (!csr)
+ continue;
+
+ if (csr->channels) {
+ for (chn = 0; chn < mci->num_cschannel; chn++)
+ kfree(csr->channels[chn]);
+ kfree(csr->channels);
}
+ kfree(csr);
}
kfree(mci->csrows);
}
kfree(mci);
}
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt)
+ unsigned int sz_pvt)
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
@@ -314,9 +313,9 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
struct rank_info *chan;
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
- unsigned pos[EDAC_MAX_LAYERS];
- unsigned size, tot_dimms = 1, count = 1;
- unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ unsigned int pos[EDAC_MAX_LAYERS];
+ unsigned int size, tot_dimms = 1, count = 1;
+ unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
int i, j, row, chn, n, len, off;
bool per_rank = false;
@@ -1235,9 +1234,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (p > e->location)
*(p - 1) = '\0';
- /* Report the error via the trace interface */
- grain_bits = fls_long(e->grain) + 1;
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+ grain_bits = fls_long(e->grain - 1);
+
+ /* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
trace_mc_event(type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
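As a quick check of the new computation, using a hypothetical grain of 8 bytes: the old expression fls_long(8) + 1 evaluated to 5 and reported the grain as 2^5 = 32 bytes, while fls_long(8 - 1) = 3 gives the correct 2^3 = 8 bytes; the WARN_ON_ONCE() guard clamps a driver-supplied grain of 0 to 1 so that grain_bits becomes 0 rather than a bogus value.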
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 4165e15995ad..02aac5c61d00 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -122,10 +122,10 @@ do { \
* On success, return a pointer to struct mem_ctl_info pointer;
* %NULL otherwise
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt);
+ unsigned int sz_pvt);
/**
* edac_get_owner - Return the owner's mod_name of EDAC MC
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4386ea4b9b5a..32d016f1ecd1 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -131,7 +131,7 @@ static const char * const edac_caps[] = {
struct dev_ch_attribute {
struct device_attribute attr;
- int channel;
+ unsigned int channel;
};
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
@@ -200,7 +200,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
/* if field has not been initialized, there is nothing to send */
@@ -216,7 +216,7 @@ static ssize_t channel_dimm_label_store(struct device *dev,
const char *data, size_t count)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
size_t copy_count = count;
@@ -240,7 +240,7 @@ static ssize_t channel_ce_count_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
return sprintf(data, "%u\n", rank->ce_count);
@@ -278,7 +278,7 @@ static void csrow_attr_release(struct device *dev)
{
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(csrow);
}
@@ -414,14 +414,16 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
dev_set_name(&csrow->dev, "csrow%d", index);
dev_set_drvdata(&csrow->dev, csrow);
- edac_dbg(0, "creating (virtual) csrow node %s\n",
- dev_name(&csrow->dev));
-
err = device_add(&csrow->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
put_device(&csrow->dev);
+ return err;
+ }
- return err;
+ edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
+
+ return 0;
}
/* Create a CSROW object under specified edac_mc_device */
@@ -435,12 +437,8 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0) {
- edac_dbg(1,
- "failure: create csrow objects for csrow %d\n",
- i);
+ if (err < 0)
goto error;
- }
}
return 0;
@@ -624,7 +622,7 @@ static void dimm_attr_release(struct device *dev)
{
struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
- edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dimm);
}
@@ -653,12 +651,21 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
pm_runtime_forbid(&mci->dev);
err = device_add(&dimm->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
put_device(&dimm->dev);
+ return err;
+ }
- edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
+ if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
+ char location[80];
- return err;
+ edac_dimm_info_location(dimm, location, sizeof(location));
+ edac_dbg(0, "device %s created at location %s\n",
+ dev_name(&dimm->dev), location);
+ }
+
+ return 0;
}
/*
@@ -901,7 +908,7 @@ static void mci_attr_release(struct device *dev)
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(mci);
}
@@ -933,14 +940,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
- edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
put_device(&mci->dev);
- goto out;
+ return err;
}
+ edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
+
/*
* Create the dimm/rank devices
*/
@@ -950,22 +958,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
if (!dimm->nr_pages)
continue;
-#ifdef CONFIG_EDAC_DEBUG
- edac_dbg(1, "creating dimm%d, located at ", i);
- if (edac_debug_level >= 1) {
- int lay;
- for (lay = 0; lay < mci->n_layers; lay++)
- printk(KERN_CONT "%s %d ",
- edac_layer_name[mci->layers[lay].type],
- dimm->location[lay]);
- printk(KERN_CONT "\n");
- }
-#endif
err = edac_create_dimm_object(mci, dimm, i);
- if (err) {
- edac_dbg(1, "failure: create dimm %d obj\n", i);
+ if (err)
goto fail_unregister_dimm;
- }
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
@@ -987,7 +982,6 @@ fail_unregister_dimm:
}
device_unregister(&mci->dev);
-out:
return err;
}
@@ -1011,14 +1005,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
device_unregister(&dimm->dev);
}
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
- edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
}
@@ -1029,7 +1023,7 @@ static void mc_attr_release(struct device *dev)
* parent device, used to create the /sys/devices/mc sysfs node.
* So, there are no attributes on it.
*/
- edac_dbg(1, "Releasing device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dev);
}
@@ -1044,10 +1038,8 @@ int __init edac_mc_sysfs_init(void)
int err;
mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
- if (!mci_pdev) {
- err = -ENOMEM;
- goto out;
- }
+ if (!mci_pdev)
+ return -ENOMEM;
mci_pdev->bus = edac_get_sysfs_subsys();
mci_pdev->type = &mc_attr_type;
@@ -1055,17 +1047,15 @@ int __init edac_mc_sysfs_init(void)
dev_set_name(mci_pdev, "mc");
err = device_add(mci_pdev);
- if (err < 0)
- goto out_put_device;
+ if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
+ put_device(mci_pdev);
+ return err;
+ }
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
-
- out_put_device:
- put_device(mci_pdev);
- out:
- return err;
}
void edac_mc_sysfs_exit(void)
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 7f19f1c672c3..d413a0bdc9ad 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -68,7 +68,7 @@ struct memdev_dmi_entry {
struct ghes_edac_dimm_fill {
struct mem_ctl_info *mci;
- unsigned count;
+ unsigned int count;
};
static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 83392f2841de..c370d5457e6b 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -123,9 +123,9 @@ static int i10nm_get_all_munits(void)
}
static const struct x86_cpu_id i10nm_cpuids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, 0, 0 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_XEON_D, 0, 0 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index b506eef6b146..251f2b692785 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -417,7 +417,8 @@ static const char *i5100_err_msg(unsigned err)
}
/* convert csrow index into a rank (per channel -- 0..5) */
-static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
@@ -425,7 +426,8 @@ static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
}
/* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
@@ -653,11 +655,11 @@ static struct pci_dev *pci_get_device_func(unsigned vendor,
return ret;
}
-static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned chan = i5100_csrow_to_chan(mci, csrow);
+ const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
@@ -852,8 +854,8 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->tot_dimms; i++) {
struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
- const unsigned chan = i5100_csrow_to_chan(mci, i);
- const unsigned rank = i5100_csrow_to_rank(mci, i);
+ const unsigned int chan = i5100_csrow_to_chan(mci, i);
+ const unsigned int rank = i5100_csrow_to_rank(mci, i);
if (!npages)
continue;
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index ca25f8fe57ef..b1193be1ef1d 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -260,11 +260,14 @@ static u64 get_sideband_reg_base_addr(void)
}
}
+#define DNV_MCHBAR_SIZE 0x8000
+#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
char *base;
u64 addr;
+ unsigned long size;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
@@ -279,15 +282,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
addr = get_mem_ctrl_hub_base_addr();
if (!addr)
return -ENODEV;
+ size = DNV_MCHBAR_SIZE;
} else {
/* MMIO via sideband register base address */
addr = get_sideband_reg_base_addr();
if (!addr)
return -ENODEV;
addr += (port << 16);
+ size = DNV_SB_PORT_SIZE;
}
- base = ioremap((resource_size_t)addr, 0x10000);
+ base = ioremap((resource_size_t)addr, size);
if (!base)
return -ENODEV;
@@ -1533,7 +1538,7 @@ static struct dunit_ops dnv_ops = {
static const struct x86_cpu_id pnd2_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 37746b045e18..f743502ca9b7 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3429,7 +3429,7 @@ static const struct x86_cpu_id sbridge_cpuids[] = {
INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
- INTEL_CPU_FAM6(BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table),
INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ee9b5f70bfa4..ad02dc6747a4 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -140,10 +140,8 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
err = request_any_context_irq(data->irq, adc_jack_irq_thread,
pdata->irq_flags, pdata->name, data);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7e9f4c9ee87d..e970134c95fa 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1253,7 +1253,7 @@ static int arizona_extcon_get_micd_configs(struct device *dev,
int i, j;
u32 *vals;
- nconfs = device_property_read_u32_array(arizona->dev, prop, NULL, 0);
+ nconfs = device_property_count_u32(arizona->dev, prop);
if (nconfs <= 0)
return 0;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 7254852e6ec0..415afaf479e7 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -121,7 +121,6 @@ static const char * const axp288_pwr_up_down_info[] = {
"Last shutdown caused by PMIC UVLO threshold",
"Last shutdown caused by SOC initiated cold off",
"Last shutdown caused by user pressing the power button",
- NULL,
};
/*
@@ -130,18 +129,21 @@ static const char * const axp288_pwr_up_down_info[] = {
*/
static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
{
- const char * const *rsi;
unsigned int val, i, clear_mask = 0;
+ unsigned long bits;
int ret;
ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
- for (i = 0, rsi = axp288_pwr_up_down_info; *rsi; rsi++, i++) {
- if (val & BIT(i)) {
- dev_dbg(info->dev, "%s\n", *rsi);
- clear_mask |= BIT(i);
- }
+ if (ret < 0) {
+ dev_err(info->dev, "failed to read reset source indicator\n");
+ return;
}
+ bits = val & GENMASK(ARRAY_SIZE(axp288_pwr_up_down_info) - 1, 0);
+ for_each_set_bit(i, &bits, ARRAY_SIZE(axp288_pwr_up_down_info))
+ dev_dbg(info->dev, "%s\n", axp288_pwr_up_down_info[i]);
+ clear_mask = bits;
+
/* Clear the register value for next reboot (write 1 to clear bit) */
regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
}
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 350fb34abfa0..8405512f5199 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -363,6 +363,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
static const struct of_device_id fsa9480_of_match[] = {
{ .compatible = "fcs,fsa9480", },
+ { .compatible = "fcs,fsa880", },
{ },
};
MODULE_DEVICE_TABLE(of, fsa9480_of_match);
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index faddeac948db..c211222f5d0c 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -22,26 +22,22 @@
/**
* struct gpio_extcon_data - A simple GPIO-controlled extcon device state container.
* @edev: Extcon device.
- * @irq: Interrupt line for the external connector.
* @work: Work fired by the interrupt.
* @debounce_jiffies: Number of jiffies to wait for the GPIO to stabilize, from the debounce
* value.
* @gpiod: GPIO descriptor for this external connector.
* @extcon_id: The unique id of specific external connector.
* @debounce: Debounce time for GPIO IRQ in ms.
- * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
* @check_on_resume: Boolean describing whether to check the state of gpio
* while resuming from sleep.
*/
struct gpio_extcon_data {
struct extcon_dev *edev;
- int irq;
struct delayed_work work;
unsigned long debounce_jiffies;
struct gpio_desc *gpiod;
unsigned int extcon_id;
unsigned long debounce;
- unsigned long irq_flags;
bool check_on_resume;
};
@@ -69,6 +65,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
{
struct gpio_extcon_data *data;
struct device *dev = &pdev->dev;
+ unsigned long irq_flags;
+ int irq;
int ret;
data = devm_kzalloc(dev, sizeof(struct gpio_extcon_data), GFP_KERNEL);
@@ -82,15 +80,26 @@ static int gpio_extcon_probe(struct platform_device *pdev)
* developed to get the extcon id from device-tree or others.
* This has to be solved later.
*/
- if (!data->irq_flags || data->extcon_id > EXTCON_NONE)
+ if (data->extcon_id > EXTCON_NONE)
return -EINVAL;
data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
if (IS_ERR(data->gpiod))
return PTR_ERR(data->gpiod);
- data->irq = gpiod_to_irq(data->gpiod);
- if (data->irq <= 0)
- return data->irq;
+ irq = gpiod_to_irq(data->gpiod);
+ if (irq <= 0)
+ return irq;
+
+ /*
+ * It is unlikely that this is an acknowledged interrupt that goes
+ * away after handling; what we are looking for are falling edges
+ * if the signal is active low, and rising edges if the signal is
+ * active high.
+ */
+ if (gpiod_is_active_low(data->gpiod))
+ irq_flags = IRQF_TRIGGER_FALLING;
+ else
+ irq_flags = IRQF_TRIGGER_RISING;
/* Allocate the memory of extcon device and register extcon device */
data->edev = devm_extcon_dev_allocate(dev, &data->extcon_id);
@@ -109,8 +118,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
* Request the interrupt of gpio to detect whether external connector
* is attached or detached.
*/
- ret = devm_request_any_context_irq(dev, data->irq,
- gpio_irq_handler, data->irq_flags,
+ ret = devm_request_any_context_irq(dev, irq,
+ gpio_irq_handler, irq_flags,
pdev->name, data);
if (ret < 0)
return ret;
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index a343a6ef3506..e6b50ca83008 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -774,12 +774,12 @@ static int max77843_init_muic_regmap(struct max77693_dev *max77843)
{
int ret;
- max77843->i2c_muic = i2c_new_dummy(max77843->i2c->adapter,
+ max77843->i2c_muic = i2c_new_dummy_device(max77843->i2c->adapter,
I2C_ADDR_MUIC);
- if (!max77843->i2c_muic) {
+ if (IS_ERR(max77843->i2c_muic)) {
dev_err(&max77843->i2c->dev,
"Cannot allocate I2C device for MUIC\n");
- return -ENOMEM;
+ return PTR_ERR(max77843->i2c_muic);
}
i2c_set_clientdata(max77843->i2c_muic, max77843);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 98e4f616b8f1..dc43847ad2b0 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -597,7 +597,7 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
ret = devm_request_threaded_irq(info->dev, virq, NULL,
sm5502_muic_irq_handler,
- IRQF_NO_SUSPEND,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
muic_irq->name, info);
if (ret) {
dev_err(info->dev,
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3dc1cbf849db..b785e936244f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation)
device->bc_implemented = BC_IMPLEMENTED;
break;
}
- /* else fall through to case address error */
+ /* else, fall through - to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 42566b7be8f5..df8a56a979b9 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
if ((data[0] & bit) == (data[1] & bit))
continue;
- /* 1394-1995 IRM, fall through to retry. */
+ /* fall through - It's a 1394-1995 IRM, retry. */
default:
if (retry) {
retry--;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 46bd22dde535..94a13fca8267 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
+ /* fall through */
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 53446e39a32c..e40a77bfe821 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86 && ACPI
+ depends on X86 && ISCSI_IBFT
default n
help
This option enables the kernel to find the region of memory
@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
default n
help
This option enables support for detection and exposing of iSCSI
@@ -215,6 +216,24 @@ config INTEL_STRATIX10_SERVICE
Say Y here if you want Stratix10 service layer support.
+config INTEL_STRATIX10_RSU
+ tristate "Intel Stratix10 Remote System Update"
+ depends on INTEL_STRATIX10_SERVICE
+ help
+ The Intel Remote System Update (RSU) driver exposes interfaces
+ accessed through the Intel Service Layer to user space via sysfs
+ device attribute nodes. The RSU interfaces report/control some of
+ the optional RSU features of the Stratix 10 SoC FPGA.
+
+ The RSU provides a way for customers to update the boot
+ configuration of a Stratix 10 SoC device with significantly reduced
+ risk of corrupting the bitstream storage and bricking the system.
+
+ Enable RSU support if you are using an Intel SoC FPGA with the RSU
+ feature enabled and you want Linux user space control.
+
+ Say Y here if you want Intel RSU support.
+
config QCOM_SCM
bool
depends on ARM || ARM64
@@ -270,6 +289,20 @@ config TRUSTED_FOUNDATIONS
Choose N if you don't know what this is about.
+config TURRIS_MOX_RWTM
+ tristate "Turris Mox rWTM secure firmware driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_DMA && OF
+ depends on MAILBOX
+ select HW_RANDOM
+ select ARMADA_37XX_RWTM_MBOX
+ help
+ This driver communicates with the firmware on the Cortex-M3 secure
+ processor of the Turris Mox router. Enable this if you are building
+ for Turris Mox; it lets you read the device serial number and other
+ manufacturing data, and use the Entropy Bit Generator for hardware
+ random number generation.
+
config HAVE_ARM_SMCCC
bool
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 3fa0b34eb72f..3fcb91975bdc 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EDD) += edd.o
obj-$(CONFIG_EFI_PCDP) += pcdp.o
obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
+obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
obj-y += psci/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index c47d28d556b6..5f298f00a82e 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -2,5 +2,5 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
-scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 204390297f4b..f804e8af6521 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(id);
+ put_unaligned_le32(id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
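
The cast through (__le32 *) assumed the Tx buffer was naturally aligned; the helpers from <asm/unaligned.h> used across these SCMI hunks make the byte order explicit and work for any alignment. A short sketch of the two accessors; the wrapper names are invented:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* buf points into a shared-memory payload that may not be aligned. */
    static void demo_encode_id(void *buf, u32 id)
    {
            put_unaligned_le32(id, buf);            /* write little-endian u32 */
    }

    static u64 demo_decode_value(const void *buf)
    {
            return get_unaligned_le64(buf);         /* read little-endian u64 */
    }
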
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 0a194af92438..32526a793f3a 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct scmi_clock_set_rate {
__le32 flags;
#define CLOCK_SET_ASYNC BIT(0)
-#define CLOCK_SET_DELAYED BIT(1)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id;
@@ -67,6 +67,7 @@ struct scmi_clock_set_rate {
struct clock_info {
int num_clocks;
int max_async_req;
+ atomic_t cur_async_req;
struct scmi_clock_info *clk;
};
@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+ put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+ put_unaligned_le32(clk_id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
- if (!ret) {
- __le32 *pval = t->rx.buf;
-
- *value = le32_to_cpu(*pval);
- *value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
- }
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
- u32 config, u64 rate)
+ u64 rate)
{
int ret;
+ u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = handle->clk_priv;
ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
+ if (ci->max_async_req &&
+ atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
cfg = t->tx.buf;
- cfg->flags = cpu_to_le32(config);
+ cfg->flags = cpu_to_le32(flags);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
- ret = scmi_do_xfer(handle, t);
+ if (flags & CLOCK_SET_ASYNC)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ if (ci->max_async_req)
+ atomic_dec(&ci->cur_async_req);
scmi_xfer_put(handle, t);
return ret;
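
A rate-set request is only marked asynchronous while the number of in-flight asynchronous requests stays below the limit the platform advertised; the new atomic counter is the gate, and it is dropped again once the transfer finishes. A condensed sketch of that decision under the same assumptions, with the helper name invented:

    #include <linux/atomic.h>

    /* ci mirrors struct clock_info from the hunk above. */
    static bool demo_use_async_set(struct clock_info *ci)
    {
            if (!ci->max_async_req)
                    return false;   /* platform offers no async slots */

            /*
             * Reserve a slot; as in the hunk above, the caller drops it
             * with atomic_dec() once the transfer has completed.
             */
            return atomic_inc_return(&ci->cur_async_req) < ci->max_async_req;
    }
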
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 44fd4f9404a9..5237c2ff79fe 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -15,6 +15,8 @@
#include <linux/scmi_protocol.h>
#include <linux/types.h>
+#include <asm/unaligned.h>
+
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version {
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
- * @id: The identifier of the command being sent
- * @protocol_id: The identifier of the protocol used to send @id command
- * @seq: The token to identify the message. when a message/command returns,
- * the platform returns the whole message header unmodified including
- * the token
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
@@ -84,17 +86,21 @@ struct scmi_msg {
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
- * @done: completion event
+ * @done: command message transmit completion event
+ * @async: pointer to delayed response message received event completion
*/
struct scmi_xfer {
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
+ struct completion *async_done;
};
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer_with_response(const struct scmi_handle *h,
+ struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b5bc4c7a8fab..3eb0382491ce 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -30,8 +30,14 @@
#include "common.h"
#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
@@ -86,7 +92,7 @@ struct scmi_desc {
};
/**
- * struct scmi_chan_info - Structure representing a SCMI channel informfation
+ * struct scmi_chan_info - Structure representing a SCMI channel information
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel
@@ -111,8 +117,9 @@ struct scmi_chan_info {
* @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
- * @minfo: Message info
- * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @tx_minfo: Universal Transmit Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: List head
@@ -123,8 +130,9 @@ struct scmi_info {
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
- struct scmi_xfers_info minfo;
+ struct scmi_xfers_info tx_minfo;
struct idr tx_idr;
+ struct idr rx_idr;
u8 *protocols_imp;
struct list_head node;
int users;
@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno)
static inline void scmi_dump_header_dbg(struct device *dev,
struct scmi_msg_hdr *hdr)
{
- dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+ dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
hdr->id, hdr->seq, hdr->protocol_id);
}
@@ -190,7 +198,7 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
struct scmi_shared_mem __iomem *mem)
{
xfer->hdr.status = ioread32(mem->msg_payload);
- /* Skip the length of header and statues in payload area i.e 8 bytes*/
+ /* Skip the length of header and status in payload area i.e 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
/* Take a copy to the rx buffer.. */
@@ -198,56 +206,12 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
}
/**
- * scmi_rx_callback() - mailbox client callback for receive messages
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
-{
- u16 xfer_id;
- struct scmi_xfer *xfer;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct device *dev = cinfo->dev;
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->minfo;
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
- /* Are we even expecting this? */
- if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
- dev_err(dev, "message for %d is not expected!\n", xfer_id);
- return;
- }
-
- xfer = &minfo->xfer_block[xfer_id];
-
- scmi_dump_header_dbg(dev, &xfer->hdr);
- /* Is the message of valid length? */
- if (xfer->rx.len > info->desc->max_msg_size) {
- dev_err(dev, "unable to handle %zu xfer(max %d)\n",
- xfer->rx.len, info->desc->max_msg_size);
- return;
- }
-
- scmi_fetch_response(xfer, mem);
- complete(&xfer->done);
-}
-
-/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*
- * Return: 32-bit packed command header to be sent to the platform.
+ * Return: 32-bit packed message header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
@@ -257,6 +221,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
}
/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
* scmi_tx_prepare() - mailbox client callback to prepare for the transfer
*
* @cl: client pointer
@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload;
+ /*
+ * Ideally the channel should be free by now, but if the OS timed
+ * out the last request while the platform kept processing it, wait
+ * until the platform releases the shared memory; otherwise we may
+ * end up overwriting its response with the new message payload, or
+ * vice versa.
+ */
+ spin_until_cond(ioread32(&mem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
*
- * Helper function which is used by various command functions that are
+ * Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
* This function can sleep depending on pending requests already in the system
@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
*
* Return: 0 if all went fine, else corresponding error.
*/
-static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
{
u16 xfer_id;
struct scmi_xfer *xfer;
unsigned long flags, bit_pos;
struct scmi_info *info = handle_to_scmi_info(handle);
- struct scmi_xfers_info *minfo = &info->minfo;
/* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
}
/**
- * scmi_xfer_put() - Release a message
+ * __scmi_xfer_put() - Release a message
*
- * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
unsigned long flags;
- struct scmi_info *info = handle_to_scmi_info(handle);
- struct scmi_xfers_info *minfo = &info->minfo;
/*
* Keep the locked section as small as possible
@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Maps one received message to the appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+ u8 msg_type;
+ u32 msg_hdr;
+ u16 xfer_id;
+ struct scmi_xfer *xfer;
+ struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+ msg_hdr = ioread32(&mem->msg_header);
+ msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+ if (msg_type == MSG_TYPE_NOTIFICATION)
+ return; /* Notifications not yet supported */
+
+ /* Are we even expecting this? */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+
+ scmi_fetch_response(xfer, mem);
+
+ if (msg_type == MSG_TYPE_DELAYED_RESP)
+ complete(xfer->async_done);
+ else
+ complete(&xfer->done);
+}
+
+/**
+ * scmi_xfer_put() - Release a transmit message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@@ -435,8 +481,36 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+/**
+ * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * response is received
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT if no delayed response arrives, the corresponding
+ * error code on transmit failure, or 0 if all goes well.
+ */
+int scmi_do_xfer_with_response(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ DECLARE_COMPLETION_ONSTACK(async_response);
+
+ xfer->async_done = &async_response;
+
+ ret = scmi_do_xfer(handle, xfer);
+ if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
+ ret = -ETIMEDOUT;
+
+ xfer->async_done = NULL;
+ return ret;
+}
+
/**
- * scmi_xfer_get_init() - Allocate and initialise one message
+ * scmi_xfer_get_init() - Allocate and initialise one message for transmit
*
* @handle: Pointer to SCMI entity handle
* @msg_id: Message identifier
@@ -457,6 +531,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
int ret;
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
@@ -464,7 +539,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(handle);
+ xfer = scmi_xfer_get(handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -597,27 +672,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
-static const struct scmi_desc scmi_generic_desc = {
- .max_rx_timeout_ms = 30, /* We may increase this if required */
- .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
-};
-
-/* Each compatible listed below must have descriptor associated with it */
-static const struct of_device_id scmi_of_match[] = {
- { .compatible = "arm,scmi", .data = &scmi_generic_desc },
- { /* Sentinel */ },
-};
-
-MODULE_DEVICE_TABLE(of, scmi_of_match);
-
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
- struct scmi_xfers_info *info = &sinfo->minfo;
+ struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -652,61 +713,32 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
-static int scmi_mailbox_check(struct device_node *np)
+static int scmi_mailbox_check(struct device_node *np, int idx)
{
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
+ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
+ idx, NULL);
}
-static int scmi_mbox_free_channel(int id, void *p, void *data)
+static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
+ int prot_id, bool tx)
{
- struct scmi_chan_info *cinfo = p;
- struct idr *idr = data;
-
- if (!IS_ERR_OR_NULL(cinfo->chan)) {
- mbox_free_channel(cinfo->chan);
- cinfo->chan = NULL;
- }
-
- idr_remove(idr, id);
-
- return 0;
-}
-
-static int scmi_remove(struct platform_device *pdev)
-{
- int ret = 0;
- struct scmi_info *info = platform_get_drvdata(pdev);
- struct idr *idr = &info->tx_idr;
-
- mutex_lock(&scmi_list_mutex);
- if (info->users)
- ret = -EBUSY;
- else
- list_del(&info->node);
- mutex_unlock(&scmi_list_mutex);
-
- if (ret)
- return ret;
-
- /* Safe to free channels since no more users */
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
- idr_destroy(&info->tx_idr);
-
- return ret;
-}
-
-static inline int
-scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
-{
- int ret;
+ int ret, idx;
struct resource res;
resource_size_t size;
struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
struct mbox_client *cl;
+ struct idr *idr;
+ const char *desc = tx ? "Tx" : "Rx";
- if (scmi_mailbox_check(np)) {
- cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ /* Transmit channel is first entry i.e. index 0 */
+ idx = tx ? 0 : 1;
+ idr = tx ? &info->tx_idr : &info->rx_idr;
+
+ if (scmi_mailbox_check(np, idx)) {
+ cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+ if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+ return -EINVAL;
goto idr_alloc;
}
@@ -719,36 +751,36 @@ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
cl = &cinfo->cl;
cl->dev = dev;
cl->rx_callback = scmi_rx_callback;
- cl->tx_prepare = scmi_tx_prepare;
+ cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
cl->tx_block = false;
- cl->knows_txdone = true;
+ cl->knows_txdone = tx;
- shmem = of_parse_phandle(np, "shmem", 0);
+ shmem = of_parse_phandle(np, "shmem", idx);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
- dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
+ dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
return ret;
}
size = resource_size(&res);
cinfo->payload = devm_ioremap(info->dev, res.start, size);
if (!cinfo->payload) {
- dev_err(dev, "failed to ioremap SCMI Tx payload\n");
+ dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
return -EADDRNOTAVAIL;
}
- /* Transmit channel is first entry i.e. index 0 */
- cinfo->chan = mbox_request_channel(cl, 0);
+ cinfo->chan = mbox_request_channel(cl, idx);
if (IS_ERR(cinfo->chan)) {
ret = PTR_ERR(cinfo->chan);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request SCMI Tx mailbox\n");
+ dev_err(dev, "failed to request SCMI %s mailbox\n",
+ desc);
return ret;
}
idr_alloc:
- ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+ ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
return ret;
@@ -758,6 +790,17 @@ idr_alloc:
return 0;
}
+static inline int
+scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+ int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+
+ if (!ret) /* Rx is optional, hence no error check */
+ scmi_mbox_chan_setup(info, dev, prot_id, false);
+
+ return ret;
+}
+
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
int prot_id)
@@ -771,7 +814,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
return;
}
- if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
+ if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
@@ -791,7 +834,7 @@ static int scmi_probe(struct platform_device *pdev)
struct device_node *child, *np = dev->of_node;
/* Only mailbox method supported, check for the presence of one */
- if (scmi_mailbox_check(np)) {
+ if (scmi_mailbox_check(np, 0)) {
dev_err(dev, "no mailbox found in %pOF\n", np);
return -EINVAL;
}
@@ -814,12 +857,13 @@ static int scmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
+ idr_init(&info->rx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
- ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
+ ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
@@ -854,6 +898,62 @@ static int scmi_probe(struct platform_device *pdev)
return 0;
}
+static int scmi_mbox_free_channel(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct idr *idr = data;
+
+ if (!IS_ERR_OR_NULL(cinfo->chan)) {
+ mbox_free_channel(cinfo->chan);
+ cinfo->chan = NULL;
+ }
+
+ idr_remove(idr, id);
+
+ return 0;
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct scmi_info *info = platform_get_drvdata(pdev);
+ struct idr *idr = &info->tx_idr;
+
+ mutex_lock(&scmi_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&scmi_list_mutex);
+
+ if (ret)
+ return ret;
+
+ /* Safe to free channels since no more users */
+ ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ idr_destroy(&info->tx_idr);
+
+ idr = &info->rx_idr;
+ ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ idr_destroy(&info->rx_idr);
+
+ return ret;
+}
+
+static const struct scmi_desc scmi_generic_desc = {
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
+
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "arm,scmi", .data = &scmi_generic_desc },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 3c8ae7cc35de..4a8012e3cb8c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,7 +5,10 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#include <linux/bits.h>
#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
@@ -21,6 +24,7 @@ enum scmi_performance_protocol_cmd {
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
+ PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
struct scmi_opp {
@@ -44,6 +48,7 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
@@ -87,17 +92,56 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[0];
};
+struct scmi_perf_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_perf_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *level_set_addr;
+ void __iomem *limit_set_addr;
+ void __iomem *level_get_addr;
+ void __iomem *limit_get_addr;
+ struct scmi_fc_db_info *level_set_db;
+ struct scmi_fc_db_info *limit_set_db;
+};
+
struct perf_dom_info {
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
+ bool perf_fastchannels;
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
};
struct scmi_perf_info {
@@ -151,7 +195,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -162,6 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
@@ -249,8 +294,42 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+#define SCMI_PERF_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PERF_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PERF_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PERF_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PERF_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
+static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
@@ -272,8 +351,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_set_addr) {
+ iowrite32(max_perf, dom->fc_info->limit_set_addr);
+ iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
+ scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
@@ -284,7 +379,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret) {
@@ -298,8 +393,23 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_get_addr) {
+ *max_perf = ioread32(dom->fc_info->limit_get_addr);
+ *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
@@ -321,8 +431,23 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_set_addr) {
+ iowrite32(level, dom->fc_info->level_set_addr);
+ scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_set(handle, domain, level, poll);
+}
+
+static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
@@ -333,16 +458,128 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return ret;
t->hdr.poll_completion = poll;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
- *level = le32_to_cpu(*(__le32 *)t->rx.buf);
+ *level = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_get_addr) {
+ *level = ioread32(dom->fc_info->level_get_addr);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_get(handle, domain, level, poll);
+}
+
+static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
+{
+ if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
+ return true;
+ if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
+ return true;
+ return false;
+}
+
+static void
+scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+ u32 message_id, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db;
+ struct scmi_perf_get_fc_info *info;
+ struct scmi_msg_resp_perf_desc_fc *resp;
+
+ if (!p_addr)
+ return;
+
+ ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
+ SCMI_PROTOCOL_PERF,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ return;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (!scmi_perf_fc_size_is_valid(message_id, size))
+ goto err_xfer;
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ goto err_xfer;
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+ *p_db = db;
+ }
+err_xfer:
+ scmi_xfer_put(handle, t);
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+ &fc->level_set_addr, &fc->level_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+ &fc->level_get_addr, NULL);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+ &fc->limit_set_addr, &fc->limit_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+ &fc->limit_get_addr, NULL);
+ *p_fc = fc;
+}
+
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
@@ -494,6 +731,9 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_attributes_get(handle, domain, dom);
scmi_perf_describe_levels_get(handle, domain, dom);
+
+ if (dom->perf_fastchannels)
+ scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
handle->perf_ops = &perf_ops;
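
With fast channels the level or limit is written straight into shared memory and the platform is then nudged by ringing a doorbell: read the register, keep the bits named in the preserve mask, and OR in the set bits. A width-fixed sketch of the 32-bit case the SCMI_PERF_FC_RING_DB() macro above generates; the function name is assumed:

    #include <linux/io.h>
    #include <linux/types.h>

    static void demo_ring_db32(void __iomem *addr, u32 set, u32 preserve_mask)
    {
            u32 val = 0;

            if (preserve_mask)
                    val = ioread32(addr) & preserve_mask;   /* keep masked bits */
            iowrite32(set | val, addr);                     /* ring the doorbell */
    }
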
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 62f3401a1f01..5abef7079c0a 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -96,7 +96,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -147,11 +147,11 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
- *state = le32_to_cpu(*(__le32 *)t->rx.buf);
+ *state = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 000000000000..64cc81915581
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_reset_protocol_cmd {
+ RESET_DOMAIN_ATTRIBUTES = 0x3,
+ RESET = 0x4,
+ RESET_NOTIFY = 0x5,
+};
+
+enum scmi_reset_protocol_notify {
+ RESET_ISSUED = 0x0,
+};
+
+#define NUM_RESET_DOMAIN_MASK 0xffff
+#define RESET_NOTIFY_ENABLE BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+ __le32 latency;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+ __le32 domain_id;
+ __le32 flags;
+#define AUTONOMOUS_RESET BIT(0)
+#define EXPLICIT_RESET_ASSERT BIT(1)
+#define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+#define ARCH_RESET_TYPE BIT(31)
+#define COLD_RESET_STATE BIT(0)
+#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
+};
+
+struct reset_dom_info {
+ bool async_reset;
+ bool reset_notify;
+ u32 latency_us;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+ int num_domains;
+ struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+ struct scmi_reset_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ u32 attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ attr = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+ struct reset_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_reset_domain_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, sizeof(domain),
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u32 attributes = le32_to_cpu(attr->attributes);
+
+ dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+ dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ dom_info->latency_us = le32_to_cpu(attr->latency);
+ if (dom_info->latency_us == U32_MAX)
+ dom_info->latency_us = 0;
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+
+ return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+ u32 flags, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *rdom = pi->dom_info + domain;
+
+ if (rdom->async_reset)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
+ sizeof(*dom), 0, &t);
+ if (ret)
+ return ret;
+
+ dom = t->tx.buf;
+ dom->domain_id = cpu_to_le32(domain);
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+ if (rdom->async_reset)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+}
+
+static struct scmi_reset_ops reset_ops = {
+ .num_domains_get = scmi_reset_num_domains_get,
+ .name_get = scmi_reset_name_get,
+ .latency_get = scmi_reset_latency_get,
+ .reset = scmi_reset_domain_reset,
+ .assert = scmi_reset_domain_assert,
+ .deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_protocol_init(struct scmi_handle *handle)
+{
+ int domain;
+ u32 version;
+ struct scmi_reset_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+
+ dev_dbg(handle->dev, "Reset Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_reset_attributes_get(handle, pinfo);
+
+ pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_reset_domain_attributes_get(handle, domain, dom);
+ }
+
+ handle->reset_ops = &reset_ops;
+ handle->reset_priv = pinfo;
+
+ return 0;
+}
+
+static int __init scmi_reset_init(void)
+{
+ return scmi_protocol_register(SCMI_PROTOCOL_RESET,
+ &scmi_reset_protocol_init);
+}
+subsys_initcall(scmi_reset_init);
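
Consumers reach the new protocol through handle->reset_ops, for example from a reset controller driver built on top of the SCMI core. A hedged usage sketch: the handle is assumed to come from an SCMI device's probe and the function name is invented:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/scmi_protocol.h>

    /* Illustrative only: autonomously reset domain 0 and report its name. */
    static int demo_reset_domain0(const struct scmi_handle *handle)
    {
            const struct scmi_reset_ops *ops = handle->reset_ops;

            if (ops->num_domains_get(handle) < 1)
                    return -ENODEV;

            pr_info("resetting %s (latency %d us)\n",
                    ops->name_get(handle, 0), ops->latency_get(handle, 0));

            return ops->reset(handle, 0);
    }
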
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 0e94ab56f679..a400ea805fc2 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -9,8 +9,8 @@
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
- SENSOR_CONFIG_SET = 0x4,
- SENSOR_TRIP_POINT_SET = 0x5,
+ SENSOR_TRIP_POINT_NOTIFY = 0x4,
+ SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
};
@@ -42,9 +42,10 @@ struct scmi_msg_resp_sensor_description {
} desc[0];
};
-struct scmi_msg_set_sensor_config {
+struct scmi_msg_sensor_trip_point_notify {
__le32 id;
__le32 event_control;
+#define SENSOR_TP_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@@ -119,7 +120,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
do {
/* Set the number of sensors to be skipped/already read */
- *(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+ put_unaligned_le32(desc_index, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (ret)
@@ -135,9 +136,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
for (cnt = 0; cnt < num_returned; cnt++) {
- u32 attrh;
+ u32 attrh, attrl;
struct scmi_sensor_info *s;
+ attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
s->id = le32_to_cpu(buf->desc[cnt].id);
@@ -146,6 +148,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
/* Sign extend to a full s8 */
if (s->scale & SENSOR_SCALE_SIGN)
s->scale |= SENSOR_SCALE_EXTEND;
+ s->async = SUPPORTS_ASYNC_READ(attrl);
+ s->num_trip_points = NUM_TRIP_POINTS(attrl);
strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
}
@@ -160,15 +164,15 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
return ret;
}
-static int
-scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
{
int ret;
- u32 evt_cntl = BIT(0);
+ u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
- struct scmi_msg_set_sensor_config *cfg;
+ struct scmi_msg_sensor_trip_point_notify *cfg;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -183,15 +187,16 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
return ret;
}
-static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
- u32 sensor_id, u8 trip_id, u64 trip_value)
+static int
+scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
+ u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
if (ret)
return ret;
@@ -209,11 +214,13 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
}
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
- u32 sensor_id, bool async, u64 *value)
+ u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
@@ -223,14 +230,18 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
- sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
- ret = scmi_do_xfer(handle, t);
- if (!ret) {
- __le32 *pval = t->rx.buf;
-
- *value = le32_to_cpu(*pval);
- *value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64((void *)
+ ((__le32 *)t->rx.buf + 1));
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
}
scmi_xfer_put(handle, t);
@@ -255,8 +266,8 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
- .configuration_set = scmi_sensor_configuration_set,
- .trip_point_set = scmi_sensor_trip_point_set,
+ .trip_point_notify = scmi_sensor_trip_point_notify,
+ .trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
};
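
reading_get() now chooses between a synchronous read and an asynchronous one from the per-sensor capability bit discovered at description time; on the async path the 64-bit reading in the delayed response starts one __le32 after the echoed sensor id. A minimal sketch of just the payload decoding, assuming that layout as in the hunk above; the function name is invented:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /*
     * rx points at the response payload: for a delayed response the first
     * __le32 is assumed to be the sensor id with the reading following it;
     * a synchronous response carries the reading at offset 0.
     */
    static u64 demo_decode_reading(const void *rx, bool delayed)
    {
            if (delayed)
                    return get_unaligned_le64((const __le32 *)rx + 1);
            return get_unaligned_le64(rx);
    }
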
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 725164b83242..a80c331c3a6e 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -1011,10 +1011,6 @@ static int scpi_probe(struct platform_device *pdev)
scpi_info->firmware_version));
scpi_info->scpi_ops = &scpi_ops;
- ret = devm_device_add_groups(dev, versions_groups);
- if (ret)
- dev_err(dev, "unable to create sysfs version group\n");
-
return devm_of_platform_populate(dev);
}
@@ -1030,6 +1026,7 @@ static struct platform_driver scpi_driver = {
.driver = {
.name = "scpi_protocol",
.of_match_table = scpi_of_match,
+ .dev_groups = versions_groups,
},
.probe = scpi_probe,
.remove = scpi_remove,
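
The sysfs version attributes move from an explicit devm_device_add_groups() call in probe to the driver core, which creates and removes driver-owned groups itself once .dev_groups is set. A minimal sketch of the pattern, with the attribute and driver names made up:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/sysfs.h>

    static ssize_t fw_rev_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "demo\n");  /* placeholder value */
    }
    static DEVICE_ATTR_RO(fw_rev);

    static struct attribute *demo_version_attrs[] = {
            &dev_attr_fw_rev.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(demo_version);

    static struct platform_driver demo_driver = {
            .driver = {
                    .name           = "demo",
                    .dev_groups     = demo_version_groups, /* core-managed */
            },
    };
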
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d4ea929e8b34..178ee8106828 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -180,6 +180,19 @@ config RESET_ATTACK_MITIGATION
have been evicted, since otherwise it will trigger even on clean
reboots.
+config EFI_RCI2_TABLE
+ bool "EFI Runtime Configuration Interface Table Version 2 Support"
+ help
+ Displays the content of the Runtime Configuration Interface
+ Table version 2 on Dell EMC PowerEdge systems as a binary
+ attribute 'rci2' under the /sys/firmware/efi/tables directory.
+
+ The RCI2 table contains the BIOS HII data in XML format and is used
+ to populate the BIOS setup page in the Dell EMC OpenManage Server
+ Administrator tool. The BIOS setup page contains BIOS tokens which
+ can be configured.
+
+ Say Y here for Dell EMC PowerEdge systems.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index d2d0d2030620..4ac2de4dfa72 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
+obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 8fa977c7861f..addf0749dd8b 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -390,6 +390,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+ /* Fatal errors call __ghes_panic() before AER handler prints this */
+ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+ (gdata->error_severity & CPER_SEV_FATAL)) {
+ struct aer_capability_regs *aer;
+
+ aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+ pfx, aer->uncor_status, aer->uncor_mask);
+ printk("%saer_uncor_severity: 0x%08x\n",
+ pfx, aer->uncor_severity);
+ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+ aer->header_log.dw0, aer->header_log.dw1,
+ aer->header_log.dw2, aer->header_log.dw3);
+ }
}
static void cper_print_tstamp(const char *pfx,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ad3b1f4866b3..8f1ab04f6743 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -39,11 +39,9 @@ struct efi __read_mostly efi = {
.acpi20 = EFI_INVALID_TABLE_ADDR,
.smbios = EFI_INVALID_TABLE_ADDR,
.smbios3 = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
.boot_info = EFI_INVALID_TABLE_ADDR,
.hcdp = EFI_INVALID_TABLE_ADDR,
.uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
.fw_vendor = EFI_INVALID_TABLE_ADDR,
.runtime = EFI_INVALID_TABLE_ADDR,
.config_table = EFI_INVALID_TABLE_ADDR,
@@ -57,25 +55,6 @@ struct efi __read_mostly efi = {
};
EXPORT_SYMBOL(efi);
-static unsigned long *efi_tables[] = {
- &efi.mps,
- &efi.acpi,
- &efi.acpi20,
- &efi.smbios,
- &efi.smbios3,
- &efi.sal_systab,
- &efi.boot_info,
- &efi.hcdp,
- &efi.uga,
- &efi.uv_systab,
- &efi.fw_vendor,
- &efi.runtime,
- &efi.config_table,
- &efi.esrt,
- &efi.properties_table,
- &efi.mem_attr_table,
-};
-
struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
@@ -476,7 +455,6 @@ static __initdata efi_config_table_type_t common_tables[] = {
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
{HCDP_TABLE_GUID, "HCDP", &efi.hcdp},
{MPS_TABLE_GUID, "MPS", &efi.mps},
- {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
@@ -487,6 +465,9 @@ static __initdata efi_config_table_type_t common_tables[] = {
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
+#ifdef CONFIG_EFI_RCI2_TABLE
+ {DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
+#endif
{NULL_GUID, NULL, NULL},
};
@@ -964,20 +945,6 @@ int efi_status_to_err(efi_status_t status)
return err;
}
-bool efi_is_table_address(unsigned long phys_addr)
-{
- unsigned int i;
-
- if (phys_addr == EFI_INVALID_TABLE_ADDR)
- return false;
-
- for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
- if (*(efi_tables[i]) == phys_addr)
- return true;
-
- return false;
-}
-
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1db780c0f07b..3caae7f2cf56 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -927,17 +927,33 @@ fail:
return status;
}
+#define GET_EFI_CONFIG_TABLE(bits) \
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \
+ efi_guid_t guid) \
+{ \
+ efi_system_table_##bits##_t *sys_table; \
+ efi_config_table_##bits##_t *tables; \
+ int i; \
+ \
+ sys_table = (typeof(sys_table))_sys_table; \
+ tables = (typeof(tables))(unsigned long)sys_table->tables; \
+ \
+ for (i = 0; i < sys_table->nr_tables; i++) { \
+ if (efi_guidcmp(tables[i].guid, guid) != 0) \
+ continue; \
+ \
+ return (void *)(unsigned long)tables[i].table; \
+ } \
+ \
+ return NULL; \
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
{
- efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
- int i;
-
- for (i = 0; i < sys_table->nr_tables; i++) {
- if (efi_guidcmp(tables[i].guid, guid) != 0)
- continue;
-
- return (void *)tables[i].table;
- }
-
- return NULL;
+ if (efi_is_64bit())
+ return get_efi_config_table64(sys_table, guid);
+ else
+ return get_efi_config_table32(sys_table, guid);
}
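
The lookup is instantiated twice because the firmware's configuration-table entries contain pointer-sized fields, so their layout follows the firmware word size rather than the kernel's; efi_is_64bit() then picks the matching walker at run time (for example in mixed mode). A simplified view of the two entry layouts, with the type names invented:

    #include <linux/types.h>

    typedef struct {
            u8  guid[16];           /* efi_guid_t */
            u32 table;              /* 32-bit firmware: 4-byte pointer */
    } demo_cfg_table_32_t;

    typedef struct {
            u8  guid[16];
            u64 table;              /* 64-bit firmware: 8-byte pointer */
    } demo_cfg_table_64_t;
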
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
new file mode 100644
index 000000000000..3e290f96620a
--- /dev/null
+++ b/drivers/firmware/efi/rci2-table.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Export Runtime Configuration Interface Table Version 2 (RCI2)
+ * to sysfs
+ *
+ * Copyright (C) 2019 Dell Inc
+ * by Narendra K <Narendra.K@dell.com>
+ *
+ * System firmware advertises the address of the RCI2 Table via
+ * an EFI Configuration Table entry. This code retrieves the RCI2
+ * table from that address and exports it to sysfs as a binary
+ * attribute 'rci2' under the /sys/firmware/efi/tables directory.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/efi.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define RCI_SIGNATURE "_RC_"
+
+struct rci2_table_global_hdr {
+ u16 type;
+ u16 resvd0;
+ u16 hdr_len;
+ u8 rci2_sig[4];
+ u16 resvd1;
+ u32 resvd2;
+ u32 resvd3;
+ u8 major_rev;
+ u8 minor_rev;
+ u16 num_of_structs;
+ u32 rci2_len;
+ u16 rci2_chksum;
+} __packed;
+
+static u8 *rci2_base;
+static u32 rci2_table_len;
+unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ memcpy(buf, attr->private + pos, count);
+ return count;
+}
+
+static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0);
+
+static u16 checksum(void)
+{
+ u8 len_is_odd = rci2_table_len % 2;
+ u32 chksum_len = rci2_table_len;
+ u16 *base = (u16 *)rci2_base;
+ u8 buf[2] = {0};
+ u32 offset = 0;
+ u16 chksum = 0;
+
+ if (len_is_odd)
+ chksum_len -= 1;
+
+ while (offset < chksum_len) {
+ chksum += *base;
+ offset += 2;
+ base++;
+ }
+
+ if (len_is_odd) {
+ buf[0] = *(u8 *)base;
+ chksum += *(u16 *)(buf);
+ }
+
+ return chksum;
+}
+
+int __init efi_rci2_sysfs_init(void)
+{
+ struct kobject *tables_kobj;
+ int ret = -ENOMEM;
+
+ rci2_base = memremap(rci2_table_phys,
+ sizeof(struct rci2_table_global_hdr),
+ MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table init failed - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (strncmp(rci2_base +
+ offsetof(struct rci2_table_global_hdr, rci2_sig),
+ RCI_SIGNATURE, 4)) {
+ pr_debug("RCI2 table init failed - incorrect signature\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ rci2_table_len = *(u32 *)(rci2_base +
+ offsetof(struct rci2_table_global_hdr,
+ rci2_len));
+
+ memunmap(rci2_base);
+
+ if (!rci2_table_len) {
+ pr_debug("RCI2 table init failed - incorrect table length\n");
+ goto err;
+ }
+
+ rci2_base = memremap(rci2_table_phys, rci2_table_len, MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (checksum() != 0) {
+ pr_debug("RCI2 table - incorrect checksum\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ tables_kobj = kobject_create_and_add("tables", efi_kobj);
+ if (!tables_kobj) {
+ pr_debug("RCI2 table - tables_kobj creation failed\n");
+ goto err_unmap;
+ }
+
+ bin_attr_rci2.size = rci2_table_len;
+ bin_attr_rci2.private = rci2_base;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_rci2);
+ if (ret != 0) {
+ pr_debug("RCI2 table - rci2 sysfs bin file creation failed\n");
+ kobject_del(tables_kobj);
+ kobject_put(tables_kobj);
+ goto err_unmap;
+ }
+
+ return 0;
+
+ err_unmap:
+ memunmap(rci2_base);
+ err:
+ pr_debug("RCI2 table - sysfs initialization failed\n");
+ return ret;
+}
+late_initcall(efi_rci2_sysfs_init);
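The checksum() helper above sums the table as 16-bit words (padding an odd trailing byte with zero) and treats the table as valid only if the sum wraps to zero. A standalone sketch of the same scheme, independent of the driver and assuming a little-endian word layout as the driver does; names are chosen here for illustration:

#include <stdint.h>
#include <stddef.h>

/* Returns nonzero if the 16-bit wrap-around checksum of buf sums to 0. */
static int rci2_style_checksum_ok(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
	if (len & 1)		/* odd length: pad the last byte with 0 */
		sum += buf[len - 1];
	return sum == 0;
}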
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 0739f3b70347..db0812263d46 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
return VPD_OK;
}
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
- const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
void *arg)
{
int ret;
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index 92e3258552fc..dda525c0f968 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -9,8 +9,8 @@
#include "vpd_decode.h"
-static int vpd_decode_len(const s32 max_len, const u8 *in,
- s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+ u32 *length, u32 *decoded_len)
{
u8 more;
int i = 0;
@@ -30,18 +30,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
} while (more);
*decoded_len = i;
+ return VPD_OK;
+}
+
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+ u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+ u32 decoded_len;
+ u32 consumed = *_consumed;
+
+ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+ entry_len, &decoded_len) != VPD_OK)
+ return VPD_FAIL;
+ if (max_len - consumed < decoded_len)
+ return VPD_FAIL;
+
+ consumed += decoded_len;
+ *entry = input_buf + consumed;
+
+ /* entry_len is untrusted data and must be checked again. */
+ if (max_len - consumed < *entry_len)
+ return VPD_FAIL;
+ consumed += decoded_len;
+ *_consumed = consumed;
return VPD_OK;
}
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
vpd_decode_callback callback, void *callback_arg)
{
int type;
- int res;
- s32 key_len;
- s32 value_len;
- s32 decoded_len;
+ u32 key_len;
+ u32 value_len;
const u8 *key;
const u8 *value;
@@ -56,26 +77,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
case VPD_TYPE_STRING:
(*consumed)++;
- /* key */
- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
- &key_len, &decoded_len);
- if (res != VPD_OK || *consumed + decoded_len >= max_len)
+ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+ &key_len) != VPD_OK)
return VPD_FAIL;
- *consumed += decoded_len;
- key = &input_buf[*consumed];
- *consumed += key_len;
-
- /* value */
- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
- &value_len, &decoded_len);
- if (res != VPD_OK || *consumed + decoded_len > max_len)
+ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+ &value_len) != VPD_OK)
return VPD_FAIL;
- *consumed += decoded_len;
- value = &input_buf[*consumed];
- *consumed += value_len;
-
if (type == VPD_TYPE_STRING)
return callback(key, key_len, value, value_len,
callback_arg);
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
index cf8c2ace155a..8dbe41cac599 100644
--- a/drivers/firmware/google/vpd_decode.h
+++ b/drivers/firmware/google/vpd_decode.h
@@ -25,8 +25,8 @@ enum {
};
/* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
- const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
void *arg);
/*
@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
* If one entry is successfully decoded, sends it to callback and returns the
* result.
*/
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
vpd_decode_callback callback, void *callback_arg);
#endif /* __VPD_DECODE_H */
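With the signed lengths switched to u32 and the bounds checks consolidated in vpd_decode_entry(), a caller still drives the decoder one entry at a time through vpd_decode_string(). A hedged usage sketch; the callback and buffer names are illustrative only:

static int print_vpd_pair(const u8 *key, u32 key_len,
			  const u8 *value, u32 value_len, void *arg)
{
	pr_info("VPD: %.*s = %.*s\n", (int)key_len, key, (int)value_len, value);
	return VPD_OK;
}

static void walk_vpd(const u8 *blob, u32 blob_len)
{
	u32 consumed = 0;

	/* Stops at the first malformed or truncated entry. */
	while (vpd_decode_string(blob_len, blob, &consumed,
				 print_vpd_pair, NULL) == VPD_OK)
		;
}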
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 42b566f8903f..0dbee32da4c6 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config IMX_DSP
+ bool "IMX DSP Protocol driver"
+ depends on IMX_MBOX
+ help
+	  This enables the DSP IPC protocol between the host AP (Linux)
+	  and the firmware running on the DSP.
+	  A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP).
+
+	  It acts like a doorbell. Clients may use shared memory to
+	  exchange information with the DSP side.
+
config IMX_SCU
bool "IMX SCU Protocol driver"
depends on IMX_MBOX
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 802c4ad8e8f9..08bc9ddfbdfb 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
new file mode 100644
index 000000000000..a43d2db5cbdb
--- /dev/null
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ *
+ * Implementation of the DSP IPC interface (host side)
+ */
+
+#include <linux/firmware/imx/dsp.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
+ *
+ * @dsp: DSP IPC handle
+ * @chan_idx: index of the channel where to trigger the interrupt
+ *
+ * Returns 0 on success, a negative value on error
+ */
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
+{
+ int ret;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return -EINVAL;
+
+ dsp_chan = &ipc->chans[idx];
+ ret = mbox_send_message(dsp_chan->ch, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_dsp_ring_doorbell);
+
+/*
+ * imx_dsp_handle_rx - rx callback used by imx mailbox
+ *
+ * @c: mbox client
+ * @msg: message received
+ *
+ * Users of DSP IPC will need to provide handle_reply and handle_request
+ * callbacks.
+ */
+static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
+{
+ struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
+
+ if (chan->idx == 0) {
+ chan->ipc->ops->handle_reply(chan->ipc);
+ } else {
+ chan->ipc->ops->handle_request(chan->ipc);
+ imx_dsp_ring_doorbell(chan->ipc, 1);
+ }
+}
+
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ struct imx_dsp_chan *dsp_chan;
+ struct mbox_client *cl;
+ char *chan_name;
+ int ret;
+ int i, j;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ if (i < 2)
+ chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
+ else
+ chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+
+ dsp_chan = &dsp_ipc->chans[i];
+ cl = &dsp_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_dsp_handle_rx;
+
+ dsp_chan->ipc = dsp_ipc;
+ dsp_chan->idx = i % 2;
+ dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(dsp_chan->ch)) {
+ ret = PTR_ERR(dsp_chan->ch);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ chan_name, ret);
+ goto out;
+ }
+
+ dev_dbg(dev, "request mbox chan %s\n", chan_name);
+ /* chan_name is not used anymore by framework */
+ kfree(chan_name);
+ }
+
+ dsp_ipc->dev = dev;
+
+ dev_set_drvdata(dev, dsp_ipc);
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return devm_of_platform_populate(dev);
+out:
+ kfree(chan_name);
+ for (j = 0; j < i; j++) {
+ dsp_chan = &dsp_ipc->chans[j];
+ mbox_free_channel(dsp_chan->ch);
+ }
+
+ return ret;
+}
+
+static int imx_dsp_remove(struct platform_device *pdev)
+{
+ struct imx_dsp_chan *dsp_chan;
+ struct imx_dsp_ipc *dsp_ipc;
+ int i;
+
+ dsp_ipc = dev_get_drvdata(&pdev->dev);
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ dsp_chan = &dsp_ipc->chans[i];
+ mbox_free_channel(dsp_chan->ch);
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_dsp_driver = {
+ .driver = {
+ .name = "imx-dsp",
+ },
+ .probe = imx_dsp_probe,
+ .remove = imx_dsp_remove,
+};
+builtin_platform_driver(imx_dsp_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
+MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
+MODULE_LICENSE("GPL v2");
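On the consumer side, a client is expected to supply the handle_reply/handle_request callbacks invoked from imx_dsp_handle_rx() above and to ring the doorbell once it has placed a message in shared memory. A rough sketch; the function names are illustrative and how a client obtains the ipc handle is not shown in this patch:

/* Illustrative only: callback names and handle lookup are assumptions. */
static void my_dsp_reply(struct imx_dsp_ipc *ipc)
{
	/* parse the reply the DSP left in shared memory */
}

static void my_dsp_request(struct imx_dsp_ipc *ipc)
{
	/* handle an unsolicited request from the DSP */
}

static int my_client_notify(struct imx_dsp_ipc *ipc)
{
	/* the payload travels through shared memory; the doorbell only signals it */
	return imx_dsp_ring_doorbell(ipc, 0);
}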
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 480cec69e2c9..b556612207e5 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -92,7 +92,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
- { "mu", IMX_SC_R_MU_0A, 14, true, 0 },
+ { "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
+ { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
/* CONN SS */
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
@@ -130,6 +131,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
{ "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+ { "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
/* VPU SS */
{ "vpu", IMX_SC_R_VPU, 1, false, 0 },
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ab3aa3983833..7e12cbdf957c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBFT_ISCSI_VERSION);
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
struct ibft_hdr {
u8 id;
u8 version;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index f82ccd39a913..84f4ff351c62 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state)
return state & mask;
}
-static inline bool psci_power_state_is_valid(u32 state)
+bool psci_power_state_is_valid(u32 state)
{
const u32 valid_mask = psci_has_ext_power_state() ?
PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -277,175 +277,24 @@ static int __init psci_features(u32 psci_func_id)
}
#ifdef CONFIG_CPU_IDLE
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
-{
- int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
-
- if (err) {
- pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
- return err;
- }
-
- if (!psci_power_state_is_valid(*state)) {
- pr_warn("Invalid PSCI power state %#x\n", *state);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
-{
- int i, ret = 0, count = 0;
- u32 *psci_states;
- struct device_node *state_node;
-
- /* Count idle states */
- while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- count))) {
- count++;
- of_node_put(state_node);
- }
-
- if (!count)
- return -ENODEV;
-
- psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
- if (!psci_states)
- return -ENOMEM;
-
- for (i = 0; i < count; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
- ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
- of_node_put(state_node);
-
- if (ret)
- goto free_mem;
-
- pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
- }
-
- /* Idle states parsed correctly, initialize per-cpu pointer */
- per_cpu(psci_power_state, cpu) = psci_states;
- return 0;
-
-free_mem:
- kfree(psci_states);
- return ret;
-}
-
-#ifdef CONFIG_ACPI
-#include <acpi/processor.h>
-
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
- int i, count;
- u32 *psci_states;
- struct acpi_lpi_state *lpi;
- struct acpi_processor *pr = per_cpu(processors, cpu);
-
- if (unlikely(!pr || !pr->flags.has_lpi))
- return -EINVAL;
-
- count = pr->power.count - 1;
- if (count <= 0)
- return -ENODEV;
-
- psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
- if (!psci_states)
- return -ENOMEM;
-
- for (i = 0; i < count; i++) {
- u32 state;
-
- lpi = &pr->power.lpi_states[i + 1];
- /*
- * Only bits[31:0] represent a PSCI power_state while
- * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
- */
- state = lpi->address;
- if (!psci_power_state_is_valid(state)) {
- pr_warn("Invalid PSCI power state %#x\n", state);
- kfree(psci_states);
- return -EINVAL;
- }
- psci_states[i] = state;
- }
- /* Idle states parsed correctly, initialize per-cpu pointer */
- per_cpu(psci_power_state, cpu) = psci_states;
- return 0;
-}
-#else
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
- return -EINVAL;
-}
-#endif
-
-int psci_cpu_init_idle(unsigned int cpu)
-{
- struct device_node *cpu_node;
- int ret;
-
- /*
- * If the PSCI cpu_suspend function hook has not been initialized
- * idle states must not be enabled, so bail out
- */
- if (!psci_ops.cpu_suspend)
- return -EOPNOTSUPP;
-
- if (!acpi_disabled)
- return psci_acpi_cpu_init_idle(cpu);
-
- cpu_node = of_get_cpu_node(cpu, NULL);
- if (!cpu_node)
- return -ENODEV;
-
- ret = psci_dt_cpu_init_idle(cpu_node, cpu);
-
- of_node_put(cpu_node);
-
- return ret;
-}
-
-static int psci_suspend_finisher(unsigned long index)
+static int psci_suspend_finisher(unsigned long state)
{
- u32 *state = __this_cpu_read(psci_power_state);
+ u32 power_state = state;
- return psci_ops.cpu_suspend(state[index - 1],
- __pa_symbol(cpu_resume));
+ return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
}
-int psci_cpu_suspend_enter(unsigned long index)
+int psci_cpu_suspend_enter(u32 state)
{
int ret;
- u32 *state = __this_cpu_read(psci_power_state);
- /*
- * idle state index 0 corresponds to wfi, should never be called
- * from the cpu_suspend operations
- */
- if (WARN_ON_ONCE(!index))
- return -EINVAL;
- if (!psci_power_state_loses_context(state[index - 1]))
- ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ if (!psci_power_state_loses_context(state))
+ ret = psci_ops.cpu_suspend(state, 0);
else
- ret = cpu_suspend(index, psci_suspend_finisher);
+ ret = cpu_suspend(state, psci_suspend_finisher);
return ret;
}
-
-/* ARM specific CPU idle operations */
-#ifdef CONFIG_ARM
-static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
- .suspend = psci_cpu_suspend_enter,
- .init = psci_dt_cpu_init_idle,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
-#endif
#endif
static int psci_system_suspend(unsigned long unused)
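After this change psci_cpu_suspend_enter() takes the PSCI power_state value itself rather than an idle-state index, so the per-cpu psci_power_state table and the DT/ACPI idle-state parsing are removed from this file, and psci_power_state_is_valid() is exported for callers that build those values elsewhere. A hedged sketch of what a caller now does:

/* Sketch: enter an idle state described directly by a PSCI power_state value. */
static int enter_psci_state(u32 power_state)
{
	if (!psci_power_state_is_valid(power_state))
		return -EINVAL;

	/* Context-losing states go through cpu_suspend() internally. */
	return psci_cpu_suspend_enter(power_state);
}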
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index f3659443f8c2..6a445397771c 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -228,8 +228,11 @@ out_free_cpus:
static void dummy_callback(struct timer_list *unused) {}
-static int suspend_cpu(int index, bool broadcast)
+static int suspend_cpu(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
int ret;
arch_cpu_idle_enter();
@@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast)
}
}
- /*
- * Replicate the common ARM cpuidle enter function
- * (arm_enter_idle_state).
- */
- ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+ ret = state->enter(dev, drv, index);
if (broadcast)
tick_broadcast_exit();
@@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg)
* doesn't use PSCI).
*/
for (index = 1; index < drv->state_count; ++index) {
- struct cpuidle_state *state = &drv->states[index];
- bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
int ret;
+ struct cpuidle_state *state = &drv->states[index];
/*
* Set the timer to wake this CPU up in some time (which
@@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg)
/* IRQs must be disabled during suspend operations. */
local_irq_disable();
- ret = suspend_cpu(index, broadcast);
+ ret = suspend_cpu(dev, drv, index);
/*
* We have woken up. Re-enable IRQs to handle any
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 2ddc118dba1b..4802ab170fe5 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -425,21 +426,23 @@ EXPORT_SYMBOL(qcom_scm_set_remote_state);
* @mem_sz: size of the region.
* @srcvm: vmid for current set of owners, each set bit in
* flag indicate a unique owner
- * @newvm: array having new owners and corrsponding permission
+ * @newvm: array having new owners and corresponding permission
* flags
* @dest_cnt: number of owners in next set.
*
- * Return negative errno on failure, 0 on success, with @srcvm updated.
+ * Return negative errno on failure or 0 on success with @srcvm updated.
*/
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
unsigned int *srcvm,
- struct qcom_scm_vmperm *newvm, int dest_cnt)
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt)
{
struct qcom_scm_current_perm_info *destvm;
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
phys_addr_t ptr_phys;
+ dma_addr_t ptr_dma;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -447,52 +450,50 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
int next_vm;
__le32 *src;
void *ptr;
- int ret;
- int len;
- int i;
+ int ret, i, b;
+ unsigned long srcvm_bits = *srcvm;
- src_sz = hweight_long(*srcvm) * sizeof(*src);
+ src_sz = hweight_long(srcvm_bits) * sizeof(*src);
mem_to_map_sz = sizeof(*mem_to_map);
dest_sz = dest_cnt * sizeof(*destvm);
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
+ ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
- len = hweight_long(*srcvm);
- for (i = 0; i < len; i++) {
- src[i] = cpu_to_le32(ffs(*srcvm) - 1);
- *srcvm ^= 1 << (ffs(*srcvm) - 1);
- }
+ i = 0;
+ for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
+ src[i++] = cpu_to_le32(b);
/* Fill details of mem buff to map */
mem_to_map = ptr + ALIGN(src_sz, SZ_64);
mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
- mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
- mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+ mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map->mem_size = cpu_to_le64(mem_sz);
next_vm = 0;
/* Fill details of next vmid detail */
destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
- for (i = 0; i < dest_cnt; i++) {
- destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
- destvm[i].perm = cpu_to_le32(newvm[i].perm);
- destvm[i].ctx = 0;
- destvm[i].ctx_size = 0;
- next_vm |= BIT(newvm[i].vmid);
+ for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+ destvm->vmid = cpu_to_le32(newvm->vmid);
+ destvm->perm = cpu_to_le32(newvm->perm);
+ destvm->ctx = 0;
+ destvm->ctx_size = 0;
+ next_vm |= BIT(newvm->vmid);
}
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
if (ret) {
dev_err(__scm->dev,
- "Assign memory protection call failed %d.\n", ret);
+ "Assign memory protection call failed %d\n", ret);
return -EINVAL;
}
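The rewritten loop above walks the source-VM bitmap with for_each_set_bit() instead of repeatedly clearing bits with ffs(), and it works on a local copy (srcvm_bits) rather than destructively modifying *srcvm while building the list. A minimal standalone sketch of the same bitmap-to-list conversion, with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

/* Copy each set bit position of 'vmids' into 'out'; returns the count. */
static unsigned int vmid_bitmap_to_list(unsigned long vmids, u32 *out)
{
	unsigned int n = 0;
	int b;

	for_each_set_bit(b, &vmids, BITS_PER_LONG)
		out[n++] = b;

	return n;
}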
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 000000000000..bb008c019920
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#define RSU_STATE_MASK GENMASK_ULL(31, 0)
+#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
+#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
+#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
+
+#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
+
+#define INVALID_RETRY_COUNTER 0xFFFFFFFF
+
+typedef void (*rsu_callback)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data);
+/**
+ * struct stratix10_rsu_priv - rsu data structure
+ * @chan: pointer to the allocated service channel
+ * @client: active service client
+ * @completion: state for callback completion
+ * @lock: a mutex to protect callback completion state
+ * @status.current_image: address of image currently running in flash
+ * @status.fail_image: address of failed image in flash
+ * @status.version: the version number of RSU firmware
+ * @status.state: the state of RSU system
+ * @status.error_details: error code
+ * @status.error_location: the error offset inside the image that failed
+ * @retry_counter: the current image's retry counter
+ */
+struct stratix10_rsu_priv {
+ struct stratix10_svc_chan *chan;
+ struct stratix10_svc_client client;
+ struct completion completion;
+ struct mutex lock;
+ struct {
+ unsigned long current_image;
+ unsigned long fail_image;
+ unsigned int version;
+ unsigned int state;
+ unsigned int error_details;
+ unsigned int error_location;
+ } status;
+ unsigned int retry_counter;
+};
+
+/**
+ * rsu_status_callback() - Status callback from Intel Service Layer
+ * @client: pointer to service client
+ * @data: pointer to callback data structure
+ *
+ * Callback from the Intel service layer for an RSU status request. The
+ * status is only updated after a system reboot, so a status query is
+ * made during driver probe to pick up the current values.
+ */
+static void rsu_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ priv->status.version = FIELD_GET(RSU_VERSION_MASK,
+ res->a2);
+ priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
+ priv->status.fail_image = res->a1;
+ priv->status.current_image = res->a0;
+ priv->status.error_location =
+ FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
+ priv->status.error_details =
+ FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
+ } else {
+ dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
+ res->a0);
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->status.fail_image = 0;
+ priv->status.current_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ }
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_command_callback() - Update callback from Intel Service Layer
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU commands.
+ */
+static void rsu_command_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+
+ if (data->status != BIT(SVC_STATUS_RSU_OK))
+ dev_err(client->dev, "RSU returned status is %i\n",
+ data->status);
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_retry_callback() - Callback from Intel service layer for getting
+ * the current image's retry counter from firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from the Intel service layer for the retry counter, which is
+ * used by user space to know how many times the image is still allowed to
+ * reload itself before giving up and starting the RSU fail-over flow.
+ */
+static void rsu_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *counter = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_RSU_OK))
+ priv->retry_counter = *counter;
+ else
+ dev_err(client->dev, "Failed to get retry counter %i\n",
+ data->status);
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_send_msg() - send a message to Intel service layer
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, the bitstream address or notify status
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Start an Intel service layer transaction to perform the SMC call that
+ * is necessary to get the RSU boot log or to set the address of the
+ * bitstream to boot after reboot.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static int rsu_send_msg(struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_callback callback)
+{
+ struct stratix10_svc_client_msg msg;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ reinit_completion(&priv->completion);
+ priv->client.receive_cb = callback;
+
+ msg.command = command;
+ if (arg)
+ msg.arg[0] = arg;
+
+ ret = stratix10_svc_send(priv->chan, &msg);
+ if (ret < 0)
+ goto status_done;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->completion,
+ RSU_TIMEOUT);
+ if (!ret) {
+ dev_err(priv->client.dev,
+ "timeout waiting for SMC call\n");
+ ret = -ETIMEDOUT;
+ goto status_done;
+ } else if (ret < 0) {
+ dev_err(priv->client.dev,
+ "error %d waiting for SMC call\n", ret);
+ goto status_done;
+ } else {
+ ret = 0;
+ }
+
+status_done:
+ stratix10_svc_done(priv->chan);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
+ * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
+ * related. They allow user space software to query the configuration system
+ * status and to request optional reboot behavior specific to Intel FPGAs.
+ */
+
+static ssize_t current_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.current_image);
+}
+
+static ssize_t fail_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.version);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.state);
+}
+
+static ssize_t error_location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_location);
+}
+
+static ssize_t error_details_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_details);
+}
+
+static ssize_t retry_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->retry_counter);
+}
+
+static ssize_t reboot_image_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long address;
+ int ret;
+
+	if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &address);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
+ address, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU update returned %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t notify_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long status;
+ int ret;
+
+	if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &status);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
+ status, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU notify returned %i\n", ret);
+ return ret;
+ }
+
+ /* to get the updated state */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ return ret;
+ }
+
+	/* only firmware version 19.3 or later supports the retry counter feature */
+ if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
+ 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev,
+ "Error, getting RSU retry %i\n", ret);
+ return ret;
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(current_image);
+static DEVICE_ATTR_RO(fail_image);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(error_location);
+static DEVICE_ATTR_RO(error_details);
+static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_WO(reboot_image);
+static DEVICE_ATTR_WO(notify);
+
+static struct attribute *rsu_attrs[] = {
+ &dev_attr_current_image.attr,
+ &dev_attr_fail_image.attr,
+ &dev_attr_state.attr,
+ &dev_attr_version.attr,
+ &dev_attr_error_location.attr,
+ &dev_attr_error_details.attr,
+ &dev_attr_retry_counter.attr,
+ &dev_attr_reboot_image.attr,
+ &dev_attr_notify.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rsu);
+
+static int stratix10_rsu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_rsu_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client.dev = dev;
+ priv->client.receive_cb = NULL;
+ priv->client.priv = priv;
+ priv->status.current_image = 0;
+ priv->status.fail_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->retry_counter = INVALID_RETRY_COUNTER;
+
+ mutex_init(&priv->lock);
+ priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+ SVC_CLIENT_RSU);
+ if (IS_ERR(priv->chan)) {
+ dev_err(dev, "couldn't get service channel %s\n",
+ SVC_CLIENT_RSU);
+ return PTR_ERR(priv->chan);
+ }
+
+ init_completion(&priv->completion);
+ platform_set_drvdata(pdev, priv);
+
+ /* get the initial state from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+	/* only firmware version 19.3 or later supports the retry counter feature */
+ if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
+ rsu_retry_callback);
+ if (ret) {
+ dev_err(dev,
+ "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+ }
+
+ return ret;
+}
+
+static int stratix10_rsu_remove(struct platform_device *pdev)
+{
+ struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
+
+ stratix10_svc_free_channel(priv->chan);
+ return 0;
+}
+
+static struct platform_driver stratix10_rsu_driver = {
+ .probe = stratix10_rsu_probe,
+ .remove = stratix10_rsu_remove,
+ .driver = {
+ .name = "stratix10-rsu",
+ .dev_groups = rsu_groups,
+ },
+};
+
+module_platform_driver(stratix10_rsu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Remote System Update Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
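The attributes registered through rsu_groups above surface the RSU status as read-only sysfs files and accept writes to reboot_image and notify. A hedged user-space sketch that reads one of them; the exact device path depends on how the platform device is named on a given system and is an assumption here:

#include <stdio.h>

/* Illustrative path; verify against the actual platform device name. */
#define RETRY_ATTR "/sys/devices/platform/stratix10-rsu.0/retry_counter"

int main(void)
{
	unsigned int retry = 0;
	FILE *f = fopen(RETRY_ATTR, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%x", &retry) == 1)
		printf("RSU retry counter: %u\n", retry);
	fclose(f);
	return 0;
}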
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 6e6514825ad0..b485321189e1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -38,6 +38,9 @@
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
+/* stratix10 service layer clients */
+#define STRATIX10_RSU "stratix10-rsu"
+
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -45,6 +48,14 @@ typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
struct stratix10_svc_chan;
/**
+ * struct stratix10_svc - svc private data
+ * @stratix10_svc_rsu: pointer to stratix10 RSU device
+ */
+struct stratix10_svc {
+ struct platform_device *stratix10_svc_rsu;
+};
+
+/**
* struct stratix10_svc_sh_memory - service shared memory structure
* @sync_complete: state for a completion
* @addr: physical address of shared memory block
@@ -296,7 +307,12 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
break;
case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ break;
+ case COMMAND_RSU_RETRY:
cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->kaddr1 = &res.a1;
break;
default:
pr_warn("it shouldn't happen\n");
@@ -386,6 +402,16 @@ static int svc_normal_to_secure_thread(void *data)
a1 = pdata->arg[0];
a2 = 0;
break;
+ case COMMAND_RSU_NOTIFY:
+ a0 = INTEL_SIP_SMC_RSU_NOTIFY;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
+ a1 = 0;
+ a2 = 0;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -438,7 +464,28 @@ static int svc_normal_to_secure_thread(void *data)
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
+ switch (pdata->command) {
+ /* for FPGA mgr */
+ case COMMAND_RECONFIG_DATA_CLAIM:
+ case COMMAND_RECONFIG:
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ case COMMAND_RECONFIG_STATUS:
+ cbdata->status =
+ BIT(SVC_STATUS_RECONFIG_ERROR);
+ break;
+
+ /* for RSU */
+ case COMMAND_RSU_STATUS:
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ case COMMAND_RSU_RETRY:
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_ERROR);
+ break;
+ }
+
cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
@@ -530,7 +577,7 @@ static int svc_get_sh_memory(struct platform_device *pdev,
if (!sh_memory->addr || !sh_memory->size) {
dev_err(dev,
- "fails to get shared memory info from secure world\n");
+ "failed to get shared memory info from secure world\n");
return -ENOMEM;
}
@@ -768,7 +815,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
"svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
- "fails to create svc_smc_hvc_thread\n");
+ "failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
@@ -913,6 +960,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
struct stratix10_svc_chan *chans;
struct gen_pool *genpool;
struct stratix10_svc_sh_memory *sh_memory;
+ struct stratix10_svc *svc;
+
svc_invoke_fn *invoke_fn;
size_t fifo_size;
int ret;
@@ -957,7 +1006,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
- dev_err(dev, "fails to allocate FIFO\n");
+ dev_err(dev, "failed to allocate FIFO\n");
return ret;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -975,6 +1024,24 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
+ /* add svc client device(s) */
+ svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+ if (!svc->stratix10_svc_rsu) {
+ dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add(svc->stratix10_svc_rsu);
+ if (ret) {
+ platform_device_put(svc->stratix10_svc_rsu);
+ return ret;
+ }
+ dev_set_drvdata(dev, svc);
+
pr_info("Intel Service Layer Driver Initialized\n");
return ret;
@@ -982,8 +1049,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
static int stratix10_svc_drv_remove(struct platform_device *pdev)
{
+ struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ platform_device_unregister(svc->stratix10_svc_rsu);
+
kfifo_free(&ctrl->svc_fifo);
if (ctrl->task) {
kthread_stop(ctrl->task);
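The probe path above registers the RSU client as a child platform device so the stratix10-rsu driver can bind to it. The allocate/add/put sequence is the usual pattern for dynamically created platform devices; a minimal sketch of the same pattern in isolation (the device name is illustrative):

static struct platform_device *spawn_child(const char *name)
{
	struct platform_device *pdev = platform_device_alloc(name, 0);

	if (!pdev)
		return NULL;

	if (platform_device_add(pdev)) {
		/* add failed: put drops the reference taken by alloc */
		platform_device_put(pdev);
		return NULL;
	}
	/* a registered device is later removed with platform_device_unregister() */
	return pdev;
}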
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index cdee0b45943d..4126be9e3216 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -635,6 +635,7 @@ fail:
/**
* ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ * that can be shared with other hosts.
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
@@ -642,12 +643,30 @@ fail:
* usage count by balancing get_device with put_device. No refcounting is
* managed by driver for that purpose.
*
- * NOTE: The request is for exclusive access for the processor.
- *
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
+ * TISCI that is exclusively owned by the
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
return ti_sci_set_device_state(handle, id,
MSG_FLAG_DEVICE_EXCLUSIVE,
MSG_DEVICE_SW_STATE_ON);
@@ -666,6 +685,26 @@ static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
*/
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
+ * TISCI that is exclusively owned by
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
return ti_sci_set_device_state(handle, id,
MSG_FLAG_DEVICE_EXCLUSIVE,
MSG_DEVICE_SW_STATE_RETENTION);
@@ -2894,7 +2933,9 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
core_ops->reboot_device = ti_sci_cmd_core_reboot;
dops->get_device = ti_sci_cmd_get_device;
+ dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
dops->idle_device = ti_sci_cmd_idle_device;
+ dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
dops->put_device = ti_sci_cmd_put_device;
dops->is_valid = ti_sci_cmd_dev_is_valid;
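With the split above, get_device/idle_device request shared access (flags 0) while the *_exclusive variants pass MSG_FLAG_DEVICE_EXCLUSIVE. A hedged sketch of a consumer choosing between them through the handle's device ops; the ops path (handle->ops.dev_ops) is assumed from context rather than shown in this hunk:

/* Sketch: power up a TISCI-managed device, optionally claiming exclusive ownership. */
static int power_up_device(const struct ti_sci_handle *handle, u32 dev_id,
			   bool exclusive)
{
	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;

	return exclusive ? dops->get_device_exclusive(handle, dev_id)
			 : dops->get_device(handle, dev_id);
}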
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
new file mode 100644
index 000000000000..72be58960e54
--- /dev/null
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox rWTM firmware driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/hw_random.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "turris-mox-rwtm"
+
+/*
+ * The macros and constants below come from Turris Mox's rWTM firmware code.
+ * This firmware is open source and its sources can be found at
+ * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
+ */
+
+#define MBOX_STS_SUCCESS (0 << 30)
+#define MBOX_STS_FAIL (1 << 30)
+#define MBOX_STS_BADCMD (2 << 30)
+#define MBOX_STS_ERROR(s) ((s) & (3 << 30))
+#define MBOX_STS_VALUE(s) (((s) >> 10) & 0xfffff)
+#define MBOX_STS_CMD(s) ((s) & 0x3ff)
+
+enum mbox_cmd {
+ MBOX_CMD_GET_RANDOM = 1,
+ MBOX_CMD_BOARD_INFO = 2,
+ MBOX_CMD_ECDSA_PUB_KEY = 3,
+ MBOX_CMD_HASH = 4,
+ MBOX_CMD_SIGN = 5,
+ MBOX_CMD_VERIFY = 6,
+
+ MBOX_CMD_OTP_READ = 7,
+ MBOX_CMD_OTP_WRITE = 8,
+};
+
+struct mox_kobject;
+
+struct mox_rwtm {
+ struct device *dev;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox;
+ struct mox_kobject *kobj;
+ struct hwrng hwrng;
+
+ struct armada_37xx_rwtm_rx_msg reply;
+
+ void *buf;
+ dma_addr_t buf_phys;
+
+ struct mutex busy;
+ struct completion cmd_done;
+
+ /* board information */
+ int has_board_info;
+ u64 serial_number;
+ int board_version, ram_size;
+ u8 mac_address1[6], mac_address2[6];
+
+ /* public key burned in eFuse */
+ int has_pubkey;
+ u8 pubkey[135];
+};
+
+struct mox_kobject {
+ struct kobject kobj;
+ struct mox_rwtm *rwtm;
+};
+
+static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
+{
+ return &rwtm->kobj->kobj;
+}
+
+static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
+{
+ return container_of(kobj, struct mox_kobject, kobj)->rwtm;
+}
+
+static void mox_kobj_release(struct kobject *kobj)
+{
+ kfree(to_rwtm(kobj)->kobj);
+}
+
+static struct kobj_type mox_kobj_ktype = {
+ .release = mox_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static int mox_kobj_create(struct mox_rwtm *rwtm)
+{
+ rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
+ if (!rwtm->kobj)
+ return -ENOMEM;
+
+ kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
+ if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
+ kobject_put(rwtm_to_kobj(rwtm));
+ return -ENXIO;
+ }
+
+ rwtm->kobj->rwtm = rwtm;
+
+ return 0;
+}
+
+#define MOX_ATTR_RO(name, format, cat) \
+static ssize_t \
+name##_show(struct kobject *kobj, struct kobj_attribute *a, \
+ char *buf) \
+{ \
+ struct mox_rwtm *rwtm = to_rwtm(kobj); \
+ if (!rwtm->has_##cat) \
+ return -ENODATA; \
+ return sprintf(buf, format, rwtm->name); \
+} \
+static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
+
+MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
+MOX_ATTR_RO(board_version, "%i\n", board_info);
+MOX_ATTR_RO(ram_size, "%i\n", board_info);
+MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
+MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
+MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+
+static int mox_get_status(enum mbox_cmd cmd, u32 retval)
+{
+ if (MBOX_STS_CMD(retval) != cmd ||
+ MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+ return -EIO;
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
+ return -(int)MBOX_STS_VALUE(retval);
+ else
+ return MBOX_STS_VALUE(retval);
+}
+
+static const struct attribute *mox_rwtm_attrs[] = {
+ &mox_attr_serial_number.attr,
+ &mox_attr_board_version.attr,
+ &mox_attr_ram_size.attr,
+ &mox_attr_mac_address1.attr,
+ &mox_attr_mac_address2.attr,
+ &mox_attr_pubkey.attr,
+ NULL
+};
+
+static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+{
+ struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+ struct armada_37xx_rwtm_rx_msg *msg = data;
+
+ rwtm->reply = *msg;
+ complete(&rwtm->cmd_done);
+}
+
+static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
+{
+ mac[0] = t1 >> 8;
+ mac[1] = t1;
+ mac[2] = t2 >> 24;
+ mac[3] = t2 >> 16;
+ mac[4] = t2 >> 8;
+ mac[5] = t2;
+}
+
+static int mox_get_board_info(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ int ret;
+
+ msg.command = MBOX_CMD_BOARD_INFO;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+ if (ret < 0)
+ return ret;
+
+ ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ if (ret < 0 && ret != -ENODATA) {
+ return ret;
+ } else if (ret == -ENODATA) {
+ dev_warn(rwtm->dev,
+ "Board does not have manufacturing information burned!\n");
+ } else {
+ rwtm->serial_number = reply->status[1];
+ rwtm->serial_number <<= 32;
+ rwtm->serial_number |= reply->status[0];
+ rwtm->board_version = reply->status[2];
+ rwtm->ram_size = reply->status[3];
+ reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
+ reply->status[5]);
+ reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
+ reply->status[7]);
+ rwtm->has_board_info = 1;
+
+ pr_info("Turris Mox serial number %016llX\n",
+ rwtm->serial_number);
+ pr_info(" board version %i\n", rwtm->board_version);
+ pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
+ }
+
+ msg.command = MBOX_CMD_ECDSA_PUB_KEY;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+ if (ret < 0)
+ return ret;
+
+ ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ if (ret < 0 && ret != -ENODATA) {
+ return ret;
+ } else if (ret == -ENODATA) {
+ dev_warn(rwtm->dev, "Board has no public key burned!\n");
+ } else {
+ u32 *s = reply->status;
+
+ rwtm->has_pubkey = 1;
+ sprintf(rwtm->pubkey,
+ "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
+ ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
+ s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+ }
+
+ return 0;
+}
+
+static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
+ if (max > 4096)
+ max = 4096;
+
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = (max + 3) & ~3;
+
+ if (!wait) {
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+ } else {
+ mutex_lock(&rwtm->busy);
+ }
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ memcpy(data, rwtm->buf, max);
+ ret = max;
+
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+static int turris_mox_rwtm_probe(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
+ if (!rwtm)
+ return -ENOMEM;
+
+ rwtm->dev = dev;
+ rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
+ GFP_KERNEL);
+ if (!rwtm->buf)
+ return -ENOMEM;
+
+ ret = mox_kobj_create(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
+ return ret;
+ }
+
+ ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create sysfs files!\n");
+ goto put_kobj;
+ }
+
+ platform_set_drvdata(pdev, rwtm);
+
+ mutex_init(&rwtm->busy);
+
+ rwtm->mbox_client.dev = dev;
+ rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+
+ rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
+ if (IS_ERR(rwtm->mbox)) {
+ ret = PTR_ERR(rwtm->mbox);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot request mailbox channel: %i\n",
+ ret);
+ goto remove_files;
+ }
+
+ init_completion(&rwtm->cmd_done);
+
+ ret = mox_get_board_info(rwtm);
+ if (ret < 0)
+ dev_warn(dev, "Cannot read board information: %i\n", ret);
+
+ rwtm->hwrng.name = DRIVER_NAME "_hwrng";
+ rwtm->hwrng.read = mox_hwrng_read;
+ rwtm->hwrng.priv = (unsigned long) rwtm;
+ rwtm->hwrng.quality = 1024;
+
+ ret = devm_hwrng_register(dev, &rwtm->hwrng);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register HWRNG: %i\n", ret);
+ goto free_channel;
+ }
+
+ return 0;
+
+free_channel:
+ mbox_free_channel(rwtm->mbox);
+remove_files:
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+put_kobj:
+ kobject_put(rwtm_to_kobj(rwtm));
+ return ret;
+}
+
+static int turris_mox_rwtm_remove(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ kobject_put(rwtm_to_kobj(rwtm));
+ mbox_free_channel(rwtm->mbox);
+
+ return 0;
+}
+
+static const struct of_device_id turris_mox_rwtm_match[] = {
+ { .compatible = "cznic,turris-mox-rwtm", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
+
+static struct platform_driver turris_mox_rwtm_driver = {
+ .probe = turris_mox_rwtm_probe,
+ .remove = turris_mox_rwtm_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = turris_mox_rwtm_match,
+ },
+};
+module_platform_driver(turris_mox_rwtm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
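The MBOX_STS_* macros near the top of this file pack the command, an error class and a 20-bit value into the single 32-bit status word returned by the rWTM firmware. A small sketch that unpacks one status word with those macros, mirroring what mox_get_status() inspects:

/* Sketch: decode a raw rWTM mailbox status word for a given command. */
static void dump_rwtm_status(enum mbox_cmd cmd, u32 retval)
{
	pr_debug("cmd=%u (expected %u) error=0x%08x value=0x%05x\n",
		 (u32)MBOX_STS_CMD(retval), (u32)cmd,
		 (u32)MBOX_STS_ERROR(retval), (u32)MBOX_STS_VALUE(retval));
}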
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 474f304ec109..73c779e920ed 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -40,16 +40,17 @@ config ALTERA_PR_IP_CORE_PLAT
config FPGA_MGR_ALTERA_PS_SPI
tristate "Altera FPGA Passive Serial over SPI"
depends on SPI
+ select BITREVERSE
help
FPGA manager driver support for Altera Arria/Cyclone/Stratix
using the passive serial interface over SPI.
config FPGA_MGR_ALTERA_CVP
- tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ tristate "Altera CvP FPGA Manager"
depends on PCI
help
- FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
- and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V,
+ Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe.
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 312b9371742f..4865b74b00a4 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -39,8 +39,9 @@ obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
-dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+dfl-afu-objs += dfl-afu-error.o
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 770915fb97f9..4e0edb60bfba 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,10 +22,10 @@
#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
/* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID 0x200
+#define VSE_PCIE_EXT_CAP_ID 0x0
#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
-#define VSE_CVP_STATUS 0x21c /* 32bit */
+#define VSE_CVP_STATUS 0x1c /* 32bit */
#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
@@ -33,41 +33,93 @@
#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
-#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */
+#define VSE_CVP_MODE_CTRL 0x20 /* 32bit */
#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
-#define VSE_CVP_DATA 0x228 /* 32bit */
-#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */
+#define VSE_CVP_DATA 0x28 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x2c /* 32bit */
#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+#define VSE_CVP_PROG_CTRL_MASK GENMASK(1, 0)
-#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */
+#define VSE_UNCOR_ERR_STATUS 0x34 /* 32bit */
#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+#define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */
+/* V2 Defines */
+#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
+
+#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CHECK_CREDIT_US 10
+#define V2_POLL_TIMEOUT_US 1000000
+#define V2_USER_TIMEOUT_US 500000
+
+#define V1_POLL_TIMEOUT_US 10
+
#define DRV_NAME "altera-cvp"
#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+/* Write block sizes */
+#define ALTERA_CVP_V1_SIZE 4
+#define ALTERA_CVP_V2_SIZE 4096
+
/* Optional CvP config error status check for debugging */
static bool altera_cvp_chkcfg;
+struct cvp_priv;
+
struct altera_cvp_conf {
struct fpga_manager *mgr;
struct pci_dev *pci_dev;
void __iomem *map;
- void (*write_data)(struct altera_cvp_conf *, u32);
+ void (*write_data)(struct altera_cvp_conf *conf,
+ u32 data);
char mgr_name[64];
u8 numclks;
+ u32 sent_packets;
+ u32 vsec_offset;
+ const struct cvp_priv *priv;
+};
+
+struct cvp_priv {
+ void (*switch_clk)(struct altera_cvp_conf *conf);
+ int (*clear_state)(struct altera_cvp_conf *conf);
+ int (*wait_credit)(struct fpga_manager *mgr, u32 blocks);
+ size_t block_size;
+ int poll_time_us;
+ int user_time_us;
};
+static int altera_read_config_byte(struct altera_cvp_conf *conf,
+ int where, u8 *val)
+{
+ return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_read_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 *val)
+{
+ return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
+static int altera_write_config_dword(struct altera_cvp_conf *conf,
+ int where, u32 val)
+{
+ return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where,
+ val);
+}
+
static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
{
struct altera_cvp_conf *conf = mgr->priv;
u32 status;
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
if (status & VSE_CVP_STATUS_CFG_DONE)
return FPGA_MGR_STATE_OPERATING;
@@ -85,7 +137,8 @@ static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
{
- pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+ pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
+ val);
}
/* switches between CvP clock and internal clock */
@@ -95,10 +148,10 @@ static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
u32 val;
/* set 1 CVP clock cycle for every CVP Data Register Write */
- pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
- pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
for (i = 0; i < CVP_DUMMY_WR; i++)
conf->write_data(conf, 0); /* dummy data, could be any value */
@@ -115,7 +168,7 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
retries++;
do {
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
if ((val & status_mask) == status_val)
return 0;
@@ -126,32 +179,136 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
return -ETIMEDOUT;
}
+static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+ int ret;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
+ if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+/*
+ * CvP Version2 Functions
+ * Recent Intel FPGAs use a credit mechanism to throttle incoming
+ * bitstreams and a different method of clearing the state.
+ */
+
+static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
+{
+ u32 val;
+ int ret;
+
+ /* Clear the START_XFER and CVP_CONFIG bits */
+ ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Program Control Register\n");
+ return ret;
+ }
+
+ val &= ~VSE_CVP_PROG_CTRL_MASK;
+ ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error writing CVP Program Control Register\n");
+ return ret;
+ }
+
+ return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
+}
+
+static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
+ u32 blocks)
+{
+ u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
+ struct altera_cvp_conf *conf = mgr->priv;
+ int ret;
+ u8 val;
+
+ do {
+ ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "Error reading CVP Credit Register\n");
+ return ret;
+ }
+
+ /* Return if there is space in FIFO */
+ if (val - (u8)conf->sent_packets)
+ return 0;
+
+ ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
+ if (ret) {
+ dev_err(&conf->pci_dev->dev,
+ "CE Bit error credit reg[0x%x]:sent[0x%x]\n",
+ val, conf->sent_packets);
+ return -EAGAIN;
+ }
+
+ /* Limit the check credit byte traffic */
+ usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
+ } while (timeout--);
+
+ dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_send_block(struct altera_cvp_conf *conf,
+ const u32 *data, size_t len)
+{
+ u32 mask, words = len / sizeof(u32);
+ int i, remainder;
+
+ for (i = 0; i < words; i++)
+ conf->write_data(conf, *data++);
+
+ /* write up to 3 trailing bytes, if any */
+ remainder = len % sizeof(u32);
+ if (remainder) {
+ mask = BIT(remainder * 8) - 1;
+ if (mask)
+ conf->write_data(conf, *data & mask);
+ }
+
+ return 0;
+}
+
static int altera_cvp_teardown(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
int ret;
u32 val;
/* STEP 12 - reset START_XFER bit */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
val &= ~VSE_CVP_PROG_CTRL_START_XFER;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/* STEP 13 - reset CVP_CONFIG bit */
val &= ~VSE_CVP_PROG_CTRL_CONFIG;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/*
* STEP 14
* - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
* writes to the HIP
*/
- altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
/* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
- ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
+ conf->priv->poll_time_us);
if (ret)
dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
@@ -163,7 +320,6 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
const char *buf, size_t count)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
u32 iflags, val;
int ret;
@@ -183,7 +339,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
conf->numclks = 1; /* for uncompressed and unencrypted images */
/* STEP 1 - read CVP status and check CVP_EN flag */
- pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+ altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
if (!(val & VSE_CVP_STATUS_CVP_EN)) {
dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
return -ENODEV;
@@ -201,30 +357,42 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
* - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
*/
/* switch from fabric to PMA clock */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/* set CVP mode */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val |= VSE_CVP_MODE_CTRL_CVP_MODE;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/*
* STEP 3
* - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
*/
- altera_cvp_dummy_write(conf);
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (conf->priv->clear_state) {
+ ret = conf->priv->clear_state(conf);
+ if (ret) {
+ dev_err(&mgr->dev, "Problem clearing out state\n");
+ return ret;
+ }
+ }
+
+ conf->sent_packets = 0;
/* STEP 4 - set CVP_CONFIG bit */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
/* request control block to begin transfer using CVP */
val |= VSE_CVP_PROG_CTRL_CONFIG;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
- /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
- VSE_CVP_STATUS_CFG_RDY, 10);
+ VSE_CVP_STATUS_CFG_RDY,
+ conf->priv->poll_time_us);
if (ret) {
dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
return ret;
@@ -234,33 +402,28 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
* STEP 6
* - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
*/
- altera_cvp_dummy_write(conf);
+ if (conf->priv->switch_clk)
+ conf->priv->switch_clk(conf);
+
+ if (altera_cvp_chkcfg) {
+ ret = altera_cvp_chk_error(mgr, 0);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+ }
/* STEP 7 - set START_XFER */
- pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
val |= VSE_CVP_PROG_CTRL_START_XFER;
- pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
/* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
- val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
- val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
-
- return 0;
-}
-
-static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
-{
- struct altera_cvp_conf *conf = mgr->priv;
- u32 val;
-
- /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
- pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
- if (val & VSE_CVP_STATUS_CFG_ERR) {
- dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
- bytes);
- return -EPROTO;
+ if (conf->priv->switch_clk) {
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
}
return 0;
}
@@ -269,20 +432,32 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
size_t count)
{
struct altera_cvp_conf *conf = mgr->priv;
+ size_t done, remaining, len;
const u32 *data;
- size_t done, remaining;
int status = 0;
- u32 mask;
/* STEP 9 - write 32-bit data from RBF file to CVP data register */
data = (u32 *)buf;
remaining = count;
done = 0;
- while (remaining >= 4) {
- conf->write_data(conf, *data++);
- done += 4;
- remaining -= 4;
+ while (remaining) {
+ /* Use credit throttling if available */
+ if (conf->priv->wait_credit) {
+ status = conf->priv->wait_credit(mgr, done);
+ if (status) {
+ dev_err(&conf->pci_dev->dev,
+ "Wait Credit ERR: 0x%x\n", status);
+ return status;
+ }
+ }
+
+ len = min(conf->priv->block_size, remaining);
+ altera_cvp_send_block(conf, data, len);
+ data += len / sizeof(u32);
+ done += len;
+ remaining -= len;
+ conf->sent_packets++;
/*
* STEP 10 (optional) and STEP 11
@@ -300,11 +475,6 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
}
}
- /* write up to 3 trailing bytes, if any */
- mask = BIT(remaining * 8) - 1;
- if (mask)
- conf->write_data(conf, *data & mask);
-
if (altera_cvp_chkcfg)
status = altera_cvp_chk_error(mgr, count);
@@ -315,31 +485,30 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_cvp_conf *conf = mgr->priv;
- struct pci_dev *pdev = conf->pci_dev;
+ u32 mask, val;
int ret;
- u32 mask;
- u32 val;
ret = altera_cvp_teardown(mgr, info);
if (ret)
return ret;
/* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
- pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+ altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
return -EPROTO;
}
/* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
- pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
- pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+ altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
/* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
- ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+ ret = altera_cvp_wait_status(conf, mask, mask,
+ conf->priv->user_time_us);
if (ret)
dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
@@ -353,6 +522,21 @@ static const struct fpga_manager_ops altera_cvp_ops = {
.write_complete = altera_cvp_write_complete,
};
+static const struct cvp_priv cvp_priv_v1 = {
+ .switch_clk = altera_cvp_dummy_write,
+ .block_size = ALTERA_CVP_V1_SIZE,
+ .poll_time_us = V1_POLL_TIMEOUT_US,
+ .user_time_us = TIMEOUT_US,
+};
+
+static const struct cvp_priv cvp_priv_v2 = {
+ .clear_state = altera_cvp_v2_clear_state,
+ .wait_credit = altera_cvp_v2_wait_for_credit,
+ .block_size = ALTERA_CVP_V2_SIZE,
+ .poll_time_us = V2_POLL_TIMEOUT_US,
+ .user_time_us = V2_USER_TIMEOUT_US,
+};
+
static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
@@ -394,22 +578,29 @@ static int altera_cvp_probe(struct pci_dev *pdev,
{
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
+ int ret, offset;
u16 cmd, val;
u32 regval;
- int ret;
+
+ /* Discover the Vendor Specific Offset for this device */
+ offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
+ if (!offset) {
+ dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
+ return -ENODEV;
+ }
/*
* First check if this is the expected FPGA device. PCI config
* space access works without enabling the PCI device, memory
* space access is enabled further down.
*/
- pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+ pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
return -ENODEV;
}
- pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
+ pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
dev_err(&pdev->dev,
"CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
@@ -421,6 +612,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
if (!conf)
return -ENOMEM;
+ conf->vsec_offset = offset;
+
/*
* Enable memory BAR access. We cannot use pci_enable_device() here
* because it will make the driver unusable with FPGA devices that
@@ -445,6 +638,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
conf->pci_dev = pdev;
conf->write_data = altera_cvp_write_data_iomem;
+ if (conf->vsec_offset == V1_VSEC_OFFSET)
+ conf->priv = &cvp_priv_v1;
+ else
+ conf->priv = &cvp_priv_v2;
+
conf->map = pci_iomap(pdev, CVP_BAR, 0);
if (!conf->map) {
dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b293d83143f1..99b9cc0e70f0 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -32,7 +32,9 @@ static int alt_pr_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- return alt_pr_unregister(dev);
+ alt_pr_unregister(dev);
+
+ return 0;
}
static const struct of_device_id alt_pr_of_match[] = {
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index a7a3bf0b5202..2cf25fd5e897 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -201,15 +201,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
}
EXPORT_SYMBOL_GPL(alt_pr_register);
-int alt_pr_unregister(struct device *dev)
+void alt_pr_unregister(struct device *dev)
{
struct fpga_manager *mgr = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
fpga_mgr_unregister(mgr);
-
- return 0;
}
EXPORT_SYMBOL_GPL(alt_pr_unregister);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index a13f224303c6..0221dee8dd4c 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
return -EIO;
}
- if (!IS_ERR(conf->confd)) {
+ if (conf->confd) {
if (!gpiod_get_raw_value_cansleep(conf->confd)) {
dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
return PTR_ERR(conf->status);
}
- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
if (IS_ERR(conf->confd)) {
- dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
- PTR_ERR(conf->confd));
+ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ return PTR_ERR(conf->confd);
+ } else if (!conf->confd) {
+ dev_warn(&spi->dev, "Not using confd gpio");
}
/* Register manager with unique name */
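
The altera-ps-spi change above switches the CONF_DONE line to devm_gpiod_get_optional(), which returns NULL when the GPIO is simply not described and an ERR_PTR() only on a real lookup failure. A small sketch of the resulting three-way handling (hypothetical helper, for illustration only):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

/* Hypothetical helper showing the three outcomes of the optional lookup. */
static struct gpio_desc *get_optional_confd(struct spi_device *spi)
{
	struct gpio_desc *confd;

	confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
	if (IS_ERR(confd))
		return confd;		/* real failure: caller propagates it */

	if (!confd)
		dev_warn(&spi->dev, "confd gpio not used\n");

	return confd;			/* NULL (absent) or a valid descriptor */
}
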
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
new file mode 100644
index 000000000000..c1467ae1a6b6
--- /dev/null
+++ b/drivers/fpga/dfl-afu-error.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@linux.intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+#define PORT_ERROR_MASK 0x8
+#define PORT_ERROR 0x10
+#define PORT_FIRST_ERROR 0x18
+#define PORT_MALFORMED_REQ0 0x20
+#define PORT_MALFORMED_REQ1 0x28
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+/* mask or unmask port errors by the error mask register. */
+static void __afu_port_err_mask(struct device *dev, bool mask)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
+}
+
+static void afu_port_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ mutex_lock(&pdata->lock);
+ __afu_port_err_mask(dev, mask);
+ mutex_unlock(&pdata->lock);
+}
+
+/* clear port errors. */
+static int afu_port_err_clear(struct device *dev, u64 err)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *base_err, *base_hdr;
+ int ret = -EBUSY;
+ u64 v;
+
+ base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+
+ /*
+ * clear Port Errors
+ *
+ * - Check for AP6 State
+ * - Halt Port by keeping Port in reset
+ * - Set PORT Error mask to all 1 to mask errors
+ * - Clear all errors
+ * - Set Port mask to all 0 to enable errors
+ * - All errors start capturing new errors
+ * - Enable Port by pulling the port out of reset
+ */
+
+ /* if device is still in AP6 power state, can not clear any error. */
+ v = readq(base_hdr + PORT_HDR_STS);
+ if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
+ dev_err(dev, "Could not clear errors, device in AP6 state.\n");
+ goto done;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __afu_port_disable(pdev);
+ if (ret)
+ goto done;
+
+ /* Mask all errors */
+ __afu_port_err_mask(dev, true);
+
+ /* Clear errors if err input matches the current port errors. */
+ v = readq(base_err + PORT_ERROR);
+
+ if (v == err) {
+ writeq(v, base_err + PORT_ERROR);
+
+ v = readq(base_err + PORT_FIRST_ERROR);
+ writeq(v, base_err + PORT_FIRST_ERROR);
+ } else {
+ ret = -EINVAL;
+ }
+
+ /* Clear mask */
+ __afu_port_err_mask(dev, false);
+
+ /* Enable the Port by clearing the reset */
+ __afu_port_enable(pdev);
+
+done:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+
+static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ u64 value;
+ int ret;
+
+ if (kstrtou64(buff, 0, &value))
+ return -EINVAL;
+
+ ret = afu_port_err_clear(dev, value);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 error;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ error = readq(base + PORT_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)error);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t first_malformed_req_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 req0, req1;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+
+ mutex_lock(&pdata->lock);
+ req0 = readq(base + PORT_MALFORMED_REQ0);
+ req1 = readq(base + PORT_MALFORMED_REQ1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%016llx%016llx\n",
+ (unsigned long long)req1, (unsigned long long)req0);
+}
+static DEVICE_ATTR_RO(first_malformed_req);
+
+static struct attribute *port_err_attrs[] = {
+ &dev_attr_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_first_malformed_req.attr,
+ NULL,
+};
+
+static umode_t port_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if related private feature is
+ * enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group port_err_group = {
+ .name = "errors",
+ .attrs = port_err_attrs,
+ .is_visible = port_err_attrs_visible,
+};
+
+static int port_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void port_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ afu_port_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id port_err_id_table[] = {
+ {.id = PORT_FEATURE_ID_ERROR,},
+ {0,}
+};
+
+const struct dfl_feature_ops port_err_ops = {
+ .init = port_err_init,
+ .uinit = port_err_uinit,
+};
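
The new port error feature exposes its registers through the "errors" sysfs group above, and afu_port_err_clear() only clears when userspace writes back exactly the value that is currently latched. A hedged userspace sketch of that read-then-echo flow; the device name and sysfs path below are assumptions:

#include <stdio.h>

/* Read the latched port errors and echo the value back to clear them. */
int main(void)
{
	/* path assumed from the "errors" group above; device name is a guess */
	const char *path =
		"/sys/bus/platform/devices/dfl-port.0/errors/errors";
	unsigned long long err;
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%llx", &err) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "0x%llx\n", err);	/* must match the latched value */
	fclose(f);

	return 0;
}
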
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 02baa6a227c0..e4a34dc7947f 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -22,14 +22,17 @@
#include "dfl-afu.h"
/**
- * port_enable - enable a port
+ * __afu_port_enable - enable a port by clearing reset
* @pdev: port platform device.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
- * port_enable function should only be used after port_disable function.
+ * __afu_port_enable function should only be used after __afu_port_disable
+ * function.
+ *
+ * The caller needs to hold lock for protection.
*/
-static void port_enable(struct platform_device *pdev)
+void __afu_port_enable(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
@@ -52,13 +55,14 @@ static void port_enable(struct platform_device *pdev)
#define RST_POLL_TIMEOUT 1000 /* us */
/**
- * port_disable - disable a port
+ * __afu_port_disable - disable a port by holding reset
* @pdev: port platform device.
*
- * Disable Port by setting the port soft reset bit, it puts the port into
- * reset.
+ * Disable Port by setting the port soft reset bit, which puts the port into reset.
+ *
+ * The caller needs to hold lock for protection.
*/
-static int port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
@@ -104,9 +108,9 @@ static int __port_reset(struct platform_device *pdev)
{
int ret;
- ret = port_disable(pdev);
+ ret = __afu_port_disable(pdev);
if (!ret)
- port_enable(pdev);
+ __afu_port_enable(pdev);
return ret;
}
@@ -141,27 +145,267 @@ id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(id);
-static const struct attribute *port_hdr_attrs[] = {
+static ssize_t
+ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
+}
+
+static ssize_t
+ltr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool ltr;
+ u64 v;
+
+ if (kstrtobool(buf, &ltr))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_LATENCY;
+ v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
+ writeq(v, base + PORT_HDR_CTRL);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ltr);
+
+static ssize_t
+ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
+}
+
+static ssize_t
+ap1_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap1_event);
+
+static ssize_t
+ap2_event_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
+}
+
+static ssize_t
+ap2_event_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ bool clear;
+
+ if (kstrtobool(buf, &clear) || !clear)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(ap2_event);
+
+static ssize_t
+power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + PORT_HDR_STS);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
+}
+static DEVICE_ATTR_RO(power_state);
+
+static ssize_t
+userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freq_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freq_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcmd);
+
+static ssize_t
+userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntr_cmd;
+ void __iomem *base;
+
+ if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(userclk_freqcntrcmd);
+
+static ssize_t
+userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
+}
+static DEVICE_ATTR_RO(userclk_freqsts);
+
+static ssize_t
+userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ u64 userclk_freqcntrsts;
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)userclk_freqcntrsts);
+}
+static DEVICE_ATTR_RO(userclk_freqcntrsts);
+
+static struct attribute *port_hdr_attrs[] = {
&dev_attr_id.attr,
+ &dev_attr_ltr.attr,
+ &dev_attr_ap1_event.attr,
+ &dev_attr_ap2_event.attr,
+ &dev_attr_power_state.attr,
+ &dev_attr_userclk_freqcmd.attr,
+ &dev_attr_userclk_freqcntrcmd.attr,
+ &dev_attr_userclk_freqsts.attr,
+ &dev_attr_userclk_freqcntrsts.attr,
NULL,
};
-static int port_hdr_init(struct platform_device *pdev,
- struct dfl_feature *feature)
+static umode_t port_hdr_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+ struct device *dev = kobj_to_dev(kobj);
+ umode_t mode = attr->mode;
+ void __iomem *base;
- port_reset(pdev);
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+
+ if (dfl_feature_revision(base) > 0) {
+ /*
+ * The userclk sysfs interfaces are only visible when the port
+ * revision is 0, as hardware with revision >0 doesn't
+ * support them.
+ */
+ if (attr == &dev_attr_userclk_freqcmd.attr ||
+ attr == &dev_attr_userclk_freqcntrcmd.attr ||
+ attr == &dev_attr_userclk_freqsts.attr ||
+ attr == &dev_attr_userclk_freqcntrsts.attr)
+ mode = 0;
+ }
- return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+ return mode;
}
-static void port_hdr_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static const struct attribute_group port_hdr_group = {
+ .attrs = port_hdr_attrs,
+ .is_visible = port_hdr_attrs_visible,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
{
- dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+ port_reset(pdev);
- sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+ return 0;
}
static long
@@ -185,9 +429,13 @@ port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
return ret;
}
+static const struct dfl_feature_id port_hdr_id_table[] = {
+ {.id = PORT_FEATURE_ID_HEADER,},
+ {0,}
+};
+
static const struct dfl_feature_ops port_hdr_ops = {
.init = port_hdr_init,
- .uinit = port_hdr_uinit,
.ioctl = port_hdr_ioctl,
};
@@ -214,52 +462,91 @@ afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(afu_id);
-static const struct attribute *port_afu_attrs[] = {
+static struct attribute *port_afu_attrs[] = {
&dev_attr_afu_id.attr,
NULL
};
+static umode_t port_afu_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if related private feature is
+ * enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group port_afu_group = {
+ .attrs = port_afu_attrs,
+ .is_visible = port_afu_attrs_visible,
+};
+
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
struct resource *res = &pdev->resource[feature->resource_index];
- int ret;
- dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
+}
- ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
- DFL_PORT_REGION_INDEX_AFU, resource_size(res),
- res->start, DFL_PORT_REGION_READ |
- DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
- if (ret)
- return ret;
+static const struct dfl_feature_id port_afu_id_table[] = {
+ {.id = PORT_FEATURE_ID_AFU,},
+ {0,}
+};
- return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
-}
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+};
-static void port_afu_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static int port_stp_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
{
- dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+ struct resource *res = &pdev->resource[feature->resource_index];
- sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+ return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_STP,
+ resource_size(res), res->start,
+ DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE);
}
-static const struct dfl_feature_ops port_afu_ops = {
- .init = port_afu_init,
- .uinit = port_afu_uinit,
+static const struct dfl_feature_id port_stp_id_table[] = {
+ {.id = PORT_FEATURE_ID_STP,},
+ {0,}
+};
+
+static const struct dfl_feature_ops port_stp_ops = {
+ .init = port_stp_init,
};
static struct dfl_feature_driver port_feature_drvs[] = {
{
- .id = PORT_FEATURE_ID_HEADER,
+ .id_table = port_hdr_id_table,
.ops = &port_hdr_ops,
},
{
- .id = PORT_FEATURE_ID_AFU,
+ .id_table = port_afu_id_table,
.ops = &port_afu_ops,
},
{
+ .id_table = port_err_id_table,
+ .ops = &port_err_ops,
+ },
+ {
+ .id_table = port_stp_id_table,
+ .ops = &port_stp_ops,
+ },
+ {
.ops = NULL,
}
};
@@ -545,9 +832,9 @@ static int port_enable_set(struct platform_device *pdev, bool enable)
mutex_lock(&pdata->lock);
if (enable)
- port_enable(pdev);
+ __afu_port_enable(pdev);
else
- ret = port_disable(pdev);
+ ret = __afu_port_disable(pdev);
mutex_unlock(&pdata->lock);
return ret;
@@ -599,9 +886,17 @@ static int afu_remove(struct platform_device *pdev)
return 0;
}
+static const struct attribute_group *afu_dev_groups[] = {
+ &port_hdr_group,
+ &port_afu_group,
+ &port_err_group,
+ NULL
+};
+
static struct platform_driver afu_driver = {
.driver = {
- .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .dev_groups = afu_dev_groups,
},
.probe = afu_probe,
.remove = afu_remove,
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 0c7630ae3cda..576e94960086 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -79,6 +79,10 @@ struct dfl_afu {
struct dfl_feature_platform_data *pdata;
};
+/* hold pdata->lock when call __afu_port_enable/disable */
+void __afu_port_enable(struct platform_device *pdev);
+int __afu_port_disable(struct platform_device *pdev);
+
void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
@@ -97,4 +101,9 @@ int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata,
u64 iova, u64 size);
+
+extern const struct dfl_feature_ops port_err_ops;
+extern const struct dfl_feature_id port_err_id_table[];
+extern const struct attribute_group port_err_group;
+
#endif /* __DFL_AFU_H */
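
The header comment above states that pdata->lock must be held around __afu_port_disable()/__afu_port_enable(). A minimal caller sketch honoring that contract, mirroring port_enable_set() in the patch (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

#include "dfl-afu.h"	/* declares __afu_port_enable()/__afu_port_disable() */

/* Hypothetical caller: reset a port while honoring the locking rule above. */
static int port_reset_locked(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __afu_port_disable(pdev);
	if (!ret)
		__afu_port_enable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}
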
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
new file mode 100644
index 000000000000..f897d414b923
--- /dev/null
+++ b/drivers/fpga/dfl-fme-error.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine Error Management
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/uaccess.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+#define FME_ERROR_MASK 0x8
+#define FME_ERROR 0x10
+#define MBP_ERROR BIT_ULL(6)
+#define PCIE0_ERROR_MASK 0x18
+#define PCIE0_ERROR 0x20
+#define PCIE1_ERROR_MASK 0x28
+#define PCIE1_ERROR 0x30
+#define FME_FIRST_ERROR 0x38
+#define FME_NEXT_ERROR 0x40
+#define RAS_NONFAT_ERROR_MASK 0x48
+#define RAS_NONFAT_ERROR 0x50
+#define RAS_CATFAT_ERROR_MASK 0x58
+#define RAS_CATFAT_ERROR 0x60
+#define RAS_ERROR_INJECT 0x68
+#define INJECT_ERROR_MASK GENMASK_ULL(2, 0)
+
+#define ERROR_MASK GENMASK_ULL(63, 0)
+
+static ssize_t pcie0_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE0_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie0_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
+
+ v = readq(base + PCIE0_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE0_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE0_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie0_errors);
+
+static ssize_t pcie1_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + PCIE1_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t pcie1_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ int ret = 0;
+ u64 v, val;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
+
+ v = readq(base + PCIE1_ERROR);
+ if (val == v)
+ writeq(v, base + PCIE1_ERROR);
+ else
+ ret = -EINVAL;
+
+ writeq(0ULL, base + PCIE1_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(pcie1_errors);
+
+static ssize_t nonfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_NONFAT_ERROR));
+}
+static DEVICE_ATTR_RO(nonfatal_errors);
+
+static ssize_t catfatal_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)readq(base + RAS_CATFAT_ERROR));
+}
+static DEVICE_ATTR_RO(catfatal_errors);
+
+static ssize_t inject_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n",
+ (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
+}
+
+static ssize_t inject_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u8 inject_error;
+ u64 v;
+
+ if (kstrtou8(buf, 0, &inject_error))
+ return -EINVAL;
+
+ if (inject_error & ~INJECT_ERROR_MASK)
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ v = readq(base + RAS_ERROR_INJECT);
+ v &= ~INJECT_ERROR_MASK;
+ v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
+ writeq(v, base + RAS_ERROR_INJECT);
+ mutex_unlock(&pdata->lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_errors);
+
+static ssize_t fme_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+
+static ssize_t fme_errors_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 v, val;
+ int ret = 0;
+
+ if (kstrtou64(buf, 0, &val))
+ return -EINVAL;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
+
+ v = readq(base + FME_ERROR);
+ if (val == v)
+ writeq(v, base + FME_ERROR);
+ else
+ ret = -EINVAL;
+
+ /* Workaround: disable MBP_ERROR if feature revision is 0 */
+ writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
+ base + FME_ERROR_MASK);
+ mutex_unlock(&pdata->lock);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(fme_errors);
+
+static ssize_t first_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_FIRST_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(first_error);
+
+static ssize_t next_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 value;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+ value = readq(base + FME_NEXT_ERROR);
+ mutex_unlock(&pdata->lock);
+
+ return sprintf(buf, "0x%llx\n", (unsigned long long)value);
+}
+static DEVICE_ATTR_RO(next_error);
+
+static struct attribute *fme_global_err_attrs[] = {
+ &dev_attr_pcie0_errors.attr,
+ &dev_attr_pcie1_errors.attr,
+ &dev_attr_nonfatal_errors.attr,
+ &dev_attr_catfatal_errors.attr,
+ &dev_attr_inject_errors.attr,
+ &dev_attr_fme_errors.attr,
+ &dev_attr_first_error.attr,
+ &dev_attr_next_error.attr,
+ NULL,
+};
+
+static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ /*
+ * sysfs entries are visible only if related private feature is
+ * enumerated.
+ */
+ if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group fme_global_err_group = {
+ .name = "errors",
+ .attrs = fme_global_err_attrs,
+ .is_visible = fme_global_err_attrs_visible,
+};
+
+static void fme_err_mask(struct device *dev, bool mask)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+
+ mutex_lock(&pdata->lock);
+
+ /* Workaround: keep MBP_ERROR always masked if revision is 0 */
+ if (dfl_feature_revision(base))
+ writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
+ else
+ writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);
+
+ writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
+ writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
+
+ mutex_unlock(&pdata->lock);
+}
+
+static int fme_global_err_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, false);
+
+ return 0;
+}
+
+static void fme_global_err_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ fme_err_mask(&pdev->dev, true);
+}
+
+const struct dfl_feature_id fme_global_err_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_ERR,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_global_err_ops = {
+ .init = fme_global_err_init,
+ .uinit = fme_global_err_uinit,
+};
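
The pcie0_errors, pcie1_errors and fme_errors store handlers above share one pattern: mask the error source, clear only if the written value matches the latched register, then restore the mask. A condensed sketch of that sequence (hypothetical helper, caller holds pdata->lock as the handlers do):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical helper, not in the patch: clear-on-exact-match sequence. */
static int fme_err_clear_if_match(void __iomem *base, u32 err_off,
				  u32 mask_off, u64 expected, u64 unmask_val)
{
	int ret = 0;
	u64 v;

	writeq(GENMASK_ULL(63, 0), base + mask_off);	/* mask while clearing */

	v = readq(base + err_off);
	if (v == expected)
		writeq(v, base + err_off);		/* write-back-to-clear */
	else
		ret = -EINVAL;

	writeq(unmask_val, base + mask_off);		/* restore the mask */

	return ret;
}
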
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 086ad2420ade..4d78e182878f 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>
#include "dfl.h"
@@ -72,50 +73,126 @@ static ssize_t bitstream_metadata_show(struct device *dev,
}
static DEVICE_ATTR_RO(bitstream_metadata);
-static const struct attribute *fme_hdr_attrs[] = {
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t fabric_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
+}
+static DEVICE_ATTR_RO(fabric_version);
+
+static ssize_t socket_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
+}
+static DEVICE_ATTR_RO(socket_id);
+
+static struct attribute *fme_hdr_attrs[] = {
&dev_attr_ports_num.attr,
&dev_attr_bitstream_id.attr,
&dev_attr_bitstream_metadata.attr,
+ &dev_attr_cache_size.attr,
+ &dev_attr_fabric_version.attr,
+ &dev_attr_socket_id.attr,
NULL,
};
-static int fme_hdr_init(struct platform_device *pdev,
- struct dfl_feature *feature)
+static const struct attribute_group fme_hdr_group = {
+ .attrs = fme_hdr_attrs,
+};
+
+static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
{
- void __iomem *base = feature->ioaddr;
- int ret;
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
- dev_dbg(&pdev->dev, "FME HDR Init.\n");
- dev_dbg(&pdev->dev, "FME cap %llx.\n",
- (unsigned long long)readq(base + FME_HDR_CAP));
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
- ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
- if (ret)
- return ret;
+ return dfl_fpga_cdev_release_port(cdev, port_id);
+}
- return 0;
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ int port_id;
+
+ if (get_user(port_id, (int __user *)arg))
+ return -EFAULT;
+
+ return dfl_fpga_cdev_assign_port(cdev, port_id);
}
-static void fme_hdr_uinit(struct platform_device *pdev,
- struct dfl_feature *feature)
+static long fme_hdr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
{
- dev_dbg(&pdev->dev, "FME HDR UInit.\n");
- sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_RELEASE:
+ return fme_hdr_ioctl_release_port(pdata, arg);
+ case DFL_FPGA_FME_PORT_ASSIGN:
+ return fme_hdr_ioctl_assign_port(pdata, arg);
+ }
+
+ return -ENODEV;
}
+static const struct dfl_feature_id fme_hdr_id_table[] = {
+ {.id = FME_FEATURE_ID_HEADER,},
+ {0,}
+};
+
static const struct dfl_feature_ops fme_hdr_ops = {
- .init = fme_hdr_init,
- .uinit = fme_hdr_uinit,
+ .ioctl = fme_hdr_ioctl,
};
static struct dfl_feature_driver fme_feature_drvs[] = {
{
- .id = FME_FEATURE_ID_HEADER,
+ .id_table = fme_hdr_id_table,
.ops = &fme_hdr_ops,
},
{
- .id = FME_FEATURE_ID_PR_MGMT,
- .ops = &pr_mgmt_ops,
+ .id_table = fme_pr_mgmt_id_table,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .id_table = fme_global_err_id_table,
+ .ops = &fme_global_err_ops,
},
{
.ops = NULL,
@@ -263,9 +340,16 @@ static int fme_remove(struct platform_device *pdev)
return 0;
}
+static const struct attribute_group *fme_dev_groups[] = {
+ &fme_hdr_group,
+ &fme_global_err_group,
+ NULL
+};
+
static struct platform_driver fme_driver = {
.driver = {
- .name = DFL_FPGA_FEATURE_DEV_FME,
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ .dev_groups = fme_dev_groups,
},
.probe = fme_probe,
.remove = fme_remove,
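
The new fme_hdr_ioctl() above dispatches port release/assign requests and, per the get_user() call, expects a pointer to the port id as the ioctl argument. A hedged userspace sketch; the character device path and the ioctl macro names in <linux/fpga-dfl.h> are assumptions here:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>	/* assumed to define DFL_FPGA_FME_PORT_RELEASE */

/* Release a port from the PF so it can be turned into a VF (illustration). */
static int release_port(const char *fme_dev, int port_id)
{
	int fd = open(fme_dev, O_RDWR);	/* e.g. "/dev/dfl-fme.0" (assumed) */
	int ret;

	if (fd < 0)
		return -1;

	ret = ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id);
	close(fd);

	return ret;	/* 0 on success, -1 with errno set otherwise */
}
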
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index 3c71dc3faaf5..a233a53db708 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -470,7 +470,12 @@ static long fme_pr_ioctl(struct platform_device *pdev,
return ret;
}
-const struct dfl_feature_ops pr_mgmt_ops = {
+const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_PR_MGMT,},
+ {0}
+};
+
+const struct dfl_feature_ops fme_pr_mgmt_ops = {
.init = pr_mgmt_init,
.uinit = pr_mgmt_uinit,
.ioctl = fme_pr_ioctl,
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 5394a216c5c0..6685c8ef965b 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -33,6 +33,10 @@ struct dfl_fme {
struct dfl_feature_platform_data *pdata;
};
-extern const struct dfl_feature_ops pr_mgmt_ops;
+extern const struct dfl_feature_ops fme_pr_mgmt_ops;
+extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
+extern const struct dfl_feature_ops fme_global_err_ops;
+extern const struct dfl_feature_id fme_global_err_id_table[];
+extern const struct attribute_group fme_global_err_group;
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 66b5720582bb..89ca292236ad 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -223,8 +223,43 @@ disable_error_report_exit:
return ret;
}
+static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_cdev *cdev = drvdata->cdev;
+ int ret = 0;
+
+ if (!num_vfs) {
+ /*
+ * Disable SRIOV and then put the released ports back into the
+ * default PF access mode.
+ */
+ pci_disable_sriov(pcidev);
+
+ dfl_fpga_cdev_config_ports_pf(cdev);
+
+ } else {
+ /*
+ * Before enabling SRIOV, first put the released ports into VF
+ * access mode.
+ */
+ ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = pci_enable_sriov(pcidev, num_vfs);
+ if (ret)
+ dfl_fpga_cdev_config_ports_pf(cdev);
+ }
+
+ return ret;
+}
+
static void cci_pci_remove(struct pci_dev *pcidev)
{
+ if (dev_is_pf(&pcidev->dev))
+ cci_pci_sriov_configure(pcidev, 0);
+
cci_remove_feature_devs(pcidev);
pci_disable_pcie_error_reporting(pcidev);
}
@@ -234,6 +269,7 @@ static struct pci_driver cci_pci_driver = {
.id_table = cci_pcie_id_tbl,
.probe = cci_pci_probe,
.remove = cci_pci_remove,
+ .sriov_configure = cci_pci_sriov_configure,
};
module_pci_driver(cci_pci_driver);
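
With .sriov_configure wired up above, the PCI core calls cci_pci_sriov_configure() whenever the PF's sriov_numvfs attribute is written; the handler moves released ports into VF access mode before enabling SR-IOV and back to PF mode when VFs are disabled or enabling fails. A small userspace sketch of driving that path; the sysfs directory below is an assumption:

#include <stdio.h>

/* Write the requested VF count; 0 disables SR-IOV again (illustration). */
static int set_num_vfs(const char *pf_sysfs_dir, int num_vfs)
{
	char path[256];
	FILE *f;

	/* pf_sysfs_dir: e.g. "/sys/bus/pci/devices/0000:3b:00.0" (assumed) */
	snprintf(path, sizeof(path), "%s/sriov_numvfs", pf_sysfs_dir);

	f = fopen(path, "w");
	if (!f)
		return -1;

	fprintf(f, "%d\n", num_vfs);

	return fclose(f) ? -1 : 0;
}
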
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 4b66aaa32b5a..96a2b8274a33 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -231,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
*/
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
- struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
- int port_id;
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fpga_port_ops *port_ops;
+
+ if (pdata->id != FEATURE_DEV_ID_UNUSED)
+ return pdata->id == *(int *)pport_id;
+ port_ops = dfl_fpga_port_ops_get(pdev);
if (!port_ops || !port_ops->get_id)
return 0;
- port_id = port_ops->get_id(pdev);
+ pdata->id = port_ops->get_id(pdev);
dfl_fpga_port_ops_put(port_ops);
- return port_id == *(int *)pport_id;
+ return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
@@ -255,7 +259,8 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
dfl_fpga_dev_for_each_feature(pdata, feature)
if (feature->ops) {
- feature->ops->uinit(pdev, feature);
+ if (feature->ops->uinit)
+ feature->ops->uinit(pdev, feature);
feature->ops = NULL;
}
}
@@ -266,17 +271,34 @@ static int dfl_feature_instance_init(struct platform_device *pdev,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
- int ret;
+ int ret = 0;
- ret = drv->ops->init(pdev, feature);
- if (ret)
- return ret;
+ if (drv->ops->init) {
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+ }
feature->ops = drv->ops;
return ret;
}
+static bool dfl_feature_drv_match(struct dfl_feature *feature,
+ struct dfl_feature_driver *driver)
+{
+ const struct dfl_feature_id *ids = driver->id_table;
+
+ if (ids) {
+ while (ids->id) {
+ if (ids->id == feature->id)
+ return true;
+ ids++;
+ }
+ }
+ return false;
+}
+
/**
* dfl_fpga_dev_feature_init - init for sub features of dfl feature device
* @pdev: feature device.
@@ -297,8 +319,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
while (drv->ops) {
dfl_fpga_dev_for_each_feature(pdata, feature) {
- /* match feature and drv using id */
- if (feature->id == drv->id) {
+ if (dfl_feature_drv_match(feature, drv)) {
ret = dfl_feature_instance_init(pdev, pdata,
feature, drv);
if (ret)
@@ -474,6 +495,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
pdata->dev = fdev;
pdata->num = binfo->feature_num;
pdata->dfl_cdev = binfo->cdev;
+ pdata->id = FEATURE_DEV_ID_UNUSED;
mutex_init(&pdata->lock);
lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
@@ -973,25 +995,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
struct dfl_feature_platform_data *pdata, *ptmp;
- remove_feature_devs(cdev);
-
mutex_lock(&cdev->lock);
- if (cdev->fme_dev) {
- /* the fme should be unregistered. */
- WARN_ON(device_is_registered(cdev->fme_dev));
+ if (cdev->fme_dev)
put_device(cdev->fme_dev);
- }
list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
struct platform_device *port_dev = pdata->dev;
- /* the port should be unregistered. */
- WARN_ON(device_is_registered(&port_dev->dev));
+ /* remove released ports */
+ if (!device_is_registered(&port_dev->dev)) {
+ dfl_id_free(feature_dev_id_type(port_dev),
+ port_dev->id);
+ platform_device_put(port_dev);
+ }
+
list_del(&pdata->node);
put_device(&port_dev->dev);
}
mutex_unlock(&cdev->lock);
+ remove_feature_devs(cdev);
+
fpga_region_unregister(cdev->region);
devm_kfree(cdev->parent, cdev);
}
@@ -1042,6 +1066,170 @@ static int __init dfl_fpga_init(void)
return ret;
}
+/**
+ * dfl_fpga_cdev_release_port - release a port platform device
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to release a port platform device. This is a
+ * mandatory step before turning a port from PF into VF for SR-IOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (!device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
+ ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+ if (ret)
+ goto put_dev_exit;
+
+ platform_device_del(port_pdev);
+ cdev->released_port_num++;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
+
+/**
+ * dfl_fpga_cdev_assign_port - assign a port platform device back
+ *
+ * @cdev: parent container device.
+ * @port_id: id of the port platform device.
+ *
+ * This function allows the user to assign a port platform device back. This
+ * is a mandatory step after disabling SR-IOV support.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
+{
+ struct platform_device *port_pdev;
+ int ret = -ENODEV;
+
+ mutex_lock(&cdev->lock);
+ port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ goto unlock_exit;
+
+ if (device_is_registered(&port_pdev->dev)) {
+ ret = -EBUSY;
+ goto put_dev_exit;
+ }
+
+ ret = platform_device_add(port_pdev);
+ if (ret)
+ goto put_dev_exit;
+
+ dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+ cdev->released_port_num--;
+put_dev_exit:
+ put_device(&port_pdev->dev);
+unlock_exit:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
+
+static void config_port_access_mode(struct device *fme_dev, int port_id,
+ bool is_vf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_PORT_OFST(port_id));
+
+ v &= ~FME_PORT_OFST_ACC_CTRL;
+ v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
+ is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
+
+ writeq(v, base + FME_HDR_PORT_OFST(port_id));
+}
+
+#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
+#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
+
+/**
+ * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
+ *
+ * @cdev: parent container device.
+ *
+ * This function is needed in the SR-IOV configuration routine. It is used to
+ * configure all released ports from VF access mode back to PF.
+ */
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_pf_mode(cdev->fme_dev, pdata->id);
+ }
+ mutex_unlock(&cdev->lock);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
+
+/**
+ * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
+ *
+ * @cdev: parent container device.
+ * @num_vfs: VF device number.
+ *
+ * This function is needed in the SR-IOV configuration routine. It is used to
+ * configure the released ports from PF access mode to VF.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
+{
+ struct dfl_feature_platform_data *pdata;
+ int ret = 0;
+
+ mutex_lock(&cdev->lock);
+ /*
+ * Multiple ports cannot be mapped onto a single VF device; each VF
+ * device takes exactly one port. If the number of released ports does
+ * not match the requested VF count, reject the request with -EINVAL.
+ */
+ if (cdev->released_port_num != num_vfs) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ if (device_is_registered(&pdata->dev->dev))
+ continue;
+
+ config_port_vf_mode(cdev->fme_dev, pdata->id);
+ }
+done:
+ mutex_unlock(&cdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
+
static void __exit dfl_fpga_exit(void)
{
dfl_chardev_uinit();
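
The four helpers exported above are meant to be driven from a PF driver's sriov_configure path (the dfl-pci hunk earlier in this patch is the real user). Below is a minimal sketch of the expected call ordering only; error handling is trimmed and all example_* names are hypothetical.

/*
 * Sketch only: enable/disable ordering for the exported helpers,
 * mirroring the dfl-pci sriov_configure hunk earlier in this patch.
 */
static int example_enable_vfs(struct pci_dev *pcidev,
			      struct dfl_fpga_cdev *cdev, int num_vfs)
{
	int ret;

	/* Ports must already be released via dfl_fpga_cdev_release_port() */
	ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
	if (ret)
		return ret;	/* e.g. -EINVAL if released ports != num_vfs */

	ret = pci_enable_sriov(pcidev, num_vfs);
	if (ret)
		/* roll the released ports back to PF access mode on failure */
		dfl_fpga_cdev_config_ports_pf(cdev);

	return ret;
}

static void example_disable_vfs(struct pci_dev *pcidev,
				struct dfl_fpga_cdev *cdev)
{
	pci_disable_sriov(pcidev);
	/* released ports return to PF access mode ... */
	dfl_fpga_cdev_config_ports_pf(cdev);
	/* ... and can then be handed back with dfl_fpga_cdev_assign_port() */
}
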
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index a8b869e9e5b7..9f0e656de720 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -30,8 +30,8 @@
/* plus one for fme device */
#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
-/* Reserved 0x0 for Header Group Register and 0xff for AFU */
-#define FEATURE_ID_FIU_HEADER 0x0
+/* Reserved 0xfe for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0xfe
#define FEATURE_ID_AFU 0xff
#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
@@ -119,6 +119,11 @@
#define PORT_HDR_NEXT_AFU NEXT_AFU
#define PORT_HDR_CAP 0x30
#define PORT_HDR_CTRL 0x38
+#define PORT_HDR_STS 0x40
+#define PORT_HDR_USRCLK_CMD0 0x50
+#define PORT_HDR_USRCLK_CMD1 0x58
+#define PORT_HDR_USRCLK_STS0 0x60
+#define PORT_HDR_USRCLK_STS1 0x68
/* Port Capability Register Bitfield */
#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
@@ -130,6 +135,16 @@
/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
#define PORT_CTRL_LATENCY BIT_ULL(2)
#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+
+/* Port Status Register Bitfield */
+#define PORT_STS_AP2_EVT BIT_ULL(13) /* AP2 event detected */
+#define PORT_STS_AP1_EVT BIT_ULL(12) /* AP1 event detected */
+#define PORT_STS_PWR_STATE GENMASK_ULL(11, 8) /* AFU power states */
+#define PORT_STS_PWR_STATE_NORM 0
+#define PORT_STS_PWR_STATE_AP1 1 /* 50% throttling */
+#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
+#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -154,13 +169,22 @@ void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
/**
- * struct dfl_feature_driver - sub feature's driver
+ * struct dfl_feature_id - dfl private feature id
*
- * @id: sub feature id.
- * @ops: ops of this sub feature.
+ * @id: unique dfl private feature id.
*/
-struct dfl_feature_driver {
+struct dfl_feature_id {
u64 id;
+};
+
+/**
+ * struct dfl_feature_driver - dfl private feature driver
+ *
+ * @id_table: id_table for dfl private features supported by this driver.
+ * @ops: ops of this dfl private feature driver.
+ */
+struct dfl_feature_driver {
+ const struct dfl_feature_id *id_table;
const struct dfl_feature_ops *ops;
};
@@ -183,6 +207,8 @@ struct dfl_feature {
#define DEV_STATUS_IN_USE 0
+#define FEATURE_DEV_ID_UNUSED (-1)
+
/**
* struct dfl_feature_platform_data - platform data for feature devices
*
@@ -191,6 +217,7 @@ struct dfl_feature {
* @cdev: cdev of feature dev.
* @dev: ptr to platform device linked with this platform data.
* @dfl_cdev: ptr to container device.
+ * @id: id used for this feature device.
* @disable_count: count for port disable.
* @num: number for sub features.
* @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
@@ -203,6 +230,7 @@ struct dfl_feature_platform_data {
struct cdev cdev;
struct platform_device *dev;
struct dfl_fpga_cdev *dfl_cdev;
+ int id;
unsigned int disable_count;
unsigned long dev_status;
void *private;
@@ -331,6 +359,11 @@ static inline bool dfl_feature_is_port(void __iomem *base)
(FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
}
+static inline u8 dfl_feature_revision(void __iomem *base)
+{
+ return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH));
+}
+
/**
* struct dfl_fpga_enum_info - DFL FPGA enumeration information
*
@@ -373,6 +406,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
* @fme_dev: FME feature device under this container device.
* @lock: mutex lock to protect the port device list.
* @port_dev_list: list of all port feature devices under this container device.
+ * @released_port_num: released port number under this container device.
*/
struct dfl_fpga_cdev {
struct device *parent;
@@ -380,6 +414,7 @@ struct dfl_fpga_cdev {
struct device *fme_dev;
struct mutex lock;
struct list_head port_dev_list;
+ int released_port_num;
};
struct dfl_fpga_cdev *
@@ -407,4 +442,9 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
return pdev;
}
+
+int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
+int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
+void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
+int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
#endif /* __FPGA_DFL_H */
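
With struct dfl_feature_id and the new id_table member above, a sub-feature driver can match several feature IDs instead of a single one (see dfl_feature_drv_match() in dfl.c). A minimal sketch follows, assuming a hypothetical sub-feature driver for IDs 0x10 and 0x11 with an already-defined example_ops.

/*
 * Sketch only: declaring a dfl_feature_driver against the new id_table.
 * The IDs 0x10/0x11 and example_ops are hypothetical.
 */
static const struct dfl_feature_id example_feature_ids[] = {
	{ .id = 0x10, },
	{ .id = 0x11, },
	{ 0, },			/* id == 0 terminates dfl_feature_drv_match() */
};

static struct dfl_feature_driver example_feature_drv = {
	.id_table = example_feature_ids,
	.ops = &example_ops,	/* struct dfl_feature_ops, assumed to exist */
};
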
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 80bd8f1b2aa6..4bab9028940a 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -19,11 +19,6 @@ static struct class *fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
static spinlock_t bridge_list_lock;
-static int fpga_bridge_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/**
* fpga_bridge_enable - Enable transactions on the bridge
*
@@ -104,8 +99,7 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
{
struct device *dev;
- dev = class_find_device(fpga_bridge_class, NULL, np,
- fpga_bridge_of_node_match);
+ dev = class_find_device_by_of_node(fpga_bridge_class, np);
if (!dev)
return ERR_PTR(-ENODEV);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c3866816456a..e05104f5e40c 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -482,11 +482,6 @@ struct fpga_manager *fpga_mgr_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(fpga_mgr_get);
-static int fpga_mgr_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/**
* of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr.
*
@@ -498,8 +493,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
{
struct device *dev;
- dev = class_find_device(fpga_mgr_class, NULL, node,
- fpga_mgr_of_node_match);
+ dev = class_find_device_by_of_node(fpga_mgr_class, node);
if (!dev)
return ERR_PTR(-ENODEV);
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 343153d47e5b..004dc03ccf09 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -38,8 +38,7 @@
#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
#define SCOM_STATUS_PIB_RESP_SHIFT 12
-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
- SCOM_STATUS_PROTECTION | \
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
SCOM_STATUS_PARITY | \
SCOM_STATUS_PIB_ABORT | \
SCOM_STATUS_PIB_RESP_MASK)
@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
/* Return -EBUSY on PIB abort to force a retry */
if (status & SCOM_STATUS_PIB_ABORT)
return -EBUSY;
- if (status & SCOM_STATUS_ERR_SUMMARY) {
- fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
- sizeof(uint32_t));
- return -EIO;
- }
return 0;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index bb13c266c329..38e096e6925f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -275,7 +275,7 @@ config GPIO_ICH
config GPIO_IOP
tristate "Intel IOP GPIO"
- depends on ARCH_IOP32X || ARCH_IOP33X || COMPILE_TEST
+ depends on ARCH_IOP32X || COMPILE_TEST
select GPIO_GENERIC
help
Say yes here to support the GPIO functionality of a number of Intel
@@ -288,7 +288,7 @@ config GPIO_IXP4XX
depends on ARM # For <asm/mach-types.h>
depends on ARCH_IXP4XX
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support the GPIO functionality of a number of Intel
@@ -311,6 +311,13 @@ config GPIO_LPC18XX
Select this option to enable GPIO driver for
NXP LPC18XX/43XX devices.
+config GPIO_LPC32XX
+ tristate "NXP LPC32XX GPIO support"
+ depends on OF_GPIO && (ARCH_LPC32XX || COMPILE_TEST)
+ help
+ Select this option to enable GPIO driver for
+ NXP LPC32XX devices.
+
config GPIO_LYNXPOINT
tristate "Intel Lynxpoint GPIO support"
depends on ACPI && X86
@@ -539,6 +546,7 @@ config GPIO_THUNDERX
tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
depends on PCI_MSI
+ select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
help
@@ -1445,6 +1453,15 @@ config GPIO_XRA1403
help
GPIO driver for EXAR XRA1403 16-bit SPI-based GPIO expander.
+config GPIO_MOXTET
+ tristate "Turris Mox Moxtet bus GPIO expander"
+ depends on MOXTET
+ help
+ Say yes here if you are building for the Turris Mox router.
+ This is the driver needed for configuring the GPIOs via the Moxtet
+ bus. For example, the Mox module with an SFP cage needs this driver
+ so that phylink can use the corresponding GPIOs.
+
endmenu
menu "USB GPIO expanders"
@@ -1465,7 +1482,6 @@ endmenu
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
select IRQ_SIM
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a4e91175c708..d2fd19c15bae 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -67,14 +67,13 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
-obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
-obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
@@ -93,6 +92,7 @@ obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
+obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index c07fad975049..5640efe5e750 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -142,7 +142,7 @@ static const struct gpio_chip template_chip = {
static int arizona_gpio_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
- struct arizona_pdata *pdata = dev_get_platdata(arizona->dev);
+ struct arizona_pdata *pdata = &arizona->pdata;
struct arizona_gpio *arizona_gpio;
int ret;
@@ -177,7 +177,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (pdata && pdata->gpio_base)
+ if (pdata->gpio_base)
arizona_gpio->gpio_chip.base = pdata->gpio_base;
else
arizona_gpio->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 13d80bfbc3b6..09e53c5f3b0a 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -52,6 +52,7 @@ struct aspeed_gpio_config {
*/
struct aspeed_gpio {
struct gpio_chip chip;
+ struct irq_chip irqc;
spinlock_t lock;
void __iomem *base;
int irq;
@@ -661,12 +662,14 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *ic = irq_desc_get_chip(desc);
struct aspeed_gpio *data = gpiochip_get_data(gc);
- unsigned int i, p, girq;
+ unsigned int i, p, girq, banks;
unsigned long reg;
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
chained_irq_enter(ic, desc);
- for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) {
+ banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
+ for (i = 0; i < banks; i++) {
const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
reg = ioread32(bank_reg(data, bank, reg_irq_status));
@@ -681,16 +684,11 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(ic, desc);
}
-static struct irq_chip aspeed_gpio_irqchip = {
- .name = "aspeed-gpio",
- .irq_ack = aspeed_gpio_irq_ack,
- .irq_mask = aspeed_gpio_irq_mask,
- .irq_unmask = aspeed_gpio_irq_unmask,
- .irq_set_type = aspeed_gpio_set_type,
-};
-
-static void set_irq_valid_mask(struct aspeed_gpio *gpio)
+static void aspeed_init_irq_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
{
+ struct aspeed_gpio *gpio = gpiochip_get_data(gc);
const struct aspeed_bank_props *props = gpio->config->props;
while (!is_bank_props_sentinel(props)) {
@@ -701,42 +699,16 @@ static void set_irq_valid_mask(struct aspeed_gpio *gpio)
for_each_clear_bit(offset, &input, 32) {
unsigned int i = props->bank * 32 + offset;
- if (i >= gpio->config->nr_gpios)
+ if (i >= gpio->chip.ngpio)
break;
- clear_bit(i, gpio->chip.irq.valid_mask);
+ clear_bit(i, valid_mask);
}
props++;
}
}
-static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
- struct platform_device *pdev)
-{
- int rc;
-
- rc = platform_get_irq(pdev, 0);
- if (rc < 0)
- return rc;
-
- gpio->irq = rc;
-
- set_irq_valid_mask(gpio);
-
- rc = gpiochip_irqchip_add(&gpio->chip, &aspeed_gpio_irqchip,
- 0, handle_bad_irq, IRQ_TYPE_NONE);
- if (rc) {
- dev_info(&pdev->dev, "Could not add irqchip\n");
- return rc;
- }
-
- gpiochip_set_chained_irqchip(&gpio->chip, &aspeed_gpio_irqchip,
- gpio->irq, aspeed_gpio_irq_handler);
-
- return 0;
-}
-
static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
@@ -1040,10 +1012,10 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
unsigned long flags;
if (!gpio->cf_copro_bankmap)
- gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL);
+ gpio->cf_copro_bankmap = kzalloc(gpio->chip.ngpio >> 3, GFP_KERNEL);
if (!gpio->cf_copro_bankmap)
return -ENOMEM;
- if (offset < 0 || offset > gpio->config->nr_gpios)
+ if (offset < 0 || offset > gpio->chip.ngpio)
return -EINVAL;
bindex = offset >> 3;
@@ -1088,7 +1060,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
if (!gpio->cf_copro_bankmap)
return -ENXIO;
- if (offset < 0 || offset > gpio->config->nr_gpios)
+ if (offset < 0 || offset > gpio->chip.ngpio)
return -EINVAL;
bindex = offset >> 3;
@@ -1141,9 +1113,25 @@ static const struct aspeed_gpio_config ast2500_config =
/* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
{ .nr_gpios = 232, .props = ast2500_bank_props, };
+static const struct aspeed_bank_props ast2600_bank_props[] = {
+ /* input output */
+ {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */
+ {6, 0xffff0000, 0x0fff0000}, /* Y/Z */
+ { },
+};
+
+static const struct aspeed_gpio_config ast2600_config =
+ /*
+ * The ast2600 has two controllers: one with 208 GPIOs and one with
+ * 36 GPIOs. We expect ngpios to be set in the device tree; this is
+ * only a fallback.
+ */
+ { .nr_gpios = 208, .props = ast2600_bank_props, };
+
static const struct of_device_id aspeed_gpio_of_table[] = {
{ .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
{ .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
+ { .compatible = "aspeed,ast2600-gpio", .data = &ast2600_config, },
{}
};
MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
@@ -1152,7 +1140,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
- int rc, i, banks;
+ int rc, i, banks, err;
+ u32 ngpio;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -1178,7 +1167,10 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->config = gpio_id->data;
gpio->chip.parent = &pdev->dev;
- gpio->chip.ngpio = gpio->config->nr_gpios;
+ err = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpio);
+ gpio->chip.ngpio = (u16) ngpio;
+ if (err)
+ gpio->chip.ngpio = gpio->config->nr_gpios;
gpio->chip.direction_input = aspeed_gpio_dir_in;
gpio->chip.direction_output = aspeed_gpio_dir_out;
gpio->chip.get_direction = aspeed_gpio_get_direction;
@@ -1189,10 +1181,9 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
- gpio->chip.irq.need_valid_mask = true;
/* Allocate a cache of the output registers */
- banks = gpio->config->nr_gpios >> 5;
+ banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
gpio->dcache = devm_kcalloc(&pdev->dev,
banks, sizeof(u32), GFP_KERNEL);
if (!gpio->dcache)
@@ -1212,16 +1203,42 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
}
- rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (rc < 0)
- return rc;
+ /* Optionally set up an irqchip if there is an IRQ */
+ rc = platform_get_irq(pdev, 0);
+ if (rc > 0) {
+ struct gpio_irq_chip *girq;
+
+ gpio->irq = rc;
+ girq = &gpio->chip.irq;
+ girq->chip = &gpio->irqc;
+ girq->chip->name = dev_name(&pdev->dev);
+ girq->chip->irq_ack = aspeed_gpio_irq_ack;
+ girq->chip->irq_mask = aspeed_gpio_irq_mask;
+ girq->chip->irq_unmask = aspeed_gpio_irq_unmask;
+ girq->chip->irq_set_type = aspeed_gpio_set_type;
+ girq->parent_handler = aspeed_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->init_valid_mask = aspeed_init_irq_valid_mask;
+ }
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
if (!gpio->offset_timer)
return -ENOMEM;
- return aspeed_gpio_setup_irqs(gpio, pdev);
+ rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
static struct platform_driver aspeed_gpio_driver = {
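
The aspeed conversion above follows the newer gpiolib pattern: fill in struct gpio_irq_chip under gc->irq before registering the chip, instead of calling gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() afterwards. The same pattern recurs in several of the conversions below. A minimal, hedged sketch for a hypothetical driver with one chained parent IRQ (all example_* names are assumptions, and struct example_gpio is assumed to embed a struct gpio_chip gc):

/* Sketch of the gpio_irq_chip registration pattern used in these patches */
static int example_probe(struct platform_device *pdev)
{
	struct example_gpio *g;
	struct gpio_irq_chip *girq;
	int irq;

	g = devm_kzalloc(&pdev->dev, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... fill in g->gc (label, ngpio, get/set/direction ops) ... */

	girq = &g->gc.irq;
	girq->chip = &example_irqchip;		/* static struct irq_chip */
	girq->parent_handler = example_irq_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	/* gpiochip_add_data() now also registers the irqchip */
	return devm_gpiochip_add_data(&pdev->dev, &g->gc, g);
}
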
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index fd85605d2dab..0c1ead12d883 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -36,7 +36,7 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio,
break;
default:
dev_err(bdgpio->chip.dev,
- "Invalid debouce value %u\n", debounce);
+ "Invalid debounce value %u\n", debounce);
return -EINVAL;
}
return regmap_update_bits(bdgpio->chip.regmap, GPIO_IN_REG(offset),
@@ -153,7 +153,7 @@ static int bd70528_gpio_get_i(struct bd70528_gpio *bdgpio, unsigned int offset)
static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- int ret = -EINVAL;
+ int ret;
struct bd70528_gpio *bdgpio = gpiochip_get_data(chip);
/*
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index af936dcca659..05e3f99ae59c 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -636,10 +636,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "interrupt-controller")) {
priv->parent_irq = platform_get_irq(pdev, 0);
- if (priv->parent_irq <= 0) {
- dev_err(dev, "Couldn't get IRQ");
+ if (priv->parent_irq <= 0)
return -ENOENT;
- }
} else {
priv->parent_irq = -ENOENT;
}
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index 712ae212b0b4..a4d3239d2594 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -214,27 +214,33 @@ static int cdns_gpio_probe(struct platform_device *pdev)
goto err_revert_dir;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- goto err_disable_clk;
- }
-
/*
- * irq_chip support
+ * Optional irq_chip support
*/
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
- ret = gpiochip_irqchip_add(&cgpio->gc, &cdns_gpio_irqchip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "Could not add irqchip, %d\n",
- ret);
+ struct gpio_irq_chip *girq;
+
+ girq = &cgpio->gc.irq;
+ girq->chip = &cdns_gpio_irqchip;
+ girq->parent_handler = cdns_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto err_disable_clk;
}
- gpiochip_set_chained_irqchip(&cgpio->gc, &cdns_gpio_irqchip,
- irq, cdns_gpio_irq_handler);
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ goto err_disable_clk;
}
cgpio->bypass_orig = ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 8cbc94d0d424..ff19a8ad5663 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -137,7 +137,6 @@ static int creg_gpio_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct creg_gpio *hcg;
- struct resource *mem;
u32 ngpios;
int ret;
@@ -145,8 +144,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
if (!hcg)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcg->regs = devm_ioremap_resource(dev, mem);
+ hcg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hcg->regs))
return PTR_ERR(hcg->regs);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 3108be5e208c..92e127e74813 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include "gpiolib.h"
+#include "gpiolib-acpi.h"
#define GPIO_SWPORTA_DR 0x00
#define GPIO_SWPORTA_DDR 0x04
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 7b9ac4a12c20..fe7a73f52329 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -584,10 +584,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->type = pdata->type;
sprd_eic->irq = platform_get_irq(pdev, 0);
- if (sprd_eic->irq < 0) {
- dev_err(&pdev->dev, "Failed to get EIC interrupt.\n");
+ if (sprd_eic->irq < 0)
return sprd_eic->irq;
- }
for (i = 0; i < SPRD_EIC_MAX_BANK; i++) {
/*
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index a87951293aaa..620f25b7efb4 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -272,11 +272,12 @@ static int em_gio_probe(struct platform_device *pdev)
struct resource *io[2], *irq[2];
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
- const char *name = dev_name(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
unsigned int ngpios;
int ret;
- p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
@@ -290,27 +291,27 @@ static int em_gio_probe(struct platform_device *pdev)
irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
- dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
+ dev_err(dev, "missing IRQ or IOMEM\n");
return -EINVAL;
}
- p->base0 = devm_ioremap_nocache(&pdev->dev, io[0]->start,
+ p->base0 = devm_ioremap_nocache(dev, io[0]->start,
resource_size(io[0]));
if (!p->base0)
return -ENOMEM;
- p->base1 = devm_ioremap_nocache(&pdev->dev, io[1]->start,
+ p->base1 = devm_ioremap_nocache(dev, io[1]->start,
resource_size(io[1]));
if (!p->base1)
return -ENOMEM;
- if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
- dev_err(&pdev->dev, "Missing ngpios OF property\n");
+ if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) {
+ dev_err(dev, "Missing ngpios OF property\n");
return -EINVAL;
}
gpio_chip = &p->gpio_chip;
- gpio_chip->of_node = pdev->dev.of_node;
+ gpio_chip->of_node = dev->of_node;
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
@@ -319,7 +320,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->request = em_gio_request;
gpio_chip->free = em_gio_free;
gpio_chip->label = name;
- gpio_chip->parent = &pdev->dev;
+ gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
gpio_chip->base = -1;
gpio_chip->ngpio = ngpios;
@@ -333,33 +334,33 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
+ p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
- dev_err(&pdev->dev, "cannot initialize irq domain\n");
+ dev_err(dev, "cannot initialize irq domain\n");
return -ENXIO;
}
- ret = devm_add_action_or_reset(&pdev->dev, em_gio_irq_domain_remove,
+ ret = devm_add_action_or_reset(dev, em_gio_irq_domain_remove,
p->irq_domain);
if (ret)
return ret;
- if (devm_request_irq(&pdev->dev, irq[0]->start,
+ if (devm_request_irq(dev, irq[0]->start,
em_gio_irq_handler, 0, name, p)) {
- dev_err(&pdev->dev, "failed to request low IRQ\n");
+ dev_err(dev, "failed to request low IRQ\n");
return -ENOENT;
}
- if (devm_request_irq(&pdev->dev, irq[1]->start,
+ if (devm_request_irq(dev, irq[1]->start,
em_gio_irq_handler, 0, name, p)) {
- dev_err(&pdev->dev, "failed to request high IRQ\n");
+ dev_err(dev, "failed to request high IRQ\n");
return -ENOENT;
}
- ret = devm_gpiochip_add_data(&pdev->dev, gpio_chip, p);
+ ret = devm_gpiochip_add_data(dev, gpio_chip, p);
if (ret) {
- dev_err(&pdev->dev, "failed to add GPIO controller\n");
+ dev_err(dev, "failed to add GPIO controller\n");
return ret;
}
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index a90870a60c15..226da8df6f10 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -269,56 +269,6 @@ static struct irq_chip ep93xx_gpio_irq_chip = {
.irq_set_type = ep93xx_gpio_irq_type,
};
-static int ep93xx_gpio_init_irq(struct platform_device *pdev,
- struct ep93xx_gpio *epg)
-{
- int ab_parent_irq = platform_get_irq(pdev, 0);
- struct device *dev = &pdev->dev;
- int gpio_irq;
- int ret;
- int i;
-
- /* The A bank */
- ret = gpiochip_irqchip_add(&epg->gc[0], &ep93xx_gpio_irq_chip,
- 64, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "Could not add irqchip 0\n");
- return ret;
- }
- gpiochip_set_chained_irqchip(&epg->gc[0], &ep93xx_gpio_irq_chip,
- ab_parent_irq,
- ep93xx_gpio_ab_irq_handler);
-
- /* The B bank */
- ret = gpiochip_irqchip_add(&epg->gc[1], &ep93xx_gpio_irq_chip,
- 72, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "Could not add irqchip 1\n");
- return ret;
- }
- gpiochip_set_chained_irqchip(&epg->gc[1], &ep93xx_gpio_irq_chip,
- ab_parent_irq,
- ep93xx_gpio_ab_irq_handler);
-
- /* The F bank */
- for (i = 0; i < 8; i++) {
- gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
- irq_set_chip_data(gpio_irq, &epg->gc[5]);
- irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
- handle_level_irq);
- irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
- }
-
- for (i = 1; i <= 8; i++)
- irq_set_chained_handler_and_data(platform_get_irq(pdev, i),
- ep93xx_gpio_f_irq_handler,
- &epg->gc[i]);
- return 0;
-}
-
-
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
@@ -328,26 +278,33 @@ struct ep93xx_gpio_bank {
int dir;
int base;
bool has_irq;
+ bool has_hierarchical_irq;
+ unsigned int irq_base;
};
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
.base = _base, \
.has_irq = _has_irq, \
+ .has_hierarchical_irq = _has_hier, \
+ .irq_base = _irq_base, \
}
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true), /* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true), /* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false),
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true), /* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
+ /* Bank A has 8 IRQs */
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+ /* Bank B has 8 IRQs */
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+ /* Bank F has 8 IRQs */
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
};
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -369,12 +326,15 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
return EP93XX_GPIO_F_IRQ_BASE + offset;
}
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
+static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+ struct platform_device *pdev,
struct ep93xx_gpio *epg,
struct ep93xx_gpio_bank *bank)
{
void __iomem *data = epg->base + bank->data;
void __iomem *dir = epg->base + bank->dir;
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
int err;
err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0);
@@ -384,8 +344,59 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->label = bank->label;
gc->base = bank->base;
- if (bank->has_irq)
+ girq = &gc->irq;
+ if (bank->has_irq || bank->has_hierarchical_irq) {
gc->set_config = ep93xx_gpio_set_config;
+ girq->chip = &ep93xx_gpio_irq_chip;
+ }
+
+ if (bank->has_irq) {
+ int ab_parent_irq = platform_get_irq(pdev, 0);
+
+ girq->parent_handler = ep93xx_gpio_ab_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->parents[0] = ab_parent_irq;
+ girq->first = bank->irq_base;
+ }
+
+ /* Only bank F has especially funky IRQ handling */
+ if (bank->has_hierarchical_irq) {
+ int gpio_irq;
+ int i;
+
+ /*
+ * FIXME: convert this to use hierarchical IRQ support!
+ * This requires fixing the root irqchip to be hierarchical.
+ */
+ girq->parent_handler = ep93xx_gpio_f_irq_handler;
+ girq->num_parents = 8;
+ girq->parents = devm_kcalloc(dev, 8,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ /* Pick resources 1..8 for these IRQs */
+ for (i = 1; i <= 8; i++)
+ girq->parents[i - 1] = platform_get_irq(pdev, i);
+ for (i = 0; i < 8; i++) {
+ gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
+ irq_set_chip_data(gpio_irq, &epg->gc[5]);
+ irq_set_chip_and_handler(gpio_irq,
+ &ep93xx_gpio_irq_chip,
+ handle_level_irq);
+ irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ gc->to_irq = ep93xx_gpio_f_to_irq;
+ }
return devm_gpiochip_add_data(dev, gc, epg);
}
@@ -407,16 +418,11 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
struct gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
- if (ep93xx_gpio_add_bank(gc, &pdev->dev, epg, bank))
+ if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
bank->label);
- /* Only bank F has especially funky IRQ handling */
- if (i == 5)
- gc->to_irq = ep93xx_gpio_f_to_irq;
}
- ep93xx_gpio_init_irq(pdev, epg);
-
return 0;
}
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 250e71f3e688..fbddb1662428 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -290,16 +290,14 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto dis_clk;
+ }
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parents[0] = irq;
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
- if (ret)
- goto dis_clk;
-
/* Disable, unmask and clear all interrupts */
writel(0x0, g->base + GPIO_INT_EN);
writel(0x0, g->base + GPIO_INT_MASK);
@@ -308,6 +306,10 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret)
+ goto dis_clk;
+
platform_set_drvdata(pdev, g);
dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 0937b605e134..08234e64993a 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -408,8 +408,6 @@ static int grgpio_probe(struct platform_device *ofdev)
* Continue without irq functionality for that
* gpio line
*/
- dev_err(priv->dev,
- "Failed to get irq for offset %d\n", i);
continue;
}
priv->uirqs[lirq->index].uirq = ret;
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index e5fa00f8145f..4a17599f6d44 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -244,43 +244,45 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
ngpios = 32;
hlwd->gpioc.ngpio = ngpios;
- res = devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
- if (res)
- return res;
-
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
iowrite32be(0xffffffff, hlwd->regs + HW_GPIOB_INTFLAG);
/*
* If this GPIO controller is not marked as an interrupt controller in
- * the DT, return.
+ * the DT, skip interrupt support.
*/
- if (!of_property_read_bool(pdev->dev.of_node, "interrupt-controller"))
- return 0;
-
- hlwd->irq = platform_get_irq(pdev, 0);
- if (hlwd->irq < 0) {
- dev_info(&pdev->dev, "platform_get_irq returned %d\n",
- hlwd->irq);
- return hlwd->irq;
+ if (of_property_read_bool(pdev->dev.of_node, "interrupt-controller")) {
+ struct gpio_irq_chip *girq;
+
+ hlwd->irq = platform_get_irq(pdev, 0);
+ if (hlwd->irq < 0) {
+ dev_info(&pdev->dev, "platform_get_irq returned %d\n",
+ hlwd->irq);
+ return hlwd->irq;
+ }
+
+ hlwd->irqc.name = dev_name(&pdev->dev);
+ hlwd->irqc.irq_mask = hlwd_gpio_irq_mask;
+ hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask;
+ hlwd->irqc.irq_enable = hlwd_gpio_irq_enable;
+ hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type;
+
+ girq = &hlwd->gpioc.irq;
+ girq->chip = &hlwd->irqc;
+ girq->parent_handler = hlwd_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = hlwd->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
}
- hlwd->irqc.name = dev_name(&pdev->dev);
- hlwd->irqc.irq_mask = hlwd_gpio_irq_mask;
- hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask;
- hlwd->irqc.irq_enable = hlwd_gpio_irq_enable;
- hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type;
-
- res = gpiochip_irqchip_add(&hlwd->gpioc, &hlwd->irqc, 0,
- handle_level_irq, IRQ_TYPE_NONE);
- if (res)
- return res;
-
- gpiochip_set_chained_irqchip(&hlwd->gpioc, &hlwd->irqc,
- hlwd->irq, hlwd_gpio_irqhandler);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 9d3ac51a765c..6eb56f7ab9c9 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -118,20 +118,6 @@ static void egpio_handler(struct irq_desc *desc)
}
}
-int htc_egpio_get_wakeup_irq(struct device *dev)
-{
- struct egpio_info *ei = dev_get_drvdata(dev);
-
- /* Read current pins. */
- u16 readval = egpio_readw(ei, ei->ack_register);
- /* Ack/unmask interrupts. */
- ack_irqs(ei);
- /* Return first set pin. */
- readval &= ei->irqs_enabled;
- return ei->irq_start + ffs(readval) - 1;
-}
-EXPORT_SYMBOL(htc_egpio_get_wakeup_irq);
-
static inline int egpio_pos(struct egpio_info *ei, int bit)
{
return bit >> ei->reg_shift;
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 4e803baf980e..4d835f9089df 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -329,6 +329,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
u32 gpio_base;
u32 irq_base;
int retval;
+ struct gpio_irq_chip *girq;
struct intel_mid_gpio_ddata *ddata =
(struct intel_mid_gpio_ddata *)id->driver_data;
@@ -369,6 +370,22 @@ static int intel_gpio_probe(struct pci_dev *pdev,
spin_lock_init(&priv->lock);
+ girq = &priv->chip.irq;
+ girq->chip = &intel_mid_irqchip;
+ girq->parent_handler = intel_mid_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
+ intel_mid_irq_init_hw(priv);
+
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
@@ -376,24 +393,6 @@ static int intel_gpio_probe(struct pci_dev *pdev,
return retval;
}
- retval = gpiochip_irqchip_add(&priv->chip,
- &intel_mid_irqchip,
- irq_base,
- handle_simple_irq,
- IRQ_TYPE_NONE);
- if (retval) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- return retval;
- }
-
- intel_mid_irq_init_hw(priv);
-
- gpiochip_set_chained_irqchip(&priv->chip,
- &intel_mid_irqchip,
- pdev->irq,
- intel_mid_irq_handler);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 670c2a85a35b..b3b050604e0b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -47,7 +47,6 @@
* @dev: containing device for this instance
* @fwnode: the fwnode for this GPIO chip
* @gc: gpiochip for this instance
- * @domain: irqdomain for this chip instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level triggered
@@ -56,48 +55,22 @@ struct ixp4xx_gpio {
struct device *dev;
struct fwnode_handle *fwnode;
struct gpio_chip gc;
- struct irq_domain *domain;
void __iomem *base;
unsigned long long irq_edge;
};
-/**
- * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
- * @gpio_offset: offset of the IXP4 GPIO line
- * @parent_hwirq: hwirq on the parent IRQ controller
- */
-struct ixp4xx_gpio_map {
- int gpio_offset;
- int parent_hwirq;
-};
-
-/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
-const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
- { .gpio_offset = 0, .parent_hwirq = 6 },
- { .gpio_offset = 1, .parent_hwirq = 7 },
- { .gpio_offset = 2, .parent_hwirq = 19 },
- { .gpio_offset = 3, .parent_hwirq = 20 },
- { .gpio_offset = 4, .parent_hwirq = 21 },
- { .gpio_offset = 5, .parent_hwirq = 22 },
- { .gpio_offset = 6, .parent_hwirq = 23 },
- { .gpio_offset = 7, .parent_hwirq = 24 },
- { .gpio_offset = 8, .parent_hwirq = 25 },
- { .gpio_offset = 9, .parent_hwirq = 26 },
- { .gpio_offset = 10, .parent_hwirq = 27 },
- { .gpio_offset = 11, .parent_hwirq = 28 },
- { .gpio_offset = 12, .parent_hwirq = 29 },
-};
-
static void ixp4xx_gpio_irq_ack(struct irq_data *d)
{
- struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
}
static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
{
- struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
/* ACK when unmasking if not edge-triggered */
if (!(g->irq_edge & BIT(d->hwirq)))
@@ -108,7 +81,8 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
int line = d->hwirq;
unsigned long flags;
u32 int_style;
@@ -187,122 +161,31 @@ static struct irq_chip ixp4xx_gpio_irqchip = {
.irq_set_type = ixp4xx_gpio_irq_set_type,
};
-static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
{
- struct ixp4xx_gpio *g = gpiochip_get_data(gc);
- struct irq_fwspec fwspec;
-
- fwspec.fwnode = g->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = offset;
- fwspec.param[1] = IRQ_TYPE_NONE;
-
- return irq_create_fwspec_mapping(&fwspec);
-}
+ /* All these interrupts are level high in the CPU */
+ *parent_type = IRQ_TYPE_LEVEL_HIGH;
-static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
-{
- int ret;
-
- /* We support standard DT translation */
- if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- return irq_domain_translate_twocell(domain, fwspec,
- hwirq, type);
+ /* GPIO lines 0..12 have dedicated IRQs */
+ if (child == 0) {
+ *parent = 6;
+ return 0;
}
-
- /* This goes away when we transition to DT */
- if (is_fwnode_irqchip(fwspec->fwnode)) {
- ret = irq_domain_translate_twocell(domain, fwspec,
- hwirq, type);
- if (ret)
- return ret;
- WARN_ON(*type == IRQ_TYPE_NONE);
+ if (child == 1) {
+ *parent = 7;
return 0;
}
- return -EINVAL;
-}
-
-static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
- unsigned int irq, unsigned int nr_irqs,
- void *data)
-{
- struct ixp4xx_gpio *g = d->host_data;
- irq_hw_number_t hwirq;
- unsigned int type = IRQ_TYPE_NONE;
- struct irq_fwspec *fwspec = data;
- int ret;
- int i;
-
- ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
- irq, irq + nr_irqs - 1,
- hwirq, hwirq + nr_irqs - 1);
-
- for (i = 0; i < nr_irqs; i++) {
- struct irq_fwspec parent_fwspec;
- const struct ixp4xx_gpio_map *map;
- int j;
-
- /* Not all lines support IRQs */
- for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
- map = &ixp4xx_gpiomap[j];
- if (map->gpio_offset == hwirq)
- break;
- }
- if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
- dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
- return -EINVAL;
- }
- dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
-
- /*
- * We set handle_bad_irq because the .set_type() should
- * always be invoked and set the right type of handler.
- */
- irq_domain_set_info(d,
- irq + i,
- hwirq + i,
- &ixp4xx_gpio_irqchip,
- g,
- handle_bad_irq,
- NULL, NULL);
- irq_set_probe(irq + i);
-
- /*
- * Create a IRQ fwspec to send up to the parent irqdomain:
- * specify the hwirq we address on the parent and tie it
- * all together up the chain.
- */
- parent_fwspec.fwnode = d->parent->fwnode;
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = map->parent_hwirq;
- /* This parent only handles asserted level IRQs */
- parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
- dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
- irq + i, map->parent_hwirq);
- ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
- &parent_fwspec);
- if (ret)
- dev_err(g->dev,
- "failed to allocate parent hwirq %d for hwirq %lu\n",
- map->parent_hwirq, hwirq);
+ if (child >= 2 && child <= 12) {
+ *parent = child + 17;
+ return 0;
}
-
- return 0;
+ return -EINVAL;
}
-static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
- .translate = ixp4xx_gpio_irq_domain_translate,
- .alloc = ixp4xx_gpio_irq_domain_alloc,
- .free = irq_domain_free_irqs_common,
-};
-
static int ixp4xx_gpio_probe(struct platform_device *pdev)
{
unsigned long flags;
@@ -311,8 +194,8 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent;
struct resource *res;
struct ixp4xx_gpio *g;
+ struct gpio_irq_chip *girq;
int ret;
- int i;
g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
if (!g)
@@ -321,9 +204,36 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
g->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(g->base)) {
- dev_err(dev, "ioremap error\n");
+ if (IS_ERR(g->base))
return PTR_ERR(g->base);
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(&res->start);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
}
/*
@@ -360,7 +270,6 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- g->gc.to_irq = ixp4xx_gpio_to_irq;
g->gc.ngpio = 16;
g->gc.label = "IXP4XX_GPIO_CHIP";
/*
@@ -372,86 +281,22 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->gc.parent = &pdev->dev;
g->gc.owner = THIS_MODULE;
+ girq = &g->gc.irq;
+ girq->chip = &ixp4xx_gpio_irqchip;
+ girq->fwnode = g->fwnode;
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
ret = devm_gpiochip_add_data(dev, &g->gc, g);
if (ret) {
dev_err(dev, "failed to add SoC gpiochip\n");
return ret;
}
- /*
- * When we convert to device tree we will simply look up the
- * parent irqdomain using irq_find_host(parent) as parent comes
- * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
- * the fwnode. For now we need this boardfile style code.
- */
- if (np) {
- struct device_node *irq_parent;
-
- irq_parent = of_irq_find_parent(np);
- if (!irq_parent) {
- dev_err(dev, "no IRQ parent node\n");
- return -ENODEV;
- }
- parent = irq_find_host(irq_parent);
- if (!parent) {
- dev_err(dev, "no IRQ parent domain\n");
- return -ENODEV;
- }
- g->fwnode = of_node_to_fwnode(np);
- } else {
- parent = ixp4xx_get_irq_domain();
- g->fwnode = irq_domain_alloc_fwnode(g->base);
- if (!g->fwnode) {
- dev_err(dev, "no domain base\n");
- return -ENODEV;
- }
- }
- g->domain = irq_domain_create_hierarchy(parent,
- IRQ_DOMAIN_FLAG_HIERARCHY,
- ARRAY_SIZE(ixp4xx_gpiomap),
- g->fwnode,
- &ixp4xx_gpio_irqdomain_ops,
- g);
- if (!g->domain) {
- irq_domain_free_fwnode(g->fwnode);
- dev_err(dev, "no hierarchical irq domain\n");
- return ret;
- }
-
- /*
- * After adding OF support, this is no longer needed: irqs
- * will be allocated for the respective fwnodes.
- */
- if (!np) {
- for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
- const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
- struct irq_fwspec fwspec;
-
- fwspec.fwnode = g->fwnode;
- /* This is the hwirq for the GPIO line side of things */
- fwspec.param[0] = map->gpio_offset;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
- fwspec.param_count = 2;
- ret = __irq_domain_alloc_irqs(g->domain,
- -1, /* just pick something */
- 1,
- NUMA_NO_NODE,
- &fwspec,
- false,
- NULL);
- if (ret < 0) {
- irq_domain_free_fwnode(g->fwnode);
- dev_err(dev,
- "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
- map->gpio_offset, map->parent_hwirq,
- ret);
- return ret;
- }
- }
- }
-
platform_set_drvdata(pdev, g);
- dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+ dev_info(dev, "IXP4 GPIO registered\n");
return 0;
}
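
The ixp4xx conversion above replaces the hand-rolled hierarchy domain with gpiolib's hierarchical irqchip support: the driver now only supplies the parent domain, the fwnode and a child-to-parent hwirq translation. A hedged sketch of that callback contract follows; the 1:1 mapping of GPIO lines 0..3 onto parent hwirqs 10..13 is purely hypothetical.

/*
 * Sketch of a child_to_parent_hwirq callback, wired up via
 * girq->child_to_parent_hwirq as in the hunk above.
 */
static int example_child_to_parent_hwirq(struct gpio_chip *gc,
					 unsigned int child,
					 unsigned int child_type,
					 unsigned int *parent,
					 unsigned int *parent_type)
{
	/* tell the parent domain how its input is triggered */
	*parent_type = IRQ_TYPE_LEVEL_HIGH;

	if (child < 4) {		/* only lines 0..3 have parent IRQs */
		*parent = child + 10;
		return 0;
	}
	return -EINVAL;			/* remaining lines cannot generate IRQs */
}

/*
 * In probe, alongside girq->chip/handler/default_type:
 *	girq->fwnode = g->fwnode;
 *	girq->parent_domain = parent;
 *	girq->child_to_parent_hwirq = example_child_to_parent_hwirq;
 */
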
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
deleted file mode 100644
index a0f87c124894..000000000000
--- a/drivers/gpio/gpio-ks8695.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-ks8695/gpio.c
- *
- * Copyright (C) 2006 Andrew Victor
- * Updated to GPIOLIB, Copyright 2008 Simtec Electronics
- * Daniel Silverstone <dsilvers@simtec.co.uk>
- */
-#include <linux/gpio/driver.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/mach/irq.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/gpio-ks8695.h>
-
-/*
- * Configure a GPIO line for either GPIO function, or its internal
- * function (Interrupt, Timer, etc).
- */
-static void ks8695_gpio_mode(unsigned int pin, short gpio)
-{
- unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
- unsigned long x, flags;
-
- if (pin > KS8695_GPIO_5) /* only GPIO 0..5 have internal functions */
- return;
-
- local_irq_save(flags);
-
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
- if (gpio) /* GPIO: set bit to 0 */
- x &= ~enable[pin];
- else /* Internal function: set bit to 1 */
- x |= enable[pin];
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPC);
-
- local_irq_restore(flags);
-}
-
-
-static unsigned short gpio_irq[] = { KS8695_IRQ_EXTERN0, KS8695_IRQ_EXTERN1, KS8695_IRQ_EXTERN2, KS8695_IRQ_EXTERN3 };
-
-/*
- * Configure GPIO pin as external interrupt source.
- */
-int ks8695_gpio_interrupt(unsigned int pin, unsigned int type)
-{
- unsigned long x, flags;
-
- if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
- return -EINVAL;
-
- local_irq_save(flags);
-
- /* set pin as input */
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
- x &= ~IOPM(pin);
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
- local_irq_restore(flags);
-
- /* Set IRQ triggering type */
- irq_set_irq_type(gpio_irq[pin], type);
-
- /* enable interrupt mode */
- ks8695_gpio_mode(pin, 0);
-
- return 0;
-}
-EXPORT_SYMBOL(ks8695_gpio_interrupt);
-
-
-
-/* .... Generic GPIO interface .............................................. */
-
-/*
- * Configure the GPIO line as an input.
- */
-static int ks8695_gpio_direction_input(struct gpio_chip *gc, unsigned int pin)
-{
- unsigned long x, flags;
-
- if (pin > KS8695_GPIO_15)
- return -EINVAL;
-
- /* set pin to GPIO mode */
- ks8695_gpio_mode(pin, 1);
-
- local_irq_save(flags);
-
- /* set pin as input */
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
- x &= ~IOPM(pin);
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-/*
- * Configure the GPIO line as an output, with default state.
- */
-static int ks8695_gpio_direction_output(struct gpio_chip *gc,
- unsigned int pin, int state)
-{
- unsigned long x, flags;
-
- if (pin > KS8695_GPIO_15)
- return -EINVAL;
-
- /* set pin to GPIO mode */
- ks8695_gpio_mode(pin, 1);
-
- local_irq_save(flags);
-
- /* set line state */
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
- if (state)
- x |= IOPD(pin);
- else
- x &= ~IOPD(pin);
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
-
- /* set pin as output */
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
- x |= IOPM(pin);
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-/*
- * Set the state of an output GPIO line.
- */
-static void ks8695_gpio_set_value(struct gpio_chip *gc,
- unsigned int pin, int state)
-{
- unsigned long x, flags;
-
- if (pin > KS8695_GPIO_15)
- return;
-
- local_irq_save(flags);
-
- /* set output line state */
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
- if (state)
- x |= IOPD(pin);
- else
- x &= ~IOPD(pin);
- __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
-
- local_irq_restore(flags);
-}
-
-
-/*
- * Read the state of a GPIO line.
- */
-static int ks8695_gpio_get_value(struct gpio_chip *gc, unsigned int pin)
-{
- unsigned long x;
-
- if (pin > KS8695_GPIO_15)
- return -EINVAL;
-
- x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
- return (x & IOPD(pin)) != 0;
-}
-
-
-/*
- * Map GPIO line to IRQ number.
- */
-static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
-{
- if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
- return -EINVAL;
-
- return gpio_irq[pin];
-}
-
-/* GPIOLIB interface */
-
-static struct gpio_chip ks8695_gpio_chip = {
- .label = "KS8695",
- .direction_input = ks8695_gpio_direction_input,
- .direction_output = ks8695_gpio_direction_output,
- .get = ks8695_gpio_get_value,
- .set = ks8695_gpio_set_value,
- .to_irq = ks8695_gpio_to_irq,
- .base = 0,
- .ngpio = 16,
- .can_sleep = false,
-};
-
-/* Register the GPIOs */
-void ks8695_register_gpios(void)
-{
- if (gpiochip_add_data(&ks8695_gpio_chip, NULL))
- printk(KERN_ERR "Unable to register core GPIOs\n");
-}
-
-/* .... Debug interface ..................................................... */
-
-#ifdef CONFIG_DEBUG_FS
-
-static int ks8695_gpio_show(struct seq_file *s, void *unused)
-{
- unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
- unsigned int intmask[] = { IOPC_IOEINT0TM, IOPC_IOEINT1TM, IOPC_IOEINT2TM, IOPC_IOEINT3TM };
- unsigned long mode, ctrl, data;
- int i;
-
- mode = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
- ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
- data = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
-
- seq_printf(s, "Pin\tI/O\tFunction\tState\n\n");
-
- for (i = KS8695_GPIO_0; i <= KS8695_GPIO_15 ; i++) {
- seq_printf(s, "%i:\t", i);
-
- seq_printf(s, "%s\t", (mode & IOPM(i)) ? "Output" : "Input");
-
- if (i <= KS8695_GPIO_3) {
- if (ctrl & enable[i]) {
- seq_printf(s, "EXT%i ", i);
-
- switch ((ctrl & intmask[i]) >> (4 * i)) {
- case IOPC_TM_LOW:
- seq_printf(s, "(Low)"); break;
- case IOPC_TM_HIGH:
- seq_printf(s, "(High)"); break;
- case IOPC_TM_RISING:
- seq_printf(s, "(Rising)"); break;
- case IOPC_TM_FALLING:
- seq_printf(s, "(Falling)"); break;
- case IOPC_TM_EDGE:
- seq_printf(s, "(Edges)"); break;
- }
- } else
- seq_printf(s, "GPIO\t");
- } else if (i <= KS8695_GPIO_5) {
- if (ctrl & enable[i])
- seq_printf(s, "TOUT%i\t", i - KS8695_GPIO_4);
- else
- seq_printf(s, "GPIO\t");
- } else {
- seq_printf(s, "GPIO\t");
- }
-
- seq_printf(s, "\t");
-
- seq_printf(s, "%i\n", (data & IOPD(i)) ? 1 : 0);
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ks8695_gpio);
-
-static int __init ks8695_gpio_debugfs_init(void)
-{
- /* /sys/kernel/debug/ks8695_gpio */
- debugfs_create_file("ks8695_gpio", S_IFREG | S_IRUGO, NULL, NULL,
- &ks8695_gpio_fops);
- return 0;
-}
-postcore_initcall(ks8695_gpio_debugfs_init);
-
-#endif
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 24885b3db3d5..4e626c4235c2 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -16,36 +16,33 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
-#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
-#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
-#define LPC32XX_GPIO_P3_OUTP_CLR _GPREG(0x008)
-#define LPC32XX_GPIO_P3_OUTP_STATE _GPREG(0x00C)
-#define LPC32XX_GPIO_P2_DIR_SET _GPREG(0x010)
-#define LPC32XX_GPIO_P2_DIR_CLR _GPREG(0x014)
-#define LPC32XX_GPIO_P2_DIR_STATE _GPREG(0x018)
-#define LPC32XX_GPIO_P2_INP_STATE _GPREG(0x01C)
-#define LPC32XX_GPIO_P2_OUTP_SET _GPREG(0x020)
-#define LPC32XX_GPIO_P2_OUTP_CLR _GPREG(0x024)
-#define LPC32XX_GPIO_P2_MUX_SET _GPREG(0x028)
-#define LPC32XX_GPIO_P2_MUX_CLR _GPREG(0x02C)
-#define LPC32XX_GPIO_P2_MUX_STATE _GPREG(0x030)
-#define LPC32XX_GPIO_P0_INP_STATE _GPREG(0x040)
-#define LPC32XX_GPIO_P0_OUTP_SET _GPREG(0x044)
-#define LPC32XX_GPIO_P0_OUTP_CLR _GPREG(0x048)
-#define LPC32XX_GPIO_P0_OUTP_STATE _GPREG(0x04C)
-#define LPC32XX_GPIO_P0_DIR_SET _GPREG(0x050)
-#define LPC32XX_GPIO_P0_DIR_CLR _GPREG(0x054)
-#define LPC32XX_GPIO_P0_DIR_STATE _GPREG(0x058)
-#define LPC32XX_GPIO_P1_INP_STATE _GPREG(0x060)
-#define LPC32XX_GPIO_P1_OUTP_SET _GPREG(0x064)
-#define LPC32XX_GPIO_P1_OUTP_CLR _GPREG(0x068)
-#define LPC32XX_GPIO_P1_OUTP_STATE _GPREG(0x06C)
-#define LPC32XX_GPIO_P1_DIR_SET _GPREG(0x070)
-#define LPC32XX_GPIO_P1_DIR_CLR _GPREG(0x074)
-#define LPC32XX_GPIO_P1_DIR_STATE _GPREG(0x078)
+#define LPC32XX_GPIO_P3_INP_STATE (0x000)
+#define LPC32XX_GPIO_P3_OUTP_SET (0x004)
+#define LPC32XX_GPIO_P3_OUTP_CLR (0x008)
+#define LPC32XX_GPIO_P3_OUTP_STATE (0x00C)
+#define LPC32XX_GPIO_P2_DIR_SET (0x010)
+#define LPC32XX_GPIO_P2_DIR_CLR (0x014)
+#define LPC32XX_GPIO_P2_DIR_STATE (0x018)
+#define LPC32XX_GPIO_P2_INP_STATE (0x01C)
+#define LPC32XX_GPIO_P2_OUTP_SET (0x020)
+#define LPC32XX_GPIO_P2_OUTP_CLR (0x024)
+#define LPC32XX_GPIO_P2_MUX_SET (0x028)
+#define LPC32XX_GPIO_P2_MUX_CLR (0x02C)
+#define LPC32XX_GPIO_P2_MUX_STATE (0x030)
+#define LPC32XX_GPIO_P0_INP_STATE (0x040)
+#define LPC32XX_GPIO_P0_OUTP_SET (0x044)
+#define LPC32XX_GPIO_P0_OUTP_CLR (0x048)
+#define LPC32XX_GPIO_P0_OUTP_STATE (0x04C)
+#define LPC32XX_GPIO_P0_DIR_SET (0x050)
+#define LPC32XX_GPIO_P0_DIR_CLR (0x054)
+#define LPC32XX_GPIO_P0_DIR_STATE (0x058)
+#define LPC32XX_GPIO_P1_INP_STATE (0x060)
+#define LPC32XX_GPIO_P1_OUTP_SET (0x064)
+#define LPC32XX_GPIO_P1_OUTP_CLR (0x068)
+#define LPC32XX_GPIO_P1_OUTP_STATE (0x06C)
+#define LPC32XX_GPIO_P1_DIR_SET (0x070)
+#define LPC32XX_GPIO_P1_DIR_CLR (0x074)
+#define LPC32XX_GPIO_P1_DIR_STATE (0x078)
#define GPIO012_PIN_TO_BIT(x) (1 << (x))
#define GPIO3_PIN_TO_BIT(x) (1 << ((x) + 25))
@@ -72,12 +69,12 @@
#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
struct gpio_regs {
- void __iomem *inp_state;
- void __iomem *outp_state;
- void __iomem *outp_set;
- void __iomem *outp_clr;
- void __iomem *dir_set;
- void __iomem *dir_clr;
+ unsigned long inp_state;
+ unsigned long outp_state;
+ unsigned long outp_set;
+ unsigned long outp_clr;
+ unsigned long dir_set;
+ unsigned long dir_clr;
};
/*
@@ -165,16 +162,27 @@ static struct gpio_regs gpio_grp_regs_p3 = {
struct lpc32xx_gpio_chip {
struct gpio_chip chip;
struct gpio_regs *gpio_grp;
+ void __iomem *reg_base;
};
+static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset)
+{
+ return __raw_readl(group->reg_base + offset);
+}
+
+static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset)
+{
+ __raw_writel(val, group->reg_base + offset);
+}
+
static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
unsigned pin, int input)
{
if (input)
- __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->dir_clr);
else
- __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->dir_set);
}
@@ -184,19 +192,19 @@ static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
u32 u = GPIO3_PIN_TO_BIT(pin);
if (input)
- __raw_writel(u, group->gpio_grp->dir_clr);
+ gpreg_write(group, u, group->gpio_grp->dir_clr);
else
- __raw_writel(u, group->gpio_grp->dir_set);
+ gpreg_write(group, u, group->gpio_grp->dir_set);
}
static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group,
unsigned pin, int high)
{
if (high)
- __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->outp_set);
else
- __raw_writel(GPIO012_PIN_TO_BIT(pin),
+ gpreg_write(group, GPIO012_PIN_TO_BIT(pin),
group->gpio_grp->outp_clr);
}
@@ -206,31 +214,31 @@ static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group,
u32 u = GPIO3_PIN_TO_BIT(pin);
if (high)
- __raw_writel(u, group->gpio_grp->outp_set);
+ gpreg_write(group, u, group->gpio_grp->outp_set);
else
- __raw_writel(u, group->gpio_grp->outp_clr);
+ gpreg_write(group, u, group->gpio_grp->outp_clr);
}
static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group,
unsigned pin, int high)
{
if (high)
- __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
+ gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
else
- __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
+ gpreg_write(group, GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
}
static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
- return GPIO012_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state),
+ return GPIO012_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state),
pin);
}
static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
- int state = __raw_readl(group->gpio_grp->inp_state);
+ int state = gpreg_read(group, group->gpio_grp->inp_state);
/*
* P3 GPIO pin input mapping is not contiguous, GPIOP3-0..4 is mapped
@@ -242,13 +250,13 @@ static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
- return GPI3_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), pin);
+ return GPI3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->inp_state), pin);
}
static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group,
unsigned pin)
{
- return GPO3_PIN_IN_SEL(__raw_readl(group->gpio_grp->outp_state), pin);
+ return GPO3_PIN_IN_SEL(gpreg_read(group, group->gpio_grp->outp_state), pin);
}
/*
@@ -497,12 +505,18 @@ static int lpc32xx_of_xlate(struct gpio_chip *gc,
static int lpc32xx_gpio_probe(struct platform_device *pdev)
{
int i;
+ void __iomem *reg_base;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
if (pdev->dev.of_node) {
lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
+ lpc32xx_gpiochip[i].reg_base = reg_base;
}
devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
&lpc32xx_gpiochip[i]);
@@ -527,3 +541,7 @@ static struct platform_driver lpc32xx_gpio_driver = {
};
module_platform_driver(lpc32xx_gpio_driver);
+
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIO driver for LPC32xx SoC");
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 31b4a091ab60..6bb9741ad036 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -358,25 +358,30 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->can_sleep = false;
gc->parent = dev;
- ret = devm_gpiochip_add_data(dev, gc, lg);
- if (ret) {
- dev_err(dev, "failed adding lp-gpio chip\n");
- return ret;
- }
-
/* set up interrupts */
if (irq_rc && irq_rc->start) {
+ struct gpio_irq_chip *girq;
+
+ girq = &gc->irq;
+ girq->chip = &lp_irqchip;
+ girq->parent_handler = lp_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = (unsigned)irq_rc->start;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
lp_gpio_irq_init_hw(lg);
- ret = gpiochip_irqchip_add(gc, &lp_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "failed to add irqchip\n");
- return ret;
- }
+ }
- gpiochip_set_chained_irqchip(gc, &lp_irqchip,
- (unsigned)irq_rc->start,
- lp_gpio_irq_handler);
+ ret = devm_gpiochip_add_data(dev, gc, lg);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
}
pm_runtime_enable(dev);
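
[Editor's note] Many of the hunks in this series follow the same conversion: fill in the gpio_chip's struct gpio_irq_chip before registering the chip, instead of calling gpiochip_irqchip_add() and gpiochip_set_chained_irqchip() afterwards. A minimal sketch of the target pattern, using hypothetical foo_* names rather than any driver touched here:

	#include <linux/gpio/driver.h>
	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/platform_device.h>

	struct foo_gpio {
		struct gpio_chip chip;
	};

	static struct irq_chip foo_irqchip;		/* mask/unmask/ack callbacks live elsewhere */
	static void foo_gpio_irq_handler(struct irq_desc *desc);	/* chained parent handler */

	static int foo_gpio_probe(struct platform_device *pdev)
	{
		struct foo_gpio *fg;
		struct gpio_irq_chip *girq;
		int irq;

		fg = devm_kzalloc(&pdev->dev, sizeof(*fg), GFP_KERNEL);
		if (!fg)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* ... fill in fg->chip callbacks, label, ngpio, parent ... */

		girq = &fg->chip.irq;
		girq->chip = &foo_irqchip;
		girq->parent_handler = foo_gpio_irq_handler;
		girq->num_parents = 1;
		girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
					     sizeof(*girq->parents), GFP_KERNEL);
		if (!girq->parents)
			return -ENOMEM;
		girq->parents[0] = irq;
		girq->default_type = IRQ_TYPE_NONE;
		girq->handler = handle_simple_irq;

		/* The irqchip is now set up as part of adding the gpio_chip */
		return devm_gpiochip_add_data(&pdev->dev, &fg->chip, fg);
	}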
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 4dbc837d1215..7086f8b5388f 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -120,7 +120,7 @@ static const struct gpio_chip madera_gpio_chip = {
static int madera_gpio_probe(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
- struct madera_pdata *pdata = dev_get_platdata(madera->dev);
+ struct madera_pdata *pdata = &madera->pdata;
struct madera_gpio *madera_gpio;
int ret;
@@ -136,6 +136,9 @@ static int madera_gpio_probe(struct platform_device *pdev)
madera_gpio->gpio_chip.parent = pdev->dev.parent;
switch (madera->type) {
+ case CS47L15:
+ madera_gpio->gpio_chip.ngpio = CS47L15_NUM_GPIOS;
+ break;
case CS47L35:
madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS;
break;
@@ -147,13 +150,18 @@ static int madera_gpio_probe(struct platform_device *pdev)
case CS47L91:
madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS;
break;
+ case CS42L92:
+ case CS47L92:
+ case CS47L93:
+ madera_gpio->gpio_chip.ngpio = CS47L92_NUM_GPIOS;
+ break;
default:
dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type);
return -EINVAL;
}
/* We want to be usable on systems that don't use devicetree or acpi */
- if (pdata && pdata->gpio_base)
+ if (pdata->gpio_base)
madera_gpio->gpio_chip.base = pdata->gpio_base;
else
madera_gpio->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index b7d89e30131e..47d05e357e61 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -270,10 +270,8 @@ static int max77620_gpio_probe(struct platform_device *pdev)
int ret;
gpio_irq = platform_get_irq(pdev, 0);
- if (gpio_irq <= 0) {
- dev_err(&pdev->dev, "GPIO irq not available %d\n", gpio_irq);
+ if (gpio_irq <= 0)
return -ENODEV;
- }
mgpio = devm_kzalloc(&pdev->dev, sizeof(*mgpio), GFP_KERNEL);
if (!mgpio)
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index 3f03f4e8956c..3075f2513c6f 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -188,3 +188,4 @@ module_platform_driver(max77650_gpio_driver);
MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver");
MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:max77650-gpio");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 8f466993cd24..501e89548f53 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include "gpiolib.h"
+#include "gpiolib-acpi.h"
/*
 * Only the first 8 bits of a register correspond to each pin,
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 3302125e5265..4f27ddfe1e2f 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -397,6 +397,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
{
const struct mrfld_gpio_pinrange *range;
const char *pinctrl_dev_name;
+ struct gpio_irq_chip *girq;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -444,6 +445,21 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
raw_spin_lock_init(&priv->lock);
+ girq = &priv->chip.irq;
+ girq->chip = &mrfld_irqchip;
+ girq->parent_handler = mrfld_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = pdev->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ mrfld_irq_init_hw(priv);
+
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
@@ -465,18 +481,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
}
- retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
- handle_bad_irq, IRQ_TYPE_NONE);
- if (retval) {
- dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
- return retval;
- }
-
- mrfld_irq_init_hw(priv);
-
- gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
- mrfld_irq_handler);
-
return 0;
}
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index f1a9c0544e3f..213aedc97dc2 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
.read = gpio_mockup_debugfs_read,
.write = gpio_mockup_debugfs_write,
.llseek = no_llseek,
+ .release = single_release,
};
static void gpio_mockup_debugfs_setup(struct device *dev,
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
new file mode 100644
index 000000000000..3fd729994a38
--- /dev/null
+++ b/drivers/gpio/gpio-moxtet.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox Moxtet GPIO expander
+ *
+ * Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/moxtet.h>
+#include <linux/module.h>
+
+#define MOXTET_GPIO_NGPIOS 12
+#define MOXTET_GPIO_INPUTS 4
+
+struct moxtet_gpio_desc {
+ u16 in_mask;
+ u16 out_mask;
+};
+
+static const struct moxtet_gpio_desc descs[] = {
+ [TURRIS_MOX_MODULE_SFP] = {
+ .in_mask = GENMASK(2, 0),
+ .out_mask = GENMASK(5, 4),
+ },
+};
+
+struct moxtet_gpio_chip {
+ struct device *dev;
+ struct gpio_chip gpio_chip;
+ const struct moxtet_gpio_desc *desc;
+};
+
+static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+ struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ if (chip->desc->in_mask & BIT(offset)) {
+ ret = moxtet_device_read(chip->dev);
+ } else if (chip->desc->out_mask & BIT(offset)) {
+ ret = moxtet_device_written(chip->dev);
+ if (ret >= 0)
+ ret <<= MOXTET_GPIO_INPUTS;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return !!(ret & BIT(offset));
+}
+
+static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
+ int state;
+
+ state = moxtet_device_written(chip->dev);
+ if (state < 0)
+ return;
+
+ offset -= MOXTET_GPIO_INPUTS;
+
+ if (val)
+ state |= BIT(offset);
+ else
+ state &= ~BIT(offset);
+
+ moxtet_device_write(chip->dev, state);
+}
+
+static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
+
+ /* All lines are hard wired to be either input or output, not both. */
+ if (chip->desc->in_mask & BIT(offset))
+ return 1;
+ else if (chip->desc->out_mask & BIT(offset))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int moxtet_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
+
+ if (chip->desc->in_mask & BIT(offset))
+ return 0;
+ else if (chip->desc->out_mask & BIT(offset))
+ return -ENOTSUPP;
+ else
+ return -EINVAL;
+}
+
+static int moxtet_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int val)
+{
+ struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
+
+ if (chip->desc->out_mask & BIT(offset))
+ moxtet_gpio_set_value(gc, offset, val);
+ else if (chip->desc->in_mask & BIT(offset))
+ return -ENOTSUPP;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int moxtet_gpio_probe(struct device *dev)
+{
+ struct moxtet_gpio_chip *chip;
+ struct device_node *nc = dev->of_node;
+ int id;
+
+ id = to_moxtet_device(dev)->id;
+
+ if (id >= ARRAY_SIZE(descs)) {
+ dev_err(dev, "%pOF Moxtet device id 0x%x is not supported by gpio-moxtet driver\n",
+ nc, id);
+ return -ENOTSUPP;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ chip->gpio_chip.parent = dev;
+ chip->desc = &descs[id];
+
+ dev_set_drvdata(dev, chip);
+
+ chip->gpio_chip.label = dev_name(dev);
+ chip->gpio_chip.get_direction = moxtet_gpio_get_direction;
+ chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
+ chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
+ chip->gpio_chip.get = moxtet_gpio_get_value;
+ chip->gpio_chip.set = moxtet_gpio_set_value;
+ chip->gpio_chip.base = -1;
+
+ chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
+
+ chip->gpio_chip.can_sleep = true;
+ chip->gpio_chip.owner = THIS_MODULE;
+
+ return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip);
+}
+
+static const struct of_device_id moxtet_gpio_dt_ids[] = {
+ { .compatible = "cznic,moxtet-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, moxtet_gpio_dt_ids);
+
+static const enum turris_mox_module_id moxtet_gpio_module_table[] = {
+ TURRIS_MOX_MODULE_SFP,
+ 0,
+};
+
+static struct moxtet_driver moxtet_gpio_driver = {
+ .driver = {
+ .name = "moxtet-gpio",
+ .of_match_table = moxtet_gpio_dt_ids,
+ .probe = moxtet_gpio_probe,
+ },
+ .id_table = moxtet_gpio_module_table,
+};
+module_moxtet_driver(moxtet_gpio_driver);
+
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
+MODULE_LICENSE("GPL v2");
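
[Editor's note] The new expander marks its gpio_chip with can_sleep (it talks over the Moxtet bus), so consumers must use the *_cansleep accessors. A consumer-side sketch under that assumption; the "foo" con_id and the owning device are illustrative and not part of this driver:

	#include <linux/gpio/consumer.h>

	static int foo_drive_moxtet_output(struct device *dev)
	{
		struct gpio_desc *out;

		/* On the SFP module, offsets 4 and 5 are the writable lines
		 * (out_mask above); request one of them as an output.
		 */
		out = devm_gpiod_get(dev, "foo", GPIOD_OUT_LOW);
		if (IS_ERR(out))
			return PTR_ERR(out);

		/* The chip can sleep, so the _cansleep variant is required */
		gpiod_set_value_cansleep(out, 1);
		return 0;
	}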
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index c8673a5d9412..16a47de29c94 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -32,6 +32,7 @@
#define GPIO_IMR 0x10
#define GPIO_ICR 0x14
#define GPIO_ICR2 0x18
+#define GPIO_IBE 0x18
struct mpc8xxx_gpio_chip {
struct gpio_chip gc;
@@ -45,6 +46,27 @@ struct mpc8xxx_gpio_chip {
unsigned int irqn;
};
+/* The GPIO Input Buffer Enable register (GPIO_IBE) is used to
+ * control the input enable of each individual GPIO port.
+ * When an individual GPIO port's direction is set to
+ * input (GPIO_GPDIR[DRn=0]), the associated input enable must be
+ * set (GPIOxGPIE[IEn]=1) to propagate the port value to the GPIO
+ * Data Register.
+ */
+static int ls1028a_gpio_dir_in_init(struct gpio_chip *gc)
+{
+ unsigned long flags;
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ return 0;
+}
+
/*
* This hardware has a big endian bit assignment such that GPIO line 0 is
* connected to bit 31, line 1 to bit 30 ... line 31 to bit 0.
@@ -261,6 +283,7 @@ static const struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
};
struct mpc8xxx_gpio_devtype {
+ int (*gpio_dir_in_init)(struct gpio_chip *chip);
int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
int (*gpio_get)(struct gpio_chip *, unsigned int);
int (*irq_set_type)(struct irq_data *, unsigned int);
@@ -271,6 +294,10 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
.irq_set_type = mpc512x_irq_set_type,
};
+static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
+ .gpio_dir_in_init = ls1028a_gpio_dir_in_init,
+};
+
static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
.gpio_dir_out = mpc5125_gpio_dir_out,
.irq_set_type = mpc512x_irq_set_type,
@@ -291,6 +318,8 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
{ .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
{ .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
{ .compatible = "fsl,pq3-gpio", },
+ { .compatible = "fsl,ls1028a-gpio", .data = &ls1028a_gpio_devtype, },
+ { .compatible = "fsl,ls1088a-gpio", .data = &ls1028a_gpio_devtype, },
{ .compatible = "fsl,qoriq-gpio", },
{}
};
@@ -376,6 +405,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
/* ack and mask all irqs */
gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+ /* enable input buffer */
+ if (devtype->gpio_dir_in_init)
+ devtype->gpio_dir_in_init(gc);
irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 79654fb2e50f..d1d785f983a7 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -241,13 +241,6 @@ mediatek_gpio_bank_probe(struct device *dev,
if (!rg->chip.label)
return -ENOMEM;
- ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
- if (ret < 0) {
- dev_err(dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
- return ret;
- }
-
rg->irq_chip.name = dev_name(dev);
rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
@@ -256,8 +249,10 @@ mediatek_gpio_bank_probe(struct device *dev,
rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
+ struct gpio_irq_chip *girq;
+
/*
- * Manually request the irq here instead of passing
+ * Directly request the irq here instead of passing
* a flow-handler to gpiochip_set_chained_irqchip,
* because the irq is shared.
*/
@@ -271,15 +266,21 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
- ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
- 0, handle_simple_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "failed to add gpiochip_irqchip\n");
- return ret;
- }
+ girq = &rg->chip.irq;
+ girq->chip = &rg->irq_chip;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
- gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
- mtk->gpio_irq, NULL);
+ ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ if (ret < 0) {
+ dev_err(dev, "Could not register gpio %d, ret=%d\n",
+ rg->chip.ngpio, ret);
+ return ret;
}
/* set polarity to low for all gpios */
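
[Editor's note] When the parent interrupt is shared, as in the mt7621 hunk above, the driver keeps requesting it itself and leaves the chained-handler fields empty. A minimal sketch of that variant, reusing the hypothetical foo_* declarations from the earlier sketch (foo_gpio_shared_isr is likewise illustrative):

	static irqreturn_t foo_gpio_shared_isr(int irq, void *data);	/* dispatches per-line IRQs */

	static int foo_gpio_setup_shared_irq(struct device *dev, struct foo_gpio *fg,
					     int shared_irq)
	{
		struct gpio_irq_chip *girq = &fg->chip.irq;
		int ret;

		/* The upstream line is shared, so request it here rather than
		 * letting gpiolib install a chained flow handler for it.
		 */
		ret = devm_request_irq(dev, shared_irq, foo_gpio_shared_isr,
				       IRQF_SHARED, dev_name(dev), fg);
		if (ret)
			return ret;

		girq->chip = &foo_irqchip;
		girq->parent_handler = NULL;	/* no chained parent handler */
		girq->num_parents = 0;
		girq->parents = NULL;
		girq->default_type = IRQ_TYPE_NONE;
		girq->handler = handle_simple_irq;

		return 0;
	}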
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index b2813580c582..7907a8755866 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -435,12 +435,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return port->irq;
/* the controller clock is optional */
- port->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(port->clk)) {
- if (PTR_ERR(port->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- port->clk = NULL;
- }
+ port->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(port->clk))
+ return PTR_ERR(port->clk);
err = clk_prepare_enable(port->clk);
if (err) {
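
[Editor's note] devm_clk_get_optional() returns NULL rather than an error when no clock is described, and the clk API treats a NULL clk as a no-op, which is why the mxc probe path collapses to a single IS_ERR() check. A small sketch of the idiom (the helper name is illustrative):

	#include <linux/clk.h>

	static int foo_enable_optional_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk;

		/* NULL means "no clock described"; a real error such as
		 * -EPROBE_DEFER still comes back as an ERR_PTR.
		 */
		clk = devm_clk_get_optional(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		*out = clk;
		return clk_prepare_enable(clk);	/* a NULL clk is a no-op here */
	}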
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 378b206d2dc9..de5d1383f28d 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -28,9 +29,9 @@
#define PCA953X_INVERT 0x02
#define PCA953X_DIRECTION 0x03
-#define REG_ADDR_MASK 0x3f
-#define REG_ADDR_EXT 0x40
-#define REG_ADDR_AI 0x80
+#define REG_ADDR_MASK GENMASK(5, 0)
+#define REG_ADDR_EXT BIT(6)
+#define REG_ADDR_AI BIT(7)
#define PCA957X_IN 0x00
#define PCA957X_INVRT 0x01
@@ -55,17 +56,17 @@
#define PCAL6524_OUT_INDCONF 0x2c
#define PCAL6524_DEBOUNCE 0x2d
-#define PCA_GPIO_MASK 0x00FF
+#define PCA_GPIO_MASK GENMASK(7, 0)
-#define PCAL_GPIO_MASK 0x1f
-#define PCAL_PINCTRL_MASK 0x60
+#define PCAL_GPIO_MASK GENMASK(4, 0)
+#define PCAL_PINCTRL_MASK GENMASK(6, 5)
-#define PCA_INT 0x0100
-#define PCA_PCAL 0x0200
+#define PCA_INT BIT(8)
+#define PCA_PCAL BIT(9)
#define PCA_LATCH_INT (PCA_PCAL | PCA_INT)
-#define PCA953X_TYPE 0x1000
-#define PCA957X_TYPE 0x2000
-#define PCA_TYPE_MASK 0xF000
+#define PCA953X_TYPE BIT(12)
+#define PCA957X_TYPE BIT(13)
+#define PCA_TYPE_MASK GENMASK(15, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
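
[Editor's note] BIT() and GENMASK() from <linux/bits.h> expand to the same constants as the raw hex values they replace in this hunk; only the spelling changes. A compile-time sanity sketch (the helper is illustrative, not part of the driver):

	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static inline void pca953x_mask_sanity(void)
	{
		BUILD_BUG_ON(GENMASK(5, 0)   != 0x3f);		/* REG_ADDR_MASK */
		BUILD_BUG_ON(BIT(6)          != 0x40);		/* REG_ADDR_EXT  */
		BUILD_BUG_ON(BIT(7)          != 0x80);		/* REG_ADDR_AI   */
		BUILD_BUG_ON(GENMASK(7, 0)   != 0x00ff);	/* PCA_GPIO_MASK */
		BUILD_BUG_ON(BIT(12)         != 0x1000);	/* PCA953X_TYPE  */
		BUILD_BUG_ON(GENMASK(15, 12) != 0xf000);	/* PCA_TYPE_MASK */
	}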
@@ -565,7 +566,7 @@ static void pca953x_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ));
+ chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
}
static void pca953x_irq_unmask(struct irq_data *d)
@@ -573,7 +574,7 @@ static void pca953x_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ);
+ chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
}
static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -604,10 +605,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
u8 new_irqs;
int level, i;
u8 invert_irq_mask[MAX_BANK];
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
@@ -641,7 +641,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
int bank_nb = d->hwirq / BANK_SZ;
- u8 mask = 1 << (d->hwirq % BANK_SZ);
+ u8 mask = BIT(d->hwirq % BANK_SZ);
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -666,7 +666,7 @@ static void pca953x_irq_shutdown(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 mask = 1 << (d->hwirq % BANK_SZ);
+ u8 mask = BIT(d->hwirq % BANK_SZ);
chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
@@ -679,7 +679,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
bool pending_seen = false;
bool trigger_seen = false;
u8 trigger[MAX_BANK];
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
int ret, i;
if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +710,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
return false;
/* Remove output pins from the equation */
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
cur_stat[i] &= reg_direction[i];
@@ -768,7 +767,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct irq_chip *irq_chip = &chip->irq_chip;
- int reg_direction[MAX_BANK];
+ u8 reg_direction[MAX_BANK];
int ret, i;
if (!client->irq)
@@ -789,8 +788,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* interrupt. We have to rely on the previous read for
* this purpose.
*/
- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
- NBANK(chip));
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
for (i = 0; i < NBANK(chip); i++)
chip->irq_stat[i] &= reg_direction[i];
mutex_init(&chip->irq_lock);
@@ -849,12 +847,12 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
ret = regcache_sync_region(chip->regmap, chip->regs->output,
chip->regs->output + NBANK(chip));
- if (ret != 0)
+ if (ret)
goto out;
ret = regcache_sync_region(chip->regmap, chip->regs->direction,
chip->regs->direction + NBANK(chip));
- if (ret != 0)
+ if (ret)
goto out;
/* set platform specific polarity inversion */
@@ -949,19 +947,15 @@ static int pca953x_probe(struct i2c_client *client,
if (i2c_id) {
chip->driver_data = i2c_id->driver_data;
} else {
- const struct acpi_device_id *acpi_id;
- struct device *dev = &client->dev;
-
- chip->driver_data = (uintptr_t)of_device_get_match_data(dev);
- if (!chip->driver_data) {
- acpi_id = acpi_match_device(pca953x_acpi_ids, dev);
- if (!acpi_id) {
- ret = -ENODEV;
- goto err_exit;
- }
-
- chip->driver_data = acpi_id->driver_data;
+ const void *match;
+
+ match = device_get_match_data(&client->dev);
+ if (!match) {
+ ret = -ENODEV;
+ goto err_exit;
}
+
+ chip->driver_data = (uintptr_t)match;
}
i2c_set_clientdata(client, chip);
@@ -1041,8 +1035,7 @@ static int pca953x_remove(struct i2c_client *client)
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
- dev_err(&client->dev, "%s failed, %d\n",
- "teardown", ret);
+ dev_err(&client->dev, "teardown failed, %d\n", ret);
} else {
ret = 0;
}
@@ -1064,14 +1057,14 @@ static int pca953x_regcache_sync(struct device *dev)
*/
ret = regcache_sync_region(chip->regmap, chip->regs->direction,
chip->regs->direction + NBANK(chip));
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
ret = regcache_sync_region(chip->regmap, chip->regs->output,
chip->regs->output + NBANK(chip));
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
}
@@ -1080,7 +1073,7 @@ static int pca953x_regcache_sync(struct device *dev)
if (chip->driver_data & PCA_PCAL) {
ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
PCAL953X_IN_LATCH + NBANK(chip));
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
return ret;
@@ -1088,7 +1081,7 @@ static int pca953x_regcache_sync(struct device *dev)
ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
PCAL953X_INT_MASK + NBANK(chip));
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
return ret;
@@ -1120,7 +1113,7 @@ static int pca953x_resume(struct device *dev)
if (!atomic_read(&chip->wakeup_path)) {
ret = regulator_enable(chip->regulator);
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return 0;
}
@@ -1133,7 +1126,7 @@ static int pca953x_resume(struct device *dev)
return ret;
ret = regcache_sync(chip->regmap);
- if (ret != 0) {
+ if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 1d99293096f2..3f3d9a94b709 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -409,8 +409,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
static int __maybe_unused pch_gpio_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pch_gpio *chip = pci_get_drvdata(pdev);
+ struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
@@ -422,8 +421,7 @@ static int __maybe_unused pch_gpio_suspend(struct device *dev)
static int __maybe_unused pch_gpio_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pch_gpio *chip = pci_get_drvdata(pdev);
+ struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 24228cf79afc..05000cace9b2 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -305,10 +305,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
mutex_init(&pmic_eic->buslock);
pmic_eic->irq = platform_get_irq(pdev, 0);
- if (pmic_eic->irq < 0) {
- dev_err(&pdev->dev, "Failed to get PMIC EIC interrupt.\n");
+ if (pmic_eic->irq < 0)
return pmic_eic->irq;
- }
pmic_eic->map = dev_get_regmap(pdev->dev.parent, NULL);
if (!pmic_eic->map)
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index f5c8b3a351d5..d7314d39ab65 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -226,10 +226,8 @@ static int sprd_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
sprd_gpio->irq = platform_get_irq(pdev, 0);
- if (sprd_gpio->irq < 0) {
- dev_err(&pdev->dev, "Failed to get GPIO interrupt.\n");
+ if (sprd_gpio->irq < 0)
return sprd_gpio->irq;
- }
sprd_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_gpio->base))
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index dbf9cbe36b2b..994d542daf53 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -429,6 +429,23 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
+static void stmpe_init_irq_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+ int i;
+
+ if (!stmpe_gpio->norequest_mask)
+ return;
+
+ /* Forbid unused lines to be mapped as IRQs */
+ for (i = 0; i < sizeof(u32); i++) {
+ if (stmpe_gpio->norequest_mask & BIT(i))
+ clear_bit(i, valid_mask);
+ }
+}
+
static int stmpe_gpio_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
@@ -454,14 +471,21 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip.parent = &pdev->dev;
stmpe_gpio->chip.of_node = np;
stmpe_gpio->chip.base = -1;
+ /*
+ * REVISIT: this makes sure the valid mask gets allocated and
+ * filled in when adding the gpio_chip, but the rest of the
+ * gpio_irqchip is still filled in using the old method
+	 * in gpiochip_irqchip_add_nested(), so clean this up once we
+ * get the gpio_irqchip to initialize while adding the
+ * gpio_chip also for threaded irqchips.
+ */
+ stmpe_gpio->chip.irq.init_valid_mask = stmpe_init_irq_valid_mask;
if (IS_ENABLED(CONFIG_DEBUG_FS))
stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
of_property_read_u32(np, "st,norequest-mask",
&stmpe_gpio->norequest_mask);
- if (stmpe_gpio->norequest_mask)
- stmpe_gpio->chip.irq.need_valid_mask = true;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -487,14 +511,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
goto out_disable;
}
- if (stmpe_gpio->norequest_mask) {
- int i;
-
- /* Forbid unused lines to be mapped as IRQs */
- for (i = 0; i < sizeof(u32); i++)
- if (stmpe_gpio->norequest_mask & BIT(i))
- clear_bit(i, stmpe_gpio->chip.irq.valid_mask);
- }
ret = gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
&stmpe_gpio_irq_chip,
0,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index bd1f3f775ce9..5e375186f90e 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -171,10 +171,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
struct irq_chip_generic *gc;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "No interrupt specified.\n");
+ if (ret < 0)
return ret;
- }
tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 0f59161a4701..8a01d3694b28 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -624,10 +624,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
for (i = 0; i < tgi->bank_count; i++) {
ret = platform_get_irq(pdev, i);
- if (ret < 0) {
- dev_err(&pdev->dev, "Missing IRQ resource: %d\n", ret);
+ if (ret < 0)
return ret;
- }
bank = &tgi->bank_info[i];
bank->bank = i;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 715371b5102a..ddad5c7ea617 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -53,7 +53,6 @@ struct thunderx_line {
struct thunderx_gpio {
struct gpio_chip chip;
u8 __iomem *register_base;
- struct irq_domain *irqd;
struct msix_entry *msix_entries; /* per line MSI-X */
struct thunderx_line *line_entries; /* per line irq info */
raw_spinlock_t lock;
@@ -283,54 +282,60 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
}
}
-static void thunderx_gpio_irq_ack(struct irq_data *data)
+static void thunderx_gpio_irq_ack(struct irq_data *d)
{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
writeq(GPIO_INTR_INTR,
- txline->txgpio->register_base + intr_reg(txline->line));
+ txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}
-static void thunderx_gpio_irq_mask(struct irq_data *data)
+static void thunderx_gpio_irq_mask(struct irq_data *d)
{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
writeq(GPIO_INTR_ENA_W1C,
- txline->txgpio->register_base + intr_reg(txline->line));
+ txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}
-static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
+static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
- txline->txgpio->register_base + intr_reg(txline->line));
+ txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}
-static void thunderx_gpio_irq_unmask(struct irq_data *data)
+static void thunderx_gpio_irq_unmask(struct irq_data *d)
{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
writeq(GPIO_INTR_ENA_W1S,
- txline->txgpio->register_base + intr_reg(txline->line));
+ txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}
-static int thunderx_gpio_irq_set_type(struct irq_data *data,
+static int thunderx_gpio_irq_set_type(struct irq_data *d,
unsigned int flow_type)
{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
- struct thunderx_gpio *txgpio = txline->txgpio;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
+ struct thunderx_line *txline =
+ &txgpio->line_entries[irqd_to_hwirq(d)];
u64 bit_cfg;
- irqd_set_trigger_type(data, flow_type);
+ irqd_set_trigger_type(d, flow_type);
bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
- irq_set_handler_locked(data, handle_fasteoi_ack_irq);
+ irq_set_handler_locked(d, handle_fasteoi_ack_irq);
bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
} else {
- irq_set_handler_locked(data, handle_fasteoi_mask_irq);
+ irq_set_handler_locked(d, handle_fasteoi_mask_irq);
}
raw_spin_lock(&txgpio->lock);
@@ -359,33 +364,6 @@ static void thunderx_gpio_irq_disable(struct irq_data *data)
irq_chip_disable_parent(data);
}
-static int thunderx_gpio_irq_request_resources(struct irq_data *data)
-{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
- struct thunderx_gpio *txgpio = txline->txgpio;
- int r;
-
- r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
- if (r)
- return r;
-
- r = irq_chip_request_resources_parent(data);
- if (r)
- gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-
- return r;
-}
-
-static void thunderx_gpio_irq_release_resources(struct irq_data *data)
-{
- struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
- struct thunderx_gpio *txgpio = txline->txgpio;
-
- irq_chip_release_resources_parent(data);
-
- gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
-}
-
/*
* Interrupts are chained from underlying MSI-X vectors. We have
* these irq_chip functions to be able to handle level triggering
@@ -402,48 +380,22 @@ static struct irq_chip thunderx_gpio_irq_chip = {
.irq_unmask = thunderx_gpio_irq_unmask,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_request_resources = thunderx_gpio_irq_request_resources,
- .irq_release_resources = thunderx_gpio_irq_release_resources,
.irq_set_type = thunderx_gpio_irq_set_type,
.flags = IRQCHIP_SET_TYPE_MASKED
};
-static int thunderx_gpio_irq_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- irq_hw_number_t *hwirq,
- unsigned int *type)
+static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
{
- struct thunderx_gpio *txgpio = d->host_data;
-
- if (WARN_ON(fwspec->param_count < 2))
- return -EINVAL;
- if (fwspec->param[0] >= txgpio->chip.ngpio)
- return -EINVAL;
- *hwirq = fwspec->param[0];
- *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
- return 0;
-}
-
-static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg)
-{
- struct thunderx_line *txline = arg;
+ struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
- return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
- &thunderx_gpio_irq_chip, txline);
-}
-
-static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
- .alloc = thunderx_gpio_irq_alloc,
- .translate = thunderx_gpio_irq_translate
-};
-
-static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
-
- return irq_find_mapping(txgpio->irqd, offset);
+ *parent = txgpio->base_msi + (2 * child);
+ *parent_type = IRQ_TYPE_LEVEL_HIGH;
+ return 0;
}
static int thunderx_gpio_probe(struct pci_dev *pdev,
@@ -453,6 +405,7 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
struct device *dev = &pdev->dev;
struct thunderx_gpio *txgpio;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
int ngpio, i;
int err = 0;
@@ -497,8 +450,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
}
txgpio->msix_entries = devm_kcalloc(dev,
- ngpio, sizeof(struct msix_entry),
- GFP_KERNEL);
+ ngpio, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!txgpio->msix_entries) {
err = -ENOMEM;
goto out;
@@ -539,27 +492,6 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
if (err < 0)
goto out;
- /*
- * Push GPIO specific irqdomain on hierarchy created as a side
- * effect of the pci_enable_msix()
- */
- txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
- 0, 0, of_node_to_fwnode(dev->of_node),
- &thunderx_gpio_irqd_ops, txgpio);
- if (!txgpio->irqd) {
- err = -ENOMEM;
- goto out;
- }
-
- /* Push on irq_data and the domain for each line. */
- for (i = 0; i < ngpio; i++) {
- err = irq_domain_push_irq(txgpio->irqd,
- txgpio->msix_entries[i].vector,
- &txgpio->line_entries[i]);
- if (err < 0)
- dev_err(dev, "irq_domain_push_irq: %d\n", err);
- }
-
chip->label = KBUILD_MODNAME;
chip->parent = dev;
chip->owner = THIS_MODULE;
@@ -574,11 +506,28 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->set = thunderx_gpio_set;
chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
- chip->to_irq = thunderx_gpio_to_irq;
+ girq = &chip->irq;
+ girq->chip = &thunderx_gpio_irq_chip;
+ girq->fwnode = of_node_to_fwnode(dev->of_node);
+ girq->parent_domain =
+ irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
+ girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
err = devm_gpiochip_add_data(dev, chip, txgpio);
if (err)
goto out;
+ /* Push on irq_data and the domain for each line. */
+ for (i = 0; i < ngpio; i++) {
+ err = irq_domain_push_irq(chip->irq.domain,
+ txgpio->msix_entries[i].vector,
+ chip);
+ if (err < 0)
+ dev_err(dev, "irq_domain_push_irq: %d\n", err);
+ }
+
dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
ngpio, chip->base);
return 0;
@@ -593,10 +542,10 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
for (i = 0; i < txgpio->chip.ngpio; i++)
- irq_domain_pop_irq(txgpio->irqd,
+ irq_domain_pop_irq(txgpio->chip.irq.domain,
txgpio->msix_entries[i].vector);
- irq_domain_remove(txgpio->irqd);
+ irq_domain_remove(txgpio->chip.irq.domain);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index d5880db7f9d4..a3109bcaa0ac 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -214,11 +214,23 @@ static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = {
tqmx86_gpio_runtime_resume, NULL)
};
+static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ /* Only GPIOs 4-7 are valid for interrupts. Clear the others */
+ clear_bit(0, valid_mask);
+ clear_bit(1, valid_mask);
+ clear_bit(2, valid_mask);
+ clear_bit(3, valid_mask);
+}
+
static int tqmx86_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tqmx86_gpio_data *gpio;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
void __iomem *io_base;
struct resource *res;
int ret, irq;
@@ -259,17 +271,10 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
chip->get = tqmx86_gpio_get;
chip->set = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
- chip->irq.need_valid_mask = true;
chip->parent = pdev->dev.parent;
pm_runtime_enable(&pdev->dev);
- ret = devm_gpiochip_add_data(dev, chip, gpio);
- if (ret) {
- dev_err(dev, "Could not register GPIO chip\n");
- goto out_pm_dis;
- }
-
if (irq) {
struct irq_chip *irq_chip = &gpio->irq_chip;
u8 irq_status;
@@ -287,23 +292,28 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS);
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
- ret = gpiochip_irqchip_add(chip, irq_chip,
- 0, handle_simple_irq,
- IRQ_TYPE_EDGE_BOTH);
- if (ret) {
- dev_err(dev, "Could not add irq chip\n");
+ girq = &chip->irq;
+ girq->chip = irq_chip;
+ girq->parent_handler = tqmx86_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto out_pm_dis;
}
-
- gpiochip_set_chained_irqchip(chip, irq_chip,
- irq, tqmx86_gpio_irq_handler);
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->init_valid_mask = tqmx86_init_irq_valid_mask;
}
- /* Only GPIOs 4-7 are valid for interrupts. Clear the others */
- clear_bit(0, chip->irq.valid_mask);
- clear_bit(1, chip->irq.valid_mask);
- clear_bit(2, chip->irq.valid_mask);
- clear_bit(3, chip->irq.valid_mask);
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
+ if (ret) {
+ dev_err(dev, "Could not register GPIO chip\n");
+ goto out_pm_dis;
+ }
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
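
[Editor's note] The new init_valid_mask() hook receives the bitmap to edit directly; the four clear_bit() calls above could equally be written with a single bitmap helper. A functionally equivalent sketch, illustrative only:

	#include <linux/bitmap.h>

	static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
					       unsigned long *valid_mask,
					       unsigned int ngpios)
	{
		/* GPIOs 0-3 cannot raise interrupts on this device */
		bitmap_clear(valid_mask, 0, 4);
	}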
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7ba668db171b..58776f2d69ff 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -243,6 +243,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
struct irq_chip *ic;
int i;
int ret;
@@ -318,10 +319,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
ic->irq_set_type = vf610_gpio_irq_set_type;
ic->irq_set_wake = vf610_gpio_irq_set_wake;
- ret = devm_gpiochip_add_data(dev, gc, port);
- if (ret < 0)
- return ret;
-
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -329,15 +326,20 @@ static int vf610_gpio_probe(struct platform_device *pdev)
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
- ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "failed to add irqchip\n");
- return ret;
- }
- gpiochip_set_chained_irqchip(gc, ic, port->irq,
- vf610_gpio_irq_handler);
+ girq = &gc->irq;
+ girq->chip = ic;
+ girq->parent_handler = vf610_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = port->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- return 0;
+ return devm_gpiochip_add_data(dev, gc, port);
}
static struct platform_driver vf610_gpio_driver = {
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 9b604f13e302..c301c1d56dd2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(gpioa_freq,
/* ----- begin of gpio a chip -------------------------------------------- */
static int vprbrd_gpioa_get(struct gpio_chip *chip,
- unsigned offset)
+ unsigned int offset)
{
int ret, answer, error = 0;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -129,7 +129,7 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip,
}
static void vprbrd_gpioa_set(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -170,7 +170,7 @@ static void vprbrd_gpioa_set(struct gpio_chip *chip,
}
static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
- unsigned offset)
+ unsigned int offset)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -207,7 +207,7 @@ static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
}
static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -251,8 +251,8 @@ static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
/* ----- begin of gpio b chip -------------------------------------------- */
-static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
- unsigned dir)
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned int offset,
+ unsigned int dir)
{
struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
int ret;
@@ -273,7 +273,7 @@ static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
}
static int vprbrd_gpiob_get(struct gpio_chip *chip,
- unsigned offset)
+ unsigned int offset)
{
int ret;
u16 val;
@@ -305,7 +305,7 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip,
}
static void vprbrd_gpiob_set(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -338,7 +338,7 @@ static void vprbrd_gpiob_set(struct gpio_chip *chip,
}
static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
- unsigned offset)
+ unsigned int offset)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
@@ -359,7 +359,7 @@ static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
}
static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 38c01912c7b2..25d86441666e 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include "gpiolib.h"
+#include "gpiolib-acpi.h"
/* Common property names */
#define XGENE_NIRQ_PROPERTY "apm,nr-irqs"
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 54d3359444f3..d7b16bb9e4e4 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -290,6 +290,7 @@ MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
static int xlp_gpio_probe(struct platform_device *pdev)
{
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
struct xlp_gpio_priv *priv;
void __iomem *gpio_base;
int irq_base, irq, err;
@@ -395,27 +396,27 @@ static int xlp_gpio_probe(struct platform_device *pdev)
irq_base = 0;
}
+ girq = &gc->irq;
+ girq->chip = &xlp_gpio_irq_chip;
+ girq->parent_handler = xlp_gpio_generic_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->first = irq_base;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
err = gpiochip_add_data(gc, priv);
if (err < 0)
return err;
- err = gpiochip_irqchip_add(gc, &xlp_gpio_irq_chip, irq_base,
- handle_level_irq, IRQ_TYPE_NONE);
- if (err) {
- dev_err(&pdev->dev, "Could not connect irqchip to gpiochip!\n");
- goto out_gpio_remove;
- }
-
- gpiochip_set_chained_irqchip(gc, &xlp_gpio_irq_chip, irq,
- xlp_gpio_generic_handler);
-
dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio);
return 0;
-
-out_gpio_remove:
- gpiochip_remove(gc);
- return err;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 8637adb6bc20..98cbaf0e415e 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -215,6 +215,7 @@ static int zx_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx_gpio *chip;
+ struct gpio_irq_chip *girq;
int irq, id, ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -242,32 +243,30 @@ static int zx_gpio_probe(struct platform_device *pdev)
chip->gc.parent = dev;
chip->gc.owner = THIS_MODULE;
- ret = gpiochip_add_data(&chip->gc, chip);
- if (ret)
- return ret;
-
/*
* irq_chip support
*/
writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
writew_relaxed(0, chip->base + ZX_GPIO_IE);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "invalid IRQ\n");
- gpiochip_remove(&chip->gc);
- return -ENODEV;
- }
+ if (irq < 0)
+ return irq;
+ girq = &chip->gc.irq;
+ girq->chip = &zx_irqchip;
+ girq->parent_handler = zx_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
- ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
- 0, handle_simple_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev, "could not add irqchip\n");
- gpiochip_remove(&chip->gc);
+ ret = gpiochip_add_data(&chip->gc, chip);
+ if (ret)
return ret;
- }
- gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
- irq, zx_irq_handler);
platform_set_drvdata(pdev, chip);
dev_info(dev, "ZX GPIO chip registered\n");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index f241b6c13dbe..cd475ff4bcad 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -830,6 +830,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
int ret, bank_num;
struct zynq_gpio *gpio;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
const struct of_device_id *match;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -849,10 +850,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio->base_addr);
gpio->irq = platform_get_irq(pdev, 0);
- if (gpio->irq < 0) {
- dev_err(&pdev->dev, "invalid IRQ\n");
+ if (gpio->irq < 0)
return gpio->irq;
- }
/* configure the gpio chip */
chip = &gpio->chip;
@@ -887,34 +886,38 @@ static int zynq_gpio_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_dis;
- /* report a bug if gpio chip registration fails */
- ret = gpiochip_add_data(chip, gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add gpio chip\n");
- goto err_pm_put;
- }
-
/* disable interrupts for all banks */
for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++)
writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
- ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add irq chip\n");
- goto err_rm_gpiochip;
+ /* Set up the GPIO irqchip */
+ girq = &chip->irq;
+ girq->chip = &zynq_gpio_edge_irqchip;
+ girq->parent_handler = zynq_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto err_pm_put;
}
+ girq->parents[0] = gpio->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, gpio->irq,
- zynq_gpio_irqhandler);
+ /* report a bug if gpio chip registration fails */
+ ret = gpiochip_add_data(chip, gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add gpio chip\n");
+ goto err_pm_put;
+ }
pm_runtime_put(&pdev->dev);
return 0;
-err_rm_gpiochip:
- gpiochip_remove(chip);
err_pm_put:
pm_runtime_put(&pdev->dev);
err_pm_dis:
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 39f2f9035c11..609ed16ae933 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -7,6 +7,7 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
@@ -18,6 +19,12 @@
#include <linux/pinctrl/pinctrl.h>
#include "gpiolib.h"
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
@@ -170,10 +177,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
- value = gpiod_get_raw_value_cansleep(event->desc);
- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
- event->handler(event->irq, event);
+ if (run_edge_events_on_boot &&
+ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+ value = gpiod_get_raw_value_cansleep(event->desc);
+ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+ event->handler(event->irq, event);
+ }
}
static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
@@ -382,6 +392,13 @@ int acpi_dev_add_driver_gpios(struct acpi_device *adev,
}
EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+{
+ if (adev)
+ adev->driver_gpios = NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
+
static void devm_acpi_dev_release_driver_gpios(struct device *dev, void *res)
{
acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
@@ -720,6 +737,16 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ret ? ERR_PTR(ret) : lookup.desc;
}
+static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
+ const char *con_id)
+{
+ /* Never allow fallback if the device has properties */
+ if (acpi_dev_has_props(adev) || adev->driver_gpios)
+ return false;
+
+ return con_id == NULL;
+}
+
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
@@ -1256,15 +1283,6 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
return count ? count : -ENOENT;
}
-bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
-{
- /* Never allow fallback if the device has properties */
- if (acpi_dev_has_props(adev) || adev->driver_gpios)
- return false;
-
- return con_id == NULL;
-}
-
/* Run deferred acpi_gpiochip_request_irqs() */
static int acpi_gpio_handle_deferred_request_irqs(void)
{
@@ -1283,3 +1301,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ }
+ },
+ {} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+ if (run_edge_events_on_boot < 0) {
+ if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
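The run_edge_events_on_boot parameter added above is a tri-state: -1 (the default) means "decide from the DMI deny-list", 0 disables the boot-time replay of edge events, and 1 forces it on; the decision is taken at postcore_initcall() time, right after dmi_setup(). The same shape, reduced to its essentials with hypothetical names (my_quirk_enable, "Example Vendor"), is sketched below.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

/* -1 = auto (decide via DMI), 0 = force off, 1 = force on */
static int my_quirk_enable = -1;
module_param(my_quirk_enable, int, 0444);
MODULE_PARM_DESC(my_quirk_enable, "Enable the quirk: 0=no, 1=yes, -1=auto");

static const struct dmi_system_id my_quirk_blacklist[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{} /* Terminating entry */
};

static int my_quirk_setup_params(void)
{
	/* Resolve "auto" once DMI data is available */
	if (my_quirk_enable < 0)
		my_quirk_enable = !dmi_check_system(my_quirk_blacklist);
	return 0;
}
/* Runs directly after dmi_setup(), which is a core_initcall() */
postcore_initcall(my_quirk_setup_params);

On an affected machine the DMI-derived default can still be overridden from the kernel command line; for built-in gpiolib this is likely spelled gpiolib_acpi.run_edge_events_on_boot=1, though the exact prefix depends on how the object is built.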
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
new file mode 100644
index 000000000000..1c6d65cf0629
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ACPI helpers for GPIO API
+ *
+ * Copyright (C) 2012,2019 Intel Corporation
+ */
+
+#ifndef GPIOLIB_ACPI_H
+#define GPIOLIB_ACPI_H
+
+struct acpi_device;
+
+/**
+ * struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
+ * @flags: GPIO initialization flags
+ * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @pin_config: pin bias as provided by ACPI
+ * @polarity: interrupt polarity as provided by ACPI
+ * @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
+ */
+struct acpi_gpio_info {
+ struct acpi_device *adev;
+ enum gpiod_flags flags;
+ bool gpioint;
+ int pin_config;
+ int polarity;
+ int triggering;
+ unsigned int quirks;
+};
+
+#ifdef CONFIG_ACPI
+void acpi_gpiochip_add(struct gpio_chip *chip);
+void acpi_gpiochip_remove(struct gpio_chip *chip);
+
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
+
+int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
+ struct acpi_gpio_info *info);
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info);
+
+struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *dflags,
+ unsigned long *lookupflags);
+struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
+ const char *propname, int index,
+ struct acpi_gpio_info *info);
+
+int acpi_gpio_count(struct device *dev, const char *con_id);
+#else
+static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
+static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+
+static inline void
+acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
+
+static inline void
+acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
+
+static inline int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+ return 0;
+}
+static inline int
+acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ return 0;
+}
+
+static inline struct gpio_desc *
+acpi_find_gpio(struct device *dev, const char *con_id,
+ unsigned int idx, enum gpiod_flags *dflags,
+ unsigned long *lookupflags)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline struct gpio_desc *
+acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname,
+ int index, struct acpi_gpio_info *info)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline int acpi_gpio_count(struct device *dev, const char *con_id)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* GPIOLIB_ACPI_H */
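gpiolib-acpi.h takes over the ACPI-only declarations that used to live in gpiolib.h (see the gpiolib.h hunk at the end of this diff), pairing every prototype with a static-inline stub under the #else branch so callers never need their own #ifdef CONFIG_ACPI. A minimal sketch of the same pattern for a hypothetical "foo" subsystem, not real kernel API:

/* foo-acpi.h - hypothetical illustration of the CONFIG_ACPI stub pattern */
#ifndef FOO_ACPI_H
#define FOO_ACPI_H

struct foo_chip;

#ifdef CONFIG_ACPI
int foo_acpi_attach(struct foo_chip *chip);
void foo_acpi_detach(struct foo_chip *chip);
#else
static inline int foo_acpi_attach(struct foo_chip *chip) { return 0; }
static inline void foo_acpi_detach(struct foo_chip *chip) { }
#endif

#endif /* FOO_ACPI_H */

foo.c can then call foo_acpi_attach()/foo_acpi_detach() unconditionally and a !ACPI build simply compiles the calls away.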
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 0acc2cc6e868..98e3c20d9730 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -59,7 +59,7 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
{
return devm_gpiod_get_index(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(devm_gpiod_get);
+EXPORT_SYMBOL_GPL(devm_gpiod_get);
/**
* devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,7 +77,7 @@ struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
{
return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(devm_gpiod_get_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_optional);
/**
* devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -127,7 +127,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_gpiod_get_index);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
/**
* devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
@@ -182,7 +182,7 @@ struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
/**
* devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
@@ -239,7 +239,7 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_fwnode_get_index_gpiod_from_child);
+EXPORT_SYMBOL_GPL(devm_fwnode_get_index_gpiod_from_child);
/**
* devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
@@ -268,7 +268,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_index_optional);
/**
* devm_gpiod_get_array - Resource-managed gpiod_get_array()
@@ -303,7 +303,7 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
return descs;
}
-EXPORT_SYMBOL(devm_gpiod_get_array);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_array);
/**
* devm_gpiod_get_array_optional - Resource-managed gpiod_get_array_optional()
@@ -328,7 +328,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
return descs;
}
-EXPORT_SYMBOL(devm_gpiod_get_array_optional);
+EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
/**
* devm_gpiod_put - Resource-managed gpiod_put()
@@ -344,7 +344,7 @@ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
&desc));
}
-EXPORT_SYMBOL(devm_gpiod_put);
+EXPORT_SYMBOL_GPL(devm_gpiod_put);
/**
* devm_gpiod_unhinge - Remove resource management from a gpio descriptor
@@ -374,7 +374,7 @@ void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
/* Anything else we should warn about */
WARN_ON(ret);
}
-EXPORT_SYMBOL(devm_gpiod_unhinge);
+EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
/**
* devm_gpiod_put_array - Resource-managed gpiod_put_array()
@@ -390,7 +390,7 @@ void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
WARN_ON(devres_release(dev, devm_gpiod_release_array,
devm_gpiod_match_array, &descs));
}
-EXPORT_SYMBOL(devm_gpiod_put_array);
+EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
@@ -444,7 +444,7 @@ int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
return 0;
}
-EXPORT_SYMBOL(devm_gpio_request);
+EXPORT_SYMBOL_GPL(devm_gpio_request);
/**
* devm_gpio_request_one - request a single GPIO with initial setup
@@ -474,7 +474,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
return 0;
}
-EXPORT_SYMBOL(devm_gpio_request_one);
+EXPORT_SYMBOL_GPL(devm_gpio_request_one);
/**
* devm_gpio_free - free a GPIO
@@ -492,4 +492,4 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
WARN_ON(devres_release(dev, devm_gpio_release, devm_gpio_match,
&gpio));
}
-EXPORT_SYMBOL(devm_gpio_free);
+EXPORT_SYMBOL_GPL(devm_gpio_free);
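Every devres GPIO helper in this file moves from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, so only GPL-compatible modules can link against them from now on; in-tree consumers are unaffected. For reference, a minimal GPL consumer sketch, with a hypothetical "reset" line and no platform_driver boilerplate:

#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_consumer_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* Resolved via DT/ACPI/board lookup tables; released automatically */
	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* assert reset (logical value) */
	gpiod_set_value_cansleep(reset, 0);	/* release it again */
	return 0;
}

/* Required for the _GPL exports above to resolve */
MODULE_LICENSE("GPL");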
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..1eea2c6c2e1d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -21,6 +21,34 @@
#include <linux/gpio/machine.h>
#include "gpiolib.h"
+#include "gpiolib-of.h"
+
+/*
+ * This is used by external users of of_gpio_count() from <linux/of_gpio.h>
+ *
+ * FIXME: get rid of those external users by converting them to GPIO
+ * descriptors and let them all use gpiod_get_count()
+ */
+int of_gpio_get_count(struct device *dev, const char *con_id)
+{
+ int ret;
+ char propname[32];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
+ if (con_id)
+ snprintf(propname, sizeof(propname), "%s-%s",
+ con_id, gpio_suffixes[i]);
+ else
+ snprintf(propname, sizeof(propname), "%s",
+ gpio_suffixes[i]);
+
+ ret = of_gpio_named_count(dev->of_node, propname);
+ if (ret > 0)
+ break;
+ }
+ return ret ? ret : -ENOENT;
+}
static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
@@ -53,6 +81,23 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
return gpiochip_get_desc(chip, ret);
}
+/**
+ * of_gpio_need_valid_mask() - figure out if the OF GPIO driver needs
+ * to set the .valid_mask
+ * @gc: the gpio_chip to check
+ *
+ * Returns: true if the valid mask needs to be set
+ */
+bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
+{
+ int size;
+ struct device_node *np = gc->of_node;
+
+ size = of_property_count_u32_elems(np, "gpio-reserved-ranges");
+ if (size > 0 && size % 2 == 0)
+ return true;
+ return false;
+}
+
static void of_gpio_flags_quirks(struct device_node *np,
const char *propname,
enum of_gpio_flags *flags,
@@ -178,7 +223,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
-struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+static struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *propname, int index, enum of_gpio_flags *flags)
{
struct of_phandle_args gpiospec;
@@ -229,7 +274,76 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
else
return desc_to_gpio(desc);
}
-EXPORT_SYMBOL(of_get_named_gpio_flags);
+EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+
+/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ struct gpio_desc *desc;
+ enum of_gpio_flags flags;
+ bool active_low = false;
+ bool single_ended = false;
+ bool open_drain = false;
+ bool transitory = false;
+ int ret;
+
+ desc = of_get_named_gpiod_flags(node, propname,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+ return desc;
+ }
+
+ active_low = flags & OF_GPIO_ACTIVE_LOW;
+ single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
+ transitory = flags & OF_GPIO_TRANSITORY;
+
+ ret = gpiod_request(desc, label);
+ if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+ return desc;
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (active_low)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (single_ended) {
+ if (open_drain)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (transitory)
+ lflags |= GPIO_TRANSITORY;
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
/*
* The SPI GPIO bindings happened before we managed to establish that GPIO
@@ -324,6 +438,19 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
return desc;
}
+static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
+ const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ if (!IS_ENABLED(CONFIG_MFD_ARIZONA))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id || strcmp(con_id, "wlf,reset"))
+ return ERR_PTR(-ENOENT);
+
+ return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
@@ -343,36 +470,30 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- /*
- * -EPROBE_DEFER in our case means that we found a
- * valid GPIO property, but no controller has been
- * registered so far.
- *
- * This means we don't need to look any further for
- * alternate name conventions, and we should really
- * preserve the return code for our user to be able to
- * retry probing later.
- */
- if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
- return desc;
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
break;
}
- /* Special handling for SPI GPIOs if used */
- if (IS_ERR(desc))
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ /* Special handling for SPI GPIOs if used */
desc = of_find_spi_gpio(dev, con_id, &of_flags);
- if (IS_ERR(desc)) {
+ }
+
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}
- /* Special handling for regulator GPIOs if used */
- if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+ /* Special handling for regulator GPIOs if used */
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+ }
+
+ if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+ desc = of_find_arizona_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc))
return desc;
@@ -523,8 +644,9 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
* GPIO chips. This function performs only one sanity check: whether GPIO
* is less than ngpios (that is specified in the gpio_chip).
*/
-int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec, u32 *flags)
+static int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
{
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
@@ -548,7 +670,6 @@ int of_gpio_simple_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
-EXPORT_SYMBOL(of_gpio_simple_xlate);
/**
* of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
@@ -605,7 +726,7 @@ err0:
pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
return ret;
}
-EXPORT_SYMBOL(of_mm_gpiochip_add_data);
+EXPORT_SYMBOL_GPL(of_mm_gpiochip_add_data);
/**
* of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
@@ -622,7 +743,7 @@ void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
iounmap(mm_gc->regs);
kfree(gc->label);
}
-EXPORT_SYMBOL(of_mm_gpiochip_remove);
+EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
{
@@ -734,7 +855,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
int of_gpiochip_add(struct gpio_chip *chip)
{
- int status;
+ int ret;
if (!chip->of_node)
return 0;
@@ -749,9 +870,9 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_gpiochip_init_valid_mask(chip);
- status = of_gpiochip_add_pin_range(chip);
- if (status)
- return status;
+ ret = of_gpiochip_add_pin_range(chip);
+ if (ret)
+ return ret;
/* If the chip defines names itself, these take precedence */
if (!chip->names)
@@ -760,13 +881,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
- status = of_gpiochip_scan_gpios(chip);
- if (status) {
+ ret = of_gpiochip_scan_gpios(chip);
+ if (ret) {
of_node_put(chip->of_node);
gpiochip_remove_pin_ranges(chip);
}
- return status;
+ return ret;
}
void of_gpiochip_remove(struct gpio_chip *chip)
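The of_find_gpio() rework above replaces the special-cased -EPROBE_DEFER check with a uniform rule: each quirk lookup (SPI, SPI chip-select, regulator, and the new Arizona "wlf,reset" quirk) is only tried when the previous step returned -ENOENT, while any other error, -EPROBE_DEFER included, is passed straight back to the caller. The shape of that chaining, using placeholder lookup_a/lookup_b/lookup_c helpers rather than real gpiolib functions:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical lookup helpers, each returning a desc or an ERR_PTR() */
struct gpio_desc *lookup_a(struct device *dev, const char *con_id);
struct gpio_desc *lookup_b(struct device *dev, const char *con_id);
struct gpio_desc *lookup_c(struct device *dev, const char *con_id);

static struct gpio_desc *chained_lookup(struct device *dev, const char *con_id)
{
	struct gpio_desc *desc;

	desc = lookup_a(dev, con_id);		/* primary lookup */

	/* Only "nothing found" lets the next source have a go */
	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
		desc = lookup_b(dev, con_id);
	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
		desc = lookup_c(dev, con_id);

	/* -EPROBE_DEFER and real failures fall through unchanged */
	return desc;
}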
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
new file mode 100644
index 000000000000..9768831b1fe2
--- /dev/null
+++ b/drivers/gpio/gpiolib-of.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef GPIOLIB_OF_H
+#define GPIOLIB_OF_H
+
+struct gpio_chip;
+enum of_gpio_flags;
+
+#ifdef CONFIG_OF_GPIO
+struct gpio_desc *of_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ unsigned long *lookupflags);
+int of_gpiochip_add(struct gpio_chip *gc);
+void of_gpiochip_remove(struct gpio_chip *gc);
+int of_gpio_get_count(struct device *dev, const char *con_id);
+bool of_gpio_need_valid_mask(const struct gpio_chip *gc);
+#else
+static inline struct gpio_desc *of_find_gpio(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ unsigned long *lookupflags)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
+static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline int of_gpio_get_count(struct device *dev, const char *con_id)
+{
+ return 0;
+}
+static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
+{
+ return false;
+}
+#endif /* CONFIG_OF_GPIO */
+
+#endif /* GPIOLIB_OF_H */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..822988818efc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -11,7 +11,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
@@ -30,6 +29,8 @@
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
+#include "gpiolib-of.h"
+#include "gpiolib-acpi.h"
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@ -213,7 +214,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
- int status;
+ int ret;
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
@@ -221,17 +222,17 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (!chip->get_direction)
return -ENOTSUPP;
- status = chip->get_direction(chip, offset);
- if (status > 0) {
+ ret = chip->get_direction(chip, offset);
+ if (ret > 0) {
/* GPIOF_DIR_IN, or other positive */
- status = 1;
+ ret = 1;
clear_bit(FLAG_IS_OUT, &desc->flags);
}
- if (status == 0) {
+ if (ret == 0) {
/* GPIOF_DIR_OUT */
set_bit(FLAG_IS_OUT, &desc->flags);
}
- return status;
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_get_direction);
@@ -350,7 +351,7 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
{
unsigned long *p;
- p = kmalloc_array(BITS_TO_LONGS(chip->ngpio), sizeof(*p), GFP_KERNEL);
+ p = bitmap_alloc(chip->ngpio, GFP_KERNEL);
if (!p)
return NULL;
@@ -360,38 +361,31 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
return p;
}
-static int gpiochip_alloc_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_alloc_valid_mask(struct gpio_chip *gc)
{
-#ifdef CONFIG_OF_GPIO
- int size;
- struct device_node *np = gpiochip->of_node;
-
- size = of_property_count_u32_elems(np, "gpio-reserved-ranges");
- if (size > 0 && size % 2 == 0)
- gpiochip->need_valid_mask = true;
-#endif
-
- if (!gpiochip->need_valid_mask)
+ if (!(of_gpio_need_valid_mask(gc) || gc->init_valid_mask))
return 0;
- gpiochip->valid_mask = gpiochip_allocate_mask(gpiochip);
- if (!gpiochip->valid_mask)
+ gc->valid_mask = gpiochip_allocate_mask(gc);
+ if (!gc->valid_mask)
return -ENOMEM;
return 0;
}
-static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_init_valid_mask(struct gpio_chip *gc)
{
- if (gpiochip->init_valid_mask)
- return gpiochip->init_valid_mask(gpiochip);
+ if (gc->init_valid_mask)
+ return gc->init_valid_mask(gc,
+ gc->valid_mask,
+ gc->ngpio);
return 0;
}
static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
{
- kfree(gpiochip->valid_mask);
+ bitmap_free(gpiochip->valid_mask);
gpiochip->valid_mask = NULL;
}
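Two related cleanups land in this hunk: the valid-mask storage now goes through bitmap_alloc()/bitmap_free() instead of an open-coded kmalloc_array(BITS_TO_LONGS(...), ...), and the driver-supplied init_valid_mask() callback is now called with the mask and line count rather than being left to poke at chip internals. A driver-side sketch of the new callback, assuming a hypothetical chip where four consecutive lines are not bonded out:

#include <linux/bitmap.h>
#include <linux/gpio/driver.h>

static int my_init_valid_mask(struct gpio_chip *gc,
			      unsigned long *valid_mask,
			      unsigned int ngpios)
{
	/* Hypothetical package: lines 4..7 are not routed, mark them invalid */
	bitmap_clear(valid_mask, 4, 4);
	return 0;
}

static void my_setup(struct gpio_chip *gc)
{
	/* The core allocates the mask and invokes this during registration */
	gc->init_valid_mask = my_init_valid_mask;
}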
@@ -536,6 +530,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
return -EINVAL;
/*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
+ /*
* Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
* the hardware actually supports enabling both at the same time the
* electrical result would be disastrous.
@@ -857,7 +859,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
}
ret = kfifo_put(&le->events, ge);
- if (ret != 0)
+ if (ret)
wake_up_poll(&le->wait, EPOLLIN);
return IRQ_HANDLED;
@@ -926,7 +928,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
/* This is just wrong: we don't look for events on output lines */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
ret = -EINVAL;
goto out_free_label;
}
@@ -940,10 +944,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= IRQF_TRIGGER_RISING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
INIT_KFIFO(le->events);
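With this hunk the character-device event ABI becomes consistent with the rest of gpiolib: for a line requested with GPIOHANDLE_REQUEST_ACTIVE_LOW, a rising-edge event request now arms the falling physical edge (and vice versa), so "rising" always means the logical value going from 0 to 1. A userspace sketch of such a request; the chip path and line offset are hypothetical:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

/* Returns an event fd to poll()/read(), or -1 on error */
int watch_logical_rising(void)
{
	struct gpioevent_request req;
	int chip_fd = open("/dev/gpiochip0", O_RDWR);	/* hypothetical chip */

	if (chip_fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.lineoffset = 4;				/* hypothetical line */
	req.handleflags = GPIOHANDLE_REQUEST_INPUT |
			  GPIOHANDLE_REQUEST_ACTIVE_LOW;
	req.eventflags = GPIOEVENT_REQUEST_RISING_EDGE;	/* logical rising */
	strcpy(req.consumer_label, "example");

	if (ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0) {
		close(chip_fd);
		return -1;
	}
	close(chip_fd);		/* the event fd keeps the line requested */
	return req.fd;
}

On an active-low line this fd now delivers an event when the wire falls, which is exactly when the logical value rises.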
@@ -1089,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
@@ -1172,21 +1176,21 @@ static void gpiodevice_release(struct device *dev)
static int gpiochip_setup_dev(struct gpio_device *gdev)
{
- int status;
+ int ret;
cdev_init(&gdev->chrdev, &gpio_fileops);
gdev->chrdev.owner = THIS_MODULE;
gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
- status = cdev_device_add(&gdev->chrdev, &gdev->dev);
- if (status)
- return status;
+ ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
+ if (ret)
+ return ret;
chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
MAJOR(gpio_devt), gdev->id);
- status = gpiochip_sysfs_register(gdev);
- if (status)
+ ret = gpiochip_sysfs_register(gdev);
+ if (ret)
goto err_remove_device;
/* From this point, the .release() function cleans up gpio_device */
@@ -1199,7 +1203,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
err_remove_device:
cdev_device_del(&gdev->chrdev, &gdev->dev);
- return status;
+ return ret;
}
static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog)
@@ -1240,13 +1244,13 @@ static void machine_gpiochip_add(struct gpio_chip *chip)
static void gpiochip_setup_devs(void)
{
struct gpio_device *gdev;
- int err;
+ int ret;
list_for_each_entry(gdev, &gpio_devices, list) {
- err = gpiochip_setup_dev(gdev);
- if (err)
+ ret = gpiochip_setup_dev(gdev);
+ if (ret)
pr_err("%s: Failed to initialize gpio device (%d)\n",
- dev_name(&gdev->dev), err);
+ dev_name(&gdev->dev), ret);
}
}
@@ -1255,7 +1259,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct lock_class_key *request_key)
{
unsigned long flags;
- int status = 0;
+ int ret = 0;
unsigned i;
int base = chip->base;
struct gpio_device *gdev;
@@ -1285,7 +1289,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
if (gdev->id < 0) {
- status = gdev->id;
+ ret = gdev->id;
goto err_free_gdev;
}
dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
@@ -1301,13 +1305,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
if (!gdev->descs) {
- status = -ENOMEM;
+ ret = -ENOMEM;
goto err_free_ida;
}
if (chip->ngpio == 0) {
chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
- status = -EINVAL;
+ ret = -EINVAL;
goto err_free_descs;
}
@@ -1317,7 +1321,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
- status = -ENOMEM;
+ ret = -ENOMEM;
goto err_free_descs;
}
@@ -1336,7 +1340,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (base < 0) {
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
- status = base;
+ ret = base;
spin_unlock_irqrestore(&gpio_lock, flags);
goto err_free_label;
}
@@ -1350,8 +1354,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
}
gdev->base = base;
- status = gpiodev_add_to_list(gdev);
- if (status) {
+ ret = gpiodev_add_to_list(gdev);
+ if (ret) {
spin_unlock_irqrestore(&gpio_lock, flags);
goto err_free_label;
}
@@ -1365,45 +1369,50 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- status = gpiochip_set_desc_names(chip);
- if (status)
+ ret = gpiochip_set_desc_names(chip);
+ if (ret)
goto err_remove_from_list;
- status = gpiochip_irqchip_init_valid_mask(chip);
- if (status)
+ ret = gpiochip_alloc_valid_mask(chip);
+ if (ret)
goto err_remove_from_list;
- status = gpiochip_alloc_valid_mask(chip);
- if (status)
- goto err_remove_irqchip_mask;
-
- status = gpiochip_add_irqchip(chip, lock_key, request_key);
- if (status)
+ ret = of_gpiochip_add(chip);
+ if (ret)
goto err_free_gpiochip_mask;
- status = of_gpiochip_add(chip);
- if (status)
- goto err_remove_chip;
-
- status = gpiochip_init_valid_mask(chip);
- if (status)
+ ret = gpiochip_init_valid_mask(chip);
+ if (ret)
goto err_remove_of_chip;
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
- if (chip->get_direction && gpiochip_line_is_valid(chip, i))
- desc->flags = !chip->get_direction(chip, i) ?
- (1 << FLAG_IS_OUT) : 0;
- else
- desc->flags = !chip->direction_input ?
- (1 << FLAG_IS_OUT) : 0;
+ if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+ if (!chip->get_direction(chip, i))
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ } else {
+ if (!chip->direction_input)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ }
}
acpi_gpiochip_add(chip);
machine_gpiochip_add(chip);
+ ret = gpiochip_irqchip_init_valid_mask(chip);
+ if (ret)
+ goto err_remove_acpi_chip;
+
+ ret = gpiochip_add_irqchip(chip, lock_key, request_key);
+ if (ret)
+ goto err_remove_irqchip_mask;
+
/*
* By first adding the chardev, and then adding the device,
* we get a device node entry in sysfs under
@@ -1413,23 +1422,23 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
* Otherwise, defer until later.
*/
if (gpiolib_initialized) {
- status = gpiochip_setup_dev(gdev);
- if (status)
- goto err_remove_acpi_chip;
+ ret = gpiochip_setup_dev(gdev);
+ if (ret)
+ goto err_remove_irqchip;
}
return 0;
+err_remove_irqchip:
+ gpiochip_irqchip_remove(chip);
+err_remove_irqchip_mask:
+ gpiochip_irqchip_free_valid_mask(chip);
err_remove_acpi_chip:
acpi_gpiochip_remove(chip);
err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
-err_remove_chip:
- gpiochip_irqchip_remove(chip);
err_free_gpiochip_mask:
gpiochip_free_valid_mask(chip);
-err_remove_irqchip_mask:
- gpiochip_irqchip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
@@ -1444,9 +1453,9 @@ err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
gdev->base, gdev->base + gdev->ngpio - 1,
- chip->label ? : "generic", status);
+ chip->label ? : "generic", ret);
kfree(gdev);
- return status;
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
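The reshuffle in gpiochip_add_data_with_key() moves irqchip setup after the OF, ACPI and machine registration steps and renames the error labels so that teardown still runs in exactly the reverse order of setup; keeping that discipline is the whole point of the goto-unwind idiom. A stripped-down sketch with hypothetical setup_*/teardown_* steps standing in for the real ones:

struct gpio_chip;
int setup_names(struct gpio_chip *chip);
int setup_valid_mask(struct gpio_chip *chip);
int setup_irqchip(struct gpio_chip *chip);
void teardown_names(struct gpio_chip *chip);
void teardown_valid_mask(struct gpio_chip *chip);

static int register_everything(struct gpio_chip *chip)
{
	int ret;

	ret = setup_names(chip);		/* hypothetical step 1 */
	if (ret)
		return ret;

	ret = setup_valid_mask(chip);		/* hypothetical step 2 */
	if (ret)
		goto err_free_names;

	ret = setup_irqchip(chip);		/* hypothetical step 3 */
	if (ret)
		goto err_free_valid_mask;

	return 0;

	/* Unwind strictly in reverse order of the setup above */
err_free_valid_mask:
	teardown_valid_mask(chip);
err_free_names:
	teardown_names(chip);
	return ret;
}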
@@ -1612,21 +1621,25 @@ static struct gpio_chip *find_chip_by_name(const char *name)
* The following is irqchip helper code for gpiochips.
*/
-static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
- if (!gpiochip->irq.need_valid_mask)
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ if (!girq->init_valid_mask)
return 0;
- gpiochip->irq.valid_mask = gpiochip_allocate_mask(gpiochip);
- if (!gpiochip->irq.valid_mask)
+ girq->valid_mask = gpiochip_allocate_mask(gc);
+ if (!girq->valid_mask)
return -ENOMEM;
+ girq->init_valid_mask(gc, girq->valid_mask, gc->ngpio);
+
return 0;
}
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
{
- kfree(gpiochip->irq.valid_mask);
+ bitmap_free(gpiochip->irq.valid_mask);
gpiochip->irq.valid_mask = NULL;
}
@@ -1726,6 +1739,273 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
}
EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+/**
+ * gpiochip_set_hierarchical_irqchip() - connects a hierarchical irqchip
+ * to a gpiochip
+ * @gc: the gpiochip to set the irqchip hierarchical handler to
+ * @irqchip: the irqchip to handle this level of the hierarchy, the interrupt
+ * will then percolate up to the parent
+ */
+static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
+ struct irq_chip *irqchip)
+{
+ /* DT will deal with mapping each IRQ as we go along */
+ if (is_of_node(gc->irq.fwnode))
+ return;
+
+ /*
+ * This is for legacy and boardfile "irqchip" fwnodes: allocate
+ * irqs upfront instead of dynamically since we don't have the
+ * dynamic type of allocation that hardware description languages
+ * provide. Once all GPIO drivers using board files are gone from
+ * the kernel we can delete this code, but for a transitional period
+ * it is necessary to keep this around.
+ */
+ if (is_fwnode_irqchip(gc->irq.fwnode)) {
+ int i;
+ int ret;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ struct irq_fwspec fwspec;
+ unsigned int parent_hwirq;
+ unsigned int parent_type;
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ /*
+ * We call the child to parent translation function
+ * only to check if the child IRQ is valid or not.
+ * Just pick the rising edge type here as that is what
+ * we likely need to support.
+ */
+ ret = girq->child_to_parent_hwirq(gc, i,
+ IRQ_TYPE_EDGE_RISING,
+ &parent_hwirq,
+ &parent_type);
+ if (ret) {
+ chip_err(gc, "skip set-up on hwirq %d\n",
+ i);
+ continue;
+ }
+
+ fwspec.fwnode = gc->irq.fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = girq->child_offset_to_irq(gc, i);
+ /* Just pick something */
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(gc->irq.domain,
+ /* just pick something */
+ -1,
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ chip_err(gc,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ i, parent_hwirq,
+ ret);
+ }
+ }
+ }
+
+ chip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
+
+ return;
+}
+
+static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ return irq_domain_translate_twocell(d, fwspec, hwirq, type);
+ }
+
+ /* This is for board files and others not using DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ int ret;
+
+ ret = irq_domain_translate_twocell(d, fwspec, hwirq, type);
+ if (ret)
+ return ret;
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq,
+ unsigned int nr_irqs,
+ void *data)
+{
+ struct gpio_chip *gc = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ unsigned int parent_hwirq;
+ unsigned int parent_type;
+ struct gpio_irq_chip *girq = &gc->irq;
+ int ret;
+
+ /*
+ * The nr_irqs parameter is always one except for PCI multi-MSI
+ * so this should not happen.
+ */
+ WARN_ON(nr_irqs != 1);
+
+ ret = gc->irq.child_irq_domain_ops.translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ chip_info(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+
+ ret = girq->child_to_parent_hwirq(gc, hwirq, type,
+ &parent_hwirq, &parent_type);
+ if (ret) {
+ chip_err(gc, "can't look up hwirq %lu\n", hwirq);
+ return ret;
+ }
+ chip_info(gc, "found parent hwirq %u\n", parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq,
+ hwirq,
+ gc->irq.chip,
+ gc,
+ girq->handler,
+ NULL, NULL);
+ irq_set_probe(irq);
+
+ /*
+	 * Create an IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ /* This parent only handles asserted level IRQs */
+ girq->populate_parent_fwspec(gc, &parent_fwspec, parent_hwirq,
+ parent_type);
+ chip_info(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq, parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq, 1, &parent_fwspec);
+ if (ret)
+ chip_err(gc,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ parent_hwirq, hwirq);
+
+ return ret;
+}
+
+static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return offset;
+}
+
+static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops)
+{
+ ops->activate = gpiochip_irq_domain_activate;
+ ops->deactivate = gpiochip_irq_domain_deactivate;
+ ops->alloc = gpiochip_hierarchy_irq_domain_alloc;
+ ops->free = irq_domain_free_irqs_common;
+
+ /*
+ * We only allow overriding the translate() function for
+ * hierarchical chips, and this should only be done if the user
+	 * really needs something other than 1:1 translation.
+ */
+ if (!ops->translate)
+ ops->translate = gpiochip_hierarchy_irq_domain_translate;
+}
+
+static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+{
+ if (!gc->irq.child_to_parent_hwirq ||
+ !gc->irq.fwnode) {
+ chip_err(gc, "missing irqdomain vital data\n");
+ return -EINVAL;
+ }
+
+ if (!gc->irq.child_offset_to_irq)
+ gc->irq.child_offset_to_irq = gpiochip_child_offset_to_irq_noop;
+
+ if (!gc->irq.populate_parent_fwspec)
+ gc->irq.populate_parent_fwspec =
+ gpiochip_populate_parent_fwspec_twocell;
+
+ gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
+
+ gc->irq.domain = irq_domain_create_hierarchy(
+ gc->irq.parent_domain,
+ 0,
+ gc->ngpio,
+ gc->irq.fwnode,
+ &gc->irq.child_irq_domain_ops,
+ gc);
+
+ if (!gc->irq.domain)
+ return -ENOMEM;
+
+ gpiochip_set_hierarchical_irqchip(gc, gc->irq.chip);
+
+ return 0;
+}
+
+static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
+{
+ return !!gc->irq.parent_domain;
+}
+
+void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
+ struct irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ fwspec->param_count = 2;
+ fwspec->param[0] = parent_hwirq;
+ fwspec->param[1] = parent_type;
+}
+EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell);
+
+void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
+ struct irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ fwspec->param_count = 4;
+ fwspec->param[0] = 0;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = 0;
+ fwspec->param[3] = parent_type;
+}
+EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
+
+#else
+
+static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+{
+ return -EINVAL;
+}
+
+static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
+{
+ return false;
+}
+
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
/**
* gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
* @d: the irqdomain used by this irqchip
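The block above adds first-class support for hierarchical IRQ domains: instead of a chained parent handler, a driver hands the core a parent irq_domain, a fwnode and a child_to_parent_hwirq() translation callback, and gpiochip_add_data() builds the hierarchy via irq_domain_create_hierarchy(). A driver-side sketch of the new fields, assuming a hypothetical mapping where GPIO line n sits on parent hwirq 32 + n:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

extern struct irq_chip my_irq_chip;	/* hypothetical driver irqchip */

static int my_child_to_parent_hwirq(struct gpio_chip *gc,
				    unsigned int child_hwirq,
				    unsigned int child_type,
				    unsigned int *parent_hwirq,
				    unsigned int *parent_type)
{
	/* Hypothetical wiring: line n is parent hwirq 32 + n, same trigger */
	*parent_hwirq = child_hwirq + 32;
	*parent_type = child_type;
	return 0;
}

static void my_setup_hierarchy(struct gpio_chip *gc, struct device_node *np,
			       struct irq_domain *parent)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = &my_irq_chip;
	girq->fwnode = of_node_to_fwnode(np);
	girq->parent_domain = parent;	/* from the parent interrupt controller */
	girq->child_to_parent_hwirq = my_child_to_parent_hwirq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;	/* real handler installed by .set_type() */
	/* gpiochip_add_data() now creates the hierarchical domain itself */
}

Per gpiochip_hierarchy_add_domain() above, fwnode and child_to_parent_hwirq are the two fields the core treats as mandatory; child_offset_to_irq and populate_parent_fwspec fall back to the twocell defaults when left unset.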
@@ -1740,7 +2020,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct gpio_chip *chip = d->host_data;
- int err = 0;
+ int ret = 0;
if (!gpiochip_irqchip_irq_valid(chip, hwirq))
return -ENXIO;
@@ -1758,12 +2038,12 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_noprobe(irq);
if (chip->irq.num_parents == 1)
- err = irq_set_parent(irq, chip->irq.parents[0]);
+ ret = irq_set_parent(irq, chip->irq.parents[0]);
else if (chip->irq.map)
- err = irq_set_parent(irq, chip->irq.map[hwirq]);
+ ret = irq_set_parent(irq, chip->irq.map[hwirq]);
- if (err < 0)
- return err;
+ if (ret < 0)
+ return ret;
/*
* No set-up of the hardware will happen if IRQ_TYPE_NONE
@@ -1794,6 +2074,11 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/*
+ * TODO: move these activate/deactivate under the hierarchical
+ * irqchip implementation as static once SPMI and SSBI (all external
+ * users) are phased over.
+ */
/**
* gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ
* @domain: The IRQ domain used by this IRQ chip
@@ -1833,10 +2118,25 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
{
+ struct irq_domain *domain = chip->irq.domain;
+
if (!gpiochip_irqchip_irq_valid(chip, offset))
return -ENXIO;
- return irq_create_mapping(chip->irq.domain, offset);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ if (irq_domain_is_hierarchy(domain)) {
+ struct irq_fwspec spec;
+
+ spec.fwnode = domain->fwnode;
+ spec.param_count = 2;
+ spec.param[0] = chip->irq.child_offset_to_irq(chip, offset);
+ spec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&spec);
+ }
+#endif
+
+ return irq_create_mapping(domain, offset);
}
static int gpiochip_irq_reqres(struct irq_data *d)
@@ -1913,7 +2213,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
struct lock_class_key *request_key)
{
struct irq_chip *irqchip = gpiochip->irq.chip;
- const struct irq_domain_ops *ops;
+ const struct irq_domain_ops *ops = NULL;
struct device_node *np;
unsigned int type;
unsigned int i;
@@ -1949,16 +2249,25 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
gpiochip->irq.lock_key = lock_key;
gpiochip->irq.request_key = request_key;
- if (gpiochip->irq.domain_ops)
- ops = gpiochip->irq.domain_ops;
- else
- ops = &gpiochip_domain_ops;
-
- gpiochip->irq.domain = irq_domain_add_simple(np, gpiochip->ngpio,
- gpiochip->irq.first,
- ops, gpiochip);
- if (!gpiochip->irq.domain)
- return -EINVAL;
+ /* If a parent irqdomain is provided, let's build a hierarchy */
+ if (gpiochip_hierarchy_is_hierarchical(gpiochip)) {
+ int ret = gpiochip_hierarchy_add_domain(gpiochip);
+ if (ret)
+ return ret;
+ } else {
+ /* Some drivers provide custom irqdomain ops */
+ if (gpiochip->irq.domain_ops)
+ ops = gpiochip->irq.domain_ops;
+
+ if (!ops)
+ ops = &gpiochip_domain_ops;
+ gpiochip->irq.domain = irq_domain_add_simple(np,
+ gpiochip->ngpio,
+ gpiochip->irq.first,
+ ops, gpiochip);
+ if (!gpiochip->irq.domain)
+ return -EINVAL;
+ }
if (gpiochip->irq.parent_handler) {
void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
@@ -2321,7 +2630,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
struct gpio_chip *chip = desc->gdev->chip;
- int status;
+ int ret;
unsigned long flags;
unsigned offset;
@@ -2339,10 +2648,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
desc_set_label(desc, label ? : "?");
- status = 0;
+ ret = 0;
} else {
kfree_const(label);
- status = -EBUSY;
+ ret = -EBUSY;
goto done;
}
@@ -2351,12 +2660,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
if (gpiochip_line_is_valid(chip, offset))
- status = chip->request(chip, offset);
+ ret = chip->request(chip, offset);
else
- status = -EINVAL;
+ ret = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
- if (status < 0) {
+ if (ret < 0) {
desc_set_label(desc, NULL);
kfree_const(label);
clear_bit(FLAG_REQUESTED, &desc->flags);
@@ -2371,7 +2680,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
}
done:
spin_unlock_irqrestore(&gpio_lock, flags);
- return status;
+ return ret;
}
/*
@@ -2414,24 +2723,24 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
- int status = -EPROBE_DEFER;
+ int ret = -EPROBE_DEFER;
struct gpio_device *gdev;
VALIDATE_DESC(desc);
gdev = desc->gdev;
if (try_module_get(gdev->owner)) {
- status = gpiod_request_commit(desc, label);
- if (status < 0)
+ ret = gpiod_request_commit(desc, label);
+ if (ret < 0)
module_put(gdev->owner);
else
get_device(&gdev->dev);
}
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+ if (ret)
+ gpiod_dbg(desc, "%s: status %d\n", __func__, ret);
- return status;
+ return ret;
}
static bool gpiod_free_commit(struct gpio_desc *desc)
@@ -2533,22 +2842,22 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
enum gpiod_flags dflags)
{
struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
- int err;
+ int ret;
if (IS_ERR(desc)) {
chip_err(chip, "failed to get GPIO descriptor\n");
return desc;
}
- err = gpiod_request_commit(desc, label);
- if (err < 0)
- return ERR_PTR(err);
+ ret = gpiod_request_commit(desc, label);
+ if (ret < 0)
+ return ERR_PTR(ret);
- err = gpiod_configure_flags(desc, label, lflags, dflags);
- if (err) {
+ ret = gpiod_configure_flags(desc, label, lflags, dflags);
+ if (ret) {
chip_err(chip, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
return desc;
@@ -2611,7 +2920,7 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
int gpiod_direction_input(struct gpio_desc *desc)
{
struct gpio_chip *chip;
- int status = 0;
+ int ret = 0;
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
@@ -2635,7 +2944,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
* assume we are in input mode after this.
*/
if (chip->direction_input) {
- status = chip->direction_input(chip, gpio_chip_hwgpio(desc));
+ ret = chip->direction_input(chip, gpio_chip_hwgpio(desc));
} else if (chip->get_direction &&
(chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) {
gpiod_warn(desc,
@@ -2643,7 +2952,7 @@ int gpiod_direction_input(struct gpio_desc *desc)
__func__);
return -EIO;
}
- if (status == 0)
+ if (ret == 0)
clear_bit(FLAG_IS_OUT, &desc->flags);
if (test_bit(FLAG_PULL_UP, &desc->flags))
@@ -2653,9 +2962,9 @@ int gpiod_direction_input(struct gpio_desc *desc)
gpio_set_config(chip, gpio_chip_hwgpio(desc),
PIN_CONFIG_BIAS_PULL_DOWN);
- trace_gpio_direction(desc_to_gpio(desc), 1, status);
+ trace_gpio_direction(desc_to_gpio(desc), 1, ret);
- return status;
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
@@ -2927,7 +3236,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- int err, i = 0;
+ int ret, i = 0;
/*
* Validate array_info against desc_array and its size.
@@ -2940,11 +3249,11 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
if (!can_sleep)
WARN_ON(array_info->chip->can_sleep);
- err = gpio_chip_get_multiple(array_info->chip,
+ ret = gpio_chip_get_multiple(array_info->chip,
array_info->get_mask,
value_bitmap);
- if (err)
- return err;
+ if (ret)
+ return ret;
if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
bitmap_xor(value_bitmap, value_bitmap,
@@ -3132,24 +3441,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
*/
static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
- int err = 0;
+ int ret = 0;
struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- err = chip->direction_input(chip, offset);
- if (!err)
+ ret = chip->direction_input(chip, offset);
+ if (!ret)
clear_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_output(chip, offset, 0);
- if (!err)
+ ret = chip->direction_output(chip, offset, 0);
+ if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(desc_to_gpio(desc), value, err);
- if (err < 0)
+ trace_gpio_direction(desc_to_gpio(desc), value, ret);
+ if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open drain err %d\n",
- __func__, err);
+ __func__, ret);
}
/*
@@ -3159,24 +3468,24 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
*/
static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
- int err = 0;
+ int ret = 0;
struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
- err = chip->direction_output(chip, offset, 1);
- if (!err)
+ ret = chip->direction_output(chip, offset, 1);
+ if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- err = chip->direction_input(chip, offset);
- if (!err)
+ ret = chip->direction_input(chip, offset);
+ if (!ret)
clear_bit(FLAG_IS_OUT, &desc->flags);
}
- trace_gpio_direction(desc_to_gpio(desc), !value, err);
- if (err < 0)
+ trace_gpio_direction(desc_to_gpio(desc), !value, ret);
+ if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open source err %d\n",
- __func__, err);
+ __func__, ret);
}
static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
@@ -3993,27 +4302,6 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
}
-static int dt_gpio_count(struct device *dev, const char *con_id)
-{
- int ret;
- char propname[32];
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id)
- snprintf(propname, sizeof(propname), "%s-%s",
- con_id, gpio_suffixes[i]);
- else
- snprintf(propname, sizeof(propname), "%s",
- gpio_suffixes[i]);
-
- ret = of_gpio_named_count(dev->of_node, propname);
- if (ret > 0)
- break;
- }
- return ret ? ret : -ENOENT;
-}
-
static int platform_gpio_count(struct device *dev, const char *con_id)
{
struct gpiod_lookup_table *table;
@@ -4046,7 +4334,7 @@ int gpiod_count(struct device *dev, const char *con_id)
int count = -ENOENT;
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
- count = dt_gpio_count(dev, con_id);
+ count = of_gpio_get_count(dev, con_id);
else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev))
count = acpi_gpio_count(dev, con_id);
@@ -4108,7 +4396,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
- int status;
+ int ret;
if (lflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -4141,9 +4429,9 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
else if (lflags & GPIO_PULL_DOWN)
set_bit(FLAG_PULL_DOWN, &desc->flags);
- status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
- if (status < 0)
- return status;
+ ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+ if (ret < 0)
+ return ret;
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -4153,12 +4441,12 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* Process flags */
if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
- status = gpiod_direction_output(desc,
+ ret = gpiod_direction_output(desc,
!!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
else
- status = gpiod_direction_input(desc);
+ ret = gpiod_direction_input(desc);
- return status;
+ return ret;
}
/**
@@ -4182,7 +4470,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = NULL;
- int status;
+ int ret;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
@@ -4217,9 +4505,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* If a connection label was passed use that, else attempt to use
* the device name as label
*/
- status = gpiod_request(desc, con_id ? con_id : devname);
- if (status < 0) {
- if (status == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
+ ret = gpiod_request(desc, con_id ? con_id : devname);
+ if (ret < 0) {
+ if (ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
/*
* This happens when there are several consumers for
* the same GPIO line: we just return here without
@@ -4232,89 +4520,20 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
con_id ? con_id : devname);
return desc;
} else {
- return ERR_PTR(status);
+ return ERR_PTR(ret);
}
}
- status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
- if (status < 0) {
- dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
- gpiod_put(desc);
- return ERR_PTR(status);
- }
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(gpiod_get_index);
-
-/**
- * gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @node: handle of the OF node
- * @propname: name of the DT property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- struct gpio_desc *desc;
- enum of_gpio_flags flags;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
- bool transitory = false;
- int ret;
-
- desc = of_get_named_gpiod_flags(node, propname,
- index, &flags);
-
- if (!desc || IS_ERR(desc)) {
- return desc;
- }
-
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- transitory = flags & OF_GPIO_TRANSITORY;
-
- ret = gpiod_request(desc, label);
- if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
- return desc;
- if (ret)
- return ERR_PTR(ret);
-
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
- if (transitory)
- lflags |= GPIO_TRANSITORY;
-
- ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (ret < 0) {
+ dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
return ERR_PTR(ret);
}
return desc;
}
-EXPORT_SYMBOL(gpiod_get_from_of_node);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -4424,7 +4643,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
struct gpio_chip *chip;
struct gpio_desc *local_desc;
int hwnum;
- int status;
+ int ret;
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
@@ -4432,10 +4651,10 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
local_desc = gpiochip_request_own_desc(chip, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
- status = PTR_ERR(local_desc);
+ ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
- name, chip->label, hwnum, status);
- return status;
+ name, chip->label, hwnum, ret);
+ return ret;
}
/* Mark GPIO as hogged so it can be identified and removed later */
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 7c52c2442173..b8b10a409c7b 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,9 +16,6 @@
#include <linux/module.h>
#include <linux/cdev.h>
-enum of_gpio_flags;
-struct acpi_device;
-
/**
* struct gpio_device - internal state container for GPIO devices
* @id: numerical ID number for the GPIO chip
@@ -69,126 +66,9 @@ struct gpio_device {
#endif
};
-/**
- * struct acpi_gpio_info - ACPI GPIO specific information
- * @adev: reference to ACPI device which consumes GPIO resource
- * @flags: GPIO initialization flags
- * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
- * @pin_config: pin bias as provided by ACPI
- * @polarity: interrupt polarity as provided by ACPI
- * @triggering: triggering type as provided by ACPI
- * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
- */
-struct acpi_gpio_info {
- struct acpi_device *adev;
- enum gpiod_flags flags;
- bool gpioint;
- int pin_config;
- int polarity;
- int triggering;
- unsigned int quirks;
-};
-
/* gpio suffixes used for ACPI and device tree lookup */
static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
-#ifdef CONFIG_OF_GPIO
-struct gpio_desc *of_find_gpio(struct device *dev,
- const char *con_id,
- unsigned int idx,
- unsigned long *lookupflags);
-struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
-int of_gpiochip_add(struct gpio_chip *gc);
-void of_gpiochip_remove(struct gpio_chip *gc);
-#else
-static inline struct gpio_desc *of_find_gpio(struct device *dev,
- const char *con_id,
- unsigned int idx,
- unsigned long *lookupflags)
-{
- return ERR_PTR(-ENOENT);
-}
-static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags)
-{
- return ERR_PTR(-ENOENT);
-}
-static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
-static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
-#endif /* CONFIG_OF_GPIO */
-
-#ifdef CONFIG_ACPI
-void acpi_gpiochip_add(struct gpio_chip *chip);
-void acpi_gpiochip_remove(struct gpio_chip *chip);
-
-void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
-
-int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- struct acpi_gpio_info *info);
-int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
- struct acpi_gpio_info *info);
-
-struct gpio_desc *acpi_find_gpio(struct device *dev,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags *dflags,
- unsigned long *lookupflags);
-struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- struct acpi_gpio_info *info);
-
-int acpi_gpio_count(struct device *dev, const char *con_id);
-
-bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id);
-#else
-static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
-static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
-
-static inline void
-acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
-
-static inline void
-acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
-
-static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
-{
- return 0;
-}
-static inline int
-acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
- struct acpi_gpio_info *info)
-{
- return 0;
-}
-
-static inline struct gpio_desc *
-acpi_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx, enum gpiod_flags *dflags,
- unsigned long *lookupflags)
-{
- return ERR_PTR(-ENOENT);
-}
-static inline struct gpio_desc *
-acpi_node_get_gpiod(struct fwnode_handle *fwnode, const char *propname,
- int index, struct acpi_gpio_info *info)
-{
- return ERR_PTR(-ENXIO);
-}
-static inline int acpi_gpio_count(struct device *dev, const char *con_id)
-{
- return -ENODEV;
-}
-
-static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
- const char *con_id)
-{
- return false;
-}
-#endif
-
struct gpio_array {
struct gpio_desc **desc;
unsigned int size;
diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c
new file mode 100644
index 000000000000..7e99860ca447
--- /dev/null
+++ b/drivers/gpio/sgpio-aspeed.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2019 American Megatrends International LLC.
+ *
+ * Author: Karthikeyan Mani <karthikeyanm@amiindia.co.in>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#define MAX_NR_SGPIO 80
+
+#define ASPEED_SGPIO_CTRL 0x54
+
+#define ASPEED_SGPIO_PINS_MASK GENMASK(9, 6)
+#define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16)
+#define ASPEED_SGPIO_ENABLE BIT(0)
+
+struct aspeed_sgpio {
+ struct gpio_chip chip;
+ struct clk *pclk;
+ spinlock_t lock;
+ void __iomem *base;
+ uint32_t dir_in[3];
+ int irq;
+};
+
+struct aspeed_sgpio_bank {
+ uint16_t val_regs;
+ uint16_t rdata_reg;
+ uint16_t irq_regs;
+ const char names[4][3];
+};
+
+/*
+ * Note: The "value" register returns the input value when the GPIO is
+ * configured as an input.
+ *
+ * The "rdata" register returns the output value when the GPIO is
+ * configured as an output.
+ */
+static const struct aspeed_sgpio_bank aspeed_sgpio_banks[] = {
+ {
+ .val_regs = 0x0000,
+ .rdata_reg = 0x0070,
+ .irq_regs = 0x0004,
+ .names = { "A", "B", "C", "D" },
+ },
+ {
+ .val_regs = 0x001C,
+ .rdata_reg = 0x0074,
+ .irq_regs = 0x0020,
+ .names = { "E", "F", "G", "H" },
+ },
+ {
+ .val_regs = 0x0038,
+ .rdata_reg = 0x0078,
+ .irq_regs = 0x003C,
+ .names = { "I", "J" },
+ },
+};
+
+enum aspeed_sgpio_reg {
+ reg_val,
+ reg_rdata,
+ reg_irq_enable,
+ reg_irq_type0,
+ reg_irq_type1,
+ reg_irq_type2,
+ reg_irq_status,
+};
+
+#define GPIO_VAL_VALUE 0x00
+#define GPIO_IRQ_ENABLE 0x00
+#define GPIO_IRQ_TYPE0 0x04
+#define GPIO_IRQ_TYPE1 0x08
+#define GPIO_IRQ_TYPE2 0x0C
+#define GPIO_IRQ_STATUS 0x10
+
+static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
+ const struct aspeed_sgpio_bank *bank,
+ const enum aspeed_sgpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return gpio->base + bank->val_regs + GPIO_VAL_VALUE;
+ case reg_rdata:
+ return gpio->base + bank->rdata_reg;
+ case reg_irq_enable:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE;
+ case reg_irq_type0:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0;
+ case reg_irq_type1:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1;
+ case reg_irq_type2:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2;
+ case reg_irq_status:
+ return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
+ default:
+ /* actually, if the code runs to here, it's an error case */
+ BUG_ON(1);
+ }
+}
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_OFFSET(x) ((x) & 0x1f)
+#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
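+
+/*
+ * Worked example (illustrative only): for offset 37, GPIO_BANK(37) = 37 >> 5 = 1
+ * and GPIO_OFFSET(37) = 37 & 0x1f = 5, so GPIO_BIT(37) = BIT(5) selects bit 5
+ * within the second register bank.
+ */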
+
+static const struct aspeed_sgpio_bank *to_bank(unsigned int offset)
+{
+ unsigned int bank = GPIO_BANK(offset);
+
+ WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks));
+ return &aspeed_sgpio_banks[bank];
+}
+
+static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_sgpio_bank *bank = to_bank(offset);
+ unsigned long flags;
+ enum aspeed_sgpio_reg reg;
+ bool is_input;
+ int rc = 0;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
+ reg = is_input ? reg_val : reg_rdata;
+ rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return rc;
+}
+
+static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ const struct aspeed_sgpio_bank *bank = to_bank(offset);
+ void __iomem *addr;
+ u32 reg = 0;
+
+ addr = bank_reg(gpio, bank, reg_val);
+ reg = ioread32(addr);
+
+ if (val)
+ reg |= GPIO_BIT(offset);
+ else
+ reg &= ~GPIO_BIT(offset);
+
+ iowrite32(reg, addr);
+}
+
+static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ sgpio_set_value(gc, offset, val);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset);
+ sgpio_set_value(gc, offset, val);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ int dir_status;
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return dir_status;
+}
+
+static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
+ struct aspeed_sgpio **gpio,
+ const struct aspeed_sgpio_bank **bank,
+ u32 *bit, int *offset)
+{
+ struct aspeed_sgpio *internal;
+
+ *offset = irqd_to_hwirq(d);
+ internal = irq_data_get_irq_chip_data(d);
+ WARN_ON(!internal);
+
+ *gpio = internal;
+ *bank = to_bank(*offset);
+ *bit = GPIO_BIT(*offset);
+}
+
+static void aspeed_sgpio_irq_ack(struct irq_data *d)
+{
+ const struct aspeed_sgpio_bank *bank;
+ struct aspeed_sgpio *gpio;
+ unsigned long flags;
+ void __iomem *status_addr;
+ int offset;
+ u32 bit;
+
+ irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+
+ status_addr = bank_reg(gpio, bank, reg_irq_status);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ iowrite32(bit, status_addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
+{
+ const struct aspeed_sgpio_bank *bank;
+ struct aspeed_sgpio *gpio;
+ unsigned long flags;
+ u32 reg, bit;
+ void __iomem *addr;
+ int offset;
+
+ irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+ addr = bank_reg(gpio, bank, reg_irq_enable);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ reg = ioread32(addr);
+ if (set)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ iowrite32(reg, addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void aspeed_sgpio_irq_mask(struct irq_data *d)
+{
+ aspeed_sgpio_irq_set_mask(d, false);
+}
+
+static void aspeed_sgpio_irq_unmask(struct irq_data *d)
+{
+ aspeed_sgpio_irq_set_mask(d, true);
+}
+
+static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
+{
+ u32 type0 = 0;
+ u32 type1 = 0;
+ u32 type2 = 0;
+ u32 bit, reg;
+ const struct aspeed_sgpio_bank *bank;
+ irq_flow_handler_t handler;
+ struct aspeed_sgpio *gpio;
+ unsigned long flags;
+ void __iomem *addr;
+ int offset;
+
+ irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ type2 |= bit;
+ /* fall through */
+ case IRQ_TYPE_EDGE_RISING:
+ type0 |= bit;
+ /* fall through */
+ case IRQ_TYPE_EDGE_FALLING:
+ handler = handle_edge_irq;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ type0 |= bit;
+ /* fall through */
+ case IRQ_TYPE_LEVEL_LOW:
+ type1 |= bit;
+ handler = handle_level_irq;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ addr = bank_reg(gpio, bank, reg_irq_type0);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type0;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type1);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type1;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type2);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type2;
+ iowrite32(reg, addr);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ irq_set_handler_locked(d, handler);
+
+ return 0;
+}
+
+static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ struct aspeed_sgpio *data = gpiochip_get_data(gc);
+ unsigned int i, p, girq;
+ unsigned long reg;
+
+ chained_irq_enter(ic, desc);
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+ const struct aspeed_sgpio_bank *bank = &aspeed_sgpio_banks[i];
+
+ reg = ioread32(bank_reg(data, bank, reg_irq_status));
+
+ for_each_set_bit(p, &reg, 32) {
+ girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
+ generic_handle_irq(girq);
+ }
+
+ }
+
+ chained_irq_exit(ic, desc);
+}
+
+static struct irq_chip aspeed_sgpio_irqchip = {
+ .name = "aspeed-sgpio",
+ .irq_ack = aspeed_sgpio_irq_ack,
+ .irq_mask = aspeed_sgpio_irq_mask,
+ .irq_unmask = aspeed_sgpio_irq_unmask,
+ .irq_set_type = aspeed_sgpio_set_type,
+};
+
+static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
+ struct platform_device *pdev)
+{
+ int rc, i;
+ const struct aspeed_sgpio_bank *bank;
+ struct gpio_irq_chip *irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ return rc;
+
+ gpio->irq = rc;
+
+ /* Disable IRQs and clear interrupt status registers for all SGPIO pins. */
+ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+ bank = &aspeed_sgpio_banks[i];
+ /* disable irq enable bits */
+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
+ /* clear status bits */
+ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
+ }
+
+ irq = &gpio->chip.irq;
+ irq->chip = &aspeed_sgpio_irqchip;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = aspeed_sgpio_irq_handler;
+ irq->parent_handler_data = gpio;
+ irq->parents = &gpio->irq;
+ irq->num_parents = 1;
+
+ /* set IRQ settings and Enable Interrupt */
+ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+ bank = &aspeed_sgpio_banks[i];
+ /* set falling or level-low irq */
+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
+ /* trigger type is edge */
+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
+ /* dual edge trigger mode. */
+ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2));
+ /* enable irq */
+ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable));
+ }
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_sgpio_of_table[] = {
+ { .compatible = "aspeed,ast2400-sgpio" },
+ { .compatible = "aspeed,ast2500-sgpio" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table);
+
+static int __init aspeed_sgpio_probe(struct platform_device *pdev)
+{
+ struct aspeed_sgpio *gpio;
+ u32 nr_gpios, sgpio_freq, sgpio_clk_div;
+ int rc;
+ unsigned long apb_freq;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ rc = of_property_read_u32(pdev->dev.of_node, "ngpios", &nr_gpios);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Could not read ngpios property\n");
+ return -EINVAL;
+ } else if (nr_gpios > MAX_NR_SGPIO) {
+ dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n",
+ MAX_NR_SGPIO, nr_gpios);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Could not read bus-frequency property\n");
+ return -EINVAL;
+ }
+
+ gpio->pclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gpio->pclk)) {
+ dev_err(&pdev->dev, "devm_clk_get failed\n");
+ return PTR_ERR(gpio->pclk);
+ }
+
+ apb_freq = clk_get_rate(gpio->pclk);
+
+ /*
+ * From the datasheet,
+ * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
+ * period = 2 * (GPIO254[31:16] + 1) / PCLK
+ * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
+ * frequency = PCLK / (2 * (GPIO254[31:16] + 1))
+ * frequency * 2 * (GPIO254[31:16] + 1) = PCLK
+ * GPIO254[31:16] = PCLK / (frequency * 2) - 1
+ */
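+
+ /*
+ * Worked example (illustrative values, not taken from the datasheet): with
+ * PCLK = 100 MHz and a requested bus-frequency of 1 MHz, the divider is
+ * 100000000 / (1000000 * 2) - 1 = 49, which fits in the 16-bit field.
+ */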
+ if (sgpio_freq == 0)
+ return -EINVAL;
+
+ sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
+
+ if (sgpio_clk_div > (1 << 16) - 1)
+ return -EINVAL;
+
+ iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) |
+ FIELD_PREP(ASPEED_SGPIO_PINS_MASK, (nr_gpios / 8)) |
+ ASPEED_SGPIO_ENABLE,
+ gpio->base + ASPEED_SGPIO_CTRL);
+
+ spin_lock_init(&gpio->lock);
+
+ gpio->chip.parent = &pdev->dev;
+ gpio->chip.ngpio = nr_gpios;
+ gpio->chip.direction_input = aspeed_sgpio_dir_in;
+ gpio->chip.direction_output = aspeed_sgpio_dir_out;
+ gpio->chip.get_direction = aspeed_sgpio_get_direction;
+ gpio->chip.request = NULL;
+ gpio->chip.free = NULL;
+ gpio->chip.get = aspeed_sgpio_get;
+ gpio->chip.set = aspeed_sgpio_set;
+ gpio->chip.set_config = NULL;
+ gpio->chip.label = dev_name(&pdev->dev);
+ gpio->chip.base = -1;
+
+ /* set all SGPIO pins as input (1). */
+ memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in));
+
+ aspeed_sgpio_setup_irqs(gpio, pdev);
+
+ rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static struct platform_driver aspeed_sgpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_sgpio_of_table,
+ },
+};
+
+module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe);
+MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1d80222587ad..3c88420e3497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
# !PREEMPT because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12) ?
+ adev->asic_type != CHIP_POLARIS12 &&
+ adev->asic_type != CHIP_VEGAM) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
return r;
}
- fence = amdgpu_ctx_get_fence(ctx, entity,
- deps[i].handle);
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+ struct drm_sched_fence *s_fence;
struct dma_fence *old = fence;
+ s_fence = to_drm_sched_fence(fence);
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(old);
}
- if (IS_ERR(fence)) {
- r = PTR_ERR(fence);
- amdgpu_ctx_put(ctx);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ dma_fence_put(fence);
+ if (r)
return r;
- } else if (fence) {
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
- true);
- dma_fence_put(fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
- }
}
return 0;
}
@@ -1145,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
@@ -1168,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk
- *chunk)
+ struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
@@ -1179,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ if (p->post_deps)
+ return -EINVAL;
+
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..7398b4850649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
- struct dma_fence *other = centity->fences[idx];
+ struct dma_fence *other;
+ unsigned idx;
+ long r;
- if (other) {
- signed long r;
- r = dma_fence_wait(other, true);
- if (r < 0) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ spin_lock(&ctx->ring_lock);
+ idx = centity->sequence & (amdgpu_sched_jobs - 1);
+ other = dma_fence_get(centity->fences[idx]);
+ spin_unlock(&ctx->ring_lock);
- return r;
- }
- }
+ if (!other)
+ return 0;
- return 0;
+ r = dma_fence_wait(other, true);
+ if (r < 0 && r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+ dma_fence_put(other);
+ return r;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2e8b4238efd..5376328d3fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xffffffff;
+uint amdgpu_ras_mask = 0xfffffffb;
/**
* DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index df8a23554831..f6ac1e9548f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -32,7 +32,6 @@ struct amdgpu_gds {
uint32_t gws_size;
uint32_t oa_size;
uint32_t gds_compute_max_wave_id;
- uint32_t vgt_gs_max_wave_id;
};
struct amdgpu_gds_reg_offset {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8b7efd0a7028..2b546567853b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
- else if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev)) {
+ if (adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else
+ pm = adev->pm.dpm.user_state;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state) {
pm = amdgpu_dpm_get_current_power_state(adev);
- else
+ } else {
pm = adev->pm.dpm.user_state;
+ }
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.user_state = state;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return -EINVAL;
if (is_support_sw_smu(adev)) {
- err = smu_get_current_rpm(&adev->smu, &speed);
+ err = smu_get_fan_speed_rpm(&adev->smu, &speed);
if (err)
return err;
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return -EINVAL;
if (is_support_sw_smu(adev)) {
- err = smu_get_current_rpm(&adev->smu, &rpm);
+ err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
if (err)
return err;
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ if (adev->asic_type > CHIP_VEGA20) {
+ /* VCN clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCN: Disabled\n");
+ } else {
+ seq_printf(m, "VCN: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
}
- }
- seq_printf(m, "\n");
+ seq_printf(m, "\n");
+ } else {
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a4412e47810..fac7aa2c244f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
struct amdgpu_bo **bo_ptr);
-static void amdgpu_ras_self_test(struct amdgpu_device *adev)
-{
- /* TODO */
-}
-
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
if (!obj)
return -EINVAL;
+ if (block_info.block_id != TA_RAS_BLOCK__UMC) {
+ DRM_INFO("%s error injection is not supported yet\n",
+ ras_block_str(info->head.block));
+ return -EINVAL;
+ }
+
ret = psp_ras_trigger_error(&adev->psp, &block_info);
if (ret)
DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
@@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
+ if (!con->hw_supported) {
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+ return 0;
+ }
+
con->features = 0;
INIT_LIST_HEAD(&con->head);
/* Might need get this flag from vbios. */
@@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- amdgpu_ras_self_test(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 99f14fcc1460..19661c645703 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -30,6 +30,7 @@
#define AMDGPU_VCN_FIRMWARE_OFFSET 256
#define AMDGPU_VCN_MAX_ENC_RINGS 3
+#define VCN_DEC_KMD_CMD 0x80000000
#define VCN_DEC_CMD_FENCE 0x00000000
#define VCN_DEC_CMD_TRAP 0x00000001
#define VCN_DEC_CMD_WRITE_REG 0x00000004
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1675d5837c3c..f41287f9000d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
}
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+ }
}
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
@@ -4197,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
- * This resets the wave ID counters. (needed by transform feedback)
- * TODO: This might only be needed on a VMID switch when we change
- * the GDS OA mapping, not sure.
- */
- amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
- amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
-
if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
else
@@ -4611,6 +4611,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 0);
WREG32(cp_int_cntl_reg, cp_int_cntl);
+ break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
@@ -4951,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2, /* SWITCH_BUFFER */
- .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
+ .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
@@ -5102,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
default:
adev->gds.gds_size = 0x10000;
adev->gds.gds_compute_max_wave_id = 0x4ff;
- adev->gds.vgt_gs_max_wave_id = 0x3ff;
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0db9f488da7e..21187275dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+ }
}
static void gfx_v7_0_config_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5f401b41ef7c..ee1ccdcf2d30 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return 0;
}
+static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (!r)
+ adev->gfx.rlc.clear_state_gpu_addr =
+ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return r;
+}
+
+static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!adev->gfx.rlc.clear_state_obj)
+ return;
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+ if (likely(r == 0)) {
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+}
+
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -3706,6 +3739,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+ }
}
static void gfx_v8_0_config_init(struct amdgpu_device *adev)
@@ -4776,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
+ r = gfx_v8_0_csb_vram_pin(adev);
+ if (r)
+ return r;
+
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4892,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle)
else
pr_err("rlc is busy, skip halt rlc\n");
amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+ gfx_v8_0_csb_vram_unpin(adev);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f4c4eea62526..c066e1d3f981 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- break;
- if ((adev->gfx.rlc_fw_version != 106 &&
- adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
- (adev->gfx.rlc_feature_version < 1) ||
- !adev->gfx.rlc.is_rlc_v2_1)
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ &&((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
break;
@@ -1918,6 +1922,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
}
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+ }
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -4860,7 +4873,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- WREG32(mmSQ_CMD, value);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_BIF_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
- AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
-
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_CP |
- AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 988c0adaca91..dfde886cc6bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
offset = size;
- /* No signed header for now from firmware
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
- */
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
}
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
@@ -1488,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}
/**
@@ -1501,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}
/**
@@ -1546,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
amdgpu_ring_write(ring, 0);
@@ -1556,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}
/**
@@ -1600,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -1629,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
- amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
/**
@@ -2082,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 4);
+ if (r)
+ return r;
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+ amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->vcn.external.scratch9);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+
static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -2145,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
- .test_ring = amdgpu_vcn_dec_ring_test_ring,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 26b15cc56c31..1d3cd5c50d5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1567,32 +1567,6 @@ copy_from_user_failed:
return err;
}
-static int kfd_ioctl_alloc_queue_gws(struct file *filep,
- struct kfd_process *p, void *data)
-{
- int retval;
- struct kfd_ioctl_alloc_queue_gws_args *args = data;
- struct kfd_dev *dev;
-
- if (!hws_gws_support)
- return -ENODEV;
-
- dev = kfd_device_by_id(args->gpu_id);
- if (!dev) {
- pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
- return -ENODEV;
- }
- if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
- return -ENODEV;
-
- mutex_lock(&p->mutex);
- retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
- mutex_unlock(&p->mutex);
-
- args->first_gws = 0;
- return retval;
-}
-
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
- AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
- kfd_ioctl_alloc_queue_gws, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 792371442195..4e3fc284f6ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case CHIP_RAVEN:
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+ break;
case CHIP_NAVI10:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4f8a6ffc5775..9cd3eb2d90bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- pr_debug("%s@%i\n", __func__, __LINE__);
case KFD_MQD_TYPE_COMPUTE:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
const struct drm_connector_state *state)
{
- uint32_t bpc = connector->display_info.bpc;
+ uint8_t bpc = (uint8_t)connector->display_info.bpc;
+
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
if (!state)
state = connector->state;
if (state) {
- bpc = state->max_bpc;
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+ * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min(bpc, state->max_requested_bpc);
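+
+ /*
+ * Worked example (illustrative values): display_info.bpc = 10 combined
+ * with a user-requested max_requested_bpc of 9 gives bpc = min(10, 9) = 9,
+ * which the rounding below then reduces to 8.
+ */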
+
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index c1a92c16535c..5cc3acccda2a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -262,12 +262,12 @@ void dce110_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 778392c73187..7c746ef1e32e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -226,12 +226,12 @@ void dce112_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
index 906310c3e2eb..5399b8cf6b75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
@@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = {
void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->base.dprefclk_khz = 600000;
clk_mgr->base.funcs = &dce120_funcs;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 08a774fc7b67..50bfb5921de0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
void dcn2_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
}
void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct(
struct dccg *dccg)
{
clk_mgr->base.ctx = ctx;
+ clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &dcn2_funcs;
clk_mgr->regs = &clk_mgr_regs;
clk_mgr->clk_mgr_shift = &clk_mgr_shift;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4ef4dc63e221..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
*/
#include <linux/slab.h>
+#include <linux/mm.h>
#include "dm_services.h"
@@ -502,8 +503,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
static void destruct(struct dc *dc)
{
- dc_release_state(dc->current_state);
- dc->current_state = NULL;
+ if (dc->current_state) {
+ dc_release_state(dc->current_state);
+ dc->current_state = NULL;
+ }
destroy_links(dc);
@@ -1169,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
struct dc_state *dc_create_state(struct dc *dc)
{
- struct dc_state *context = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
if (!context)
return NULL;
@@ -1190,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
- struct dc_state *new_ctx = kmemdup(src_ctx,
- sizeof(struct dc_state), GFP_KERNEL);
+ struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
if (!new_ctx)
return NULL;
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1228,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
- kfree(context);
+ kvfree(context);
}
void dc_release_state(struct dc_state *context)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8dbf759eba45..355b4ba12796 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
+ union max_down_spread max_down_spread = { {0} };
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
msleep(8);
}
- ASSERT(status == DC_OK);
-
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
&link_bw_set, sizeof(link_bw_set));
@@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
link->cur_link_settings.link_rate = link_bw_set;
link->cur_link_settings.use_link_rate_set = false;
}
+ // Read DPCD 00003h to find the max down spread.
+ core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+ &max_down_spread.raw, sizeof(max_down_spread));
+ link->cur_link_settings.link_spread =
+ max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
static bool detect_dp(
@@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
return false;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* On detect, we want to make sure current link settings are
- * up to date, especially if link was powered on by GOP.
- */
- read_edp_current_link_settings_on_detect(link);
- }
-
prev_sink = link->local_sink;
if (prev_sink != NULL) {
dc_sink_retain(prev_sink);
@@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
+ read_edp_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (core_dc->current_state->res_ctx.
pipe_ctx[i].stream->link
- == link)
+ == link) {
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
@@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
core_dc->current_state->
res_ctx.pipe_ctx[i].stream_res.tg->inst +
1;
+
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+ frame_ramp = 0;
+ }
}
}
abm->funcs->set_backlight_level_pwm(
@@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
/* Retrain with preferred link settings only relevant for
* DP signal type
+ * Check for non-DP signal or if passive dongle present
*/
- if (!dc_is_dp_signal(link->connector_signal))
+ if (!dc_is_dp_signal(link->connector_signal) ||
+ link->dongle_max_pix_clk > 0)
return;
for (i = 0; i < MAX_PIPES; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 056be4c34a98..2c7aaed907b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2230,18 +2230,25 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
ddc_service_set_dongle_type(link->ddc,
link->dpcd_caps.dongle_type);
+ link->dpcd_caps.is_branch_dev = false;
return;
}
/* DPCD 0x5 bit 0 = 1 indicates a branch device */
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+ if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
+ link->dpcd_caps.is_branch_dev = false;
+	} else {
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+ }
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
break;
- case DOWNSTREAM_DVI_HDMI:
- /* At this point we don't know is it DVI or HDMI,
+ case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+	/* At this point we don't know whether it is DVI, HDMI or DP++,
* assume DVI.*/
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
break;
@@ -2258,6 +2265,10 @@ static void get_active_converter_info(
det_caps, sizeof(det_caps));
switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+	/* Handle DP case as DONGLE_NONE */
+ case DOWN_STREAM_DETAILED_DP:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
case DOWN_STREAM_DETAILED_VGA:
link->dpcd_caps.dongle_type =
DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2267,6 +2278,8 @@ static void get_active_converter_info(
DISPLAY_DONGLE_DP_DVI_CONVERTER;
break;
case DOWN_STREAM_DETAILED_HDMI:
+ case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+	/* Handle the DP++ active converter case; per the DP 1.4 spec, treat DP++ as HDMI */
link->dpcd_caps.dongle_type =
DISPLAY_DONGLE_DP_HDMI_CONVERTER;
@@ -2282,14 +2295,18 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
- hdmi_caps.bits.YCrCr422_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
- hdmi_caps.bits.YCrCr420_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
- hdmi_caps.bits.YCrCr422_CONVERSION;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
- hdmi_caps.bits.YCrCr420_CONVERSION;
+	/* YCbCr capability applies only to the HDMI case */
+ if (port_caps->bits.DWN_STRM_PORTX_TYPE
+ == DOWN_STREAM_DETAILED_HDMI) {
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+ }
link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
translate_dpcd_max_bpc(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 51a78283a86d..2ceaab4fb5de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -258,7 +258,7 @@ bool resource_construct(
* PORT_CONNECTIVITY == 1 (as instructed by HW team).
*/
update_num_audio(&straps, &num_audio, &pool->audio_support);
- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ for (i = 0; i < caps->num_audio; i++) {
struct audio *aud = create_funcs->create_audio(ctx, i);
if (aud == NULL) {
@@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio(
return pool->audios[i];
}
}
+
+	/* use the engine id to find a free audio instance */
+ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+ return pool->audios[id];
+ }
+
/* no matching instance found; fall back to first come, first served */
for (i = 0; i < pool->audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
@@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
pix_clk /= 2;
if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
normalized_pix_clk = pix_clk;
break;
@@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add checks for ASIC audio support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count) {
+ stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
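The find_first_free_audio() hunk above adds a middle step to the selection order: after the existing search for an instance matching the stream encoder's engine id, the instance at that array index is taken directly if it is within pool->audio_count and not yet acquired, and only then does the first-come-first-served scan run. An illustrative sketch of that ordering, with hypothetical names rather than the driver's:

#include <linux/types.h>	/* bool */

/* Prefer the slot matching 'id' if it is free, otherwise the first
 * free slot; -1 when everything is acquired. Illustrative only. */
static int pick_audio_slot(const bool *acquired, int count, int id)
{
	int i;

	if (id < count && !acquired[id])
		return id;

	for (i = 0; i < count; i++)
		if (!acquired[i])
			return i;

	return -1;
}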
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index af7f8be230f7..352862370390 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
- if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
+ if (pipe_ctx->stream_res.stream_enc &&
+ pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
/* if using dynamic meta, don't set up generic infopackets */
pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index f8903bcabe49..58bd131d5b48 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -239,6 +239,10 @@ static void dmcu_set_backlight_level(
s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
REG_WRITE(BIOS_SCRATCH_2, s2);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
}
static void dce_abm_init(struct abm *abm)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 858a58856ebd..fafb4b470140 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -965,11 +965,17 @@ void hwss_edp_backlight_control(
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify the audio driver of the monitor's audio modes */
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc;
struct pp_smu_funcs *pp_smu = NULL;
- struct clk_mgr *clk_mgr = core_dc->clk_mgr;
+ struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
+ if (!pipe_ctx->stream)
+ return;
+
+ core_dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = core_dc->clk_mgr;
+
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
@@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc;
struct pp_smu_funcs *pp_smu = NULL;
- struct clk_mgr *clk_mgr = dc->clk_mgr;
+ struct clk_mgr *clk_mgr;
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ return;
+
+ dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = dc->clk_mgr;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
@@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ pipe_ctx->stream_res.audio->enabled = false;
+
if (dc->res_pool->pp_smu)
pp_smu = dc->res_pool->pp_smu;
@@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
/* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
* stream->stream_engine_id);
*/
- if (pipe_ctx->stream_res.audio)
- pipe_ctx->stream_res.audio->enabled = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e50a696fcb5d..2118ea21d7e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct hubp *hubp = dc->res_pool->hubps[i];
- struct dpp *dpp = dc->res_pool->dpps[i];
-
- hubp->funcs->hubp_init(hubp);
- dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
- plane_atomic_power_down(dc, dpp, hubp);
- }
-
- apply_DEGVIDCN10_253_wa(dc);
+ dc->hwss.init_pipes(dc, dc->current_state);
}
for (i = 0; i < dc->res_pool->audio_count; i++) {
@@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-
-
-
-
static bool
dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
@@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface(
if (removed_pipe[i])
dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (removed_pipe[i]) {
+ dc->hwss.optimize_bandwidth(dc, context);
+ break;
+ }
+
if (dc->hwseq->wa.DEGVIDCN10_254)
hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1a20461c2937..a12530a3ab9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = {
.num_audio = 3,
.num_stream_encoder = 3,
.num_pll = 3,
- .num_ddc = 3,
+ .num_ddc = 4,
};
static const struct dc_plane_cap plane_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 51a3dfe97f0e..31aa6ee5cd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg)
switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
case 6:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
+ /* Fall through */
case 5:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
+ /* Fall through */
case 4:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
+ /* Fall through */
case 3:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
+ /* Fall through */
case 2:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
+ /* Fall through */
case 1:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
break;
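The /* Fall through */ annotations above mark the descending cascade as intentional (and keep -Wimplicit-fallthrough quiet): entering the switch at pipe_count == N runs every lower case too, so the DTO double-buffer enable is applied to pipes N-1 down to 0. A reduced sketch of the same idiom, with enable_entry() as a hypothetical per-entry update:

static void enable_entry(int i)
{
	/* placeholder: a real version would touch the register for entry i */
	(void)i;
}

/* Selecting count == N intentionally falls through and enables
 * entries N-1 .. 0. */
static void enable_first_n(int count)
{
	switch (count) {
	case 3:
		enable_entry(2);
		/* Fall through */
	case 2:
		enable_entry(1);
		/* Fall through */
	case 1:
		enable_entry(0);
		break;
	default:
		break;
	}
}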
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index ece6e136437b..6e2dbd03f9bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
break;
default:
ASSERT(false);
+ block_size = page_table_block_size;
break;
}
@@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
- phys_config.depth = 1;
- phys_config.block_size = 4096;
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
-
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0b84a322b8a2..d810c8940129 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1153,8 +1153,8 @@ void dcn20_enable_plane(
apt.sys_default.quad_part = 0;
- apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr;
- apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr;
+ apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
+ apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
@@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global(
CRTC_STATE_VACTIVE);
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
pipe->stream_res.tg);
}
@@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock(
if (pipe->plane_state != NULL)
flip_immediate = pipe->plane_state->flip_immediate;
+ if (flip_immediate && lock) {
+ while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) {
+ udelay(1);
+ }
+
+ if (pipe->bottom_pipe != NULL)
+ while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) {
+ udelay(1);
+ }
+ }
+
/* In flip immediate and pipe splitting case, we need to use GSL
* for synchronization. Only do setup on locking and on flip type change.
*/
@@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio) {
dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
}
-
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ else if (pipe_ctx->stream_res.dsc)
+ dp_set_dsc_enable(pipe_ctx, false);
+#endif
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 26a66ccf6e72..1ae973962d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+	optc1->min_h_sync_width = 4; // HW originally asked for a minimum HSYNC of 8 pixels for no real reason; the Oculus Rift S will not light up with 8 since its hsync width is 6, so use 4 to fix that.
optc1->min_v_sync_width = 1;
optc1->comb_opp_id = 0xf;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d200bc3cec71..b949e202d6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
+ else
+		// Accounting for the SOC/DCF clock relationship, we can go as high as
+		// 506 MHz at Vmin. Program 507 since the SMU will round down to 506.
+ min_dcfclk = 507;
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
index 27679ef6ebe8..96c263223315 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dcn20_vmid.h"
#include "reg_helper.h"
@@ -36,6 +38,38 @@
#define FN(reg_name, field_name) \
vmid->shifts->field_name, vmid->masks->field_name
+static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
+{
+	/* According to the hardware spec, we need to poll for the lowest
+ * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
+ * context is updated. We can't use REG_WAIT here since we
+	 * don't have a separate field to wait on.
+ *
+ * TODO: Confirm timeout / poll interval with hardware team
+ */
+
+ int max_times = 10000;
+ int delay_us = 5;
+ int i;
+
+ for (i = 0; i < max_times; ++i) {
+ uint32_t entry_lo32;
+
+ REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
+ &entry_lo32);
+
+ if (entry_lo32 & 0x1)
+ return;
+
+ udelay(delay_us);
+ }
+
+ /* VM setup timed out */
+ DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
+ ASSERT(0);
+}
+
void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
{
REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_
REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
+	/* Note: per the hardware spec, PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in the sequence */
REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
+
+ dcn20_wait_for_vmid_ready(vmid);
}
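dcn20_wait_for_vmid_ready() above is a bounded busy-wait: it polls the low bit of PAGE_TABLE_BASE_ADDR_LO32 up to 10000 times with a 5 us delay (roughly 50 ms worst case) and only warns and asserts on timeout instead of failing the setup. A generic sketch of that poll-with-timeout shape, assuming a hypothetical read_status() register accessor:

#include <linux/delay.h>
#include <linux/types.h>

/* Poll until the ready bit is set or the attempt budget runs out;
 * read_status() is a hypothetical accessor, not a real register API. */
static bool poll_ready_bit(u32 (*read_status)(void), int max_tries, int delay_us)
{
	int i;

	for (i = 0; i < max_tries; ++i) {
		if (read_status() & 0x1)
			return true;
		udelay(delay_us);
	}
	return false;	/* caller decides how to report the timeout */
}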
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
index 67089765780b..340ef4d41ebd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
@@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+ /* As per DSC spec v1.2a recommendation: */
+ if (vdsc_cfg->native_420)
+ vdsc_cfg->second_line_offset_adj = 512;
+ else
+ vdsc_cfg->second_line_offset_adj = 0;
+
return 0;
}
EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c89393c19232..a148ffde8b12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,7 @@ struct resource_pool {
struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
unsigned int clk_src_count;
- struct audio *audios[MAX_PIPES];
+ struct audio *audios[MAX_AUDIOS];
unsigned int audio_count;
struct audio_support audio_support;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 959f5b654611..9502478c4a1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth {
};
enum dcn_hubbub_page_table_block_size {
- DCN_PAGE_TABLE_BLOCK_SIZE_4KB,
- DCN_PAGE_TABLE_BLOCK_SIZE_64KB
+ DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0,
+ DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4,
};
struct dcn_hubbub_phys_addr_config {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 8759ec03aede..f82365e2d03c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
* Data types shared between different Virtual HW blocks
******************************************************************************/
+#define MAX_AUDIOS 7
#define MAX_PIPES 6
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB_PIPES 1
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 1c66166d0a94..2c90d1b46c8b 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -43,7 +43,7 @@ enum dpcd_revision {
enum dpcd_downstream_port_type {
DOWNSTREAM_DP = 0,
DOWNSTREAM_VGA,
- DOWNSTREAM_DVI_HDMI,
+ DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */
DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
};
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f661bf96ed0..5b1ebb7f995a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ AMDGPU_PP_SENSOR_VCN_POWER_STATE,
};
enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f1565c448de5..8a3eadeebdcb 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
+ uint32_t clock_limit;
if (!min && !max)
return -EINVAL;
- if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* clock in MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
return 0;
+ }
mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
@@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
/* power states not supported */
memset(state_info, 0, sizeof(struct pp_states_info));
- state_info->nums = 0;
+ state_info->nums = 1;
+ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
return 0;
}
@@ -289,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu,
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
int ret = 0;
switch (sensor) {
@@ -312,6 +340,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -698,6 +730,12 @@ static int smu_sw_init(void *handle)
return ret;
}
+ ret = smu_register_irq_handler(smu);
+ if (ret) {
+ pr_err("Failed to register smc irq handler!\n");
+ return ret;
+ }
+
return 0;
}
@@ -707,6 +745,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ kfree(smu->irq_source);
+ smu->irq_source = NULL;
+
ret = smu_smc_table_sw_fini(smu);
if (ret) {
pr_err("Failed to sw fini smc table!\n");
@@ -1063,10 +1104,6 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- ret = smu_register_irq_handler(smu);
- if (ret)
- goto failed;
-
if (!smu->pm_enabled)
adev->pm.dpm_enabled = false;
else
@@ -1096,9 +1133,6 @@ static int smu_hw_fini(void *handle)
kfree(table_context->overdrive_table);
table_context->overdrive_table = NULL;
- kfree(smu->irq_source);
- smu->irq_source = NULL;
-
ret = smu_fini_fb_allocations(smu);
if (ret)
return ret;
@@ -1349,13 +1383,49 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
+static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
{
int ret = 0;
int index = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1386,39 +1456,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != level) {
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
-
- case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
- break;
-
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
- break;
-
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
+ ret = smu_asic_set_performance_level(smu, level);
+ if (ret) {
+ ret = smu_default_set_performance_level(smu, level);
}
-
if (!ret)
smu_dpm_ctx->dpm_level = level;
}
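The refactor above splits the old inline switch into smu_default_set_performance_level() and tries an ASIC-specific hook first: smu_asic_set_performance_level() evaluates to -EINVAL when ppt_funcs->set_performance_level is not provided, and any non-zero result falls back to the generic path. A minimal sketch of that try-hook-then-fallback dispatch, with hypothetical types and helpers:

#include <linux/errno.h>

struct perf_ctx {			/* illustrative, not the driver's type */
	int (*asic_set_level)(struct perf_ctx *c, int level);
};

static int generic_set_level(struct perf_ctx *c, int level)
{
	/* the generic clock-forcing path would live here */
	return 0;
}

static int set_performance_level(struct perf_ctx *c, int level)
{
	/* Prefer the ASIC hook; a missing hook is treated as -EINVAL. */
	int ret = c->asic_set_level ? c->asic_set_level(c, level) : -EINVAL;

	if (ret)
		ret = generic_set_level(c, level);
	return ret;
}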
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d3373c..18e780f566fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
uint32_t sclk, mclk;
int ret = 0;
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0);
+ smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
+ smu10_data->vcn_power_gated = false;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..90c4e87ac5ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- *query = metrics_table.CurrSocketPower << 8;
+	/* The 40.46 SMU firmware release changed which field reports the socket power */
+ if (hwmgr->smu_version == 0x282e00)
+ *query = metrics_table.AverageSocketPower << 8;
+ else
+ *query = metrics_table.CurrSocketPower << 8;
return ret;
}
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ uint32_t soft_min_level, soft_max_level;
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+ /* gfxclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+ /* uclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ /* socclk soft min/max settings */
+ soft_min_level =
+ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
+ soft_max_level =
+ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
+
+ data->dpm_table.soc_table.dpm_state.soft_min_level =
+ data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.soc_table.dpm_state.soft_max_level =
+ data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+ FEATURE_DPM_UCLK_MASK |
+ FEATURE_DPM_SOCCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 1af992fb0bde..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -429,7 +429,6 @@ struct smu_table_context
struct smu_table *tables;
uint32_t table_count;
struct smu_table memory_pool;
- uint16_t software_shutdown_temp;
uint8_t thermal_controller_type;
uint16_t TDPODLimit;
@@ -452,6 +451,7 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
+ bool vcn_gated;
};
struct smu_power_context {
@@ -613,6 +613,7 @@ struct pptable_funcs {
int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
int (*set_thermal_fan_table)(struct smu_context *smu);
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*get_current_clk_freq_by_table)(struct smu_context *smu,
@@ -621,6 +622,7 @@ struct pptable_funcs {
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
+ int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
};
struct smu_funcs
@@ -685,7 +687,6 @@ struct smu_funcs
int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
- int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
@@ -751,8 +752,6 @@ struct smu_funcs
((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_get_current_rpm(smu, speed) \
- ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
#define smu_set_fan_speed_rpm(smu, speed) \
((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
#define smu_send_smc_msg(smu, msg) \
@@ -841,6 +840,8 @@ struct smu_funcs
((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
#define smu_set_fan_speed_percent(smu, speed) \
((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+#define smu_get_fan_speed_rpm(smu, speed) \
+ ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
#define smu_msg_get_index(smu, msg) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
@@ -906,8 +907,6 @@ struct smu_funcs
((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
#define smu_set_azalia_d3_pme(smu) \
((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
@@ -918,6 +917,9 @@ struct smu_funcs
((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
#define smu_baco_reset(smu) \
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 2dae0ae0829e..b81c7e715dc9 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include <linux/firmware.h>
+#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
@@ -501,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -515,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ return -ENOMEM;
+ smu_table->metrics_time = 0;
+
return 0;
}
+static int navi10_get_metrics_table(struct smu_context *smu,
+ SmuMetrics_t *metrics_table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)smu_table->metrics_table, false);
+ if (ret) {
+ pr_info("Failed to export SMU metrics table!\n");
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+ return ret;
+}
+
static int navi10_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -576,44 +605,38 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
{
- int ret = 0;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
- if (enable && power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
+ if (enable) {
+		/* VCN DPM being enabled is a prerequisite for VCN power-gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
if (ret)
return ret;
}
- power_gate->uvd_gated = false;
+ power_gate->vcn_gated = false;
} else {
- if (!enable && !power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
- if (ret)
- return ret;
- }
- power_gate->uvd_gated = true;
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
}
+ power_gate->vcn_gated = true;
}
- return 0;
+ return ret;
}
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
- static SmuMetrics_t metrics;
int ret = 0, clk_id = 0;
+ SmuMetrics_t metrics;
- if (!value)
- return -EINVAL;
-
- memset(&metrics, 0, sizeof(metrics));
-
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+ ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -626,11 +649,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
return ret;
}
+static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ DpmDescriptor_t *dpm_desc = NULL;
+ uint32_t clk_index = 0;
+
+ clk_index = smu_clk_get_index(smu, clk_type);
+ dpm_desc = &pptable->DpmDescriptor[clk_index];
+
+ /* 0 - Fine grained DPM, 1 - Discrete DPM */
+ return dpm_desc->SnapToDiscrete == 0 ? true : false;
+}
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t freq_values[3] = {0};
+ uint32_t mark_index = 0;
switch (clk_type) {
case SMU_GFXCLK:
@@ -643,22 +681,42 @@ static int navi10_print_clk_levels(struct smu_context *smu,
ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
return size;
+
/* 10KHz -> MHz */
cur_value = cur_value / 100;
- size += sprintf(buf, "current clk: %uMhz\n", cur_value);
-
ret = smu_get_dpm_level_count(smu, clk_type, &count);
if (ret)
return size;
- for (i = 0; i < count; i++) {
- ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ for (i = 0; i < count; i++) {
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (ret)
+ return size;
+
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+ } else {
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
return size;
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
+ if (ret)
+ return size;
+
+ freq_values[1] = cur_value;
+ mark_index = cur_value == freq_values[0] ? 0 :
+ cur_value == freq_values[2] ? 2 : 1;
+ if (mark_index != 1)
+ freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
+
+ for (i = 0; i < 3; i++) {
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+ i == mark_index ? "*" : "");
+ }
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
- cur_value == value ? "*" : "");
}
break;
default:
@@ -866,8 +924,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics,
- false);
+ ret = navi10_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
if (ret)
return ret;
@@ -886,10 +945,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
if (!value)
return -EINVAL;
- msleep(1);
-
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
- (void *)&metrics, false);
+ ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -919,22 +975,22 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
+static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
{
SmuMetrics_t metrics;
int ret = 0;
- if (!value)
+ if (!speed)
return -EINVAL;
- memset(&metrics, 0, sizeof(metrics));
-
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
- (void *)&metrics, false);
+ ret = navi10_get_metrics_table(smu, &metrics);
+ if (ret)
+ return ret;
if (ret)
return ret;
- *value = metrics.CurrFanSpeed;
+ *speed = metrics.CurrFanSpeed;
return ret;
}
@@ -944,10 +1000,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu,
{
int ret = 0;
uint32_t percent = 0;
- uint16_t current_rpm;
+ uint32_t current_rpm;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = navi10_get_fan_speed(smu, &current_rpm);
+ ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
if (ret)
return ret;
@@ -1278,7 +1334,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+ ret = navi10_get_metrics_table(smu, &metrics);
if (ret)
return ret;
@@ -1530,6 +1586,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu,
return 0;
}
+static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+ uint32_t uclk_level = 0;
+
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
+
+ ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
+ if (ret)
+ return ret;
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = navi10_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+ if (!range || !powerplay_table)
+ return -EINVAL;
+
+	/* The range values are in degrees Celsius */
+ range->min = 0;
+ range->max = powerplay_table->software_shutdown_temp;
+
+ return 0;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1557,6 +1683,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.unforce_dpm_levels = navi10_unforce_dpm_levels,
.is_dpm_running = navi10_is_dpm_running,
.get_fan_speed_percent = navi10_get_fan_speed_percent,
+ .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
.get_profiling_clk_mask = navi10_get_profiling_clk_mask,
@@ -1565,6 +1692,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
.get_ppfeature_status = navi10_get_ppfeature_status,
.set_ppfeature_status = navi10_set_ppfeature_status,
+ .set_performance_level = navi10_set_performance_level,
+ .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
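A recurring theme in the navi10_ppt.c changes above is that each sensor read used to trigger its own SMU_TABLE_SMU_METRICS transfer; navi10_get_metrics_table() now keeps one cached copy and refreshes it only when it is older than HZ / 1000 jiffies (about a millisecond at HZ=1000). A small sketch of that jiffies-based caching pattern, with fetch_hw() as a hypothetical transfer routine and a fixed-size buffer standing in for SmuMetrics_t:

#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/types.h>

struct metrics_cache {
	unsigned long stamp;	/* jiffies of the last refresh, 0 = never */
	u8 data[256];		/* hypothetical stand-in for SmuMetrics_t */
};

/* Refresh the cached copy at most about once per millisecond; 'len'
 * must not exceed sizeof(c->data). */
static int metrics_read(struct metrics_cache *c, void *out, size_t len,
			int (*fetch_hw)(void *buf, size_t len))
{
	if (!c->stamp || time_after(jiffies, c->stamp + HZ / 1000)) {
		int ret = fetch_hw(c->data, sizeof(c->data));

		if (ret)
			return ret;
		c->stamp = jiffies;
	}
	memcpy(out, c->data, len);
	return 0;
}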
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 957288e22f47..620ff17c2fef 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -23,6 +23,10 @@
#ifndef __NAVI10_PPT_H__
#define __NAVI10_PPT_H__
+#define NAVI10_PEAK_SCLK_XTX (1830)
+#define NAVI10_PEAK_SCLK_XT (1755)
+#define NAVI10_PEAK_SCLK_XL (1625)
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 95c7c4dae523..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
int ret, index;
- uint32_t size;
+ uint32_t size = 0;
+ uint16_t atom_table_size;
uint8_t frev, crev;
void *table;
uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
- ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
+ ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
(uint8_t **)&table);
if (ret)
return ret;
+ size = atom_table_size;
}
if (!smu->smu_table.power_play_table)
@@ -1124,10 +1126,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
- int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
if (!range)
@@ -1138,6 +1138,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
if (high > range->max)
high = range->max;
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
if (low > high)
return -EINVAL;
@@ -1146,8 +1149,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1189,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
if (!smu->pm_enabled)
return ret;
+
ret = smu_get_thermal_temperature_range(smu, &range);
+ if (ret)
+ return ret;
if (smu->smu_table.thermal_controller_type) {
ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1208,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
- adev->pm.dpm.thermal.min_temp = range.min;
- adev->pm.dpm.thermal.max_temp = range.max;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+ adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
return ret;
}
@@ -1371,23 +1379,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v11_0_get_current_rpm(struct smu_context *smu,
- uint32_t *current_rpm)
-{
- int ret;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
-
- if (ret) {
- pr_err("Attempt to get current RPM from SMC Failed!\n");
- return ret;
- }
-
- smu_read_smc_arg(smu, current_rpm);
-
- return 0;
-}
-
static uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
@@ -1402,7 +1393,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
{
int ret = 0;
- if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+ if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
return 0;
ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
@@ -1773,7 +1764,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_current_rpm = smu_v11_0_get_current_rpm,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bb9bb09cfc7a..6a14497257e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
sizeof(PPTable_t));
- table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
@@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
return ret;
}
+static int vega20_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+ if (ret) {
+ pr_err("Attempt to get current RPM from SMC Failed!\n");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, speed);
+
+ return 0;
+}
+
static int vega20_get_fan_speed_percent(struct smu_context *smu,
uint32_t *speed)
{
@@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
uint32_t current_rpm = 0, percent = 0;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = smu_get_current_rpm(smu, &current_rpm);
+ ret = vega20_get_fan_speed_rpm(smu, &current_rpm);
if (ret)
return ret;
@@ -3034,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
+ uint32_t smu_version;
int ret = 0;
SmuMetrics_t metrics;
@@ -3044,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (ret)
return ret;
- *value = metrics.CurrSocketPower << 8;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+	/* The 40.46 SMU firmware release changed which field reports the socket power */
+ if (smu_version == 0x282e00)
+ *value = metrics.AverageSocketPower << 8;
+ else
+ *value = metrics.CurrSocketPower << 8;
return 0;
}
@@ -3217,35 +3242,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
- {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
- { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
static int vega20_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
-
+ struct smu_table_context *table_context = &smu->smu_table;
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- if (!range)
+ if (!range || !powerplay_table)
return -EINVAL;
- memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
- range->max = pptable->TedgeLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_crit_max = pptable->ThotspotLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_crit_max = pptable->ThbmLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ /* The unit is temperature */
+ range->min = 0;
+ range->max = powerplay_table->usSoftwareShutdownTemp;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+ range->hotspot_crit_max = pptable->ThotspotLimit;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+ range->mem_crit_max = pptable->ThbmLimit;
+ range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
return 0;
@@ -3293,6 +3307,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.is_dpm_running = vega20_is_dpm_running,
.set_thermal_fan_table = vega20_set_thermal_fan_table,
.get_fan_speed_percent = vega20_get_fan_speed_percent,
+ .get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
.get_thermal_temperature_range = vega20_get_thermal_temperature_range
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..9d4d5075cc64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
@@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
pipe->of_output_port =
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
- pipe->of_node = np;
+ pipe->of_node = of_node_get(np);
return 0;
}
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return mdev->irq;
}
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+ ret = 0;
+
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
mdev->n_pipelines = 0;
+ of_reserved_mem_device_release(dev);
+
if (funcs && funcs->cleanup)
funcs->cleanup(mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
return NULL;
}
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
+{
+ u32 bpp;
+
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ default:
+ bpp = info->cpp[0] * 8;
+ break;
+ }
+
+ return bpp;
+}
+
/* Two assumptions
* 1. RGB always has YTR
* 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
+ u64 modifier);
+
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
u32 layer_type, u32 *n_fmts);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
- u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
+ u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
u64 min_size;
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
alignment_header);
+ bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
kfb->afbc_size = kfb->offset_payload + n_blocks *
- ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
+ ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (min_size > obj->size) {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..69d9e26c60c8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct komeda_plane_state *kplane_st;
struct drm_plane_state *plane_st;
- struct drm_framebuffer *fb;
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
plane_st = &kplane_st->base;
- fb = plane_st->fb;
plane = plane_st->plane;
plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ struct drm_crtc_state *new_crtc_st;
int i, err;
err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
* so need to add all affected_planes (even unchanged) to
* drm_atomic_state.
*/
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
err = drm_atomic_add_affected_planes(state, crtc);
if (err)
return err;
@@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
- goto cleanup_mode_config;
+ goto free_component_binding;
err = mdev->funcs->enable_irq(mdev);
if (err)
- goto cleanup_mode_config;
+ goto free_component_binding;
drm->irq_enabled = true;
+ drm_kms_helper_poll_init(drm);
+
err = drm_dev_register(drm, 0);
if (err)
- goto cleanup_mode_config;
+ goto free_interrupts;
return kms;
-cleanup_mode_config:
+free_interrupts:
+ drm_kms_helper_poll_fini(drm);
drm->irq_enabled = false;
+ mdev->funcs->disable_irq(mdev);
+free_component_binding:
+ component_unbind_all(mdev->dev, drm);
+cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
+ drm->dev_private = NULL;
+ drm_dev_put(drm);
free_kms:
kfree(kms);
return ERR_PTR(err);
@@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm->irq_enabled = false;
mdev->funcs->disable_irq(mdev);
- drm_dev_unregister(drm);
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..14b683164544 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct seq_file *sf);
/* component APIs */
+extern __printf(10, 11)
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..2851cac94d86 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
if (!kcrtc->master->wb_layer)
return 0;
- kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL);
+ kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
if (!kwb_conn)
return -ENOMEM;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
/* Enable extended register access */
- ast_enable_mmio(dev);
ast_open_key(ast);
+ ast_enable_mmio(dev);
/* Find out whether P2A works or whether to use device-tree */
ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
ast_open_key(ast);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index bc19dbd531ef..359030d5d818 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.fb_base = bochs->fb_base;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
+ bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ee777469293a..e4e22bbae2a7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
depends on OF
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
config DRM_TOSHIBA_TC358764
tristate "TC358764 DSI/LVDS bridge"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
help
Toshiba TC358764 DSI/LVDS bridge driver.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 410572f14257..e1dafb0cc5e2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
- void *vaddr;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
buffer->gem = obj;
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ * The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vaddr)
+ return buffer->vaddr;
+
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = drm_gem_vmap(obj);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_delete;
- }
+ vaddr = drm_gem_vmap(buffer->gem);
+ if (IS_ERR(vaddr))
+ return vaddr;
buffer->vaddr = vaddr;
- return buffer;
-
-err_delete:
- drm_client_buffer_delete(buffer);
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap);
- return ERR_PTR(ret);
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
+ buffer->vaddr = NULL;
}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
{
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 56d36779d213..c8922b7cac09 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
* simple XOR between the two handle the addition nicely.
*/
cmdline = &connector->cmdline_mode;
- if (cmdline->specified) {
+ if (cmdline->specified && cmdline->rotation_reflection) {
unsigned int cmdline_rest, panel_rest;
unsigned int cmdline_rot, panel_rot;
unsigned int sum_rot, sum_rest;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1984e5c54d58..a7ba5b4902d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
+ void *vaddr;
spin_lock_irqsave(&helper->dirty_lock, flags);
clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
/* call dirty callback only when it has been really touched */
if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+
/* Generic fbdev uses a shadow buffer */
- if (helper->buffer)
+ if (helper->buffer) {
+ vaddr = drm_client_buffer_vmap(helper->buffer);
+ if (IS_ERR(vaddr))
+ return;
drm_fb_helper_dirty_blit_real(helper, &clip_copy);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
+ if (helper->fb->funcs->dirty)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+ &clip_copy, 1);
+
+ if (helper->buffer)
+ drm_client_buffer_vunmap(helper->buffer);
}
}
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ return dev->mode_config.prefer_shadow_fbdev ||
+ dev->mode_config.prefer_shadow ||
+ fb->funcs->dirty;
+}
+
static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
{
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
struct drm_clip_rect *clip = &helper->dirty_clip;
unsigned long flags;
- if (!helper->fb->funcs->dirty)
+ if (!drm_fbdev_use_shadow_fb(helper))
return;
spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
+ void *vaddr;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
- fbi->screen_buffer = buffer->vaddr;
- /* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
+
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
- if (fb->funcs->dirty) {
+ if (drm_fbdev_use_shadow_fb(fb_helper)) {
struct fb_ops *fbops;
void *shadow;
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbdefio = &drm_fbdev_defio;
fb_deferred_io_init(fbi);
+ } else {
+ /* buffer is mapped for HW framebuffer */
+ vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ fbi->screen_buffer = vaddr;
+ /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+ fbi->fix.smem_start =
+ page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
}
return 0;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 0b72468e8131..57564318ceea 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
struct drm_device *dev = fb->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
- struct drm_connector *conn;
+ struct drm_connector *conn __maybe_unused;
struct drm_connector_state *conn_state;
int i, ret;
unsigned plane_mask;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index ad19df0686c9..bd2498bbd74a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -93,11 +93,6 @@ static struct bus_type mipi_dsi_bus_type = {
.pm = &mipi_dsi_device_pm_ops,
};
-static int of_device_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/**
* of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
* device tree node
@@ -110,7 +105,7 @@ struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
{
struct device *dev;
- dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
+ dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np);
return dev ? to_mipi_dsi_device(dev) : NULL;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 74a5739df506..c814bcef18a4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
}
static int drm_mode_parse_cmdline_extra(const char *str, int length,
+ bool freestanding,
const struct drm_connector *connector,
struct drm_cmdline_mode *mode)
{
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
for (i = 0; i < length; i++) {
switch (str[i]) {
case 'i':
+ if (freestanding)
+ return -EINVAL;
+
mode->interlace = true;
break;
case 'm':
+ if (freestanding)
+ return -EINVAL;
+
mode->margins = true;
break;
case 'D':
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
if (extras) {
int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1,
+ false,
connector,
mode);
if (ret)
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
return 0;
}
+static const char * const drm_named_modes_whitelist[] = {
+ "NTSC",
+ "PAL",
+};
+
+static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
+ if (!strncmp(mode, drm_named_modes_whitelist[i], size))
+ return true;
+
+ return false;
+}
+
/**
* drm_mode_parse_command_line_for_connector - parse command line modeline for connector
* @mode_option: optional per connector mode option
@@ -1686,7 +1710,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
*
* Additionals options can be provided following the mode, using a comma to
* separate each option. Valid options can be found in
- * Documentation/fb/modedb.txt.
+ * Documentation/fb/modedb.rst.
*
* The intermediate drm_cmdline_mode structure is required to store additional
* options from the command line modline like the force-enable/disable flag.
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
* bunch of things:
* - We need to make sure that the first character (which
* would be our resolution in X) is a digit.
- * - However, if the X resolution is missing, then we end up
- * with something like x<yres>, with our first character
- * being an alpha-numerical character, which would be
- * considered a named mode.
+ * - If not, then it's either a named mode or a force on/off.
+ * To distinguish between the two, we need to run the
+ * extra parsing function, and if that fails, we consider it
+ * a named mode.
*
* If this isn't enough, we should add more heuristics here,
* and matching unit-tests.
*/
- if (!isdigit(name[0]) && name[0] != 'x')
+ if (!isdigit(name[0]) && name[0] != 'x') {
+ unsigned int namelen = strlen(name);
+
+ /*
+ * Only the force on/off options can be in that case,
+ * and they all take a single character.
+ */
+ if (namelen == 1) {
+ ret = drm_mode_parse_cmdline_extra(name, namelen, true,
+ connector, mode);
+ if (!ret)
+ return true;
+ }
+
named_mode = true;
+ }
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
@@ -1770,7 +1808,13 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
if (named_mode) {
- strncpy(mode->name, name, mode_end);
+ if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
+ return false;
+
+ if (!drm_named_mode_is_in_whitelist(name, mode_end))
+ return false;
+
+ strscpy(mode->name, name, mode_end + 1);
} else {
ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
parse_extras,
@@ -1809,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
extra_ptr != options_ptr) {
int len = strlen(name) - (extra_ptr - name);
- ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+ ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
connector, mode);
if (ret)
return false;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 60ce4a8ad9e1..6f7d3b3b3628 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 58baf49d9926..badab94be2d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -242,9 +242,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
continue;
- while ((d = bus_find_device(&platform_bus_type, p,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, &info->driver->driver))) {
put_device(p);
if (!(info->flags & DRM_FIMC_DEVICE) ||
@@ -412,9 +410,8 @@ static void exynos_drm_unregister_devices(void)
if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
continue;
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
+ while ((dev = platform_find_device_by_driver(NULL,
+ &info->driver->driver))) {
put_device(dev);
platform_device_unregister(to_platform_device(dev));
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a594ab7be2c0..164d914cbe9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
module_param_named(fimc_devs, fimc_mask, uint, 0644);
MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
-#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_fimc_context(dev) dev_get_drvdata(dev)
enum {
FIMC_CLK_LCLK,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 50904eee96f7..2a3382d43bc9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
- struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct g2d_cmdlist_node *node;
int nr;
int ret;
struct g2d_buf_info *buf_info;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1e4b21c49a06..1c524db9570f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -58,7 +58,7 @@
#define GSC_COEF_DEPTH 3
#define GSC_AUTOSUSPEND_DELAY 2000
-#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_gsc_context(dev) dev_get_drvdata(dev)
#define gsc_read(offset) readl(ctx->regs + (offset))
#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 9af096479e1c..b24ba948b725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
do {
cpu_relax();
- } while (retry > 1 &&
+ } while (--retry > 1 &&
scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
do {
cpu_relax();
scaler_write(1, SCALER_INT_EN);
- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
return retry ? 0 : -EIO;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91355c2ea8a5..8cace65f50ce 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra
subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
-subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c4710889cb32..3ef4e9f573cf 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 226) {
- u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+ u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
switch (wakeup_time) {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 753ac3165061..7b908e10d32e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+ bi->num_qgv_points = qi.num_points;
+
for (j = 0; j < qi.num_points; j++) {
const struct intel_qgv_point *sp = &qi.points[j];
int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+ DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
{
int i;
- /* Did we initialize the bw limits successfully? */
- if (dev_priv->max_bw[0].num_planes == 0)
- return UINT_MAX;
-
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
if (num_planes >= bi->num_planes)
return bi->deratedbw[qgv_point];
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8993ab283562..0d19bbd08122 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(2 * 96000, min_cdclk);
/*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+ min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
+ /*
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
- dotclock = pipe_config->port_clock * 2 / 3;
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
else
dotclock = pipe_config->port_clock;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8592a7d422de..592b92782fab 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
- trace_intel_pipe_enable(dev_priv, pipe);
+ trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- trace_intel_pipe_disable(dev_priv, pipe);
+ trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
- /* else: fall through */
+ /* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c93ad512014c..2d1939db108f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ int pw_idx = power_well->desc->hsw.idx;
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum aux_ch aux_ch;
u32 val;
+ aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
+ if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 4336df46fe78..d0fc34826771 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
switch (lane_info) {
default:
MISSING_CASE(lane_info);
+ /* fall through */
case 1:
case 2:
case 4:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..8aa6a31e8ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_bpp = intel_dp_min_bpp(pipe_config);
- limits.max_bpp = pipe_config->pipe_bpp;
+ /*
+ * FIXME: If all the streams can't fit into the link with
+ * their current pipe_bpp we should reduce pipe_bpp across
+ * the board until things start to fit. Until then we
+ * limit to <= 8bpc since that's what was hardcoded for all
+ * MST streams previously. This hack should be removed once
+ * we have the proper retry logic in place.
+ */
+ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
@@ -539,7 +547,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index bc3a94d491c4..27bd7276a82d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -536,7 +536,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
- return -EPERM;
+ ret = -EPERM;
+ goto err;
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 2f4894e9a03d..5ddbe71ab423 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -478,13 +478,13 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
-
- /* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..f413904a3e96 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
DRM_INFO("PPS2 = 0x%08x\n", pps_val);
- if (encoder->type == INTEL_OUTPUT_EDP) {
+ if (cpu_transcoder == TRANSCODER_EDP) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 99cc3e2e9c2c..f016a776a39e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fae0e50aad0..41dab9ea33cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1628,6 +1628,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
+ struct drm_i915_gem_relocation_entry *relocs;
const unsigned int count = eb->buffer_count;
unsigned int i;
int err;
@@ -1635,7 +1636,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
for (i = 0; i < count; i++) {
const unsigned int nreloc = eb->exec[i].relocation_count;
struct drm_i915_gem_relocation_entry __user *urelocs;
- struct drm_i915_gem_relocation_entry *relocs;
unsigned long size;
unsigned long copied;
@@ -1663,14 +1663,8 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
if (__copy_from_user((char *)relocs + copied,
(char __user *)urelocs + copied,
- len)) {
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
- goto err;
- }
+ len))
+ goto end;
copied += len;
} while (copied < size);
@@ -1699,10 +1693,14 @@ end:
return 0;
+end_user:
+ user_access_end();
+end:
+ kvfree(relocs);
+ err = -EFAULT;
err:
while (i--) {
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
+ relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
if (eb->exec[i].relocation_count)
kvfree(relocs);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 391621ee3cbb..39a661927d8e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -341,7 +341,7 @@ err:
*/
if (!i915_terminally_wedged(i915))
return VM_FAULT_SIGBUS;
- /* else: fall through */
+ /* else, fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..65eb430cedba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough to use PAGE_KERNEL anyway */
+ /* fallthrough - to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 05011d4a3b88..914b5d4112bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(i915);
-
- if (i915_gem_init_hw(i915))
+ if (intel_gt_resume(i915))
goto err_wedged;
intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2c454f227c2e..23120901c55f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
if (ce->state)
__context_unpin_state(ce->state);
+ intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
intel_context_get(ce);
+ err = intel_ring_pin(ce->ring);
+ if (err)
+ goto err_put;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state, flags);
- if (err) {
- i915_active_cancel(&ce->active);
- intel_context_put(ce);
- return err;
- }
+ if (err)
+ goto err_ring;
/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
+ if (err)
+ goto err_state;
}
return 0;
+
+err_state:
+ __context_unpin_state(ce->state);
+err_ring:
+ intel_ring_unpin(ce->ring);
+err_put:
+ intel_context_put(ce);
+ i915_active_cancel(&ce->active);
+ return err;
}
void intel_context_active_release(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7fd33e81c2d9..f25632c9b292 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ unsigned int slice = fls(sseu->slice_mask) - 1;
+ unsigned int subslice;
u32 mcr_s_ss_select;
- u32 slice = fls(sseu->slice_mask);
- u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = fls(sseu->subslice_mask[slice]);
+ GEM_BUG_ON(!subslice);
+ subslice--;
if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
+ unsigned long flags;
if (header) {
va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_engine_count(error, engine),
i915_reset_count(error));
- rcu_read_lock();
-
drm_printf(m, "\tRequests:\n");
+ spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request_ring(m, rq);
}
-
- rcu_read_unlock();
+ spin_unlock_irqrestore(&engine->active.lock, flags);
wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
- unsigned long flags;
/*
* We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->active.lock, flags);
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
active = request;
break;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a..ae5b6baf6dff 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref);
}
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
- intel_engine_pm_get(engine);
- engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
- intel_engine_pm_put(engine);
- if (err) {
- dev_err(i915->drm.dev,
- "Failed to restart %s (%d)\n",
- engine->name, err);
- break;
- }
- }
- intel_gt_pm_put(i915);
-
- return err;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd993d60..a11c893f64c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,16 +7,22 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
struct drm_i915_private;
-struct intel_engine_cs;
void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+ return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine);
-int intel_engines_resume(struct drm_i915_private *i915);
-
#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 868b220214f8..43e975a26016 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -70,6 +70,18 @@ struct intel_ring {
struct list_head request_list;
struct list_head active_link;
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
u32 head;
u32 tail;
u32 emit;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7b5967751762..9f8f7f54191f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
intel_engine_reset(engine, false);
}
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err = 0;
/*
* After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
+ intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
struct intel_context *ce;
+ intel_engine_pm_get(engine);
+
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
+
+ engine->serial++; /* kernel context lost */
+ err = engine->resume(engine);
+
+ intel_engine_pm_put(engine);
+ if (err) {
+ dev_err(i915->drm.dev,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ break;
+ }
}
+ intel_gt_pm_put(i915);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 7dd1130a19a4..53f342b20181 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b42b5f158295..82b7ace62d97 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+ GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
{
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_unpin(ce->ring);
}
static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = intel_ring_pin(ce->ring);
- if (ret)
- goto unpin_map;
-
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
- goto unpin_ring;
+ goto unpin_map;
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
return 0;
-unpin_ring:
- intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4c478b38e420..3f907701ef4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
}
}
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ if (intel_engine_pm_get_if_awake(engine))
+ awake |= engine->mask;
reset_prepare_engine(engine);
+ }
intel_uc_reset_prepare(i915);
+
+ return awake;
}
static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+ intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
+ if (awake & engine->mask)
+ intel_engine_pm_put(engine);
}
- intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- reset_prepare(i915);
+ awake = reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- reset_finish(i915);
+ reset_finish(i915, awake);
GEM_TRACE("end\n");
}
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
return gt_reset(i915, stalled_mask);
}
+static int resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ for_each_engine(engine, i915, id) {
+ ret = engine->resume(engine);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
+ intel_engine_mask_t awake;
int ret;
GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
- reset_prepare(i915);
+ awake = reset_prepare(i915);
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
- goto error;
+ goto taint;
}
+ ret = resume(i915);
+ if (ret)
+ goto taint;
+
i915_queue_hangcheck(i915);
finish:
- reset_finish(i915);
+ reset_finish(i915, awake);
unlock:
mutex_unlock(&error->wedge_mutex);
return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_get_if_awake(engine))
return 0;
reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->resume(engine);
- if (ret)
- goto out;
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
+ intel_engine_pm_put(engine);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index c6023bc9452d..12010e798868 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
- GEM_BUG_ON(ring->vaddr);
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
ret = i915_timeline_pin(ring->timeline);
if (ret)
- return ret;
+ goto err_unpin;
flags = PIN_GLOBAL;
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto unpin_timeline;
+ goto err_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
- addr = i915_gem_object_pin_map(vma->obj, map);
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
- goto unpin_ring;
+ goto err_ring;
}
vma->obj->pin_global++;
+ GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
+
return 0;
-unpin_ring:
+err_ring:
i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
i915_timeline_unpin(ring->timeline);
+err_unpin:
+ atomic_dec(&ring->pin_count);
return ret;
}
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
- GEM_BUG_ON(!ring->vma);
- GEM_BUG_ON(!ring->vaddr);
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
+ GEM_BUG_ON(!ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ intel_engine_cleanup_common(engine);
+
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
- intel_engine_cleanup_common(engine);
kfree(engine);
}
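
The intel_ring_pin()/intel_ring_unpin() hunks above make ring pinning reference counted: only the 0 -> 1 transition does the real mapping work and only the final unpin undoes it, which is what lets multiple holders pin the ring without tripping the old GEM_BUG_ON(ring->vaddr). A minimal sketch of that pattern using the same atomic_t helpers the patch relies on; the resource type and do_expensive_pin()/do_expensive_unpin() are hypothetical stand-ins for the vma pin and map steps:

	static int resource_pin(struct my_resource *res)
	{
		int err;

		if (atomic_fetch_inc(&res->pin_count))
			return 0;			/* already pinned by another user */

		err = do_expensive_pin(res);		/* hypothetical 0 -> 1 work */
		if (err)
			atomic_dec(&res->pin_count);	/* undo the count on failure */
		return err;
	}

	static void resource_unpin(struct my_resource *res)
	{
		if (!atomic_dec_and_test(&res->pin_count))
			return;				/* other users still hold a pin */

		do_expensive_unpin(res);		/* hypothetical 1 -> 0 teardown */
	}
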
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 15e90fd2cfdc..99e8242194c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(i915))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -1098,10 +1093,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
if (engine->class != RENDER_CLASS)
return;
- gen9_whitelist_build(&engine->whitelist);
+ gen9_whitelist_build(w);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1139,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* WaEnableStateCacheRedirectToCS:icl */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
@@ -1258,8 +1281,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
GEN7_SARCHKMD,
- GEN7_DISABLE_DEMAND_PREFETCH |
- GEN7_DISABLE_SAMPLER_PREFETCH);
+ GEN7_DISABLE_DEMAND_PREFETCH);
+
+ /* Wa_1606682166:icl */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 086801b51441..486c6953dcb1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
ring->base.timeline = &ring->timeline;
+ atomic_set(&ring->base.pin_count, 1);
INIT_LIST_HEAD(&ring->base.request_list);
intel_ring_update_space(&ring->base);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..b5c590c9ccba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
+ intel_engine_mask_t awake;
+
GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ awake = reset_prepare(i915);
p->critical_section_begin();
reset_prepare(i915);
err = intel_gpu_reset(i915, ALL_ENGINES);
- reset_finish(i915);
p->critical_section_end();
+ reset_finish(i915, awake);
if (err) {
pr_err("intel_gpu_reset failed under %s\n", p->name);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9eaf030affd0..44becd9538be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
err = 0;
for (i = 0; i < engine->whitelist.count; i++) {
- if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+ const struct i915_wa *wa = &engine->whitelist.list[i];
+
+ if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ continue;
+
+ if (!fn(engine, a[i], b[i], wa->reg))
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818..b09dc315e2da 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.workload = workload;
s.is_ctx_wa = true;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..8bb292b01271 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..4b04af569c05 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
+ gma = index << I915_GTT_PAGE_SHIFT;
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ gma, 1 << I915_GTT_PAGE_SHIFT)) {
+ gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+ memset(p_data, 0, bytes);
+ return 0;
+ }
+
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
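
emulate_ggtt_mmio_read() now converts the PTE index back into a graphics memory address and returns zeroed data for anything the vGPU does not own, instead of indexing the table with an unchecked guest offset. The validator boils down to an overflow-safe range check; a hedged, generic sketch (the lo/hi bounds stand in for the vGPU's valid GGTT ranges):

	/* Illustrative: is [addr, addr + size) fully inside [lo, hi)? */
	static bool range_is_valid(u64 addr, u64 size, u64 lo, u64 hi)
	{
		if (size == 0 || addr < lo || addr >= hi)
			return false;

		return size <= hi - addr;	/* avoids overflow in addr + size */
	}
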
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..23aa3e50cbf8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
+ } else if (entry->size != size) {
+ /* the same gfn with different size: unmap and re-map */
+ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
+
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 2144fb46d0e1..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
- if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -EINVAL;
-
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- return 0;
}
static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+
update_shadow_pdps(workload);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload,
- s->shadow[ring_id]->gem_context);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- goto err_req;
- }
-
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
+ intel_runtime_pm_get(rpm);
+
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1042,6 +1042,7 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ intel_runtime_pm_put_unchecked(rpm);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+ if (!intel_gvt_ggtt_validate_range(vgpu, start,
+ _RING_CTL_BUF_SIZE(ctl))) {
+ gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+ return ERR_PTR(-EINVAL);
+ }
+
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
+
+ if (workload->wa_ctx.indirect_ctx.size != 0) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.indirect_ctx.guest_gma,
+ workload->wa_ctx.indirect_ctx.size)) {
+ gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+ workload->wa_ctx.indirect_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ if (workload->wa_ctx.per_ctx.valid) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.per_ctx.guest_gma,
+ CACHELINE_BYTES)) {
+ gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+ workload->wa_ctx.per_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(-EINVAL);
+ }
+ }
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
*
*/
-#include "trace.h"
-
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..bac1ee94f63f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
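
The i915_driver_init_hw() hunk advertises an effectively unlimited DMA segment size so the scatterlist debug checks (CONFIG_DMA_API_DEBUG_SG) stop warning about segments larger than the 64 KiB default. In a generic probe path the call looks like the sketch below; it only succeeds when dev->dma_parms is populated (PCI devices get this from the core), which is why the mediatek hunk further down in this diff allocates dma_parms before making the same call:

	static int example_init_dma(struct device *dev)
	{
		/* No real segment-size limit: silence sg segment warnings. */
		return dma_set_max_seg_size(dev, UINT_MAX);
	}
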
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc909ec5d9c3..fe7a6ec2c199 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- int num_planes;
- int deratedbw[3];
+ unsigned int deratedbw[3]; /* for each QGV point */
+ u8 num_qgv_points;
+ u8 num_planes;
} max_bw[6];
struct drm_private_obj bw_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54fb072..8a659d3d7435 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_engines_resume(dev_priv);
- if (ret)
- goto cleanup_uc;
-
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_engines_set_scheduler_caps(dev_priv);
return 0;
-cleanup_uc:
- intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
return ret;
}
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_uc_init;
+ /* Only when the HW is re-initialised, can we replay the requests */
+ ret = intel_gt_resume(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = intel_engines_verify_workarounds(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
if (i915_inject_load_failure()) {
ret = -ENODEV;
- goto err_init_hw;
+ goto err_gt;
}
if (i915_inject_load_failure()) {
ret = -EIO;
- goto err_init_hw;
+ goto err_gt;
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_init_hw:
+err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ err_init_hw:
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8ab820145ea6..7015a97b1097 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1444,9 +1444,11 @@ unwind_pd:
spin_lock(&pdp->lock);
if (atomic_dec_and_test(&pd->used)) {
gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ pdp->entry[pdpe] = vm->scratch_pd;
GEM_BUG_ON(!atomic_read(&pdp->used));
atomic_dec(&pdp->used);
- free_pd(vm, pd);
+ GEM_BUG_ON(alloc);
+ alloc = pd; /* defer the free to after the lock */
}
spin_unlock(&pdp->lock);
unwind:
@@ -1515,7 +1517,9 @@ unwind_pdp:
spin_lock(&pml4->lock);
if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pd(vm, pdp);
+ pml4->entry[pml4e] = vm->scratch_pdp;
+ GEM_BUG_ON(alloc);
+ alloc = pdp; /* defer the free until after the lock */
}
spin_unlock(&pml4->lock);
unwind:
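
Both unwind paths above stop calling free_pd() while pdp->lock / pml4->lock is held: the directory is unhooked under the lock, parked in the already-available 'alloc' variable, and freed only after the lock has been dropped. The general shape of that defer-the-free pattern, with hypothetical structure names:

	struct entry *victim = NULL;

	spin_lock(&parent->lock);
	if (atomic_dec_and_test(&child->used)) {
		parent->slot[idx] = parent->scratch;	/* unhook while locked */
		victim = child;				/* defer the actual free */
	}
	spin_unlock(&parent->lock);

	kfree(victim);					/* kfree(NULL) is a no-op */
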
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b7e9fddef270..8bc76fcff70d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
switch (engine->id) {
default:
MISSING_CASE(engine->id);
+ /* fall through */
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
@@ -1417,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct i915_request *request;
+ unsigned long flags;
ee->engine_id = -1;
@@ -1428,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
+ spin_lock_irqsave(&engine->active.lock, flags);
request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring;
+ struct intel_ring *ring = request->ring;
ee->vm = ctx->vm ?: &ggtt->vm;
@@ -1461,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->rq_post = request->postfix;
ee->rq_tail = request->tail;
- ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
@@ -1469,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
engine_record_requests(engine, request, ee);
}
+ spin_unlock_irqrestore(&engine->active.lock, flags);
ee->hws_page =
i915_error_object_create(i915,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a700c5c3d167..5140017f9a39 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static void delay_after_mux(void)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
-
- /* PRM:
- *
- * OA unit is using “crclk” for its functionality. When trunk
- * level clock gating takes place, OA clock would be gated,
- * unable to count the events from non-render clock domain.
- * Render clock gating must be disabled when OA is enabled to
- * count the events from non-render domain. Unit level clock
- * gating for RCS should also be disabled.
- */
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
-
- /* It apparently takes a fairly long time for a new MUX
+ /*
+ * It apparently takes a fairly long time for a new MUX
* configuration to be applied after these register writes.
* This delay duration was derived empirically based on the
* render_basic config but hopefully it covers the maximum
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* a delay at this location would mitigate any invalid reports.
*/
usleep_range(15000, 20000);
+}
+
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
+ /*
+ * PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return ret;
config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
i915_perf_destroy_locked(stream);
mutex_unlock(&dev_priv->perf.lock);
+ /* Release the reference the perf stream kept on the driver. */
+ drm_dev_put(&dev_priv->drm);
+
return 0;
}
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
if (!(param->flags & I915_PERF_FLAG_DISABLED))
i915_perf_enable_locked(stream);
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&dev_priv->drm);
+
return stream_fd;
err_open:
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
+ if (IS_GEN(dev_priv, 10)) {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ } else {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ }
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
}
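
The i915_perf hunks pin the driver for as long as a perf stream file descriptor exists: drm_dev_get() when the stream is opened, balanced by drm_dev_put() in the release hook, so the drm_device cannot be torn down underneath an open stream. The same lifetime rule in a hedged, generic form (my_stream and its hooks are illustrative):

	static int my_stream_open(struct drm_device *drm, struct my_stream *s)
	{
		s->drm = drm;
		drm_dev_get(drm);	/* reference held by the open fd */
		return 0;
	}

	static void my_stream_release(struct my_stream *s)
	{
		drm_dev_put(s->drm);	/* dropped when the fd is closed */
	}
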
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f4ce643b3bc3..cce426b23a24 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,24 +21,22 @@
/* watermark/fifo updates */
TRACE_EVENT(intel_pipe_enable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(enum pipe, pipe)
),
-
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
);
TRACE_EVENT(intel_pipe_disable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
),
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
),
TP_fast_assign(
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
TP_fast_assign(
enum pipe pipe = pch_transcoder;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
),
TP_fast_assign(
- enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- __entry->frame[pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline[pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ struct intel_crtc *crtc;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
}
__entry->old = old;
__entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
__entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->level = wm->level;
__entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->sprite0_start = sprite0_start;
__entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = crtc->debug.min_vbl;
__entry->max = crtc->debug.max_vbl;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..724627afdedc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ if (!drm_mm_node_allocated(node))
+ return;
+
DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
/*
* Frequency the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 502c54428570..8d1aebc3e857 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
- struct drm_printer p;
+ if (debug->count) {
+ struct drm_printer p = drm_debug_printer("i915");
- if (!debug->count)
- return;
-
- p = drm_debug_printer("i915");
- __print_intel_runtime_pm_wakeref(&p, debug);
+ __print_intel_runtime_pm_wakeref(&p, debug);
+ }
kfree(debug->owners);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 9cbb2ebf575b..38275310b196 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
}
/**
+ * intel_wakeref_get_if_active: Acquire the wakeref
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+ return atomic_inc_not_zero(&wf->count);
+}
+
+/**
* intel_wakeref_put: Release the wakeref
* @i915: the drm_i915_private device
* @wf: the wakeref
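
intel_wakeref_get_if_active() is the conditional counterpart of intel_wakeref_get(): atomic_inc_not_zero() only bumps the count when it is already non-zero, so the helper never performs the 0 -> 1 wake-up itself. The i915_reset_engine() hunk earlier in this diff uses the engine-level wrapper in exactly that guard-and-put shape, roughly:

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;			/* engine idle, nothing to reset */

	/* ... engine reset runs with the engine guaranteed awake ... */

	intel_engine_pm_put(engine);		/* balance the conditional get */
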
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index e9f9e9fb9b17..6381652a8829 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- if (panel) {
+ if (panel)
bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_Unknown);
- }
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 477c0f766663..b609dc030d6c 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
timeout = drm_timeout_abs_to_jiffies(timeout_ns);
ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
- if (ret == 0)
+ if (ret == -ETIME)
ret = timeout ? -ETIMEDOUT : -EBUSY;
return ret;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index baf63fb6850a..c07abf9e201c 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -477,8 +477,7 @@ static int mcde_probe(struct platform_device *pdev)
struct device_driver *drv = &mcde_component_drivers[i]->driver;
struct device *p = NULL, *d;
- while ((d = bus_find_device(&platform_bus_type, p, drv,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, drv))) {
put_device(p);
component_match_add(dev, &match, mcde_compare_dev, d);
p = d;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
struct mtk_drm_private *private = drm->dev_private;
struct platform_device *pdev;
struct device_node *np;
+ struct device *dma_dev;
int ret;
if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
goto err_component_unbind;
}
- private->dma_dev = &pdev->dev;
+ dma_dev = &pdev->dev;
+ private->dma_dev = dma_dev;
+
+ /*
+ * Configure the DMA segment size to make sure we get contiguous IOVA
+ * when importing PRIME buffers.
+ */
+ if (!dma_dev->dma_parms) {
+ private->dma_parms_allocated = true;
+ dma_dev->dma_parms =
+ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+ GFP_KERNEL);
+ }
+ if (!dma_dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_component_unbind;
+ }
+
+ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dma_dev, "Failed to set DMA segment size\n");
+ goto err_unset_dma_parms;
+ }
/*
* We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret < 0)
- goto err_component_unbind;
+ goto err_unset_dma_parms;
drm_kms_helper_poll_init(drm);
drm_mode_config_reset(drm);
return 0;
+err_unset_dma_parms:
+ if (private->dma_parms_allocated)
+ dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
+ struct mtk_drm_private *private = drm->dev_private;
+
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
+ if (private->dma_parms_allocated)
+ private->dma_dev->dma_parms = NULL;
+
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
}
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
.compat_ioctl = drm_compat_ioctl,
};
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct mtk_drm_private *private = dev->dev_private;
+
+ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_node;
}
ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_node;
+ }
private->ddp_comp[comp_id] = comp;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
} commit;
struct drm_atomic_state *suspend_state;
+
+ bool dma_parms_allocated;
};
extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db47aa57..e9c55d1d6c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf01e51e..dc8ec2c94301 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0f3232..048c8be426f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0e2f74163a16..0aa8a12c9952 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
if (ret)
goto fail;
- spin_lock_init(&dpu_enc->enc_spinlock);
-
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
@@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
return &dpu_enc->base;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555372d0..78d5fa230c16 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp5_crtc->enabled = false;
}
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
}
/* Restore vblank irq handling after power is enabled */
- drm_crtc_vblank_on(crtc);
+ mdp5_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
mdp5_crtc_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+ drm_crtc_vblank_reset(crtc);
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5fca6b0..fec6ef1ae3b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
return kms;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ab64ab470de7..c356f5ccf253 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
msm_submitqueue_init(dev, ctx);
- ctx->aspace = priv->gpu->aspace;
+ ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
file->driver_priv = ctx;
return 0;
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- drm_of_component_match_add(dev, matchptr, compare_of, np);
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8b78554cfde3..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}
return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768f1b41..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
- /* When restoring duplicated states, we need to make sure that the
- * bw remains the same and avoid recalculating it, as the connector's
- * bpc may have changed after the state was duplicated
- */
- if (!state->duplicated)
- asyh->dp.pbn =
- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
- connector->display_info.bpc * 3);
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ /*
+ * When restoring duplicated states, we need to make sure that
+ * the bw remains the same and avoid recalculating it, as the
+ * connector's bpc may have changed after the state was
+ * duplicated
+ */
+ if (!state->duplicated) {
+ const int bpp = connector->display_info.bpc * 3;
+ const int clock = crtc_state->adjusted_mode.clock;
+
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+ }
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+ long ret;
+
+ range->default_flags = 0;
+ range->pfn_flags_mask = -1UL;
+
+ ret = hmm_range_register(range, mirror,
+ range->start, range->end,
+ PAGE_SHIFT);
+ if (ret) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return (int)ret;
+ }
+
+ if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return -EAGAIN;
+ }
+
+ ret = hmm_range_fault(range, true);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -EBUSY;
+ up_read(&range->vma->vm_mm->mmap_sem);
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unlock(&svmm->mutex);
goto again;
}
@@ -666,8 +707,8 @@ again:
NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
+ up_read(&svmm->mm->mmap_sem);
}
- up_read(&svmm->mm->mmap_sem);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
u8 *ptr = msg->buf;
while (remaining) {
- u8 cnt = (remaining > 16) ? 16 : remaining;
- u8 cmd;
+ u8 cnt, retries, cmd;
if (msg->flags & I2C_M_RD)
cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
- if (ret < 0) {
- nvkm_i2c_aux_release(aux);
- return ret;
+ for (retries = 0, cnt = 0;
+ retries < 32 && !cnt;
+ retries++) {
+ cnt = min_t(u8, remaining, 16);
+ ret = aux->func->xfer(aux, true, cmd,
+ msg->addr, ptr, &cnt);
+ if (ret < 0)
+ goto out;
+ }
+ if (!cnt) {
+ AUX_TRACE(aux, "no data after 32 retries");
+ ret = -EIO;
+ goto out;
}
ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
msg++;
}
+ ret = num;
+out:
nvkm_i2c_aux_release(aux);
- return num;
+ return ret;
}
static u32
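
The rewritten nvkm_i2c_aux_i2c_xfer() loop retries each chunk until the hardware reports that at least one byte moved, capped at 32 attempts, and turns "no progress" into -EIO instead of spinning forever. The bounded-retry shape in isolation, with xfer_once() as a hypothetical stand-in for aux->func->xfer():

	static int xfer_with_retries(u8 *buf, u8 remaining)
	{
		u8 retries, cnt;
		int ret;

		for (retries = 0, cnt = 0; retries < 32 && !cnt; retries++) {
			cnt = min_t(u8, remaining, 16);	/* ask for up to 16 bytes */
			ret = xfer_once(buf, &cnt);	/* hypothetical; may leave cnt == 0 */
			if (ret < 0)
				return ret;
		}

		return cnt ? cnt : -EIO;		/* gave up after 32 dry runs */
	}
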
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 84a2f243ed9b..4695f1c8e33f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..14b41de44ebc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
* Author: Archit Taneja <archit@ti.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
{
struct device_node *remote_node;
- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ remote_node = of_graph_get_remote_node(out->dev->of_node,
+ ffs(out->of_ports) - 1, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..6e8145c36e93 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -222,7 +222,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
size_t unmapped_page;
size_t pgsize = get_pgsize(iova, len - unmapped_len);
- unmapped_page = ops->unmap(ops, iova, pgsize);
+ unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
if (!unmapped_page)
break;
@@ -247,20 +247,28 @@ static void mmu_tlb_inv_context_s1(void *cookie)
mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
-{}
-
static void mmu_tlb_sync_context(void *cookie)
{
//struct panfrost_device *pfdev = cookie;
// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
+{
+ mmu_tlb_sync_context(cookie);
+}
+
+static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
+{
+ mmu_tlb_sync_context(cookie);
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
- .tlb_add_flush = mmu_tlb_inv_range_nosync,
- .tlb_sync = mmu_tlb_sync_context,
+ .tlb_flush_walk = mmu_tlb_flush_walk,
+ .tlb_flush_leaf = mmu_tlb_flush_leaf,
};
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..952201c6d821 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
static struct drm_driver qxl_driver;
static struct pci_driver qxl_pci_driver;
+static bool is_vga(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto disable_pci;
+ if (is_vga(pdev)) {
+ ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+ if (ret) {
+ DRM_ERROR("can't get legacy vga ioports\n");
+ goto disable_pci;
+ }
+ }
+
ret = qxl_device_init(qdev, &qxl_driver, pdev);
if (ret)
- goto disable_pci;
+ goto put_vga;
ret = qxl_modeset_init(qdev);
if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
+put_vga:
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
+ if (is_vga(pdev))
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
dev->dev_private = NULL;
kfree(qdev);
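A note on the qxl change: vga_get_interruptible() claims the legacy VGA I/O ranges from the VGA arbiter and must be balanced by vga_put() on every exit path, which is why the probe error unwind grows a put_vga label and remove() gains a matching put. A minimal sketch of the pattern, with demo_hw_init() as a made-up placeholder:

	#include <linux/pci.h>
	#include <linux/vgaarb.h>

	static int demo_probe(struct pci_dev *pdev)
	{
		int ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);

		if (ret)
			return ret;		/* interrupted, or arbiter refused */

		ret = demo_hw_init(pdev);	/* hypothetical device setup */
		if (ret)
			vga_put(pdev, VGA_RSRC_LEGACY_IO);	/* undo on failure */
		return ret;
	}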
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
/* Locate the companion LVDS encoder for dual-link operation, if any. */
companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
- if (!companion) {
- dev_err(dev, "Companion LVDS encoder not found\n");
- return -ENXIO;
- }
+ if (!companion)
+ return 0;
/*
* Sanity check: the companion encoder must have the same compatible
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 95e5c517a15f..9aae3d8e99ef 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend = rockchip_dp_suspend,
+ .suspend_late = rockchip_dp_suspend,
.resume_early = rockchip_dp_resume,
#endif
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 53d2c5bd61dc..38dc26376961 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -330,8 +330,7 @@ static struct component_match *rockchip_drm_match_add(struct device *dev)
struct device *p = NULL, *d;
do {
- d = bus_find_device(&platform_bus_type, p, &drv->driver,
- (void *)platform_bus_type.match);
+ d = platform_find_device_by_driver(p, &drv->driver);
put_device(p);
p = d;
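The rockchip (and later vc4) hunks replace the open-coded bus_find_device() on the platform bus with platform_find_device_by_driver(), which returns the next platform device matching the given driver with a reference held. A hedged sketch of the iteration and its reference counting (demo_walk_matches() is illustrative only):

	#include <linux/device.h>
	#include <linux/platform_device.h>

	/* Walk every platform device matching @drv; each call returns a
	 * referenced device, so the previous one must be put back. */
	static void demo_walk_matches(struct device_driver *drv)
	{
		struct device *p = NULL, *d;

		while ((d = platform_find_device_by_driver(p, drv))) {
			put_device(p);
			/* ... use d ... */
			p = d;
		}
		put_device(p);
	}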
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
- spsc_queue_peek(&entity->job_queue) == NULL)
+ spsc_queue_count(&entity->job_queue) == 0)
return true;
return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
- if (spsc_queue_peek(&entity->job_queue)) {
+ if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
/* Park the kernel for a moment to make sure it isn't processing
			 * our entity.
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index b45824ec7c8f..6d61a0eb5d64 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -9,6 +9,13 @@
#define cmdline_test(test) selftest(test, test)
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
cmdline_test(drm_cmdline_test_res)
cmdline_test(drm_cmdline_test_res_missing_x)
cmdline_test(drm_cmdline_test_res_missing_y)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 14c96edb13df..013de9d27c35 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -17,6 +17,136 @@
static const struct drm_connector no_connector = {};
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON);
+
+ return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_hdmi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+ &connector_dvi,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+ return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+ &no_connector,
+ &mode));
+ FAIL_ON(mode.specified);
+ FAIL_ON(mode.refresh_specified);
+ FAIL_ON(mode.bpp_specified);
+
+ FAIL_ON(mode.rb);
+ FAIL_ON(mode.cvt);
+ FAIL_ON(mode.interlace);
+ FAIL_ON(mode.margins);
+ FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+ return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_res(void *ignored)
{
struct drm_cmdline_mode mode = { };
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
/* R and B components are only 5 bits deep */
val |= SUN4I_TCON0_FRM_CTL_MODE_R;
val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+ /* Fall through */
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
/* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
ret = sun6i_dsi_dcs_read(dsi, msg);
break;
}
+ /* Else, fall through */
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 274cb955e2e1..bdcaa4c7168c 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output)
"nvidia,hpd-gpio", 0,
GPIOD_IN,
"HDMI hotplug detect");
- if (IS_ERR(output->hpd_gpio))
- return PTR_ERR(output->hpd_gpio);
+ if (IS_ERR(output->hpd_gpio)) {
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+ return PTR_ERR(output->hpd_gpio);
+
+ output->hpd_gpio = NULL;
+ }
if (output->hpd_gpio) {
err = gpiod_to_irq(output->hpd_gpio);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d594f7520b7b..7d78e6deac89 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
+ unsigned long attrs = 0;
dma_addr_t dma = d_page->dma;
d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
- dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
+ if (pool->type & IS_HUGE)
+ attrs = DMA_ATTR_NO_WARN;
+
+ dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
kfree(d_page);
d_page = NULL;
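For the ttm hunk: dma_free_coherent() is just dma_free_attrs() with attrs == 0, so the change only matters for huge-pool pages, where DMA_ATTR_NO_WARN is passed to match the attribute the huge pool presumably used at allocation time. A small, hypothetical example of the attrs-based alloc/free pairing:

	#include <linux/dma-mapping.h>

	/* Allocate a coherent buffer without warning on failure, and free it
	 * with the same attribute set. */
	static void *demo_alloc(struct device *dev, size_t size, dma_addr_t *handle)
	{
		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
				       DMA_ATTR_NO_WARN);
	}

	static void demo_free(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t handle)
	{
		dma_free_attrs(dev, size, vaddr, handle, DMA_ATTR_NO_WARN);
	}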
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index bf11930e40e1..1551c8253bec 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -237,8 +237,7 @@ static void vc4_match_add_drivers(struct device *dev,
struct device_driver *drv = &drivers[i]->driver;
struct device *p = NULL, *d;
- while ((d = bus_find_device(&platform_bus_type, p, drv,
- (void *)platform_bus_type.match))) {
+ while ((d = platform_find_device_by_driver(p, drv))) {
put_device(p);
component_match_add(dev, match, compare_dev, d);
p = d;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..09b526518f5a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
.interruptible = false,
.no_wait_gpu = false
};
+ size_t max_segment;
/* wtf swapping */
if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
if (!bo->pages)
goto out;
- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ max_segment = virtio_max_dma_size(qdev->vdev);
+ max_segment &= PAGE_MASK;
+ if (max_segment > SCATTERLIST_MAX_SEGMENT)
+ max_segment = SCATTERLIST_MAX_SEGMENT;
+ ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
if (ret)
goto out;
return 0;
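In the virtio-gpu hunk, __sg_alloc_table_from_pages() is the variant of sg_alloc_table_from_pages() that lets the caller cap each scatterlist segment; here the cap is the device DMA limit from virtio_max_dma_size(), page-aligned and clamped to SCATTERLIST_MAX_SEGMENT. A minimal sketch of the call (demo_build_sgt() is a made-up wrapper):

	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	/* Build an sg table from an array of pages, keeping every segment
	 * at or below max_seg bytes. */
	static int demo_build_sgt(struct sg_table *sgt, struct page **pages,
				  unsigned int n_pages, unsigned int max_seg)
	{
		return __sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
						   (unsigned long)n_pages << PAGE_SHIFT,
						   max_seg, GFP_KERNEL);
	}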
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d47c5c0..0c647be81ab0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -46,8 +46,6 @@
#define RETRIES 3
#define VMW_HYPERVISOR_MAGIC 0x564D5868
-#define VMW_HYPERVISOR_PORT 0x5658
-#define VMW_HYPERVISOR_HB_PORT 0x5659
#define VMW_PORT_CMD_MSG 30
#define VMW_PORT_CMD_HB_MSG 0
@@ -93,7 +91,7 @@ static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
(protocol | GUESTMSG_FLAG_COOKIE), si, di,
- VMW_HYPERVISOR_PORT,
+ 0,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -126,7 +124,7 @@ static int vmw_close_channel(struct rpc_channel *channel)
VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
0, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -160,7 +158,8 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
VMW_PORT_HB_OUT(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
msg_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMWARE_HYPERVISOR_HB | (channel->channel_id << 16) |
+ VMWARE_HYPERVISOR_OUT,
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
@@ -181,7 +180,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
word, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
}
@@ -213,7 +212,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
VMW_PORT_HB_IN(
(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
reply_len, si, di,
- VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMWARE_HYPERVISOR_HB | (channel->channel_id << 16),
VMW_HYPERVISOR_MAGIC, bp,
eax, ebx, ecx, edx, si, di);
@@ -230,7 +229,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
MESSAGE_STATUS_SUCCESS, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -269,7 +268,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
VMW_PORT(VMW_PORT_CMD_SENDSIZE,
msg_len, si, di,
- VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -327,7 +326,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_PORT(VMW_PORT_CMD_RECVSIZE,
0, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
@@ -353,7 +352,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
!!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -371,13 +370,13 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
MESSAGE_STATUS_SUCCESS, si, di,
- (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ channel->channel_id << 16,
VMW_HYPERVISOR_MAGIC,
eax, ebx, ecx, edx, si, di);
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -389,7 +388,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}
- if (retries == RETRIES)
+ if (!reply)
return -EINVAL;
*msg_len = reply_len;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 4907e50fb20a..f685c7071dec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -32,6 +32,7 @@
#ifndef _VMWGFX_MSG_H
#define _VMWGFX_MSG_H
+#include <asm/vmware.h>
/**
* Hypervisor-specific bi-directional communication channel. Should never
@@ -44,7 +45,7 @@
* @in_ebx: [IN] Message Len, through EBX
* @in_si: [IN] Input argument through SI, set to 0 if not used
 * @in_di: [IN] Input argument through DI, set to 0 if not used
- * @port_num: [IN] port number + [channel id]
+ * @flags: [IN] hypercall flags + [channel id]
* @magic: [IN] hypervisor magic value
* @eax: [OUT] value of EAX register
* @ebx: [OUT] e.g. status from an HB message status command
@@ -54,10 +55,10 @@
* @di: [OUT]
*/
#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- port_num, magic, \
+ flags, magic, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("inl %%dx, %%eax;" : \
+ asm volatile (VMWARE_HYPERCALL : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -67,7 +68,7 @@
"a"(magic), \
"b"(in_ebx), \
"c"(cmd), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di) : \
"memory"); \
@@ -85,7 +86,7 @@
* @in_ecx: [IN] Message Len, through ECX
* @in_si: [IN] Input argument through SI, set to 0 if not used
* @in_di: [IN] Input argument through DI, set to 0 if not used
- * @port_num: [IN] port number + [channel id]
+ * @flags: [IN] hypercall flags + [channel id]
* @magic: [IN] hypervisor magic value
* @bp: [IN]
* @eax: [OUT] value of EAX register
@@ -98,12 +99,12 @@
#ifdef __x86_64__
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
- "rep outsb;" \
+ VMWARE_HYPERCALL_HB_OUT \
"pop %%rbp;" : \
"=a"(eax), \
"=b"(ebx), \
@@ -114,7 +115,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
@@ -123,12 +124,12 @@
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
"mov %12, %%rbp;" \
- "rep insb;" \
+ VMWARE_HYPERCALL_HB_IN \
"pop %%rbp" : \
"=a"(eax), \
"=b"(ebx), \
@@ -139,7 +140,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"r"(bp) : \
@@ -157,13 +158,13 @@
* just pushed it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
- "rep outsb;" \
+ VMWARE_HYPERCALL_HB_OUT \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
@@ -175,7 +176,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
@@ -184,13 +185,13 @@
#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- port_num, magic, bp, \
+ flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %12;" \
"push %%ebp;" \
"mov 0x04(%%esp), %%ebp;" \
- "rep insb;" \
+ VMWARE_HYPERCALL_HB_IN \
"pop %%ebp;" \
"add $0x04, %%esp;" : \
"=a"(eax), \
@@ -202,7 +203,7 @@
"a"(magic), \
"b"(cmd), \
"c"(in_ecx), \
- "d"(port_num), \
+ "d"(flags), \
"S"(in_si), \
"D"(in_di), \
"m"(bp) : \
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
new file mode 100644
index 000000000000..b84fcaf8b105
--- /dev/null
+++ b/drivers/greybus/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig GREYBUS
+ tristate "Greybus support"
+ depends on SYSFS
+ ---help---
+ This option enables the Greybus driver core. Greybus is a
+ hardware protocol that was designed to provide Unipro with a
+ sane application layer. It was originally designed for the
+ ARA project, a modular phone system, but has shown up in other
+ phones, and can be tunneled over other busses in order to
+ control hardware devices.
+
+ Say Y here to enable support for these types of drivers.
+
+ To compile this code as a module, choose M here: the module
+ will be called greybus.ko
+
+if GREYBUS
+
+config GREYBUS_ES2
+ tristate "Greybus ES3 USB host controller"
+ depends on USB
+ ---help---
+ Select this option if you have a Toshiba ES3 USB device that
+ acts as a Greybus "host controller". This device is a bridge
+ from a USB device to a Unipro network.
+
+ To compile this code as a module, choose M here: the module
+ will be called gb-es2.ko
+
+endif # GREYBUS
+
diff --git a/drivers/greybus/Makefile b/drivers/greybus/Makefile
new file mode 100644
index 000000000000..9bccdd229aa2
--- /dev/null
+++ b/drivers/greybus/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+# Greybus core
+greybus-y := core.o \
+ debugfs.o \
+ hd.o \
+ manifest.o \
+ module.o \
+ interface.o \
+ bundle.o \
+ connection.o \
+ control.o \
+ svc.o \
+ svc_watchdog.o \
+ operation.o
+
+obj-$(CONFIG_GREYBUS) += greybus.o
+
+# needed for trace events
+ccflags-y += -I$(src)
+
+# Greybus Host controller drivers
+gb-es2-y := es2.o
+
+obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
+
+
diff --git a/drivers/greybus/arpc.h b/drivers/greybus/arpc.h
new file mode 100644
index 000000000000..c8b83c5cfa79
--- /dev/null
+++ b/drivers/greybus/arpc.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2016 Google Inc. All rights reserved.
+ * Copyright(c) 2016 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __ARPC_H
+#define __ARPC_H
+
+/* APBridgeA RPC (ARPC) */
+
+enum arpc_result {
+ ARPC_SUCCESS = 0x00,
+ ARPC_NO_MEMORY = 0x01,
+ ARPC_INVALID = 0x02,
+ ARPC_TIMEOUT = 0x03,
+ ARPC_UNKNOWN_ERROR = 0xff,
+};
+
+struct arpc_request_message {
+ __le16 id; /* RPC unique id */
+ __le16 size; /* Size in bytes of header + payload */
+ __u8 type; /* RPC type */
+ __u8 data[0]; /* ARPC data */
+} __packed;
+
+struct arpc_response_message {
+ __le16 id; /* RPC unique id */
+ __u8 result; /* Result of RPC */
+} __packed;
+
+/* ARPC requests */
+#define ARPC_TYPE_CPORT_CONNECTED 0x01
+#define ARPC_TYPE_CPORT_QUIESCE 0x02
+#define ARPC_TYPE_CPORT_CLEAR 0x03
+#define ARPC_TYPE_CPORT_FLUSH 0x04
+#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
+
+struct arpc_cport_connected_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_quiesce_req {
+ __le16 cport_id;
+ __le16 peer_space;
+ __le16 timeout;
+} __packed;
+
+struct arpc_cport_clear_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_flush_req {
+ __le16 cport_id;
+} __packed;
+
+struct arpc_cport_shutdown_req {
+ __le16 cport_id;
+ __le16 timeout;
+ __u8 phase;
+} __packed;
+
+#endif /* __ARPC_H */
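To illustrate the ARPC framing defined above: a request is the fixed header followed directly by the type-specific payload, and the size field covers both. A hedged sketch of building a cport-connected request (demo_cport_connected() is not part of the driver):

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <asm/byteorder.h>
	#include "arpc.h"

	static struct arpc_request_message *demo_cport_connected(u16 rpc_id,
								  u16 cport_id)
	{
		size_t size = sizeof(struct arpc_request_message) +
			      sizeof(struct arpc_cport_connected_req);
		struct arpc_request_message *req = kzalloc(size, GFP_KERNEL);
		struct arpc_cport_connected_req *payload;

		if (!req)
			return NULL;

		req->id = cpu_to_le16(rpc_id);
		req->size = cpu_to_le16(size);		/* header + payload */
		req->type = ARPC_TYPE_CPORT_CONNECTED;
		payload = (struct arpc_cport_connected_req *)req->data;
		payload->cport_id = cpu_to_le16(cport_id);
		return req;
	}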
diff --git a/drivers/staging/greybus/bundle.c b/drivers/greybus/bundle.c
index 3f702db9e098..84660729538b 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/greybus/bundle.c
@@ -6,7 +6,7 @@
* Copyright 2014-2015 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
static ssize_t bundle_class_show(struct device *dev,
diff --git a/drivers/staging/greybus/connection.c b/drivers/greybus/connection.c
index eda964208cce..fc8f57f97ce6 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -7,8 +7,8 @@
*/
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
diff --git a/drivers/staging/greybus/control.c b/drivers/greybus/control.c
index a9e8b6036cac..359a25841973 100644
--- a/drivers/staging/greybus/control.c
+++ b/drivers/greybus/control.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include "greybus.h"
+#include <linux/greybus.h>
/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR 0
diff --git a/drivers/staging/greybus/core.c b/drivers/greybus/core.c
index d6b0d49130c0..e546c6431877 100644
--- a/drivers/staging/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -9,7 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define CREATE_TRACE_POINTS
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
#define GB_BUNDLE_AUTOSUSPEND_MS 3000
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/greybus/debugfs.c
index 56e20c30feb5..e102d7badb9d 100644
--- a/drivers/staging/greybus/debugfs.c
+++ b/drivers/greybus/debugfs.c
@@ -7,8 +7,7 @@
*/
#include <linux/debugfs.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
static struct dentry *gb_debug_root;
diff --git a/drivers/staging/greybus/es2.c b/drivers/greybus/es2.c
index be6af18cec31..366716f11b1a 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -11,12 +11,11 @@
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
+#include <linux/greybus.h>
#include <asm/unaligned.h>
#include "arpc.h"
-#include "greybus.h"
#include "greybus_trace.h"
-#include "connection.h"
/* Default timeout for USB vendor requests. */
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h
index 7b5e2c6b1f6b..1bc9f1275c65 100644
--- a/drivers/staging/greybus/greybus_trace.h
+++ b/drivers/greybus/greybus_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus driver and device API
*
diff --git a/drivers/staging/greybus/hd.c b/drivers/greybus/hd.c
index 969f86697673..72b21bf2d7d3 100644
--- a/drivers/staging/greybus/hd.c
+++ b/drivers/greybus/hd.c
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
@@ -31,7 +31,7 @@ int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
EXPORT_SYMBOL_GPL(gb_hd_output);
static ssize_t bus_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct gb_host_device *hd = to_gb_host_device(dev);
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
- unsigned long flags)
+ unsigned long flags)
{
struct ida *id_map = &hd->cport_id_map;
int ida_start, ida_end;
@@ -122,9 +122,9 @@ struct device_type greybus_hd_type = {
};
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
- struct device *parent,
- size_t buffer_size_max,
- size_t num_cports)
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports)
{
struct gb_host_device *hd;
int ret;
diff --git a/drivers/staging/greybus/interface.c b/drivers/greybus/interface.c
index d7b5b89a2f40..67dbe6fda9a1 100644
--- a/drivers/staging/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -7,8 +7,8 @@
*/
#include <linux/delay.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
diff --git a/drivers/staging/greybus/manifest.c b/drivers/greybus/manifest.c
index 08db49264f2b..dd7040697bde 100644
--- a/drivers/staging/greybus/manifest.c
+++ b/drivers/greybus/manifest.c
@@ -6,7 +6,7 @@
* Copyright 2014-2015 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
static const char *get_descriptor_type_string(u8 type)
{
@@ -104,15 +104,15 @@ static int identify_descriptor(struct gb_interface *intf,
size_t expected_size;
if (size < sizeof(*desc_header)) {
- dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
- size, sizeof(*desc_header));
+ dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
+ sizeof(*desc_header));
return -EINVAL; /* Must at least have header */
}
desc_size = le16_to_cpu(desc_header->size);
if (desc_size > size) {
dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
- desc_size, size);
+ desc_size, size);
return -EINVAL;
}
@@ -139,22 +139,22 @@ static int identify_descriptor(struct gb_interface *intf,
case GREYBUS_TYPE_INVALID:
default:
dev_err(&intf->dev, "invalid descriptor type (%u)\n",
- desc_header->type);
+ desc_header->type);
return -EINVAL;
}
if (desc_size < expected_size) {
dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
- get_descriptor_type_string(desc_header->type),
- desc_size, expected_size);
+ get_descriptor_type_string(desc_header->type),
+ desc_size, expected_size);
return -EINVAL;
}
/* Descriptor bigger than what we expect */
if (desc_size > expected_size) {
dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
- get_descriptor_type_string(desc_header->type),
- expected_size, desc_size);
+ get_descriptor_type_string(desc_header->type),
+ expected_size, desc_size);
}
descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
@@ -208,7 +208,7 @@ static char *gb_string_get(struct gb_interface *intf, u8 string_id)
/* Allocate an extra byte so we can guarantee it's NUL-terminated */
string = kmemdup(&desc_string->string, desc_string->length + 1,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!string)
return ERR_PTR(-ENOMEM);
string[desc_string->length] = '\0';
@@ -264,8 +264,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
desc_cport = tmp->data;
if (cport_id == le16_to_cpu(desc_cport->id)) {
dev_err(&bundle->dev,
- "duplicate CPort %u found\n",
- cport_id);
+ "duplicate CPort %u found\n", cport_id);
goto exit;
}
}
@@ -277,7 +276,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
return 0;
bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!bundle->cport_desc)
goto exit;
@@ -287,7 +286,7 @@ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
list_for_each_entry_safe(desc, next, &list, links) {
desc_cport = desc->data;
memcpy(&bundle->cport_desc[i++], desc_cport,
- sizeof(*desc_cport));
+ sizeof(*desc_cport));
/* Release the cport descriptor */
release_manifest_descriptor(desc);
@@ -333,9 +332,9 @@ static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
/* Ignore any legacy control bundles */
if (bundle_id == GB_CONTROL_BUNDLE_ID) {
dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
- __func__);
+ __func__);
release_cport_descriptors(&intf->manifest_descs,
- bundle_id);
+ bundle_id);
continue;
}
@@ -468,7 +467,7 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
/* we have to have at _least_ the manifest header */
if (size < sizeof(*header)) {
dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
- size, sizeof(*header));
+ size, sizeof(*header));
return false;
}
@@ -478,15 +477,15 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
manifest_size = le16_to_cpu(header->size);
if (manifest_size != size) {
dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
- size, manifest_size);
+ size, manifest_size);
return false;
}
/* Validate major/minor number */
if (header->version_major > GREYBUS_VERSION_MAJOR) {
dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
- header->version_major, header->version_minor,
- GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+ header->version_major, header->version_minor,
+ GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
return false;
}
@@ -513,7 +512,7 @@ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
}
if (found != 1) {
dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
- found);
+ found);
result = false;
goto out;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/greybus/module.c
index b251a53d0e8e..36f77f9e1d74 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/greybus/module.c
@@ -6,7 +6,7 @@
* Copyright 2016 Linaro Ltd.
*/
-#include "greybus.h"
+#include <linux/greybus.h>
#include "greybus_trace.h"
static ssize_t eject_store(struct device *dev,
diff --git a/drivers/staging/greybus/operation.c b/drivers/greybus/operation.c
index fe268f7b63ed..8459e9bc0749 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/greybus/operation.c
@@ -12,8 +12,8 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "greybus_trace.h"
static struct kmem_cache *gb_operation_cache;
diff --git a/drivers/staging/greybus/svc.c b/drivers/greybus/svc.c
index 05bc45287b87..ce7740ef449b 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -8,8 +8,7 @@
#include <linux/debugfs.h>
#include <linux/workqueue.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
#define SVC_INTF_EJECT_TIMEOUT 9000
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/greybus/svc_watchdog.c
index 7868ad8211c5..b6b1682c19c4 100644
--- a/drivers/staging/greybus/svc_watchdog.c
+++ b/drivers/greybus/svc_watchdog.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
-#include "greybus.h"
+#include <linux/greybus.h>
#define SVC_WATCHDOG_PERIOD (2 * HZ)
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 98bf694626f7..3a8c4a5971f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -23,12 +23,36 @@
#define A4_2WHEEL_MOUSE_HACK_7 0x01
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
+#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
+
struct a4tech_sc {
unsigned long quirks;
unsigned int hw_wheel;
__s32 delayed_value;
};
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+ usage->hid == A4_WHEEL_ORIENTATION) {
+ /*
+ * We do not want to have this usage mapped to anything as it's
+ * nonstandard and doesn't really behave like an HID report.
+ * It's only selecting the orientation (vertical/horizontal) of
+ * the previous mouse wheel report. The input_events will be
+ * generated once both reports are recorded in a4_event().
+ */
+ return -1;
+ }
+
+ return 0;
+
+}
+
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
struct input_dev *input;
- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
- !usage->type)
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
input = field->hidinput->input;
@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- if (usage->hid == 0x000100b8) {
+ if (usage->hid == A4_WHEEL_ORIENTATION) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
static struct hid_driver a4_driver = {
.name = "a4tech",
.id_table = a4_devices,
+ .input_mapping = a4_input_mapping,
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
- cp2112_gpio_direction_input(gc, d->hwirq);
-
if (!dev->gpio_poll) {
dev->gpio_poll = true;
schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
return PTR_ERR(dev->desc[pin]);
}
+ ret = cp2112_gpio_direction_input(&dev->gc, pin);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+ goto err_desc;
+ }
+
ret = gpiochip_lock_as_irq(&dev->gc, pin);
if (ret) {
dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index b3d502421b79..0a38e8e9bc78 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
/* Locate the boot interface, to receive the LED change events */
struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+ struct hid_device *boot_hid;
+ struct hid_input *boot_hid_input;
- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ if (unlikely(boot_interface == NULL))
+ return -ENODEV;
+
+ boot_hid = usb_get_intfdata(boot_interface);
+ boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);
return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0d695f8e1b2c..0a00be19f7a0 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -568,6 +568,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -768,7 +769,8 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED 0xc539
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -989,6 +991,7 @@
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52 0x075c
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 6196217a7d93..cc47f948c1d0 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1125,7 +1125,7 @@ static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
HID_REQ_SET_REPORT);
kfree(hidpp_report);
- return retval;
+ return (retval < 0) ? retval : 0;
}
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
@@ -1832,13 +1832,17 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
.driver_data = recvr_type_hidpp},
- { /* Logitech gaming receiver (0xc539) */
+ { /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING),
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED),
.driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = recvr_type_27mhz},
+ { /* Logitech powerplay receiver (0xc53a) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_S510_RECEIVER_2),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index e3b6245bf4b2..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,15 +3749,23 @@ static const struct hid_device_id hidpp_devices[] = {
{ L27MHZ_DEVICE(HID_ANY_ID) },
- { /* Logitech G403 Gaming Mouse over USB */
+ { /* Logitech G403 Wireless Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
- { /* Logitech G700 Gaming Mouse over USB */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G703 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
+ { /* Logitech G703 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
{ /* Logitech G900 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
+ { /* Logitech G903 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ { /* Logitech G903 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
{ /* Logitech G920 Wheel over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+ { /* Logitech G Pro Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 185a577c46f6..166f41f3173b 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -92,6 +92,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -141,6 +142,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 93942063b51b..49dd2d905c7f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
static inline void sony_schedule_work(struct sony_sc *sc,
enum sony_worker which)
{
+ unsigned long flags;
+
switch (which) {
case SONY_WORKER_STATE:
- if (!sc->defer_initialization)
+ spin_lock_irqsave(&sc->lock, flags);
+ if (!sc->defer_initialization && sc->state_worker_initialized)
schedule_work(&sc->state_worker);
+ spin_unlock_irqrestore(&sc->lock, flags);
break;
case SONY_WORKER_HOTPLUG:
if (sc->hotplug_worker_initialized)
@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
static inline void sony_cancel_work_sync(struct sony_sc *sc)
{
+ unsigned long flags;
+
if (sc->hotplug_worker_initialized)
cancel_work_sync(&sc->hotplug_worker);
- if (sc->state_worker_initialized)
+ if (sc->state_worker_initialized) {
+ spin_lock_irqsave(&sc->lock, flags);
+ sc->state_worker_initialized = 0;
+ spin_unlock_irqrestore(&sc->lock, flags);
cancel_work_sync(&sc->state_worker);
+ }
}
-
static int sony_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
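The hid-sony hunk closes a race between sony_schedule_work() and sony_cancel_work_sync(): the worker-initialized flag is now cleared under the same spinlock the scheduling path takes, so no new work can be queued once cancel_work_sync() starts waiting. A generic sketch of that teardown pattern (names are illustrative, not the driver's; the lock is assumed to be initialized elsewhere):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct demo_ctx {
		spinlock_t lock;		/* initialized with spin_lock_init() */
		bool worker_alive;
		struct work_struct work;
	};

	static void demo_kick(struct demo_ctx *ctx)	/* scheduling side */
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		if (ctx->worker_alive)
			schedule_work(&ctx->work);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	static void demo_teardown(struct demo_ctx *ctx)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		ctx->worker_alive = false;		/* no new work after this */
		spin_unlock_irqrestore(&ctx->lock, flags);
		cancel_work_sync(&ctx->work);		/* wait out anything in flight */
	}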
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index e12f2588ddeb..bdfc5ff3b2c5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -22,6 +22,8 @@
#include "hid-ids.h"
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
+
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data,
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
+ int motor_swap;
switch (effect->type) {
case FF_CONSTANT:
@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data,
ff_field->logical_minimum,
ff_field->logical_maximum);
+ /* 2-in-1 strong motor is left */
+ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+ motor_swap = left;
+ left = right;
+ right = motor_swap;
+ }
+
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
ff_field->value[1] = right;
@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
+ .driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define CML_LP_DEVICE_ID 0x02FC
+#define EHL_Ax_DEVICE_ID 0x4BB3
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 55b72573066b..4e11cc6fc34b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
spin_unlock_irq(&list->hiddev->list_lock);
mutex_lock(&hiddev->existancelock);
+ /*
+ * recheck exist with existance lock held to
+ * avoid opening a disconnected device
+ */
+ if (!list->hiddev->exist) {
+ res = -ENODEV;
+ goto bail_unlock;
+ }
if (!list->hiddev->open++)
if (list->hiddev->exist) {
struct hid_device *hid = hiddev->hid;
@@ -300,6 +308,10 @@ bail_normal_power:
hid_hw_power(hid, PM_HINT_NORMAL);
bail_unlock:
mutex_unlock(&hiddev->existancelock);
+
+ spin_lock_irq(&list->hiddev->list_lock);
+ list_del(&list->node);
+ spin_unlock_irq(&list->hiddev->list_lock);
bail:
file->private_data = NULL;
vfree(list);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8fc36a28081b..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -533,14 +533,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
*/
buttons = (data[4] << 1) | (data[3] & 0x01);
} else if (features->type == CINTIQ_COMPANION_2) {
- /* d-pad right -> data[4] & 0x10
- * d-pad up -> data[4] & 0x20
- * d-pad left -> data[4] & 0x40
- * d-pad down -> data[4] & 0x80
- * d-pad center -> data[3] & 0x01
+ /* d-pad right -> data[2] & 0x10
+ * d-pad up -> data[2] & 0x20
+ * d-pad left -> data[2] & 0x40
+ * d-pad down -> data[2] & 0x80
+ * d-pad center -> data[1] & 0x01
*/
buttons = ((data[2] >> 4) << 7) |
- ((data[1] & 0x04) << 6) |
+ ((data[1] & 0x04) << 4) |
((data[2] & 0x0F) << 2) |
(data[1] & 0x03);
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
y >>= 1;
distance >>= 1;
}
+ if (features->type == INTUOSHT2)
+ distance = features->distance_max - distance;
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
input_report_key(input, BTN_BASE2, (data[11] & 0x02));
if (data[12] & 0x80)
- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
else
input_report_abs(input, ABS_WHEEL, 0);
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
}
if (wacom->tool[0]) {
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
- if (wacom->features.type == INTUOSP2_BT) {
+ if (wacom->features.type == INTUOSP2_BT ||
+ wacom->features.type == INTUOSP2S_BT) {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[13] : wacom->features.distance_max);
} else {
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 9a59957922d4..79e5356a737a 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -14,9 +14,6 @@ config HYPERV
config HYPERV_TIMER
def_bool HYPERV
-config HYPERV_TSCPAGE
- def_bool HYPERV && X86_64
-
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
depends on HYPERV && CONNECTOR && NLS
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
static unsigned long virt_to_hvpfn(void *addr)
{
- unsigned long paddr;
+ phys_addr_t paddr;
if (is_vmalloc_addr(addr))
paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 999f80a63bff..e70783e33680 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hyperv
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..50eaa1fd6e45 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
*/
u64 guestid;
- void *tsc_page;
-
struct hv_per_cpu_context __percpu *cpu_context;
/*
@@ -192,11 +190,11 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
u64 *requestid, bool raw);
/*
- * Maximum channels is determined by the size of the interrupt page
- * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
- * and the other is receive endpoint interrupt
+ * The maximum number of channels (16384) is determined by the size of the
+ * interrupt page, which is HV_HYP_PAGE_SIZE. Half of HV_HYP_PAGE_SIZE is used
+ * for send endpoint interrupts, and the other half for receive endpoint interrupts.
*/
-#define MAX_NUM_CHANNELS ((PAGE_SIZE >> 1) << 3) /* 16348 channels */
+#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
/* The value here must be in multiple of 32 */
/* TODO: Need to make this configurable */
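Worked arithmetic behind MAX_NUM_CHANNELS: HV_HYP_PAGE_SIZE is 4096 bytes, half of it (2048 bytes) serves one direction, and at one bit per channel that yields (4096 >> 1) << 3 = 2048 * 8 = 16384 channels.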
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 650dd71f9724..2ca5668bdb62 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -246,6 +246,16 @@ config SENSORS_ADT7475
This driver can also be built as a module. If so, the module
will be called adt7475.
+config SENSORS_AS370
+ tristate "Synaptics AS370 SoC hardware monitoring driver"
+ help
+ If you say yes here you get support for the PVT sensors of
+ the Synaptics AS370 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called as370-hwmon.
+
+
config SENSORS_ASC7621
tristate "Andigilog aSC7621"
depends on I2C
@@ -1382,8 +1392,8 @@ config SENSORS_SHTC1
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
help
- If you say yes here you get support for the Sensiron SHTC1 and SHTW1
- humidity and temperature sensors.
+ If you say yes here you get support for the Sensirion SHTC1, SHTW1,
+ and SHTC3 humidity and temperature sensors.
This driver can also be built as a module. If so, the module
will be called shtc1.
@@ -1570,16 +1580,6 @@ config SENSORS_ADC128D818
This driver can also be built as a module. If so, the module
will be called adc128d818.
-config SENSORS_ADS1015
- tristate "Texas Instruments ADS1015"
- depends on I2C
- help
- If you say yes here you get support for Texas Instruments
- ADS1015/ADS1115 12/16-bit 4-input ADC device.
-
- This driver can also be built as a module. If so, the module
- will be called ads1015.
-
config SENSORS_ADS7828
tristate "Texas Instruments ADS7828 and compatibles"
depends on I2C
@@ -1834,17 +1834,12 @@ config SENSORS_W83795
will be called w83795.
config SENSORS_W83795_FANCTRL
- bool "Include automatic fan control support (DANGEROUS)"
+ bool "Include automatic fan control support"
depends on SENSORS_W83795
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
- This part of the code wasn't carefully reviewed and tested yet,
- so enabling this option is strongly discouraged on production
- servers. Only developers and testers should enable it for the
- time being.
-
Please also note that this option will create sysfs attribute
files which may change in the future, so you shouldn't rely
on them being stable.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 8db472ea04f0..c86ce4d3d36b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
-obj-$(CONFIG_SENSORS_ADS1015) += ads1015.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7X10) += adt7x10.o
@@ -48,6 +47,7 @@ obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
+obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 6ba1a08253f0..4cf25458f0b9 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -681,8 +681,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
if (resource->caps.flags & POWER_METER_CAN_CAP) {
if (!can_cap_in_hardware()) {
- dev_err(&resource->acpi_dev->dev,
- "Ignoring unsafe software power cap!\n");
+ dev_warn(&resource->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
goto skip_unsafe_cap;
}
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
deleted file mode 100644
index 3727a3762eb8..000000000000
--- a/drivers/hwmon/ads1015.c
+++ /dev/null
@@ -1,324 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ads1015.c - lm_sensors driver for ads1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- *
- * Based on the ads7828 driver by Steve Hardy.
- *
- * Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads1015.pdf
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/ads1015.h>
-
-/* ADS1015 registers */
-enum {
- ADS1015_CONVERSION = 0,
- ADS1015_CONFIG = 1,
-};
-
-/* PGA fullscale voltages in mV */
-static const unsigned int fullscale_table[8] = {
- 6144, 4096, 2048, 1024, 512, 256, 256, 256 };
-
-/* Data rates in samples per second */
-static const unsigned int data_rate_table_1015[8] = {
- 128, 250, 490, 920, 1600, 2400, 3300, 3300
-};
-
-static const unsigned int data_rate_table_1115[8] = {
- 8, 16, 32, 64, 128, 250, 475, 860
-};
-
-#define ADS1015_DEFAULT_CHANNELS 0xff
-#define ADS1015_DEFAULT_PGA 2
-#define ADS1015_DEFAULT_DATA_RATE 4
-
-enum ads1015_chips {
- ads1015,
- ads1115,
-};
-
-struct ads1015_data {
- struct device *hwmon_dev;
- struct mutex update_lock; /* mutex protect updates */
- struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
- enum ads1015_chips id;
-};
-
-static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
-{
- u16 config;
- struct ads1015_data *data = i2c_get_clientdata(client);
- unsigned int pga = data->channel_data[channel].pga;
- unsigned int data_rate = data->channel_data[channel].data_rate;
- unsigned int conversion_time_ms;
- const unsigned int * const rate_table = data->id == ads1115 ?
- data_rate_table_1115 : data_rate_table_1015;
- int res;
-
- mutex_lock(&data->update_lock);
-
- /* get channel parameters */
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
- if (res < 0)
- goto err_unlock;
- config = res;
- conversion_time_ms = DIV_ROUND_UP(1000, rate_table[data_rate]);
-
- /* setup and start single conversion */
- config &= 0x001f;
- config |= (1 << 15) | (1 << 8);
- config |= (channel & 0x0007) << 12;
- config |= (pga & 0x0007) << 9;
- config |= (data_rate & 0x0007) << 5;
-
- res = i2c_smbus_write_word_swapped(client, ADS1015_CONFIG, config);
- if (res < 0)
- goto err_unlock;
-
- /* wait until conversion finished */
- msleep(conversion_time_ms);
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONFIG);
- if (res < 0)
- goto err_unlock;
- config = res;
- if (!(config & (1 << 15))) {
- /* conversion not finished in time */
- res = -EIO;
- goto err_unlock;
- }
-
- res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
-
-err_unlock:
- mutex_unlock(&data->update_lock);
- return res;
-}
-
-static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
- s16 reg)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- unsigned int pga = data->channel_data[channel].pga;
- int fullscale = fullscale_table[pga];
- const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
-
- return DIV_ROUND_CLOSEST(reg * fullscale, mask);
-}
-
-/* sysfs callback function */
-static ssize_t in_show(struct device *dev, struct device_attribute *da,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct i2c_client *client = to_i2c_client(dev);
- int res;
- int index = attr->index;
-
- res = ads1015_read_adc(client, index);
- if (res < 0)
- return res;
-
- return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
-}
-
-static const struct sensor_device_attribute ads1015_in[] = {
- SENSOR_ATTR_RO(in0_input, in, 0),
- SENSOR_ATTR_RO(in1_input, in, 1),
- SENSOR_ATTR_RO(in2_input, in, 2),
- SENSOR_ATTR_RO(in3_input, in, 3),
- SENSOR_ATTR_RO(in4_input, in, 4),
- SENSOR_ATTR_RO(in5_input, in, 5),
- SENSOR_ATTR_RO(in6_input, in, 6),
- SENSOR_ATTR_RO(in7_input, in, 7),
-};
-
-/*
- * Driver interface
- */
-
-static int ads1015_remove(struct i2c_client *client)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- int k;
-
- hwmon_device_unregister(data->hwmon_dev);
- for (k = 0; k < ADS1015_CHANNELS; ++k)
- device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
- return 0;
-}
-
-#ifdef CONFIG_OF
-static int ads1015_get_channels_config_of(struct i2c_client *client)
-{
- struct ads1015_data *data = i2c_get_clientdata(client);
- struct device_node *node;
-
- if (!client->dev.of_node
- || !of_get_next_child(client->dev.of_node, NULL))
- return -EINVAL;
-
- for_each_child_of_node(client->dev.of_node, node) {
- u32 pval;
- unsigned int channel;
- unsigned int pga = ADS1015_DEFAULT_PGA;
- unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
-
- if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %pOF\n", node);
- continue;
- }
-
- channel = pval;
- if (channel >= ADS1015_CHANNELS) {
- dev_err(&client->dev,
- "invalid channel index %d on %pOF\n",
- channel, node);
- continue;
- }
-
- if (!of_property_read_u32(node, "ti,gain", &pval)) {
- pga = pval;
- if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %pOF\n",
- node);
- return -EINVAL;
- }
- }
-
- if (!of_property_read_u32(node, "ti,datarate", &pval)) {
- data_rate = pval;
- if (data_rate > 7) {
- dev_err(&client->dev,
- "invalid data_rate on %pOF\n", node);
- return -EINVAL;
- }
- }
-
- data->channel_data[channel].enabled = true;
- data->channel_data[channel].pga = pga;
- data->channel_data[channel].data_rate = data_rate;
- }
-
- return 0;
-}
-#endif
-
-static void ads1015_get_channels_config(struct i2c_client *client)
-{
- unsigned int k;
- struct ads1015_data *data = i2c_get_clientdata(client);
- struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
-
- /* prefer platform data */
- if (pdata) {
- memcpy(data->channel_data, pdata->channel_data,
- sizeof(data->channel_data));
- return;
- }
-
-#ifdef CONFIG_OF
- if (!ads1015_get_channels_config_of(client))
- return;
-#endif
-
- /* fallback on default configuration */
- for (k = 0; k < ADS1015_CHANNELS; ++k) {
- data->channel_data[k].enabled = true;
- data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
- data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
- }
-}
-
-static int ads1015_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ads1015_data *data;
- int err;
- unsigned int k;
-
- data = devm_kzalloc(&client->dev, sizeof(struct ads1015_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (client->dev.of_node)
- data->id = (enum ads1015_chips)
- of_device_get_match_data(&client->dev);
- else
- data->id = id->driver_data;
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* build sysfs attribute group */
- ads1015_get_channels_config(client);
- for (k = 0; k < ADS1015_CHANNELS; ++k) {
- if (!data->channel_data[k].enabled)
- continue;
- err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
- if (err)
- goto exit_remove;
- }
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- for (k = 0; k < ADS1015_CHANNELS; ++k)
- device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
- return err;
-}
-
-static const struct i2c_device_id ads1015_id[] = {
- { "ads1015", ads1015},
- { "ads1115", ads1115},
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ads1015_id);
-
-static const struct of_device_id __maybe_unused ads1015_of_match[] = {
- {
- .compatible = "ti,ads1015",
- .data = (void *)ads1015
- },
- {
- .compatible = "ti,ads1115",
- .data = (void *)ads1115
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, ads1015_of_match);
-
-static struct i2c_driver ads1015_driver = {
- .driver = {
- .name = "ads1015",
- .of_match_table = of_match_ptr(ads1015_of_match),
- },
- .probe = ads1015_probe,
- .remove = ads1015_remove,
- .id_table = ads1015_id,
-};
-
-module_i2c_driver(ads1015_driver);
-
-MODULE_AUTHOR("Dirk Eibach <eibach@gdsys.de>");
-MODULE_DESCRIPTION("ADS1015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index c3c6031a7285..6c64d50c9aae 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -187,7 +187,7 @@ static const struct of_device_id __maybe_unused adt7475_of_match[] = {
MODULE_DEVICE_TABLE(of, adt7475_of_match);
struct adt7475_data {
- struct device *hwmon_dev;
+ struct i2c_client *client;
struct mutex lock;
unsigned long measure_updated;
@@ -212,6 +212,7 @@ struct adt7475_data {
u8 vid;
u8 vrm;
+ const struct attribute_group *groups[9];
};
static struct i2c_driver adt7475_driver;
@@ -346,8 +347,8 @@ static ssize_t voltage_store(struct device *dev,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg;
long val;
@@ -440,8 +441,8 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg = 0;
u8 out;
int temp;
@@ -542,8 +543,7 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
long val;
switch (sattr->index) {
@@ -570,8 +570,8 @@ static ssize_t temp_st_store(struct device *dev,
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg;
int shift, idx;
ulong val;
@@ -647,8 +647,8 @@ static ssize_t point2_show(struct device *dev, struct device_attribute *attr,
static ssize_t point2_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
int temp;
long val;
@@ -710,8 +710,8 @@ static ssize_t tach_store(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned long val;
if (kstrtoul(buf, 10, &val))
@@ -769,8 +769,8 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
unsigned char reg = 0;
long val;
@@ -818,8 +818,8 @@ static ssize_t stall_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+
u8 mask = BIT(5 + sattr->index);
return sprintf(buf, "%d\n", !!(data->enh_acoustics[0] & mask));
@@ -830,8 +830,8 @@ static ssize_t stall_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
u8 mask = BIT(5 + sattr->index);
@@ -914,8 +914,8 @@ static ssize_t pwmchan_store(struct device *dev,
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int r;
long val;
@@ -938,8 +938,8 @@ static ssize_t pwmctrl_store(struct device *dev,
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int r;
long val;
@@ -982,8 +982,8 @@ static ssize_t pwmfreq_store(struct device *dev,
size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
int out;
long val;
@@ -1022,8 +1022,8 @@ static ssize_t pwm_use_point2_pwm_at_crit_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
long val;
if (kstrtol(buf, 10, &val))
@@ -1342,26 +1342,6 @@ static int adt7475_detect(struct i2c_client *client,
return 0;
}
-static void adt7475_remove_files(struct i2c_client *client,
- struct adt7475_data *data)
-{
- sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
- if (data->has_fan4)
- sysfs_remove_group(&client->dev.kobj, &fan4_attr_group);
- if (data->has_pwm2)
- sysfs_remove_group(&client->dev.kobj, &pwm2_attr_group);
- if (data->has_voltage & (1 << 0))
- sysfs_remove_group(&client->dev.kobj, &in0_attr_group);
- if (data->has_voltage & (1 << 3))
- sysfs_remove_group(&client->dev.kobj, &in3_attr_group);
- if (data->has_voltage & (1 << 4))
- sysfs_remove_group(&client->dev.kobj, &in4_attr_group);
- if (data->has_voltage & (1 << 5))
- sysfs_remove_group(&client->dev.kobj, &in5_attr_group);
- if (data->has_vid)
- sysfs_remove_group(&client->dev.kobj, &vid_attr_group);
-}
-
static int adt7475_update_limits(struct i2c_client *client)
{
struct adt7475_data *data = i2c_get_clientdata(client);
@@ -1489,7 +1469,8 @@ static int adt7475_probe(struct i2c_client *client,
};
struct adt7475_data *data;
- int i, ret = 0, revision;
+ struct device *hwmon_dev;
+ int i, ret = 0, revision, group_num = 0;
u8 config2, config3;
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
@@ -1497,6 +1478,7 @@ static int adt7475_probe(struct i2c_client *client,
return -ENOMEM;
mutex_init(&data->lock);
+ data->client = client;
i2c_set_clientdata(client, data);
if (client->dev.of_node)
@@ -1590,52 +1572,40 @@ static int adt7475_probe(struct i2c_client *client,
break;
}
- ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
- if (ret)
- return ret;
+ data->groups[group_num++] = &adt7475_attr_group;
/* Features that can be disabled individually */
if (data->has_fan4) {
- ret = sysfs_create_group(&client->dev.kobj, &fan4_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &fan4_attr_group;
}
if (data->has_pwm2) {
- ret = sysfs_create_group(&client->dev.kobj, &pwm2_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &pwm2_attr_group;
}
if (data->has_voltage & (1 << 0)) {
- ret = sysfs_create_group(&client->dev.kobj, &in0_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in0_attr_group;
}
if (data->has_voltage & (1 << 3)) {
- ret = sysfs_create_group(&client->dev.kobj, &in3_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in3_attr_group;
}
if (data->has_voltage & (1 << 4)) {
- ret = sysfs_create_group(&client->dev.kobj, &in4_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in4_attr_group;
}
if (data->has_voltage & (1 << 5)) {
- ret = sysfs_create_group(&client->dev.kobj, &in5_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num++] = &in5_attr_group;
}
if (data->has_vid) {
data->vrm = vid_which_vrm();
- ret = sysfs_create_group(&client->dev.kobj, &vid_attr_group);
- if (ret)
- goto eremove;
+ data->groups[group_num] = &vid_attr_group;
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
- goto eremove;
+ /* register device with all the acquired attributes */
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
+
+ if (IS_ERR(hwmon_dev)) {
+ ret = PTR_ERR(hwmon_dev);
+ return ret;
}
dev_info(&client->dev, "%s device, revision %d\n",
@@ -1657,21 +1627,7 @@ static int adt7475_probe(struct i2c_client *client,
/* Limits and settings, should never change update more than once */
ret = adt7475_update_limits(client);
if (ret)
- goto eremove;
-
- return 0;
-
-eremove:
- adt7475_remove_files(client, data);
- return ret;
-}
-
-static int adt7475_remove(struct i2c_client *client)
-{
- struct adt7475_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- adt7475_remove_files(client, data);
+ return ret;
return 0;
}
@@ -1683,7 +1639,6 @@ static struct i2c_driver adt7475_driver = {
.of_match_table = of_match_ptr(adt7475_of_match),
},
.probe = adt7475_probe,
- .remove = adt7475_remove,
.id_table = adt7475_id,
.detect = adt7475_detect,
.address_list = normal_i2c,
@@ -1757,8 +1712,8 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
static int adt7475_update_measure(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
u16 ext;
int i;
int ret;
@@ -1854,8 +1809,7 @@ static int adt7475_update_measure(struct device *dev)
static struct adt7475_data *adt7475_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7475_data *data = i2c_get_clientdata(client);
+ struct adt7475_data *data = dev_get_drvdata(dev);
int ret;
mutex_lock(&data->lock);
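
The adt7475 conversion above drops the hand-rolled sysfs_create_group()/sysfs_remove_group() bookkeeping and the .remove callback in favour of a NULL-terminated array of attribute groups passed to devm_hwmon_device_register_with_groups(), so device-managed cleanup handles unregistration. A minimal sketch of that pattern, with placeholder foo_* names that are not part of the patch:

static int foo_probe(struct i2c_client *client)
{
	struct foo_data *data;
	struct device *hwmon_dev;
	int idx = 0;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;

	data->groups[idx++] = &foo_base_group;		/* always present */
	if (data->has_optional_feature)
		data->groups[idx++] = &foo_optional_group;
	/* groups[] was zeroed by devm_kzalloc(), so it stays NULL-terminated */

	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   client->name, data,
							   data->groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);	/* no .remove callback needed */
}

The attributes are then created on the hwmon device whose drvdata is the private structure, which is why the show/store functions above switch from to_i2c_client(dev) to dev_get_drvdata(dev) and fetch the client out of the driver data.
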
diff --git a/drivers/hwmon/as370-hwmon.c b/drivers/hwmon/as370-hwmon.c
new file mode 100644
index 000000000000..464244ba8d58
--- /dev/null
+++ b/drivers/hwmon/as370-hwmon.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics AS370 SoC Hardware Monitoring Driver
+ *
+ * Copyright (C) 2018 Synaptics Incorporated
+ * Author: Jisheng Zhang <jszhang@kernel.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#define CTRL 0x0
+#define PD BIT(0)
+#define EN BIT(1)
+#define T_SEL BIT(2)
+#define V_SEL BIT(3)
+#define NMOS_SEL BIT(8)
+#define PMOS_SEL BIT(9)
+#define STS 0x4
+#define BN_MASK GENMASK(11, 0)
+#define EOC BIT(12)
+
+struct as370_hwmon {
+ void __iomem *base;
+};
+
+static void init_pvt(struct as370_hwmon *hwmon)
+{
+ u32 val;
+ void __iomem *addr = hwmon->base + CTRL;
+
+ val = PD;
+ writel_relaxed(val, addr);
+ val |= T_SEL;
+ writel_relaxed(val, addr);
+ val |= EN;
+ writel_relaxed(val, addr);
+ val &= ~PD;
+ writel_relaxed(val, addr);
+}
+
+static int as370_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ int val;
+ struct as370_hwmon *hwmon = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_input:
+ val = readl_relaxed(hwmon->base + STS) & BN_MASK;
+ *temp = DIV_ROUND_CLOSEST(val * 251802, 4096) - 85525;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t
+as370_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const u32 as370_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info as370_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = as370_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *as370_hwmon_info[] = {
+ &as370_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops as370_hwmon_ops = {
+ .is_visible = as370_hwmon_is_visible,
+ .read = as370_hwmon_read,
+};
+
+static const struct hwmon_chip_info as370_chip_info = {
+ .ops = &as370_hwmon_ops,
+ .info = as370_hwmon_info,
+};
+
+static int as370_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct as370_hwmon *hwmon;
+ struct device *dev = &pdev->dev;
+
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hwmon->base))
+ return PTR_ERR(hwmon->base);
+
+ init_pvt(hwmon);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ "as370",
+ hwmon,
+ &as370_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id as370_hwmon_match[] = {
+ { .compatible = "syna,as370-hwmon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, as370_hwmon_match);
+
+static struct platform_driver as370_hwmon_driver = {
+ .probe = as370_hwmon_probe,
+ .driver = {
+ .name = "as370-hwmon",
+ .of_match_table = as370_hwmon_match,
+ },
+};
+module_platform_driver(as370_hwmon_driver);
+
+MODULE_AUTHOR("Jisheng Zhang<jszhang@kernel.org>");
+MODULE_DESCRIPTION("Synaptics AS370 SoC hardware monitor");
+MODULE_LICENSE("GPL v2");
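
The new as370-hwmon driver above uses the hwmon_chip_info registration path: an is_visible/read ops table plus a channel list built from HWMON_T_INPUT, handed to a single devm_hwmon_device_register_with_info() call, so no sysfs attributes are declared by hand. The raw 12-bit PVT sample is turned into millidegrees Celsius by DIV_ROUND_CLOSEST(val * 251802, 4096) - 85525; as a worked example with an illustrative reading of 2048, that gives 125901 - 85525 = 40376, i.e. roughly 40.4 degrees C exposed through temp1_input.
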
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index c9fa84b25678..4c609e23a4ef 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -706,21 +706,21 @@ static int asb100_detect_subclients(struct i2c_client *client)
goto ERROR_SC_2;
}
- data->lm75[0] = i2c_new_dummy(adapter, sc_addr[0]);
- if (!data->lm75[0]) {
+ data->lm75[0] = i2c_new_dummy_device(adapter, sc_addr[0]);
+ if (IS_ERR(data->lm75[0])) {
dev_err(&client->dev,
"subclient %d registration at address 0x%x failed.\n",
1, sc_addr[0]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[0]);
goto ERROR_SC_2;
}
- data->lm75[1] = i2c_new_dummy(adapter, sc_addr[1]);
- if (!data->lm75[1]) {
+ data->lm75[1] = i2c_new_dummy_device(adapter, sc_addr[1]);
+ if (IS_ERR(data->lm75[1])) {
dev_err(&client->dev,
"subclient %d registration at address 0x%x failed.\n",
2, sc_addr[1]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[1]);
goto ERROR_SC_3;
}
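
The asb100 hunk applies the i2c_new_dummy() to i2c_new_dummy_device() conversion: the new helper returns an ERR_PTR() rather than NULL on failure, so the caller checks with IS_ERR() and propagates the encoded error instead of assuming -ENOMEM. Condensed sketch of the pattern (variable names shortened from the patch):

	sub = i2c_new_dummy_device(adapter, address);
	if (IS_ERR(sub)) {
		dev_err(&client->dev, "subclient registration failed\n");
		err = PTR_ERR(sub);	/* was: err = -ENOMEM; */
		goto error_out;
	}
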
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index e232fa948833..79b8df258371 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -5,7 +5,7 @@
*
* The ATXP1 can reside on I2C addresses 0x37 or 0x4e. The chip is
* not auto-detected by the driver and must be instantiated explicitly.
- * See Documentation/i2c/instantiating-devices for more information.
+ * See Documentation/i2c/instantiating-devices.rst for more information.
*/
#include <linux/kernel.h>
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index fe6618e49dc4..d855c78fb8be 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -736,7 +736,7 @@ static int __init coretemp_init(void)
err = platform_driver_register(&coretemp_driver);
if (err)
- return err;
+ goto outzone;
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
coretemp_cpu_online, coretemp_cpu_offline);
@@ -747,6 +747,7 @@ static int __init coretemp_init(void)
outdrv:
platform_driver_unregister(&coretemp_driver);
+outzone:
kfree(zone_devices);
return err;
}
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f1c2d5faedf0..b85a125dd86f 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -44,12 +44,20 @@ static ssize_t iio_hwmon_read_val(struct device *dev,
int ret;
struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
struct iio_hwmon_state *state = dev_get_drvdata(dev);
+ struct iio_channel *chan = &state->channels[sattr->index];
+ enum iio_chan_type type;
+
+ ret = iio_read_channel_processed(chan, &result);
+ if (ret < 0)
+ return ret;
- ret = iio_read_channel_processed(&state->channels[sattr->index],
- &result);
+ ret = iio_get_channel_type(chan, &type);
if (ret < 0)
return ret;
+ if (type == IIO_POWER)
+ result *= 1000; /* milli-Watts to micro-Watts conversion */
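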
+
return sprintf(buf, "%d\n", result);
}
@@ -59,7 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
struct iio_hwmon_state *st;
struct sensor_device_attribute *a;
int ret, i;
- int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
+ int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1, power_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
struct device *hwmon_dev;
@@ -114,6 +122,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
n = curr_i++;
prefix = "curr";
break;
+ case IIO_POWER:
+ n = power_i++;
+ prefix = "power";
+ break;
case IIO_HUMIDITYRELATIVE:
n = humidity_i++;
prefix = "humidity";
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index c77e89239dcd..5c1dddde193c 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 4994c90c8929..f73bd4eceb28 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -10,10 +10,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <asm/processor.h>
@@ -24,108 +22,18 @@
#define SEL_CORE 0x04
struct k8temp_data {
- struct device *hwmon_dev;
struct mutex update_lock;
- const char *name;
- char valid; /* zero until following fields are valid */
- unsigned long last_updated; /* in jiffies */
/* registers values */
u8 sensorsp; /* sensor presence bits - SEL_CORE, SEL_PLACE */
- u32 temp[2][2]; /* core, place */
u8 swap_core_select; /* meaning of SEL_CORE is inverted */
u32 temp_offset;
};
-static struct k8temp_data *k8temp_update_device(struct device *dev)
-{
- struct k8temp_data *data = dev_get_drvdata(dev);
- struct pci_dev *pdev = to_pci_dev(dev);
- u8 tmp;
-
- mutex_lock(&data->update_lock);
-
- if (!data->valid
- || time_after(jiffies, data->last_updated + HZ)) {
- pci_read_config_byte(pdev, REG_TEMP, &tmp);
- tmp &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP, &data->temp[0][0]);
-
- if (data->sensorsp & SEL_PLACE) {
- tmp |= SEL_PLACE; /* Select sensor 1, core0 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[0][1]);
- }
-
- if (data->sensorsp & SEL_CORE) {
- tmp &= ~SEL_PLACE; /* Select sensor 0, core1 */
- tmp |= SEL_CORE;
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[1][0]);
-
- if (data->sensorsp & SEL_PLACE) {
- tmp |= SEL_PLACE; /* Select sensor 1, core1 */
- pci_write_config_byte(pdev, REG_TEMP, tmp);
- pci_read_config_dword(pdev, REG_TEMP,
- &data->temp[1][1]);
- }
- }
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
- return data;
-}
-
-/*
- * Sysfs stuff
- */
-
-static ssize_t name_show(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct k8temp_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-
-
-static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute_2 *attr =
- to_sensor_dev_attr_2(devattr);
- int core = attr->nr;
- int place = attr->index;
- int temp;
- struct k8temp_data *data = k8temp_update_device(dev);
-
- if (data->swap_core_select && (data->sensorsp & SEL_CORE))
- core = core ? 0 : 1;
-
- temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
-
- return sprintf(buf, "%d\n", temp);
-}
-
-/* core, place */
-
-static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp2_input, temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2_RO(temp3_input, temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2_RO(temp4_input, temp, 1, 1);
-static DEVICE_ATTR_RO(name);
-
static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
-
MODULE_DEVICE_TABLE(pci, k8temp_ids);
static int is_rev_g_desktop(u8 model)
@@ -159,14 +67,76 @@ static int is_rev_g_desktop(u8 model)
return 1;
}
+static umode_t
+k8temp_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct k8temp_data *data = drvdata;
+
+ if ((channel & 1) && !(data->sensorsp & SEL_PLACE))
+ return 0;
+
+ if ((channel & 2) && !(data->sensorsp & SEL_CORE))
+ return 0;
+
+ return 0444;
+}
+
+static int
+k8temp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct k8temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int core, place;
+ u32 temp;
+ u8 tmp;
+
+ core = (channel >> 1) & 1;
+ place = channel & 1;
+
+ core ^= data->swap_core_select;
+
+ mutex_lock(&data->update_lock);
+ pci_read_config_byte(pdev, REG_TEMP, &tmp);
+ tmp &= ~(SEL_PLACE | SEL_CORE);
+ if (core)
+ tmp |= SEL_CORE;
+ if (place)
+ tmp |= SEL_PLACE;
+ pci_write_config_byte(pdev, REG_TEMP, tmp);
+ pci_read_config_dword(pdev, REG_TEMP, &temp);
+ mutex_unlock(&data->update_lock);
+
+ *val = TEMP_FROM_REG(temp) + data->temp_offset;
+
+ return 0;
+}
+
+static const struct hwmon_ops k8temp_ops = {
+ .is_visible = k8temp_is_visible,
+ .read = k8temp_read,
+};
+
+static const struct hwmon_channel_info *k8temp_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info k8temp_chip_info = {
+ .ops = &k8temp_ops,
+ .info = k8temp_info,
+};
+
static int k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
u8 scfg;
u32 temp;
u8 model, stepping;
struct k8temp_data *data;
+ struct device *hwmon_dev;
data = devm_kzalloc(&pdev->dev, sizeof(struct k8temp_data), GFP_KERNEL);
if (!data)
@@ -231,86 +201,21 @@ static int k8temp_probe(struct pci_dev *pdev,
data->sensorsp &= ~SEL_CORE;
}
- data->name = "k8temp";
mutex_init(&data->update_lock);
- pci_set_drvdata(pdev, data);
-
- /* Register sysfs hooks */
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- if (err)
- goto exit_remove;
-
- /* sensor can be changed and reports something */
- if (data->sensorsp & SEL_PLACE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- if (err)
- goto exit_remove;
- }
-
- /* core can be changed and reports something */
- if (data->sensorsp & SEL_CORE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- if (err)
- goto exit_remove;
- if (data->sensorsp & SEL_PLACE) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.
- dev_attr);
- if (err)
- goto exit_remove;
- }
- }
-
- err = device_create_file(&pdev->dev, &dev_attr_name);
- if (err)
- goto exit_remove;
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "k8temp",
+ data,
+ &k8temp_chip_info,
+ NULL);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.dev_attr);
- device_remove_file(&pdev->dev, &dev_attr_name);
- return err;
-}
-
-static void k8temp_remove(struct pci_dev *pdev)
-{
- struct k8temp_data *data = pci_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp2_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp3_input.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp4_input.dev_attr);
- device_remove_file(&pdev->dev, &dev_attr_name);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct pci_driver k8temp_driver = {
.name = "k8temp",
.id_table = k8temp_ids,
.probe = k8temp_probe,
- .remove = k8temp_remove,
};
module_pci_driver(k8temp_driver);
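
The k8temp rewrite above removes the jiffies-based result cache and the manually created temp[1-4]_input and name attributes, reading the selected sensor directly under update_lock from the is_visible/read callbacks of the with_info API. The hwmon channel number encodes which sensor is sampled; the decoding used by k8temp_read() (from the patch, comments added):

	core  = (channel >> 1) & 1;	/* bit 1: CPU core select (SEL_CORE) */
	place = channel & 1;		/* bit 0: sensor select (SEL_PLACE) */

so channels 0..3 map to temp1_input..temp4_input, and k8temp_is_visible() hides the channels whose sensor bits are not present in data->sensorsp.
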
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 3fb9c0a2d6d0..5e6392294c03 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -16,9 +16,9 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#include "lm75.h"
-
/*
* This driver handles the LM75 and compatible digital temperature sensors.
*/
@@ -36,6 +36,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ pct2075,
stds75,
stlm75,
tcn75,
@@ -50,6 +51,41 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp75c,
};
+/**
+ * struct lm75_params - lm75 configuration parameters.
+ * @set_mask: Bits to set in configuration register when configuring
+ * the chip.
+ * @clr_mask: Bits to clear in configuration register when configuring
+ * the chip.
+ * @default_resolution: Default number of bits to represent the temperature
+ * value.
+ * @resolution_limits: Limit register resolution. Optional. Should be set if
+ * the resolution of limit registers does not match the
+ * resolution of the temperature register.
+ * @resolutions: List of resolutions associated with sample times.
+ * Optional. Should be set if num_sample_times is larger
+ * than 1, and if the resolution changes with sample times.
+ * If set, number of entries must match num_sample_times.
+ * @default_sample_time: Sample time to be set by default.
+ * @num_sample_times: Number of possible sample times to be set. Optional.
+ * Should be set if the number of sample times is larger
+ * than one.
+ * @sample_times: All the possible sample times to be set. Mandatory if
+ * num_sample_times is larger than 1. If set, number of
+ * entries must match num_sample_times.
+ */
+
+struct lm75_params {
+ u8 set_mask;
+ u8 clr_mask;
+ u8 default_resolution;
+ u8 resolution_limits;
+ const u8 *resolutions;
+ unsigned int default_sample_time;
+ u8 num_sample_times;
+ const unsigned int *sample_times;
+};
+
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
@@ -59,24 +95,231 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
#define LM75_REG_CONF 0x01
#define LM75_REG_HYST 0x02
#define LM75_REG_MAX 0x03
+#define PCT2075_REG_IDLE 0x04
/* Each client has this additional data */
struct lm75_data {
- struct i2c_client *client;
- struct regmap *regmap;
- u8 orig_conf;
- u8 resolution; /* In bits, between 9 and 16 */
- u8 resolution_limits;
- unsigned int sample_time; /* In ms */
+ struct i2c_client *client;
+ struct regmap *regmap;
+ u8 orig_conf;
+ u8 current_conf;
+ u8 resolution; /* In bits, 9 to 16 */
+ unsigned int sample_time; /* In ms */
+ enum lm75_type kind;
+ const struct lm75_params *params;
};
/*-----------------------------------------------------------------------*/
+static const u8 lm75_sample_set_masks[] = { 0 << 5, 1 << 5, 2 << 5, 3 << 5 };
+
+#define LM75_SAMPLE_CLEAR_MASK (3 << 5)
+
+/* The structure below stores the configuration values of the supported devices.
+ * If a device supports multiple configurations, the default one must
+ * always be the first element of the array.
+ */
+static const struct lm75_params device_params[] = {
+ [adt75] = {
+ .clr_mask = 1 << 5, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [ds1775] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 500,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 125, 250, 500, 1000 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [ds75] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 600,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [stds75] = {
+ .clr_mask = 3 << 5,
+ .set_mask = 2 << 5, /* 11-bit mode */
+ .default_resolution = 11,
+ .default_sample_time = 600,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 150, 300, 600, 1200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [stlm75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 6,
+ },
+ [ds7505] = {
+ .set_mask = 3 << 5, /* 12-bit mode*/
+ .default_resolution = 12,
+ .default_sample_time = 200,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 25, 50, 100, 200 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [g751] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75a] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [lm75b] = {
+ .default_resolution = 11,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ },
+ [max6625] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 7,
+ },
+ [max6626] = {
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 7,
+ .resolution_limits = 9,
+ },
+ [max31725] = {
+ .default_resolution = 16,
+ .default_sample_time = MSEC_PER_SEC / 20,
+ },
+ [tcn75] = {
+ .default_resolution = 9,
+ .default_sample_time = MSEC_PER_SEC / 18,
+ },
+ [pct2075] = {
+ .default_resolution = 11,
+ .default_sample_time = MSEC_PER_SEC / 10,
+ .num_sample_times = 31,
+ .sample_times = (unsigned int []){ 100, 200, 300, 400, 500, 600,
+ 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700,
+ 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,
+ 2800, 2900, 3000, 3100 },
+ },
+ [mcp980x] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .resolution_limits = 9,
+ .default_sample_time = 240,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 30, 60, 120, 240 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp100] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 320,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp101] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode */
+ .default_resolution = 12,
+ .default_sample_time = 320,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 40, 80, 160, 320 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp105] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp112] = {
+ .set_mask = 3 << 5, /* 8 samples / second */
+ .clr_mask = 1 << 7, /* no one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = 125,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ },
+ [tmp175] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp275] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp75] = {
+ .set_mask = 3 << 5, /* 12-bit mode */
+ .clr_mask = 1 << 7, /* not one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = 220,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ .resolutions = (u8 []) {9, 10, 11, 12 },
+ },
+ [tmp75b] = { /* not one-shot mode, Conversion rate 37Hz */
+ .clr_mask = 1 << 7 | 3 << 5,
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 37,
+ .sample_times = (unsigned int []){ MSEC_PER_SEC / 37,
+ MSEC_PER_SEC / 18,
+ MSEC_PER_SEC / 9, MSEC_PER_SEC / 4 },
+ .num_sample_times = 4,
+ },
+ [tmp75c] = {
+ .clr_mask = 1 << 5, /*not one-shot mode*/
+ .default_resolution = 12,
+ .default_sample_time = MSEC_PER_SEC / 12,
+ }
+};
+
static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
{
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
+static int lm75_write_config(struct lm75_data *data, u8 set_mask,
+ u8 clr_mask)
+{
+ u8 value;
+
+ clr_mask |= LM75_SHUTDOWN;
+ value = data->current_conf & ~clr_mask;
+ value |= set_mask;
+
+ if (data->current_conf != value) {
+ s32 err;
+
+ err = i2c_smbus_write_byte_data(data->client, LM75_REG_CONF,
+ value);
+ if (err)
+ return err;
+ data->current_conf = value;
+ }
+ return 0;
+}
+
static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -120,16 +363,12 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
return 0;
}
-static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long temp)
+static int lm75_write_temp(struct device *dev, u32 attr, long temp)
{
struct lm75_data *data = dev_get_drvdata(dev);
u8 resolution;
int reg;
- if (type != hwmon_temp)
- return -EINVAL;
-
switch (attr) {
case hwmon_temp_max:
reg = LM75_REG_MAX;
@@ -145,8 +384,8 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
* Resolution of limit registers is assumed to be the same as the
* temperature input register resolution unless given explicitly.
*/
- if (data->resolution_limits)
- resolution = data->resolution_limits;
+ if (data->params->resolution_limits)
+ resolution = data->params->resolution_limits;
else
resolution = data->resolution;
@@ -154,16 +393,88 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
1000) << (16 - resolution);
- return regmap_write(data->regmap, reg, temp);
+ return regmap_write(data->regmap, reg, (u16)temp);
+}
+
+static int lm75_update_interval(struct device *dev, long val)
+{
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int reg;
+ u8 index;
+ s32 err;
+
+ index = find_closest(val, data->params->sample_times,
+ (int)data->params->num_sample_times);
+
+ switch (data->kind) {
+ default:
+ err = lm75_write_config(data, lm75_sample_set_masks[index],
+ LM75_SAMPLE_CLEAR_MASK);
+ if (err)
+ return err;
+
+ data->sample_time = data->params->sample_times[index];
+ if (data->params->resolutions)
+ data->resolution = data->params->resolutions[index];
+ break;
+ case tmp112:
+ err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
+ if (err < 0)
+ return err;
+ reg &= ~0x00c0;
+ reg |= (3 - index) << 6;
+ err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+ if (err < 0)
+ return err;
+ data->sample_time = data->params->sample_times[index];
+ break;
+ case pct2075:
+ err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
+ index + 1);
+ if (err)
+ return err;
+ data->sample_time = data->params->sample_times[index];
+ break;
+ }
+ return 0;
+}
+
+static int lm75_write_chip(struct device *dev, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return lm75_update_interval(dev, val);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return lm75_write_chip(dev, attr, val);
+ case hwmon_temp:
+ return lm75_write_temp(dev, attr, val);
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct lm75_data *config_data = data;
+
switch (type) {
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
+ if (config_data->params->num_sample_times > 1)
+ return 0644;
return 0444;
}
break;
@@ -208,13 +519,13 @@ static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
{
- return reg == LM75_REG_TEMP;
+ return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
}
static const struct regmap_config lm75_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .max_register = LM75_REG_MAX,
+ .max_register = PCT2075_REG_IDLE,
.writeable_reg = lm75_is_writeable_reg,
.volatile_reg = lm75_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
@@ -238,8 +549,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- u8 set_mask, clr_mask;
- int new;
enum lm75_type kind;
if (client->dev.of_node)
@@ -256,6 +565,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
data->client = client;
+ data->kind = kind;
data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
if (IS_ERR(data->regmap))
@@ -264,113 +574,30 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
*/
- set_mask = 0;
- clr_mask = LM75_SHUTDOWN; /* continuous conversions */
-
- switch (kind) {
- case adt75:
- clr_mask |= 1 << 5; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case ds1775:
- case ds75:
- case stds75:
- clr_mask |= 3 << 5;
- set_mask |= 2 << 5; /* 11-bit mode */
- data->resolution = 11;
- data->sample_time = MSEC_PER_SEC;
- break;
- case stlm75:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 5;
- break;
- case ds7505:
- set_mask |= 3 << 5; /* 12-bit mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case g751:
- case lm75:
- case lm75a:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 2;
- break;
- case lm75b:
- data->resolution = 11;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max6625:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max6626:
- data->resolution = 12;
- data->resolution_limits = 9;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case max31725:
- data->resolution = 16;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case tcn75:
- data->resolution = 9;
- data->sample_time = MSEC_PER_SEC / 8;
- break;
- case mcp980x:
- data->resolution_limits = 9;
- /* fall through */
- case tmp100:
- case tmp101:
- set_mask |= 3 << 5; /* 12-bit mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC;
- clr_mask |= 1 << 7; /* not one-shot mode */
- break;
- case tmp112:
- set_mask |= 3 << 5; /* 12-bit mode */
- clr_mask |= 1 << 7; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- case tmp105:
- case tmp175:
- case tmp275:
- case tmp75:
- set_mask |= 3 << 5; /* 12-bit mode */
- clr_mask |= 1 << 7; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 2;
- break;
- case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
- clr_mask |= 1 << 15 | 0x3 << 13;
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 37;
- break;
- case tmp75c:
- clr_mask |= 1 << 5; /* not one-shot mode */
- data->resolution = 12;
- data->sample_time = MSEC_PER_SEC / 4;
- break;
- }
- /* configure as specified */
+ data->params = &device_params[data->kind];
+
+ /* Save default sample time and resolution*/
+ data->sample_time = data->params->default_sample_time;
+ data->resolution = data->params->default_resolution;
+
+ /* Cache original configuration */
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(dev, "Can't read config? %d\n", status);
return status;
}
data->orig_conf = status;
- new = status & ~clr_mask;
- new |= set_mask;
- if (status != new)
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
+ data->current_conf = status;
- err = devm_add_action_or_reset(dev, lm75_remove, data);
+ err = lm75_write_config(data, data->params->set_mask,
+ data->params->clr_mask);
if (err)
return err;
- dev_dbg(dev, "Config %02x\n", new);
+ err = devm_add_action_or_reset(dev, lm75_remove, data);
+ if (err)
+ return err;
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &lm75_chip_info,
@@ -397,6 +624,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "pct2075", pct2075, },
{ "stds75", stds75, },
{ "stlm75", stlm75, },
{ "tcn75", tcn75, },
@@ -467,6 +695,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,pct2075",
+ .data = (void *)pct2075
+ },
+ {
.compatible = "st,stds75",
.data = (void *)stds75
},
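
The lm75 rework above replaces the long per-chip switch in probe() with a const lm75_params table indexed by chip kind, and makes update_interval writable whenever a chip advertises more than one sample time (num_sample_times > 1). A write selects the closest supported value with find_closest(); as an illustrative example, requesting 100 ms on a tmp75, whose table lists 28/55/110/220 ms, picks 110 ms and, because that chip also lists per-sample-time resolutions, switches the temperature resolution to the matching 11-bit setting. The pct2075 and tmp112 entries are special-cased in lm75_update_interval() because their conversion rate lives in the dedicated PCT2075_REG_IDLE register and in a rate field of the configuration register, respectively, rather than in the common bits cleared by LM75_SAMPLE_CLEAR_MASK.
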
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
index f9431ad43ad3..53ff5051774c 100644
--- a/drivers/hwmon/ltc2990.c
+++ b/drivers/hwmon/ltc2990.c
@@ -13,7 +13,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#define LTC2990_STATUS 0x00
#define LTC2990_CONTROL 0x01
@@ -206,7 +206,6 @@ static int ltc2990_i2c_probe(struct i2c_client *i2c,
int ret;
struct device *hwmon_dev;
struct ltc2990_data *data;
- struct device_node *of_node = i2c->dev.of_node;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
@@ -218,9 +217,10 @@ static int ltc2990_i2c_probe(struct i2c_client *i2c,
data->i2c = i2c;
- if (of_node) {
- ret = of_property_read_u32_array(of_node, "lltc,meas-mode",
- data->mode, 2);
+ if (dev_fwnode(&i2c->dev)) {
+ ret = device_property_read_u32_array(&i2c->dev,
+ "lltc,meas-mode",
+ data->mode, 2);
if (ret < 0)
return ret;
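
The ltc2990 change swaps the OF-only of_property_read_u32_array() for the firmware-node agnostic device_property_read_u32_array(), guarded by dev_fwnode(), so the lltc,meas-mode values can come from devicetree, ACPI, or a software node alike. Condensed sketch of the resulting probe logic:

	if (dev_fwnode(&i2c->dev)) {	/* any kind of firmware node */
		ret = device_property_read_u32_array(&i2c->dev,
						     "lltc,meas-mode",
						     data->mode, 2);
		if (ret < 0)
			return ret;
		/* mode values are validated as before */
	}
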
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index e7dff5febe16..7efa6bfef060 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -20,6 +20,7 @@
*
* Chip #vin #fan #pwm #temp chip IDs man ID
* nct6106d 9 3 3 6+3 0xc450 0xc1 0x5ca3
+ * nct6116d 9 5 5 3+3 0xd280 0xc1 0x5ca3
* nct6775f 9 4 3 6+3 0xb470 0xc1 0x5ca3
* nct6776f 9 5 3 6+3 0xc330 0xc1 0x5ca3
* nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
@@ -58,12 +59,13 @@
#define USE_ALTERNATE
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
- nct6795, nct6796, nct6797, nct6798 };
+enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
+ nct6793, nct6795, nct6796, nct6797, nct6798 };
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
"nct6106",
+ "nct6116",
"nct6775",
"nct6776",
"nct6779",
@@ -78,6 +80,7 @@ static const char * const nct6775_device_names[] = {
static const char * const nct6775_sio_names[] __initconst = {
"NCT6106D",
+ "NCT6116D",
"NCT6775F",
"NCT6776D/F",
"NCT6779D",
@@ -115,6 +118,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_NCT6106_ID 0xc450
+#define SIO_NCT6116_ID 0xd280
#define SIO_NCT6775_ID 0xb470
#define SIO_NCT6776_ID 0xc330
#define SIO_NCT6779_ID 0xc560
@@ -825,10 +829,8 @@ static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
-static const u16 NCT6106_REG_PWM[] = { 0x119, 0x129, 0x139 };
static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
-static const u16 NCT6106_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130 };
static const u16 NCT6106_REG_TEMP_SOURCE[] = {
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
@@ -852,7 +854,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
@@ -896,6 +898,70 @@ static const u16 NCT6106_REG_TEMP_CRIT[32] = {
[12] = 0x205,
};
+/* NCT6112D/NCT6114D/NCT6116D specific data */
+
+static const u16 NCT6116_REG_FAN[] = { 0x20, 0x22, 0x24, 0x26, 0x28 };
+static const u16 NCT6116_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4, 0xe6, 0xe8 };
+static const u16 NCT6116_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0xf6, 0xf5 };
+static const u16 NCT6116_FAN_PULSE_SHIFT[] = { 0, 2, 4, 6, 6 };
+
+static const u16 NCT6116_REG_PWM[] = { 0x119, 0x129, 0x139, 0x199, 0x1a9 };
+static const u16 NCT6116_REG_FAN_MODE[] = { 0x113, 0x123, 0x133, 0x193, 0x1a3 };
+static const u16 NCT6116_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130, 0x190, 0x1a0 };
+static const u16 NCT6116_REG_TEMP_SOURCE[] = {
+ 0xb0, 0xb1, 0xb2 };
+
+static const u16 NCT6116_REG_CRITICAL_TEMP[] = {
+ 0x11a, 0x12a, 0x13a, 0x19a, 0x1aa };
+static const u16 NCT6116_REG_CRITICAL_TEMP_TOLERANCE[] = {
+ 0x11b, 0x12b, 0x13b, 0x19b, 0x1ab };
+
+static const u16 NCT6116_REG_CRITICAL_PWM_ENABLE[] = {
+ 0x11c, 0x12c, 0x13c, 0x19c, 0x1ac };
+static const u16 NCT6116_REG_CRITICAL_PWM[] = {
+ 0x11d, 0x12d, 0x13d, 0x19d, 0x1ad };
+
+static const u16 NCT6116_REG_FAN_STEP_UP_TIME[] = {
+ 0x114, 0x124, 0x134, 0x194, 0x1a4 };
+static const u16 NCT6116_REG_FAN_STEP_DOWN_TIME[] = {
+ 0x115, 0x125, 0x135, 0x195, 0x1a5 };
+static const u16 NCT6116_REG_FAN_STOP_OUTPUT[] = {
+ 0x116, 0x126, 0x136, 0x196, 0x1a6 };
+static const u16 NCT6116_REG_FAN_START_OUTPUT[] = {
+ 0x117, 0x127, 0x137, 0x197, 0x1a7 };
+static const u16 NCT6116_REG_FAN_STOP_TIME[] = {
+ 0x118, 0x128, 0x138, 0x198, 0x1a8 };
+static const u16 NCT6116_REG_TOLERANCE_H[] = {
+ 0x112, 0x122, 0x132, 0x192, 0x1a2 };
+
+static const u16 NCT6116_REG_TARGET[] = {
+ 0x111, 0x121, 0x131, 0x191, 0x1a1 };
+
+static const u16 NCT6116_REG_AUTO_TEMP[] = {
+ 0x160, 0x170, 0x180, 0x1d0, 0x1e0 };
+static const u16 NCT6116_REG_AUTO_PWM[] = {
+ 0x164, 0x174, 0x184, 0x1d4, 0x1e4 };
+
+static const s8 NCT6116_ALARM_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, -1, -1, -1, -1, -1, -1, /* in8..in9 */
+ -1, /* unused */
+ 32, 33, 34, 35, 36, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
+ 48, -1 /* intrusion0, intrusion1 */
+};
+
+static const s8 NCT6116_BEEP_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, 10, 11, 12, -1, -1, -1, /* in8..in14 */
+ 32, /* global beep enable */
+ 24, 25, 26, 27, 28, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
+ 34, -1 /* intrusion0, intrusion1 */
+};
+
static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
{
if (mode == 0 && pwm == 255)
@@ -1294,6 +1360,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
reg == 0x111 || reg == 0x121 || reg == 0x131;
+ case nct6116:
+ return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+ reg == 0x26 || reg == 0x28 || reg == 0xe0 || reg == 0xe2 ||
+ reg == 0xe4 || reg == 0xe6 || reg == 0xe8 || reg == 0x111 ||
+ reg == 0x121 || reg == 0x131 || reg == 0x191 || reg == 0x1a1;
case nct6775:
return (((reg & 0xff00) == 0x100 ||
(reg & 0xff00) == 0x200) &&
@@ -1673,6 +1744,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
data->auto_pwm[i][data->auto_pwm_num] = 0xff;
break;
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
@@ -3109,6 +3181,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
case nct6776:
break; /* always enabled, nothing to do */
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
@@ -3535,6 +3608,23 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
fan3pin = !(cr24 & 0x80);
pwm3pin = cr24 & 0x08;
+ } else if (data->kind == nct6116) {
+ int cr1a = superio_inb(sioreg, 0x1a);
+ int cr1b = superio_inb(sioreg, 0x1b);
+ int cr24 = superio_inb(sioreg, 0x24);
+ int cr2a = superio_inb(sioreg, 0x2a);
+ int cr2b = superio_inb(sioreg, 0x2b);
+ int cr2f = superio_inb(sioreg, 0x2f);
+
+ fan3pin = !(cr2b & 0x10);
+ fan4pin = (cr2b & 0x80) || // pin 1(2)
+ (!(cr2f & 0x10) && (cr1a & 0x04)); // pin 65(66)
+ fan5pin = (cr2b & 0x80) || // pin 126(127)
+ (!(cr1b & 0x03) && (cr2a & 0x02)); // pin 94(96)
+
+ pwm3pin = fan3pin && (cr24 & 0x08);
+ pwm4pin = fan4pin;
+ pwm5pin = fan5pin;
} else {
/*
* NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
@@ -3764,7 +3854,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
- data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6116_REG_PWM;
data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
@@ -3783,7 +3874,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_CRITICAL_PWM = NCT6106_REG_CRITICAL_PWM;
data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
data->REG_TEMP_SOURCE = NCT6106_REG_TEMP_SOURCE;
- data->REG_TEMP_SEL = NCT6106_REG_TEMP_SEL;
+ data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
@@ -3806,6 +3897,79 @@ static int nct6775_probe(struct platform_device *pdev)
reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
break;
+ case nct6116:
+ data->in_num = 9;
+ data->pwm_num = 3;
+ data->auto_pwm_num = 4;
+ data->temp_fixed_num = 3;
+ data->num_temp_alarms = 3;
+ data->num_temp_beeps = 3;
+
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+
+ data->temp_label = nct6776_temp_label;
+ data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
+
+ data->REG_VBAT = NCT6106_REG_VBAT;
+ data->REG_DIODE = NCT6106_REG_DIODE;
+ data->DIODE_MASK = NCT6106_DIODE_MASK;
+ data->REG_VIN = NCT6106_REG_IN;
+ data->REG_IN_MINMAX[0] = NCT6106_REG_IN_MIN;
+ data->REG_IN_MINMAX[1] = NCT6106_REG_IN_MAX;
+ data->REG_TARGET = NCT6116_REG_TARGET;
+ data->REG_FAN = NCT6116_REG_FAN;
+ data->REG_FAN_MODE = NCT6116_REG_FAN_MODE;
+ data->REG_FAN_MIN = NCT6116_REG_FAN_MIN;
+ data->REG_FAN_PULSES = NCT6116_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6116_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6116_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6116_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6116_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6116_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6116_REG_PWM;
+ data->REG_PWM[1] = NCT6116_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6116_REG_FAN_STOP_OUTPUT;
+ data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6106_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM_READ = NCT6106_REG_PWM_READ;
+ data->REG_PWM_MODE = NCT6106_REG_PWM_MODE;
+ data->PWM_MODE_MASK = NCT6106_PWM_MODE_MASK;
+ data->REG_AUTO_TEMP = NCT6116_REG_AUTO_TEMP;
+ data->REG_AUTO_PWM = NCT6116_REG_AUTO_PWM;
+ data->REG_CRITICAL_TEMP = NCT6116_REG_CRITICAL_TEMP;
+ data->REG_CRITICAL_TEMP_TOLERANCE
+ = NCT6116_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6116_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK
+ = NCT6106_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6116_REG_CRITICAL_PWM;
+ data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
+ data->REG_TEMP_SOURCE = NCT6116_REG_TEMP_SOURCE;
+ data->REG_TEMP_SEL = NCT6116_REG_TEMP_SEL;
+ data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6106_REG_WEIGHT_TEMP_BASE;
+ data->REG_ALARM = NCT6106_REG_ALARM;
+ data->ALARM_BITS = NCT6116_ALARM_BITS;
+ data->REG_BEEP = NCT6106_REG_BEEP;
+ data->BEEP_BITS = NCT6116_BEEP_BITS;
+
+ reg_temp = NCT6106_REG_TEMP;
+ reg_temp_mon = NCT6106_REG_TEMP_MON;
+ num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+ num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON);
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+ reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+
+ break;
case nct6775:
data->in_num = 9;
data->pwm_num = 3;
@@ -4351,6 +4515,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->have_vid = (cr2a & 0x60) == 0x40;
break;
case nct6106:
+ case nct6116:
case nct6779:
case nct6791:
case nct6792:
@@ -4380,6 +4545,7 @@ static int nct6775_probe(struct platform_device *pdev)
NCT6775_REG_CR_FAN_DEBOUNCE);
switch (data->kind) {
case nct6106:
+ case nct6116:
tmp |= 0xe0;
break;
case nct6775:
@@ -4575,6 +4741,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
case SIO_NCT6106_ID:
sio_data->kind = nct6106;
break;
+ case SIO_NCT6116_ID:
+ sio_data->kind = nct6116;
+ break;
case SIO_NCT6775_ID:
sio_data->kind = nct6775;
break;
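
The nct6116 branch added above derives fan and PWM pin presence purely from Super-I/O configuration register bits. A minimal standalone sketch of that decoding follows, with made-up register values and hypothetical names (sio_regs, decode_pins) standing in for the driver's superio_inb() reads; it is an illustration of the bit logic, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct sio_regs {
	unsigned char cr1a, cr1b, cr24, cr2a, cr2b, cr2f;
};

static void decode_pins(const struct sio_regs *r)
{
	bool fan3pin = !(r->cr2b & 0x10);
	bool fan4pin = (r->cr2b & 0x80) ||                      /* pin 1(2) */
		       (!(r->cr2f & 0x10) && (r->cr1a & 0x04)); /* pin 65(66) */
	bool fan5pin = (r->cr2b & 0x80) ||                      /* pin 126(127) */
		       (!(r->cr1b & 0x03) && (r->cr2a & 0x02)); /* pin 94(96) */
	bool pwm3pin = fan3pin && (r->cr24 & 0x08);
	bool pwm4pin = fan4pin;	/* mirrors the hunk: PWM4/5 follow the tach pins */
	bool pwm5pin = fan5pin;

	printf("fan3=%d fan4=%d fan5=%d pwm3=%d pwm4=%d pwm5=%d\n",
	       fan3pin, fan4pin, fan5pin, pwm3pin, pwm4pin, pwm5pin);
}

int main(void)
{
	/* sample register contents, chosen only for illustration */
	struct sio_regs sample = { .cr1a = 0x04, .cr1b = 0x00, .cr24 = 0x08,
				   .cr2a = 0x02, .cr2b = 0x00, .cr2f = 0x00 };

	decode_pins(&sample);
	return 0;
}

Keeping the decode in plain booleans like this makes the bit logic easy to check outside the driver before wiring it into nct6775_check_fan_inputs().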
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec7bcf8d7cd6..f3dd2a17bd42 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = {
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in3_beep.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
+ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
return 0;
- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
return 0;
- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
+ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
return 0;
return attr->mode;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 710c30562fc1..95b447cfa24c 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -46,10 +46,34 @@
#define DTS_T_CTRL1_REG 0x27
#define VT_ADC_MD_REG 0x2E
+#define VSEN1_HV_LL_REG 0x02 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_LL_REG 0x03 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_HV_HL_REG 0x00 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define VSEN1_LV_HL_REG 0x01 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define SMI_STS1_REG 0xC1 /* Bank 0; SMI Status Register */
+#define SMI_STS3_REG 0xC3 /* Bank 0; SMI Status Register */
+#define SMI_STS5_REG 0xC5 /* Bank 0; SMI Status Register */
+#define SMI_STS7_REG 0xC7 /* Bank 0; SMI Status Register */
+#define SMI_STS8_REG 0xC8 /* Bank 0; SMI Status Register */
+
#define VSEN1_HV_REG 0x40 /* Bank 0; 2 regs (HV/LV) per sensor */
#define TEMP_CH1_HV_REG 0x42 /* Bank 0; same as VSEN2_HV */
#define LTD_HV_REG 0x62 /* Bank 0; 2 regs in VSEN range */
+#define LTD_HV_HL_REG 0x44 /* Bank 1; 1 reg for LTD */
+#define LTD_LV_HL_REG 0x45 /* Bank 1; 1 reg for LTD */
+#define LTD_HV_LL_REG 0x46 /* Bank 1; 1 reg for LTD */
+#define LTD_LV_LL_REG 0x47 /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_CH_REG 0x05 /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_W_REG 0x06 /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_WH_REG 0x07 /* Bank 1; 1 reg for LTD */
+#define TEMP_CH1_C_REG 0x04 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_C_REG 0x90 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_CH_REG 0x91 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_W_REG 0x92 /* Bank 1; 1 reg per sensor */
+#define DTS_T_CPU1_WH_REG 0x93 /* Bank 1; 1 reg per sensor */
#define FANIN1_HV_REG 0x80 /* Bank 0; 2 regs (HV/LV) per sensor */
+#define FANIN1_HV_HL_REG 0x60 /* Bank 1; 2 regs (HV/LV) per sensor */
+#define FANIN1_LV_HL_REG 0x61 /* Bank 1; 2 regs (HV/LV) per sensor */
#define T_CPU1_HV_REG 0xA0 /* Bank 0; 2 regs (HV/LV) per sensor */
#define PRTS_REG 0x03 /* Bank 2 */
@@ -58,6 +82,8 @@
#define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */
#define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */
+#define ENABLE_TSI BIT(1)
+
static const unsigned short normal_i2c[] = {
0x2d, 0x2e, I2C_CLIENT_END
};
@@ -72,6 +98,7 @@ struct nct7904_data {
u8 fan_mode[FANCTL_MAX];
u8 enable_dts;
u8 has_dts;
+ u8 temp_mode; /* 0: TR mode, 1: TD mode */
};
/* Access functions */
@@ -170,6 +197,25 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
rpm = 1350000 / cnt;
*val = rpm;
return 0;
+ case hwmon_fan_min:
+ ret = nct7904_read_reg16(data, BANK_1,
+ FANIN1_HV_HL_REG + channel * 2);
+ if (ret < 0)
+ return ret;
+ cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+ if (cnt == 0x1fff)
+ rpm = 0;
+ else
+ rpm = 1350000 / cnt;
+ *val = rpm;
+ return 0;
+ case hwmon_fan_alarm:
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS5_REG + (channel >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (channel & 0x07)) & 1;
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -179,8 +225,20 @@ static umode_t nct7904_fan_is_visible(const void *_data, u32 attr, int channel)
{
const struct nct7904_data *data = _data;
- if (attr == hwmon_fan_input && data->fanin_mask & (1 << channel))
- return 0444;
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_alarm:
+ if (data->fanin_mask & (1 << channel))
+ return 0444;
+ break;
+ case hwmon_fan_min:
+ if (data->fanin_mask & (1 << channel))
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -211,6 +269,37 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
volt *= 6; /* 0.006V scale */
*val = volt;
return 0;
+ case hwmon_in_min:
+ ret = nct7904_read_reg16(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4);
+ if (ret < 0)
+ return ret;
+ volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ if (index < 14)
+ volt *= 2; /* 0.002V scale */
+ else
+ volt *= 6; /* 0.006V scale */
+ *val = volt;
+ return 0;
+ case hwmon_in_max:
+ ret = nct7904_read_reg16(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4);
+ if (ret < 0)
+ return ret;
+ volt = ((ret & 0xff00) >> 5) | (ret & 0x7);
+ if (index < 14)
+ volt *= 2; /* 0.002V scale */
+ else
+ volt *= 6; /* 0.006V scale */
+ *val = volt;
+ return 0;
+ case hwmon_in_alarm:
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS1_REG + (index >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (index & 0x07)) & 1;
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -221,9 +310,20 @@ static umode_t nct7904_in_is_visible(const void *_data, u32 attr, int channel)
const struct nct7904_data *data = _data;
int index = nct7904_chan_to_index[channel];
- if (channel > 0 && attr == hwmon_in_input &&
- (data->vsen_mask & BIT(index)))
- return 0444;
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_alarm:
+ if (channel > 0 && (data->vsen_mask & BIT(index)))
+ return 0444;
+ break;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ if (channel > 0 && (data->vsen_mask & BIT(index)))
+ return 0644;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -233,6 +333,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
{
struct nct7904_data *data = dev_get_drvdata(dev);
int ret, temp;
+ unsigned int reg1, reg2, reg3;
switch (attr) {
case hwmon_temp_input:
@@ -250,16 +351,106 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
temp = ((ret & 0xff00) >> 5) | (ret & 0x7);
*val = sign_extend32(temp, 10) * 125;
return 0;
+ case hwmon_temp_alarm:
+ if (channel == 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS3_REG);
+ if (ret < 0)
+ return ret;
+ *val = (ret >> 1) & 1;
+ } else if (channel < 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS1_REG);
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (((channel * 2) + 1) & 0x07)) & 1;
+ } else {
+ if ((channel - 5) < 4) {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS7_REG +
+ ((channel - 5) >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> ((channel - 5) & 0x07)) & 1;
+ } else {
+ ret = nct7904_read_reg(data, BANK_0,
+ SMI_STS8_REG +
+ ((channel - 5) >> 3));
+ if (ret < 0)
+ return ret;
+ *val = (ret >> (((channel - 5) & 0x07) - 4))
+ & 1;
+ }
+ }
+ return 0;
+ case hwmon_temp_type:
+ if (channel < 5) {
+ if ((data->tcpu_mask >> channel) & 0x01) {
+ if ((data->temp_mode >> channel) & 0x01)
+ *val = 3; /* TD */
+ else
+ *val = 4; /* TR */
+ } else {
+ *val = 0;
+ }
+ } else {
+ if ((data->has_dts >> (channel - 5)) & 0x01) {
+ if (data->enable_dts & ENABLE_TSI)
+ *val = 5; /* TSI */
+ else
+ *val = 6; /* PECI */
+ } else {
+ *val = 0;
+ }
+ }
+ return 0;
+ case hwmon_temp_max:
+ reg1 = LTD_HV_LL_REG;
+ reg2 = TEMP_CH1_W_REG;
+ reg3 = DTS_T_CPU1_W_REG;
+ break;
+ case hwmon_temp_max_hyst:
+ reg1 = LTD_LV_LL_REG;
+ reg2 = TEMP_CH1_WH_REG;
+ reg3 = DTS_T_CPU1_WH_REG;
+ break;
+ case hwmon_temp_crit:
+ reg1 = LTD_HV_HL_REG;
+ reg2 = TEMP_CH1_C_REG;
+ reg3 = DTS_T_CPU1_C_REG;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg1 = LTD_LV_HL_REG;
+ reg2 = TEMP_CH1_CH_REG;
+ reg3 = DTS_T_CPU1_CH_REG;
+ break;
default:
return -EOPNOTSUPP;
}
+
+ if (channel == 4)
+ ret = nct7904_read_reg(data, BANK_1, reg1);
+ else if (channel < 5)
+ ret = nct7904_read_reg(data, BANK_1,
+ reg2 + channel * 8);
+ else
+ ret = nct7904_read_reg(data, BANK_1,
+ reg3 + (channel - 5) * 4);
+
+ if (ret < 0)
+ return ret;
+ *val = ret * 1000;
+ return 0;
}
static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
{
const struct nct7904_data *data = _data;
- if (attr == hwmon_temp_input) {
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_alarm:
+ case hwmon_temp_type:
if (channel < 5) {
if (data->tcpu_mask & BIT(channel))
return 0444;
@@ -267,6 +458,21 @@ static umode_t nct7904_temp_is_visible(const void *_data, u32 attr, int channel)
if (data->has_dts & BIT(channel - 5))
return 0444;
}
+ break;
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_crit:
+ case hwmon_temp_crit_hyst:
+ if (channel < 5) {
+ if (data->tcpu_mask & BIT(channel))
+ return 0644;
+ } else {
+ if (data->has_dts & BIT(channel - 5))
+ return 0644;
+ }
+ break;
+ default:
+ break;
}
return 0;
@@ -297,6 +503,137 @@ static int nct7904_read_pwm(struct device *dev, u32 attr, int channel,
}
}
+static int nct7904_write_temp(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret;
+ unsigned int reg1, reg2, reg3;
+
+ val = clamp_val(val / 1000, -128, 127);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ reg1 = LTD_HV_LL_REG;
+ reg2 = TEMP_CH1_W_REG;
+ reg3 = DTS_T_CPU1_W_REG;
+ break;
+ case hwmon_temp_max_hyst:
+ reg1 = LTD_LV_LL_REG;
+ reg2 = TEMP_CH1_WH_REG;
+ reg3 = DTS_T_CPU1_WH_REG;
+ break;
+ case hwmon_temp_crit:
+ reg1 = LTD_HV_HL_REG;
+ reg2 = TEMP_CH1_C_REG;
+ reg3 = DTS_T_CPU1_C_REG;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg1 = LTD_LV_HL_REG;
+ reg2 = TEMP_CH1_CH_REG;
+ reg3 = DTS_T_CPU1_CH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (channel == 4)
+ ret = nct7904_write_reg(data, BANK_1, reg1, val);
+ else if (channel < 5)
+ ret = nct7904_write_reg(data, BANK_1,
+ reg2 + channel * 8, val);
+ else
+ ret = nct7904_write_reg(data, BANK_1,
+ reg3 + (channel - 5) * 4, val);
+
+ return ret;
+}
+
+static int nct7904_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret;
+ u8 tmp;
+
+ switch (attr) {
+ case hwmon_fan_min:
+ if (val <= 0)
+ return -EINVAL;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(1350000, val), 1, 0x1fff);
+ tmp = (val >> 5) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ FANIN1_HV_HL_REG + channel * 2, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = val & 0x1f;
+ ret = nct7904_write_reg(data, BANK_1,
+ FANIN1_LV_HL_REG + channel * 2, tmp);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct7904_write_in(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7904_data *data = dev_get_drvdata(dev);
+ int ret, index, tmp;
+
+ index = nct7904_chan_to_index[channel];
+
+ if (index < 14)
+ val = val / 2; /* 0.002V scale */
+ else
+ val = val / 6; /* 0.006V scale */
+
+ val = clamp_val(val, 0, 0x7ff);
+
+ switch (attr) {
+ case hwmon_in_min:
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_LV_LL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp &= ~0x7;
+ tmp |= val & 0x7;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_LV_LL_REG + index * 4, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp = (val >> 3) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_HV_LL_REG + index * 4, tmp);
+ return ret;
+ case hwmon_in_max:
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_LV_HL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp &= ~0x7;
+ tmp |= val & 0x7;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_LV_HL_REG + index * 4, tmp);
+ if (ret < 0)
+ return ret;
+ tmp = nct7904_read_reg(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4);
+ if (tmp < 0)
+ return tmp;
+ tmp = (val >> 3) & 0xff;
+ ret = nct7904_write_reg(data, BANK_1,
+ VSEN1_HV_HL_REG + index * 4, tmp);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int nct7904_write_pwm(struct device *dev, u32 attr, int channel,
long val)
{
@@ -354,8 +691,14 @@ static int nct7904_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
switch (type) {
+ case hwmon_in:
+ return nct7904_write_in(dev, attr, channel, val);
+ case hwmon_fan:
+ return nct7904_write_fan(dev, attr, channel, val);
case hwmon_pwm:
return nct7904_write_pwm(dev, attr, channel, val);
+ case hwmon_temp:
+ return nct7904_write_temp(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
@@ -404,51 +747,91 @@ static int nct7904_detect(struct i2c_client *client,
static const struct hwmon_channel_info *nct7904_info[] = {
HWMON_CHANNEL_INFO(in,
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT),
+ /* dummy, skipped in is_visible */
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_ALARM),
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT),
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
HWMON_CHANNEL_INFO(temp,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT),
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_TYPE | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST),
NULL
};
@@ -530,11 +913,14 @@ static int nct7904_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ data->temp_mode = 0;
for (i = 0; i < 4; i++) {
val = (ret & (0x03 << i)) >> (i * 2);
bit = (1 << i);
if (val == 0)
data->tcpu_mask &= ~bit;
+ else if (val == 0x1 || val == 0x2)
+ data->temp_mode |= bit;
}
/* PECI */
@@ -557,7 +943,7 @@ static int nct7904_probe(struct i2c_client *client,
if (ret < 0)
return ret;
data->has_dts = ret & 0xF;
- if (data->enable_dts & 0x2) {
+ if (data->enable_dts & ENABLE_TSI) {
ret = nct7904_read_reg(data, BANK_0, DTS_T_CTRL1_REG);
if (ret < 0)
return ret;
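
The new fan and voltage limit handlers above rely on two packed register formats: a 13-bit fan count split across an HV byte (bits 12:5) and the low 5 bits of an LV byte, converted to RPM as 1350000 / count (0x1fff meaning "no reading"), and an 11-bit voltage limit split 8/3 with a 2 mV or 6 mV LSB depending on the channel index. A minimal sketch of those conversions, using hypothetical helper names rather than the driver's nct7904_read_reg16()/write paths:

#include <stdint.h>
#include <stdio.h>

/* 13-bit fan count: HV register holds bits 12:5, LV register bits 4:0. */
static unsigned int fan_count_from_regs(uint8_t hv, uint8_t lv)
{
	return ((unsigned int)hv << 5) | (lv & 0x1f);
}

static unsigned int fan_rpm_from_count(unsigned int cnt)
{
	/* 0x1fff means no valid reading; 0 is guarded here only for the sketch */
	return (cnt == 0x1fff || cnt == 0) ? 0 : 1350000 / cnt;
}

/* 11-bit voltage limit: HV register holds bits 10:3, LV register bits 2:0.
 * LSB is 2 mV for most inputs and 6 mV for the last ones (index >= 14).
 */
static unsigned int volt_mv_from_regs(uint8_t hv, uint8_t lv, int index)
{
	unsigned int raw = ((unsigned int)hv << 3) | (lv & 0x07);

	return raw * (index < 14 ? 2 : 6);
}

int main(void)
{
	unsigned int cnt = fan_count_from_regs(0x20, 0x10);	/* 1040 */

	printf("count=%u rpm=%u\n", cnt, fan_rpm_from_count(cnt));
	printf("volt=%u mV\n", volt_mv_from_regs(0x80, 0x3, 0));
	return 0;
}

The write paths in the hunks above simply invert this packing: the high byte is (val >> 5) & 0xff or (val >> 3) & 0xff and the low byte carries the remaining 5 or 3 bits.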
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 09aaefa6fdb8..11a28609da3c 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -967,10 +967,8 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
spin_lock_init(&data->fan_lock[i]);
data->fan_irq[i] = platform_get_irq(pdev, i);
- if (data->fan_irq[i] < 0) {
- dev_err(dev, "get IRQ fan%d failed\n", i);
+ if (data->fan_irq[i] < 0)
return data->fan_irq[i];
- }
sprintf(name, "NPCM7XX-FAN-MD%d", i);
ret = devm_request_irq(dev, data->fan_irq[i], npcm7xx_fan_isr,
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index a7d2b16dd702..30e18eb60da7 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev,
static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
{
- return div64_u64(get_unaligned_be64(accum) * 1000000ULL,
- get_unaligned_be32(samples));
+ u64 divisor = get_unaligned_be32(samples);
+
+ return (divisor == 0) ? 0 :
+ div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
}
static ssize_t occ_show_power_2(struct device *dev,
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index b6588483fae1..d62d69bb7e49 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -46,6 +46,15 @@ config SENSORS_IBM_CFFPS
This driver can also be built as a module. If so, the module will
be called ibm-cffps.
+config SENSORS_INSPUR_IPSPS
+ tristate "INSPUR Power System Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the INSPUR
+ Power System power supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called inspur-ipsps.
+
config SENSORS_IR35221
tristate "Infineon IR35221"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index c950ea9a5d00..03bacfcfd660 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
+obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index ee2ee9e3ffd7..d44745e498e7 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/pmbus.h>
#include "pmbus.h"
@@ -20,8 +21,9 @@
#define CFFPS_PN_CMD 0x9B
#define CFFPS_SN_CMD 0x9E
#define CFFPS_CCIN_CMD 0xBD
-#define CFFPS_FW_CMD_START 0xFA
-#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_FW_CMD 0xFA
+#define CFFPS1_FW_NUM_BYTES 4
+#define CFFPS2_FW_NUM_WORDS 3
#define CFFPS_SYS_CONFIG_CMD 0xDA
#define CFFPS_INPUT_HISTORY_CMD 0xD6
@@ -52,6 +54,8 @@ enum {
CFFPS_DEBUGFS_NUM_ENTRIES
};
+enum versions { cffps1, cffps2 };
+
struct ibm_cffps_input_history {
struct mutex update_lock;
unsigned long last_update;
@@ -61,6 +65,7 @@ struct ibm_cffps_input_history {
};
struct ibm_cffps {
+ enum versions version;
struct i2c_client *client;
struct ibm_cffps_input_history input_history;
@@ -132,6 +137,8 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
struct ibm_cffps *psu = to_psu(idxp, idx);
char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ pmbus_set_page(psu->client, 0);
+
switch (idx) {
case CFFPS_DEBUGFS_INPUT_HISTORY:
return ibm_cffps_read_input_history(psu, buf, count, ppos);
@@ -152,16 +159,36 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
rc = snprintf(data, 5, "%04X", rc);
goto done;
case CFFPS_DEBUGFS_FW:
- for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
- rc = i2c_smbus_read_byte_data(psu->client,
- CFFPS_FW_CMD_START + i);
- if (rc < 0)
- return rc;
+ switch (psu->version) {
+ case cffps1:
+ for (i = 0; i < CFFPS1_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD +
+ i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
- snprintf(&data[i * 2], 3, "%02X", rc);
- }
+ rc = i * 2;
+ break;
+ case cffps2:
+ for (i = 0; i < CFFPS2_FW_NUM_WORDS; ++i) {
+ rc = i2c_smbus_read_word_data(psu->client,
+ CFFPS_FW_CMD +
+ i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 4], 5, "%04X", rc);
+ }
- rc = i * 2;
+ rc = i * 4;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
goto done;
default:
return -EINVAL;
@@ -279,6 +306,8 @@ static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
psu->led_state = CFFPS_LED_ON;
}
+ pmbus_set_page(psu->client, 0);
+
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
psu->led_state);
if (rc < 0)
@@ -299,6 +328,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
if (led_cdev->brightness == LED_OFF)
return 0;
+ pmbus_set_page(psu->client, 0);
+
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
CFFPS_LED_BLINK);
if (rc < 0)
@@ -328,15 +359,32 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
dev_warn(dev, "failed to register led class: %d\n", rc);
}
-static struct pmbus_driver_info ibm_cffps_info = {
- .pages = 1,
- .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
- PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
- PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
- PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
- PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
- .read_byte_data = ibm_cffps_read_byte_data,
- .read_word_data = ibm_cffps_read_word_data,
+static struct pmbus_driver_info ibm_cffps_info[] = {
+ [cffps1] = {
+ .pages = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_STATUS_FAN12,
+ .read_byte_data = ibm_cffps_read_byte_data,
+ .read_word_data = ibm_cffps_read_word_data,
+ },
+ [cffps2] = {
+ .pages = 2,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_STATUS_FAN12,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT,
+ .read_byte_data = ibm_cffps_read_byte_data,
+ .read_word_data = ibm_cffps_read_word_data,
+ },
};
static struct pmbus_platform_data ibm_cffps_pdata = {
@@ -347,12 +395,21 @@ static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i, rc;
+ enum versions vs;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
+ const void *md = of_device_get_match_data(&client->dev);
+
+ if (md)
+ vs = (enum versions)md;
+ else if (id)
+ vs = (enum versions)id->driver_data;
+ else
+ vs = cffps1;
client->dev.platform_data = &ibm_cffps_pdata;
- rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
if (rc)
return rc;
@@ -364,6 +421,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
if (!psu)
return 0;
+ psu->version = vs;
psu->client = client;
mutex_init(&psu->input_history.update_lock);
psu->input_history.last_update = jiffies - HZ;
@@ -405,13 +463,21 @@ static int ibm_cffps_probe(struct i2c_client *client,
}
static const struct i2c_device_id ibm_cffps_id[] = {
- { "ibm_cffps1", 1 },
+ { "ibm_cffps1", cffps1 },
+ { "ibm_cffps2", cffps2 },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
static const struct of_device_id ibm_cffps_of_match[] = {
- { .compatible = "ibm,cffps1" },
+ {
+ .compatible = "ibm,cffps1",
+ .data = (void *)cffps1
+ },
+ {
+ .compatible = "ibm,cffps2",
+ .data = (void *)cffps2
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
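
The probe changes above select one entry from an array of pmbus_driver_info based on OF match data, falling back to the i2c id table and then to a default. An abstracted sketch of that dispatch pattern, with hypothetical names (psu_caps, pick_version) standing in for the driver's structures and for of_device_get_match_data():

#include <stdio.h>

enum psu_version { psu_v1, psu_v2 };

struct psu_caps {
	int pages;
	const char *label;
};

static const struct psu_caps caps[] = {
	[psu_v1] = { .pages = 1, .label = "single page" },
	[psu_v2] = { .pages = 2, .label = "two pages"   },
};

static enum psu_version pick_version(const void *of_data, long i2c_data,
				     int have_i2c_id)
{
	if (of_data)			/* device tree match data wins */
		return (enum psu_version)(long)of_data;
	if (have_i2c_id)		/* fall back to the i2c id table */
		return (enum psu_version)i2c_data;
	return psu_v1;			/* conservative default */
}

int main(void)
{
	enum psu_version v = pick_version(NULL, psu_v2, 1);

	printf("%d pages (%s)\n", caps[v].pages, caps[v].label);
	return 0;
}

Indexing a table by the version enum keeps the per-variant capabilities in one place instead of branching throughout probe.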
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
new file mode 100644
index 000000000000..42e01549184a
--- /dev/null
+++ b/drivers/hwmon/pmbus/inspur-ipsps.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2019 Inspur Corp.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include <linux/hwmon-sysfs.h>
+
+#include "pmbus.h"
+
+#define IPSPS_REG_VENDOR_ID 0x99
+#define IPSPS_REG_MODEL 0x9A
+#define IPSPS_REG_FW_VERSION 0x9B
+#define IPSPS_REG_PN 0x9C
+#define IPSPS_REG_SN 0x9E
+#define IPSPS_REG_HW_VERSION 0xB0
+#define IPSPS_REG_MODE 0xFC
+
+#define MODE_ACTIVE 0x55
+#define MODE_STANDBY 0x0E
+#define MODE_REDUNDANCY 0x00
+
+#define MODE_ACTIVE_STRING "active"
+#define MODE_STANDBY_STRING "standby"
+#define MODE_REDUNDANCY_STRING "redundancy"
+
+enum ipsps_index {
+ vendor,
+ model,
+ fw_version,
+ part_number,
+ serial_number,
+ hw_version,
+ mode,
+ num_regs,
+};
+
+static const u8 ipsps_regs[num_regs] = {
+ [vendor] = IPSPS_REG_VENDOR_ID,
+ [model] = IPSPS_REG_MODEL,
+ [fw_version] = IPSPS_REG_FW_VERSION,
+ [part_number] = IPSPS_REG_PN,
+ [serial_number] = IPSPS_REG_SN,
+ [hw_version] = IPSPS_REG_HW_VERSION,
+ [mode] = IPSPS_REG_MODE,
+};
+
+static ssize_t ipsps_string_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ u8 reg;
+ int rc;
+ char *p;
+ char data[I2C_SMBUS_BLOCK_MAX + 1];
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_block_data(client, reg, data);
+ if (rc < 0)
+ return rc;
+
+ /* filled with printable characters, ending with # */
+ p = memscan(data, '#', rc);
+ *p = '\0';
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", data);
+}
+
+static ssize_t ipsps_fw_version_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ u8 reg;
+ int rc;
+ u8 data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_block_data(client, reg, data);
+ if (rc < 0)
+ return rc;
+
+ if (rc != 6)
+ return -EPROTO;
+
+ return snprintf(buf, PAGE_SIZE, "%u.%02u%u-%u.%02u\n",
+ data[1], data[2]/* < 100 */, data[3]/*< 10*/,
+ data[4], data[5]/* < 100 */);
+}
+
+static ssize_t ipsps_mode_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u8 reg;
+ int rc;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ rc = i2c_smbus_read_byte_data(client, reg);
+ if (rc < 0)
+ return rc;
+
+ switch (rc) {
+ case MODE_ACTIVE:
+ return snprintf(buf, PAGE_SIZE, "[%s] %s %s\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ case MODE_STANDBY:
+ return snprintf(buf, PAGE_SIZE, "%s [%s] %s\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ case MODE_REDUNDANCY:
+ return snprintf(buf, PAGE_SIZE, "%s %s [%s]\n",
+ MODE_ACTIVE_STRING,
+ MODE_STANDBY_STRING, MODE_REDUNDANCY_STRING);
+ default:
+ return snprintf(buf, PAGE_SIZE, "unspecified\n");
+ }
+}
+
+static ssize_t ipsps_mode_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ u8 reg;
+ int rc;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ reg = ipsps_regs[attr->index];
+ if (sysfs_streq(MODE_STANDBY_STRING, buf)) {
+ rc = i2c_smbus_write_byte_data(client, reg,
+ MODE_STANDBY);
+ if (rc < 0)
+ return rc;
+ return count;
+ } else if (sysfs_streq(MODE_ACTIVE_STRING, buf)) {
+ rc = i2c_smbus_write_byte_data(client, reg,
+ MODE_ACTIVE);
+ if (rc < 0)
+ return rc;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static SENSOR_DEVICE_ATTR_RO(vendor, ipsps_string, vendor);
+static SENSOR_DEVICE_ATTR_RO(model, ipsps_string, model);
+static SENSOR_DEVICE_ATTR_RO(part_number, ipsps_string, part_number);
+static SENSOR_DEVICE_ATTR_RO(serial_number, ipsps_string, serial_number);
+static SENSOR_DEVICE_ATTR_RO(hw_version, ipsps_string, hw_version);
+static SENSOR_DEVICE_ATTR_RO(fw_version, ipsps_fw_version, fw_version);
+static SENSOR_DEVICE_ATTR_RW(mode, ipsps_mode, mode);
+
+static struct attribute *ipsps_attrs[] = {
+ &sensor_dev_attr_vendor.dev_attr.attr,
+ &sensor_dev_attr_model.dev_attr.attr,
+ &sensor_dev_attr_part_number.dev_attr.attr,
+ &sensor_dev_attr_serial_number.dev_attr.attr,
+ &sensor_dev_attr_hw_version.dev_attr.attr,
+ &sensor_dev_attr_fw_version.dev_attr.attr,
+ &sensor_dev_attr_mode.dev_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(ipsps);
+
+static struct pmbus_driver_info ipsps_info = {
+ .pages = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
+ .groups = ipsps_groups,
+};
+
+static struct pmbus_platform_data ipsps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static int ipsps_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ client->dev.platform_data = &ipsps_pdata;
+ return pmbus_do_probe(client, id, &ipsps_info);
+}
+
+static const struct i2c_device_id ipsps_id[] = {
+ { "ipsps1", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ipsps_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id ipsps_of_match[] = {
+ { .compatible = "inspur,ipsps1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ipsps_of_match);
+#endif
+
+static struct i2c_driver ipsps_driver = {
+ .driver = {
+ .name = "inspur-ipsps",
+ .of_match_table = of_match_ptr(ipsps_of_match),
+ },
+ .probe = ipsps_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ipsps_id,
+};
+
+module_i2c_driver(ipsps_driver);
+
+MODULE_AUTHOR("John Wang");
+MODULE_DESCRIPTION("PMBus driver for Inspur Power System power supplies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 69d9029ea410..254b0f98c755 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -244,8 +244,6 @@ static int max31785_write_word_data(struct i2c_client *client, int page,
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
-#define MAX37185_NUM_FAN_PAGES 6
-
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index c846759bc1c0..a9229c6b0e84 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/pmbus.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include "pmbus.h"
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 54c0ff00d67f..42ffd2e5182d 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -304,7 +304,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ctx->irq = platform_get_irq(pdev, 0);
+ ctx->irq = platform_get_irq_optional(pdev, 0);
if (ctx->irq == -EPROBE_DEFER)
return ctx->irq;
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index efe4bb1ff221..d3a64a35f7a9 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -146,7 +146,7 @@ static struct platform_driver rpi_hwmon_driver = {
};
module_platform_driver(rpi_hwmon_driver);
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:raspberrypi-hwmon");
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 0c93fc5ca762..8a7732c0bef3 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -72,7 +72,7 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
const struct scmi_handle *h = scmi_sensors->handle;
sensor = *(scmi_sensors->info[type] + channel);
- ret = h->sensor_ops->reading_get(h, sensor->id, false, &value);
+ ret = h->sensor_ops->reading_get(h, sensor->id, &value);
if (ret)
return ret;
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index 83fe08185ac7..a0078ccede03 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -24,19 +24,33 @@ static const unsigned char shtc1_cmd_measure_blocking_lpm[] = { 0x64, 0x58 };
static const unsigned char shtc1_cmd_measure_nonblocking_lpm[] = { 0x60, 0x9c };
/* command for reading the ID register */
-static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
+static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
-/* constants for reading the ID register */
-#define SHTC1_ID 0x07
-#define SHTC1_ID_REG_MASK 0x1f
+/*
+ * constants for reading the ID register
+ * SHTC1: 0x0007 with mask 0x003f
+ * SHTW1: 0x0007 with mask 0x003f
+ * SHTC3: 0x0807 with mask 0x083f
+ */
+#define SHTC3_ID 0x0807
+#define SHTC3_ID_MASK 0x083f
+#define SHTC1_ID 0x0007
+#define SHTC1_ID_MASK 0x003f
/* delays for non-blocking i2c commands, both in us */
#define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
#define SHTC1_NONBLOCKING_WAIT_TIME_LPM 1000
+#define SHTC3_NONBLOCKING_WAIT_TIME_HPM 12100
+#define SHTC3_NONBLOCKING_WAIT_TIME_LPM 800
#define SHTC1_CMD_LENGTH 2
#define SHTC1_RESPONSE_LENGTH 6
+enum shtcx_chips {
+ shtc1,
+ shtc3,
+};
+
struct shtc1_data {
struct i2c_client *client;
struct mutex update_lock;
@@ -47,6 +61,7 @@ struct shtc1_data {
unsigned int nonblocking_wait_time; /* in us */
struct shtc1_platform_data setup;
+ enum shtcx_chips chip;
int temperature; /* 1000 * temperature in dgr C */
int humidity; /* 1000 * relative humidity in %RH */
@@ -157,13 +172,16 @@ static void shtc1_select_command(struct shtc1_data *data)
data->command = data->setup.blocking_io ?
shtc1_cmd_measure_blocking_hpm :
shtc1_cmd_measure_nonblocking_hpm;
- data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_HPM;
-
+ data->nonblocking_wait_time = (data->chip == shtc1) ?
+ SHTC1_NONBLOCKING_WAIT_TIME_HPM :
+ SHTC3_NONBLOCKING_WAIT_TIME_HPM;
} else {
data->command = data->setup.blocking_io ?
shtc1_cmd_measure_blocking_lpm :
shtc1_cmd_measure_nonblocking_lpm;
- data->nonblocking_wait_time = SHTC1_NONBLOCKING_WAIT_TIME_LPM;
+ data->nonblocking_wait_time = (data->chip == shtc1) ?
+ SHTC1_NONBLOCKING_WAIT_TIME_LPM :
+ SHTC3_NONBLOCKING_WAIT_TIME_LPM;
}
}
@@ -171,9 +189,11 @@ static int shtc1_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
- char id_reg[2];
+ u16 id_reg;
+ char id_reg_buf[2];
struct shtc1_data *data;
struct device *hwmon_dev;
+ enum shtcx_chips chip = id->driver_data;
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
@@ -187,13 +207,20 @@ static int shtc1_probe(struct i2c_client *client,
dev_err(dev, "could not send read_id_reg command: %d\n", ret);
return ret < 0 ? ret : -ENODEV;
}
- ret = i2c_master_recv(client, id_reg, sizeof(id_reg));
- if (ret != sizeof(id_reg)) {
+ ret = i2c_master_recv(client, id_reg_buf, sizeof(id_reg_buf));
+ if (ret != sizeof(id_reg_buf)) {
dev_err(dev, "could not read ID register: %d\n", ret);
return -ENODEV;
}
- if ((id_reg[1] & SHTC1_ID_REG_MASK) != SHTC1_ID) {
- dev_err(dev, "ID register doesn't match\n");
+
+ id_reg = be16_to_cpup((__be16 *)id_reg_buf);
+ if (chip == shtc3) {
+ if ((id_reg & SHTC3_ID_MASK) != SHTC3_ID) {
+ dev_err(dev, "SHTC3 ID register does not match\n");
+ return -ENODEV;
+ }
+ } else if ((id_reg & SHTC1_ID_MASK) != SHTC1_ID) {
+ dev_err(dev, "SHTC1 ID register does not match\n");
return -ENODEV;
}
@@ -204,6 +231,7 @@ static int shtc1_probe(struct i2c_client *client,
data->setup.blocking_io = false;
data->setup.high_precision = true;
data->client = client;
+ data->chip = chip;
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;
@@ -222,8 +250,9 @@ static int shtc1_probe(struct i2c_client *client,
/* device ID table */
static const struct i2c_device_id shtc1_id[] = {
- { "shtc1", 0 },
- { "shtw1", 0 },
+ { "shtc1", shtc1 },
+ { "shtw1", shtc1 },
+ { "shtc3", shtc3 },
{ }
};
MODULE_DEVICE_TABLE(i2c, shtc1_id);
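
The widened ID check above combines the two ID bytes big-endian and compares them against a chip-specific pattern under a chip-specific mask; note that the SHTC1 mask ignores bit 11, so the variant taken from the id table decides which pattern gets tested. A standalone sketch using the same constants (helper names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SHTC1_ID	0x0007
#define SHTC1_ID_MASK	0x003f
#define SHTC3_ID	0x0807
#define SHTC3_ID_MASK	0x083f

static uint16_t id_from_bytes(const uint8_t buf[2])
{
	return ((uint16_t)buf[0] << 8) | buf[1];	/* big-endian on the wire */
}

static int id_matches(uint16_t id, uint16_t expect, uint16_t mask)
{
	return (id & mask) == expect;
}

int main(void)
{
	const uint8_t raw[2] = { 0x08, 0x87 };	/* example raw ID read */
	uint16_t id = id_from_bytes(raw);

	/* an SHTC3 ID also satisfies the looser SHTC1 pattern */
	printf("SHTC3 match: %d, SHTC1 match: %d\n",
	       id_matches(id, SHTC3_ID, SHTC3_ID_MASK),
	       id_matches(id, SHTC1_ID, SHTC1_ID_MASK));
	return 0;
}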
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index d8c91c2cb8cf..af01f763f7d1 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -197,7 +197,7 @@ static int smm665_read_adc(struct smm665_data *data, int adc)
if (rv != -ENXIO) {
/*
* We expect ENXIO to reflect NACK
- * (per Documentation/i2c/fault-codes).
+ * (per Documentation/i2c/fault-codes.rst).
* Everything else is an error.
*/
dev_dbg(&client->dev,
@@ -586,10 +586,10 @@ static int smm665_probe(struct i2c_client *client,
data->client = client;
data->type = id->driver_data;
- data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK)
+ data->cmdreg = i2c_new_dummy_device(adapter, (client->addr & ~SMM665_REGMASK)
| SMM665_CMDREG_BASE);
- if (!data->cmdreg)
- return -ENOMEM;
+ if (IS_ERR(data->cmdreg))
+ return PTR_ERR(data->cmdreg);
switch (data->type) {
case smm465:
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index d2c04b6a3f2b..015f1ea31966 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -894,12 +894,12 @@ w83781d_detect_subclients(struct i2c_client *new_client)
}
for (i = 0; i < num_sc; i++) {
- data->lm75[i] = i2c_new_dummy(adapter, sc_addr[i]);
- if (!data->lm75[i]) {
+ data->lm75[i] = i2c_new_dummy_device(adapter, sc_addr[i]);
+ if (IS_ERR(data->lm75[i])) {
dev_err(&new_client->dev,
"Subclient %d registration at address 0x%x failed.\n",
i, sc_addr[i]);
- err = -ENOMEM;
+ err = PTR_ERR(data->lm75[i]);
if (i == 1)
goto ERROR_SC_3;
goto ERROR_SC_2;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 050ad4201691..aad8d4da5802 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1260,7 +1260,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
struct i2c_adapter *adapter = client->adapter;
struct w83791d_data *data = i2c_get_clientdata(client);
int address = client->addr;
- int i, id, err;
+ int i, id;
u8 val;
id = i2c_adapter_id(adapter);
@@ -1272,8 +1272,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -ENODEV;
- goto error_sc_0;
+ return -ENODEV;
}
}
w83791d_write(client, W83791D_REG_I2C_SUBADDR,
@@ -1283,29 +1282,22 @@ static int w83791d_detect_subclients(struct i2c_client *client)
val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
if (!(val & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + (val & 0x7));
if (!(val & 0x80)) {
- if ((data->lm75[0] != NULL) &&
+ if (!IS_ERR(data->lm75[0]) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclient\n",
data->lm75[0]->addr);
- err = -ENODEV;
- goto error_sc_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((val >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + ((val >> 4) & 0x7));
}
return 0;
-
-/* Undo inits in case of errors */
-
-error_sc_1:
- i2c_unregister_device(data->lm75[0]);
-error_sc_0:
- return err;
}
@@ -1394,7 +1386,7 @@ static int w83791d_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &w83791d_group);
if (err)
- goto error3;
+ return err;
/* Check if pins of fan/pwm 4-5 are in use as GPIO */
has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10;
@@ -1419,9 +1411,6 @@ error5:
sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45);
error4:
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
-error3:
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1432,9 +1421,6 @@ static int w83791d_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
return 0;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index da8a6d62aa23..7fc8a1160c8f 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -924,7 +924,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
static int
w83792d_detect_subclients(struct i2c_client *new_client)
{
- int i, id, err;
+ int i, id;
int address = new_client->addr;
u8 val;
struct i2c_adapter *adapter = new_client->adapter;
@@ -938,8 +938,7 @@ w83792d_detect_subclients(struct i2c_client *new_client)
dev_err(&new_client->dev,
"invalid subclient address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -ENODEV;
- goto ERROR_SC_0;
+ return -ENODEV;
}
}
w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
@@ -949,28 +948,21 @@ w83792d_detect_subclients(struct i2c_client *new_client)
val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
if (!(val & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+ 0x48 + (val & 0x7));
if (!(val & 0x80)) {
- if ((data->lm75[0] != NULL) &&
+ if (!IS_ERR(data->lm75[0]) &&
((val & 0x7) == ((val >> 4) & 0x7))) {
dev_err(&new_client->dev,
"duplicate addresses 0x%x, use force_subclient\n",
data->lm75[0]->addr);
- err = -ENODEV;
- goto ERROR_SC_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((val >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
+ 0x48 + ((val >> 4) & 0x7));
}
return 0;
-
-/* Undo inits in case of errors */
-
-ERROR_SC_1:
- i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
- return err;
}
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
@@ -1396,7 +1388,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &w83792d_group);
if (err)
- goto exit_i2c_unregister;
+ return err;
/*
* Read GPIO enable register to check if pins for fan 4,5 are used as
@@ -1441,9 +1433,6 @@ exit_remove_files:
sysfs_remove_group(&dev->kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
-exit_i2c_unregister:
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1459,9 +1448,6 @@ w83792d_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
return 0;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 46f5dfec8d0a..9df48b70c70c 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1551,9 +1551,6 @@ static int w83793_remove(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
-
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
@@ -1565,7 +1562,7 @@ static int w83793_remove(struct i2c_client *client)
static int
w83793_detect_subclients(struct i2c_client *client)
{
- int i, id, err;
+ int i, id;
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
@@ -1580,8 +1577,7 @@ w83793_detect_subclients(struct i2c_client *client)
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
- err = -EINVAL;
- goto ERROR_SC_0;
+ return -EINVAL;
}
}
w83793_write_value(client, W83793_REG_I2C_SUBADDR,
@@ -1591,28 +1587,21 @@ w83793_detect_subclients(struct i2c_client *client)
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
if (!(tmp & 0x08))
- data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
+ data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + (tmp & 0x7));
if (!(tmp & 0x80)) {
- if ((data->lm75[0] != NULL)
+ if (!IS_ERR(data->lm75[0])
&& ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclients\n", data->lm75[0]->addr);
- err = -ENODEV;
- goto ERROR_SC_1;
+ return -ENODEV;
}
- data->lm75[1] = i2c_new_dummy(adapter,
- 0x48 + ((tmp >> 4) & 0x7));
+ data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ 0x48 + ((tmp >> 4) & 0x7));
}
return 0;
-
- /* Undo inits in case of errors */
-
-ERROR_SC_1:
- i2c_unregister_device(data->lm75[0]);
-ERROR_SC_0:
- return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
@@ -1945,9 +1934,6 @@ exit_remove:
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
-
- i2c_unregister_device(data->lm75[0]);
- i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 14638db4991d..7a9f5fb08330 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -106,7 +106,7 @@ config CORESIGHT_CPU_DEBUG
can quickly get to know program counter (PC), secure state,
exception level, etc. Before use debugging functionality, platform
needs to ensure the clock domain and power domain are enabled
- properly, please refer Documentation/trace/coresight-cpu-debug.txt
+ properly, please refer Documentation/trace/coresight-cpu-debug.rst
for detailed description and the example for usage.
endif
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 2463aa7ab4f6..96544b348c27 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -646,24 +646,23 @@ static int debug_remove(struct amba_device *adev)
return 0;
}
+static const struct amba_cs_uci_id uci_id_debug[] = {
+ {
+ /* CPU Debug UCI data */
+ .devarch = 0x47706a15,
+ .devarch_mask = 0xfff0ffff,
+ .devtype = 0x00000015,
+ }
+};
+
static const struct amba_id debug_ids[] = {
- { /* Debug for Cortex-A53 */
- .id = 0x000bbd03,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A57 */
- .id = 0x000bbd07,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A72 */
- .id = 0x000bbd08,
- .mask = 0x000fffff,
- },
- { /* Debug for Cortex-A73 */
- .id = 0x000bbd09,
- .mask = 0x000fffff,
- },
- { 0, 0 },
+ CS_AMBA_ID(0x000bbd03), /* Cortex-A53 */
+ CS_AMBA_ID(0x000bbd07), /* Cortex-A57 */
+ CS_AMBA_ID(0x000bbd08), /* Cortex-A72 */
+ CS_AMBA_ID(0x000bbd09), /* Cortex-A73 */
+ CS_AMBA_UCI_ID(0x000f0205, uci_id_debug), /* Qualcomm Kryo */
+ CS_AMBA_UCI_ID(0x000f0211, uci_id_debug), /* Qualcomm Kryo */
+ {},
};
static struct amba_driver debug_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 5c1ca0df5cb0..84f1dcb69827 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
/* See function coresight_get_sink_by_id() to know where this is used */
hash = hashlen_hash(hashlen_string(NULL, name));
+ sysfs_attr_init(&ea->attr.attr);
ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!ea->attr.attr.name)
return -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a0365e23678e..219c10eb752c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -296,11 +296,8 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
-
- if (config->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
+ etm4_set_mode_exclude(drvdata,
+ config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -999,10 +996,8 @@ static ssize_t addr_range_store(struct device *dev,
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
*/
- if (config->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
+ etm4_set_mode_exclude(drvdata,
+ config->mode & ETM_MODE_EXCLUDE ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 7bcac8896fc1..a128b5063f46 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -34,7 +34,8 @@
#include "coresight-etm-perf.h"
static int boot_enable;
-module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+module_param(boot_enable, int, 0444);
+MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
/* The number of ETMv4 currently registered */
static int etm4_count;
@@ -47,7 +48,7 @@ static enum cpuhp_state hp_online;
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
- /* Writing any value to ETMOSLAR unlocks the trace registers */
+ /* Writing 0 to TRCOSLAR unlocks the trace registers */
writel_relaxed(0x0, drvdata->base + TRCOSLAR);
drvdata->os_unlock = true;
isb();
@@ -188,6 +189,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
done:
CS_LOCK(drvdata->base);
@@ -453,8 +461,12 @@ static void etm4_disable_hw(void *info)
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
- /* make sure everything completes before disabling */
- mb();
+ /*
+ * Make sure everything completes before disabling, as recommended
+ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+ * SSTATUS") of ARM IHI 0064D
+ */
+ dsb(sy);
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
@@ -1047,10 +1059,8 @@ static int etm4_starting_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
+ if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
if (local_read(&etmdrvdata[cpu]->mode))
etm4_enable_hw(etmdrvdata[cpu]);
@@ -1192,11 +1202,15 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
};
static const struct amba_id etm4_ids[] = {
- CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
- CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
- CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
- CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
- CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */
+ CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
+ CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
+ CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
+ CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index fa97cb9ab4f9..05f7896c3a01 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -5,6 +5,7 @@
* Description: CoreSight Funnel driver
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -192,7 +193,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
- pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
+ dev_warn_once(dev, "Uses OBSOLETE CoreSight funnel binding\n");
desc.name = coresight_alloc_device_name(&funnel_devs, dev);
if (!desc.name)
@@ -302,11 +303,19 @@ static const struct of_device_id static_funnel_match[] = {
{}
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id static_funnel_ids[] = {
+ {"ARMHC9FE", 0},
+ {},
+};
+#endif
+
static struct platform_driver static_funnel_driver = {
.probe = static_funnel_probe,
.driver = {
.name = "coresight-static-funnel",
.of_match_table = static_funnel_match,
+ .acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index dad7d96c5943..3c5bee429105 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -37,11 +37,6 @@ static int coresight_alloc_conns(struct device *dev,
return 0;
}
-int coresight_device_fwnode_match(struct device *dev, const void *fwnode)
-{
- return dev_fwnode(dev) == fwnode;
-}
-
static struct device *
coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
{
@@ -51,8 +46,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
* If we have a non-configurable replicator, it will be found on the
* platform bus.
*/
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, coresight_device_fwnode_match);
+ dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
if (dev)
return dev;
@@ -60,8 +54,7 @@ coresight_find_device_by_fwnode(struct fwnode_handle *fwnode)
* We have a configurable component - circle through the AMBA bus
* looking for the device that matches the endpoint node.
*/
- return bus_find_device(&amba_bustype, NULL,
- fwnode, coresight_device_fwnode_match);
+ return bus_find_device_by_fwnode(&amba_bustype, fwnode);
}
#ifdef CONFIG_OF
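The hunks above drop the driver-local fwnode matcher in favour of the generic driver-core helper. As a rough sketch (not part of this patch, assuming the contemporaneous driver core), bus_find_device_by_fwnode() is just bus_find_device() with the stock device_match_fwnode() comparator:

static inline struct device *
bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
{
	/* device_match_fwnode() compares dev_fwnode(dev) against @fwnode */
	return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
}

which is why coresight_device_fwnode_match() can be removed below.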
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 7d401790dd7e..82e563cdc879 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -185,11 +185,11 @@ static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
}
/* coresight AMBA ID, full UCI structure: id table entry. */
-#define CS_AMBA_UCI_ID(pid, uci_ptr) \
- { \
- .id = pid, \
- .mask = 0x000fffff, \
- .data = uci_ptr \
+#define CS_AMBA_UCI_ID(pid, uci_ptr) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ .data = (void *)uci_ptr \
}
/* extract the data value from a UCI structure given amba_id pointer. */
@@ -202,6 +202,4 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
void coresight_release_platform_data(struct coresight_platform_data *pdata);
-int coresight_device_fwnode_match(struct device *dev, const void *fwnode);
-
#endif
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b7d6d59d56db..b29ba640eb25 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -184,7 +184,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
- pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
+ dev_warn_once(dev,
+ "Uses OBSOLETE CoreSight replicator binding\n");
desc.name = coresight_alloc_device_name(&replicator_devs, dev);
if (!desc.name)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 23b7ff00af5c..807416b75ecc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -479,30 +479,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
* traces.
*/
if (!buf->snapshot && to_read > handle->size) {
- u32 mask = 0;
-
- /*
- * The value written to RRP must be byte-address aligned to
- * the width of the trace memory databus _and_ to a frame
- * boundary (16 byte), whichever is the biggest. For example,
- * for 32-bit, 64-bit and 128-bit wide trace memory, the four
- * LSBs must be 0s. For 256-bit wide trace memory, the five
- * LSBs must be 0s.
- */
- switch (drvdata->memwidth) {
- case TMC_MEM_INTF_WIDTH_32BITS:
- case TMC_MEM_INTF_WIDTH_64BITS:
- case TMC_MEM_INTF_WIDTH_128BITS:
- mask = GENMASK(31, 4);
- break;
- case TMC_MEM_INTF_WIDTH_256BITS:
- mask = GENMASK(31, 5);
- break;
- }
+ u32 mask = tmc_get_memwidth_mask(drvdata);
/*
* Make sure the new size is aligned in accordance with the
- * requirement explained above.
+ * requirement explained in function tmc_get_memwidth_mask().
*/
to_read = handle->size & mask;
/* Move the RAM read pointer up */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 17006705287a..625882bc8b08 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -871,6 +871,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
return ERR_PTR(rc);
}
+ refcount_set(&etr_buf->refcount, 1);
dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
(unsigned long)size >> 10, etr_buf->mode);
return etr_buf;
@@ -927,15 +928,24 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
rrp = tmc_read_rrp(drvdata);
rwp = tmc_read_rwp(drvdata);
status = readl_relaxed(drvdata->base + TMC_STS);
+
+ /*
+ * If there were memory errors in the session, truncate the
+ * buffer.
+ */
+ if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
+ dev_dbg(&drvdata->csdev->dev,
+ "tmc memory error detected, truncating buffer\n");
+ etr_buf->len = 0;
+ etr_buf->full = 0;
+ return;
+ }
+
etr_buf->full = status & TMC_STS_FULL;
WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
etr_buf->ops->sync(etr_buf, rrp, rwp);
-
- /* Insert barrier packets at the beginning, if there was an overflow */
- if (etr_buf->full)
- tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
@@ -1072,6 +1082,13 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
drvdata->sysfs_buf = NULL;
} else {
tmc_sync_etr_buf(drvdata);
+ /*
+ * Insert barrier packets at the beginning, if there was
+ * an overflow.
+ */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf,
+ etr_buf->offset);
}
}
@@ -1263,8 +1280,6 @@ retry:
if (IS_ERR(etr_buf))
return etr_buf;
- refcount_set(&etr_buf->refcount, 1);
-
/* Now that we have a buffer, add it to the IDR. */
mutex_lock(&drvdata->idr_mutex);
ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
@@ -1291,19 +1306,11 @@ get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
struct perf_event *event, int nr_pages,
void **pages, bool snapshot)
{
- struct etr_buf *etr_buf;
-
/*
* In per-thread mode the etr_buf isn't shared, so just go ahead
* with memory allocation.
*/
- etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
- if (IS_ERR(etr_buf))
- goto out;
-
- refcount_set(&etr_buf->refcount, 1);
-out:
- return etr_buf;
+ return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
}
static struct etr_buf *
@@ -1410,10 +1417,12 @@ free_etr_perf_buffer:
* tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
* buffer to the perf ring buffer.
*/
-static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
+static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
+ unsigned long src_offset,
+ unsigned long to_copy)
{
- long bytes, to_copy;
- long pg_idx, pg_offset, src_offset;
+ long bytes;
+ long pg_idx, pg_offset;
unsigned long head = etr_perf->head;
char **dst_pages, *src_buf;
struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1422,8 +1431,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
pg_idx = head >> PAGE_SHIFT;
pg_offset = head & (PAGE_SIZE - 1);
dst_pages = (char **)etr_perf->pages;
- src_offset = etr_buf->offset;
- to_copy = etr_buf->len;
while (to_copy > 0) {
/*
@@ -1434,6 +1441,8 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
* 3) what is available in the destination page.
* in one iteration.
*/
+ if (src_offset >= etr_buf->size)
+ src_offset -= etr_buf->size;
bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
&src_buf);
if (WARN_ON_ONCE(bytes <= 0))
@@ -1454,8 +1463,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
/* Move source pointers */
src_offset += bytes;
- if (src_offset >= etr_buf->size)
- src_offset -= etr_buf->size;
}
}
@@ -1471,7 +1478,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
void *config)
{
bool lost = false;
- unsigned long flags, size = 0;
+ unsigned long flags, offset, size = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_perf_buffer *etr_perf = config;
struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1484,7 +1491,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
goto out;
}
- if (WARN_ON(drvdata->perf_data != etr_perf)) {
+ if (WARN_ON(drvdata->perf_buf != etr_buf)) {
lost = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
@@ -1496,12 +1503,38 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
tmc_sync_etr_buf(drvdata);
CS_LOCK(drvdata->base);
- /* Reset perf specific data */
- drvdata->perf_data = NULL;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ lost = etr_buf->full;
+ offset = etr_buf->offset;
size = etr_buf->len;
- tmc_etr_sync_perf_buffer(etr_perf);
+
+ /*
+ * The ETR buffer may be bigger than the space available in the
+ * perf ring buffer (handle->size). If so, advance the offset so that we
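 * get the latest trace data (worked numbers for this clamp are sketched
 * after tmc_get_memwidth_mask() in coresight-tmc.c).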
+ * since we are expected to clobber stale data in favour of the latest
+ * traces.
+ */
+ if (!etr_perf->snapshot && size > handle->size) {
+ u32 mask = tmc_get_memwidth_mask(drvdata);
+
+ /*
+ * Make sure the new size is aligned in accordance with the
+ * requirement explained in function tmc_get_memwidth_mask().
+ */
+ size = handle->size & mask;
+ offset = etr_buf->offset + etr_buf->len - size;
+
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ lost = true;
+ }
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (lost)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+ tmc_etr_sync_perf_buffer(etr_perf, offset, size);
/*
* In snapshot mode we simply increment the head by the number of byte
@@ -1511,8 +1544,6 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
*/
if (etr_perf->snapshot)
handle->head += size;
-
- lost |= etr_buf->full;
out:
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
@@ -1556,7 +1587,6 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
}
etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
- drvdata->perf_data = etr_perf;
/*
* No HW configuration is needed if the sink is already in
@@ -1572,6 +1602,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
/* Associate with monitored process. */
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ drvdata->perf_buf = etr_perf->etr_buf;
atomic_inc(csdev->refcnt);
}
@@ -1617,6 +1648,8 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
+ /* Reset perf specific data */
+ drvdata->perf_buf = NULL;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index be37aff573b4..1cf82fa58289 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -70,6 +70,34 @@ void tmc_disable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
+u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
+{
+ u32 mask = 0;
+
+ /*
+ * When moving RRP or an offset address forward, the new values must
+ * be byte-address aligned to the width of the trace memory databus
+ * _and_ to a frame boundary (16 byte), whichever is the biggest. For
+ * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
+ * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
+ * be 0s.
+ */
+ switch (drvdata->memwidth) {
+ case TMC_MEM_INTF_WIDTH_32BITS:
+ /* fallthrough */
+ case TMC_MEM_INTF_WIDTH_64BITS:
+ /* fallthrough */
+ case TMC_MEM_INTF_WIDTH_128BITS:
+ mask = GENMASK(31, 4);
+ break;
+ case TMC_MEM_INTF_WIDTH_256BITS:
+ mask = GENMASK(31, 5);
+ break;
+ }
+
+ return mask;
+}
+
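/*
 * Worked example (illustrative only): for 32-, 64- and 128-bit wide trace
 * memory the mask is GENMASK(31, 4) == 0xfffffff0, so a size of 0x1234
 * bytes is clamped to 0x1230 -- a whole number of 16-byte frames. For
 * 256-bit wide trace memory, GENMASK(31, 5) == 0xffffffe0 clamps the same
 * size to 0x1220.
 */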
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
int ret = 0;
@@ -236,6 +264,7 @@ coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
+coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
@@ -255,6 +284,7 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_devid.attr,
&dev_attr_dba.attr,
&dev_attr_axictl.attr,
+ &dev_attr_authstatus.attr,
NULL,
};
@@ -342,6 +372,13 @@ static inline bool tmc_etr_can_use_sg(struct device *dev)
return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}
+static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
+{
+ u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
+
+ return (auth & TMC_AUTH_NSID_MASK) == 0x3;
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
@@ -349,6 +386,9 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
u32 dma_mask = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
+ if (!tmc_etr_has_non_secure_access(drvdata))
+ return -EACCES;
+
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 1ed50411cc3c..71de978575f3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -39,6 +39,7 @@
#define TMC_ITATBCTR2 0xef0
#define TMC_ITATBCTR1 0xef4
#define TMC_ITATBCTR0 0xef8
+#define TMC_AUTHSTATUS 0xfb8
/* register description */
/* TMC_CTL - 0x020 */
@@ -47,6 +48,7 @@
#define TMC_STS_TMCREADY_BIT 2
#define TMC_STS_FULL BIT(0)
#define TMC_STS_TRIGGERED BIT(1)
+#define TMC_STS_MEMERR BIT(5)
/*
* TMC_AXICTL - 0x110
*
@@ -89,6 +91,8 @@
#define TMC_DEVID_AXIAW_SHIFT 17
#define TMC_DEVID_AXIAW_MASK 0x7f
+#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -178,8 +182,8 @@ struct etr_buf {
* device configuration register (DEVID)
* @idr: Holds etr_bufs allocated for this ETR.
* @idr_mutex: Access serialisation for idr.
- * @perf_data: PERF buffer for ETR.
- * @sysfs_data: SYSFS buffer for ETR.
+ * @sysfs_buf: SYSFS buffer for ETR.
+ * @perf_buf: PERF buffer for ETR.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -202,7 +206,7 @@ struct tmc_drvdata {
struct idr idr;
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
- void *perf_data;
+ struct etr_buf *perf_buf;
};
struct etr_buf_operations {
@@ -251,6 +255,7 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
+u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
/* ETB/ETF functions */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 55db77f6410b..6453c67a4d01 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1046,9 +1046,7 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
struct coresight_connection *conn = &csdev->pdata->conns[i];
struct device *dev = NULL;
- dev = bus_find_device(&coresight_bustype, NULL,
- (void *)conn->child_fwnode,
- coresight_device_fwnode_match);
+ dev = bus_find_device_by_fwnode(&coresight_bustype, conn->child_fwnode);
if (dev) {
conn->child_dev = to_coresight_device(dev);
/* and put reference from 'bus_find_device()' */
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
index d9252fa8d9ca..b63eb8f309ad 100644
--- a/drivers/hwtracing/intel_th/Makefile
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -20,3 +20,6 @@ intel_th_msu-y := msu.o
obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o
intel_th_pti-y := pti.o
+
+obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu_sink.o
+intel_th_msu_sink-y := msu-sink.o
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 55922896d862..d5c1821b31c6 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -789,12 +789,6 @@ static int intel_th_populate(struct intel_th *th)
return 0;
}
-static int match_devt(struct device *dev, const void *data)
-{
- dev_t devt = (dev_t)(unsigned long)(void *)data;
- return dev->devt == devt;
-}
-
static int intel_th_output_open(struct inode *inode, struct file *file)
{
const struct file_operations *fops;
@@ -802,9 +796,7 @@ static int intel_th_output_open(struct inode *inode, struct file *file)
struct device *dev;
int err;
- dev = bus_find_device(&intel_th_bus, NULL,
- (void *)(unsigned long)inode->i_rdev,
- match_devt);
+ dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev);
if (!dev || !dev->driver)
return -ENODEV;
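The same conversion pattern applies here: bus_find_device_by_devt() is, in essence, bus_find_device() with the generic device_match_devt() comparator, roughly:

static inline struct device *bus_find_device_by_devt(struct bus_type *bus, dev_t devt)
{
	/* device_match_devt() compares dev->devt against *(dev_t *)&devt */
	return bus_find_device(bus, NULL, &devt, device_match_devt);
}

so the local match_devt() helper and its dev_t casting are no longer needed.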
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
new file mode 100644
index 000000000000..2c7f5116be12
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An example software sink buffer for Intel TH MSU.
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ */
+
+#include <linux/intel_th.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#define MAX_SGTS 16
+
+struct msu_sink_private {
+ struct device *dev;
+ struct sg_table **sgts;
+ unsigned int nr_sgts;
+};
+
+static void *msu_sink_assign(struct device *dev, int *mode)
+{
+ struct msu_sink_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->sgts = kcalloc(MAX_SGTS, sizeof(void *), GFP_KERNEL);
+ if (!priv->sgts) {
+ kfree(priv);
+ return NULL;
+ }
+
+ priv->dev = dev;
+ *mode = MSC_MODE_MULTI;
+
+ return priv;
+}
+
+static void msu_sink_unassign(void *data)
+{
+ struct msu_sink_private *priv = data;
+
+ kfree(priv->sgts);
+ kfree(priv);
+}
+
+/* See also: msc.c: __msc_buffer_win_alloc() */
+static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
+{
+ struct msu_sink_private *priv = data;
+ unsigned int nents;
+ struct scatterlist *sg_ptr;
+ void *block;
+ int ret, i;
+
+ if (priv->nr_sgts == MAX_SGTS)
+ return -ENOMEM;
+
+ nents = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ ret = sg_alloc_table(*sgt, nents, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ priv->sgts[priv->nr_sgts++] = *sgt;
+
+ for_each_sg((*sgt)->sgl, sg_ptr, nents, i) {
+ block = dma_alloc_coherent(priv->dev->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nents;
+}
+
+/* See also: msc.c: __msc_buffer_win_free() */
+static void msu_sink_free_window(void *data, struct sg_table *sgt)
+{
+ struct msu_sink_private *priv = data;
+ struct scatterlist *sg_ptr;
+ int i;
+
+ for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) {
+ dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE,
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
+ }
+
+ sg_free_table(sgt);
+ priv->nr_sgts--;
+}
+
+static int msu_sink_ready(void *data, struct sg_table *sgt, size_t bytes)
+{
+ struct msu_sink_private *priv = data;
+
+ intel_th_msc_window_unlock(priv->dev, sgt);
+
+ return 0;
+}
+
+static const struct msu_buffer sink_mbuf = {
+ .name = "sink",
+ .assign = msu_sink_assign,
+ .unassign = msu_sink_unassign,
+ .alloc_window = msu_sink_alloc_window,
+ .free_window = msu_sink_free_window,
+ .ready = msu_sink_ready,
+};
+
+module_intel_th_msu_buffer(sink_mbuf);
+
+MODULE_LICENSE("GPL v2");
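module_intel_th_msu_buffer() ties the buffer's registration to module load and unload. A minimal sketch of what that glue amounts to, assuming the macro (presumably provided by <linux/intel_th.h>) simply wires intel_th_msu_buffer_register()/unregister() into module init/exit:

static int __init sink_mbuf_init(void)
{
	/* THIS_MODULE pins this module while the MSU core holds the buffer */
	return intel_th_msu_buffer_register(&sink_mbuf, THIS_MODULE);
}
module_init(sink_mbuf_init);

static void __exit sink_mbuf_exit(void)
{
	intel_th_msu_buffer_unregister(&sink_mbuf);
}
module_exit(sink_mbuf_exit);

Once loaded, the buffer is selected by writing its name ("sink") to the MSC's mode attribute, as handled by the mode_store() changes in msu.c further down.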
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 8ab28e5fb366..fc9f15f36ad4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -17,21 +17,48 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
+#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"
#define msc_dev(x) (&(x)->thdev->dev)
+/*
+ * Lockout state transitions:
+ * READY -> INUSE -+-> LOCKED -+-> READY -> etc.
+ * \-----------/
+ * WIN_READY: window can be used by HW
+ * WIN_INUSE: window is in use
+ * WIN_LOCKED: window is filled up and is being processed by the buffer
+ * handling code
+ *
+ * All state transitions happen automatically, except for the LOCKED->READY,
+ * which needs to be signalled by the buffer code by calling
+ * intel_th_msc_window_unlock().
+ *
+ * When the interrupt handler has to switch to the next window, it checks
+ * whether it's READY, and if it is, it performs the switch and tracing
+ * continues. If it's LOCKED, it stops the trace.
+ */
+enum lockout_state {
+ WIN_READY = 0,
+ WIN_INUSE,
+ WIN_LOCKED
+};
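/*
 * Illustrative only -- the typical sequence, using the helpers introduced
 * further down in this patch:
 *
 *	msc_win_set_lockout(win, WIN_READY, WIN_INUSE);  window handed to HW
 *	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); window full, to buffer
 *	intel_th_msc_window_unlock(dev, win->sgt);       buffer done -> READY
 */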
+
/**
* struct msc_window - multiblock mode window descriptor
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
+ * @lockout: lockout state, see comment above
+ * @lo_lock: lockout state serialization
* @nr_blocks: number of blocks (pages) in this window
* @nr_segs: number of segments in this window (<= @nr_blocks)
* @_sgt: array of block descriptors
@@ -40,6 +67,8 @@
struct msc_window {
struct list_head entry;
unsigned long pgoff;
+ enum lockout_state lockout;
+ spinlock_t lo_lock;
unsigned int nr_blocks;
unsigned int nr_segs;
struct msc *msc;
@@ -66,8 +95,8 @@ struct msc_iter {
struct msc_window *start_win;
struct msc_window *win;
unsigned long offset;
- int start_block;
- int block;
+ struct scatterlist *start_block;
+ struct scatterlist *block;
unsigned int block_off;
unsigned int wrap_count;
unsigned int eof;
@@ -77,6 +106,8 @@ struct msc_iter {
* struct msc - MSC device representation
* @reg_base: register window base address
* @thdev: intel_th_device pointer
+ * @mbuf: MSU buffer, if assigned
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
@@ -100,6 +131,10 @@ struct msc {
void __iomem *msu_base;
struct intel_th_device *thdev;
+ const struct msu_buffer *mbuf;
+ void *mbuf_priv;
+
+ struct work_struct work;
struct list_head win_list;
struct sg_table single_sgt;
struct msc_window *cur_win;
@@ -108,6 +143,8 @@ struct msc {
unsigned int single_wrap : 1;
void *base;
dma_addr_t base_addr;
+ u32 orig_addr;
+ u32 orig_sz;
/* <0: no buffer, 0: no users, >0: active users */
atomic_t user_count;
@@ -126,6 +163,101 @@ struct msc {
unsigned int index;
};
+static LIST_HEAD(msu_buffer_list);
+static DEFINE_MUTEX(msu_buffer_mutex);
+
+/**
+ * struct msu_buffer_entry - internal MSU buffer bookkeeping
+ * @entry: link to msu_buffer_list
+ * @mbuf: MSU buffer object
+ * @owner: module that provides this MSU buffer
+ */
+struct msu_buffer_entry {
+ struct list_head entry;
+ const struct msu_buffer *mbuf;
+ struct module *owner;
+};
+
+static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ lockdep_assert_held(&msu_buffer_mutex);
+
+ list_for_each_entry(mbe, &msu_buffer_list, entry) {
+ if (!strcmp(mbe->mbuf->name, name))
+ return mbe;
+ }
+
+ return NULL;
+}
+
+static const struct msu_buffer *
+msu_buffer_get(const char *name)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(name);
+ if (mbe && !try_module_get(mbe->owner))
+ mbe = NULL;
+ mutex_unlock(&msu_buffer_mutex);
+
+ return mbe ? mbe->mbuf : NULL;
+}
+
+static void msu_buffer_put(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe)
+ module_put(mbe->owner);
+ mutex_unlock(&msu_buffer_mutex);
+}
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+ struct module *owner)
+{
+ struct msu_buffer_entry *mbe;
+ int ret = 0;
+
+ mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
+ if (!mbe)
+ return -ENOMEM;
+
+ mutex_lock(&msu_buffer_mutex);
+ if (__msu_buffer_entry_find(mbuf->name)) {
+ ret = -EEXIST;
+ kfree(mbe);
+ goto unlock;
+ }
+
+ mbe->mbuf = mbuf;
+ mbe->owner = owner;
+ list_add_tail(&mbe->entry, &msu_buffer_list);
+unlock:
+ mutex_unlock(&msu_buffer_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
+
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
+{
+ struct msu_buffer_entry *mbe;
+
+ mutex_lock(&msu_buffer_mutex);
+ mbe = __msu_buffer_entry_find(mbuf->name);
+ if (mbe) {
+ list_del(&mbe->entry);
+ kfree(mbe);
+ }
+ mutex_unlock(&msu_buffer_mutex);
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
+
static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
/* header hasn't been written */
@@ -139,28 +271,25 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
return false;
}
-static inline struct msc_block_desc *
-msc_win_block(struct msc_window *win, unsigned int block)
+static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
- return sg_virt(&win->sgt->sgl[block]);
+ return win->sgt->sgl;
}
-static inline size_t
-msc_win_actual_bsz(struct msc_window *win, unsigned int block)
+static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
- return win->sgt->sgl[block].length;
+ return sg_virt(msc_win_base_sg(win));
}
-static inline dma_addr_t
-msc_win_baddr(struct msc_window *win, unsigned int block)
+static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
- return sg_dma_address(&win->sgt->sgl[block]);
+ return sg_dma_address(msc_win_base_sg(win));
}
static inline unsigned long
-msc_win_bpfn(struct msc_window *win, unsigned int block)
+msc_win_base_pfn(struct msc_window *win)
{
- return msc_win_baddr(win, block) >> PAGE_SHIFT;
+ return PFN_DOWN(msc_win_base_dma(win));
}
/**
@@ -188,6 +317,26 @@ static struct msc_window *msc_next_window(struct msc_window *win)
return list_next_entry(win, entry);
}
+static size_t msc_win_total_sz(struct msc_window *win)
+{
+ struct scatterlist *sg;
+ unsigned int blk;
+ size_t size = 0;
+
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
+
+ if (msc_block_wrapped(bdesc))
+ return win->nr_blocks << PAGE_SHIFT;
+
+ size += msc_total_sz(bdesc);
+ if (msc_block_last_written(bdesc))
+ break;
+ }
+
+ return size;
+}
+
/**
* msc_find_window() - find a window matching a given sg_table
* @msc: MSC device
@@ -216,7 +365,7 @@ msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
found++;
/* skip the empty ones */
- if (nonempty && msc_block_is_empty(msc_win_block(win, 0)))
+ if (nonempty && msc_block_is_empty(msc_win_base(win)))
continue;
if (found)
@@ -250,44 +399,38 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
}
/**
- * msc_win_oldest_block() - locate the oldest block in a given window
+ * msc_win_oldest_sg() - locate the oldest block in a given window
* @win: window to look at
*
* Return: index of the block with the oldest data
*/
-static unsigned int msc_win_oldest_block(struct msc_window *win)
+static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
unsigned int blk;
- struct msc_block_desc *bdesc = msc_win_block(win, 0);
+ struct scatterlist *sg;
+ struct msc_block_desc *bdesc = msc_win_base(win);
/* without wrapping, first block is the oldest */
if (!msc_block_wrapped(bdesc))
- return 0;
+ return msc_win_base_sg(win);
/*
* with wrapping, last written block contains both the newest and the
* oldest data for this window.
*/
- for (blk = 0; blk < win->nr_segs; blk++) {
- bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
if (msc_block_last_written(bdesc))
- return blk;
+ return sg;
}
- return 0;
+ return msc_win_base_sg(win);
}
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
- return msc_win_block(iter->win, iter->block);
-}
-
-static void msc_iter_init(struct msc_iter *iter)
-{
- memset(iter, 0, sizeof(*iter));
- iter->start_block = -1;
- iter->block = -1;
+ return sg_virt(iter->block);
}
static struct msc_iter *msc_iter_install(struct msc *msc)
@@ -312,7 +455,6 @@ static struct msc_iter *msc_iter_install(struct msc *msc)
goto unlock;
}
- msc_iter_init(iter);
iter->msc = msc;
list_add_tail(&iter->entry, &msc->iter_list);
@@ -333,10 +475,10 @@ static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
static void msc_iter_block_start(struct msc_iter *iter)
{
- if (iter->start_block != -1)
+ if (iter->start_block)
return;
- iter->start_block = msc_win_oldest_block(iter->win);
+ iter->start_block = msc_win_oldest_sg(iter->win);
iter->block = iter->start_block;
iter->wrap_count = 0;
@@ -360,7 +502,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
return -EINVAL;
iter->win = iter->start_win;
- iter->start_block = -1;
+ iter->start_block = NULL;
msc_iter_block_start(iter);
@@ -370,7 +512,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
static int msc_iter_win_advance(struct msc_iter *iter)
{
iter->win = msc_next_window(iter->win);
- iter->start_block = -1;
+ iter->start_block = NULL;
if (iter->win == iter->start_win) {
iter->eof++;
@@ -400,8 +542,10 @@ static int msc_iter_block_advance(struct msc_iter *iter)
return msc_iter_win_advance(iter);
/* block advance */
- if (++iter->block == iter->win->nr_segs)
- iter->block = 0;
+ if (sg_is_last(iter->block))
+ iter->block = msc_win_base_sg(iter->win);
+ else
+ iter->block = sg_next(iter->block);
/* no wrapping, sanity check in case there is no last written block */
if (!iter->wrap_count && iter->block == iter->start_block)
@@ -506,14 +650,15 @@ next_block:
static void msc_buffer_clear_hw_header(struct msc *msc)
{
struct msc_window *win;
+ struct scatterlist *sg;
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
size_t hw_sz = sizeof(struct msc_block_desc) -
offsetof(struct msc_block_desc, hw_tag);
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct msc_block_desc *bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(&bdesc->hw_tag, 0, hw_sz);
}
@@ -527,6 +672,9 @@ static int intel_th_msu_init(struct msc *msc)
if (!msc->do_irq)
return 0;
+ if (!msc->mbuf)
+ return 0;
+
mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
mintctl |= msc->index ? M1BLIE : M0BLIE;
iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
@@ -554,6 +702,49 @@ static void intel_th_msu_deinit(struct msc *msc)
iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}
+static int msc_win_set_lockout(struct msc_window *win,
+ enum lockout_state expect,
+ enum lockout_state new)
+{
+ enum lockout_state old;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!win->msc->mbuf)
+ return 0;
+
+ spin_lock_irqsave(&win->lo_lock, flags);
+ old = win->lockout;
+
+ if (old != expect) {
+ ret = -EINVAL;
+ dev_warn_ratelimited(msc_dev(win->msc),
+ "expected lockout state %d, got %d\n",
+ expect, old);
+ goto unlock;
+ }
+
+ win->lockout = new;
+
+ if (old == expect && new == WIN_LOCKED)
+ atomic_inc(&win->msc->user_count);
+ else if (old == expect && old == WIN_LOCKED)
+ atomic_dec(&win->msc->user_count);
+
+unlock:
+ spin_unlock_irqrestore(&win->lo_lock, flags);
+
+ if (ret) {
+ if (expect == WIN_READY && old == WIN_LOCKED)
+ return -EBUSY;
+
+ /* from intel_th_msc_window_unlock(), don't warn if not locked */
+ if (expect == WIN_LOCKED && old == new)
+ return 0;
+ }
+
+ return ret;
+}
/**
* msc_configure() - set up MSC hardware
* @msc: the MSC device to configure
@@ -571,8 +762,15 @@ static int msc_configure(struct msc *msc)
if (msc->mode > MSC_MODE_MULTI)
return -ENOTSUPP;
- if (msc->mode == MSC_MODE_MULTI)
+ if (msc->mode == MSC_MODE_MULTI) {
+ if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
+ return -EBUSY;
+
msc_buffer_clear_hw_header(msc);
+ }
+
+ msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
+ msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
reg = msc->base_addr >> PAGE_SHIFT;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
@@ -594,10 +792,14 @@ static int msc_configure(struct msc *msc)
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+ intel_th_msu_init(msc);
+
msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
intel_th_trace_enable(msc->thdev);
msc->enabled = 1;
+ if (msc->mbuf && msc->mbuf->activate)
+ msc->mbuf->activate(msc->mbuf_priv);
return 0;
}
@@ -611,10 +813,17 @@ static int msc_configure(struct msc *msc)
*/
static void msc_disable(struct msc *msc)
{
+ struct msc_window *win = msc->cur_win;
u32 reg;
lockdep_assert_held(&msc->buf_mutex);
+ if (msc->mode == MSC_MODE_MULTI)
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ if (msc->mbuf && msc->mbuf->deactivate)
+ msc->mbuf->deactivate(msc->mbuf_priv);
+ intel_th_msu_deinit(msc);
intel_th_trace_disable(msc->thdev);
if (msc->mode == MSC_MODE_SINGLE) {
@@ -630,16 +839,25 @@ static void msc_disable(struct msc *msc)
reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
reg &= ~MSC_EN;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
msc->enabled = 0;
- iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
- iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);
+ iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
+ iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
+
+ reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
+ reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}
static int intel_th_msc_activate(struct intel_th_device *thdev)
@@ -791,10 +1009,9 @@ static int __msc_buffer_win_alloc(struct msc_window *win,
return nr_segs;
err_nomem:
- for (i--; i >= 0; i--)
+ for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- msc_win_block(win, i),
- msc_win_baddr(win, i));
+ sg_virt(sg_ptr), sg_dma_address(sg_ptr));
sg_free_table(win->sgt);
@@ -804,20 +1021,26 @@ err_nomem:
#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
+ struct scatterlist *sg_ptr;
int i;
- for (i = 0; i < nr_segs; i++)
+ for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
/* Set the page as uncached */
- set_memory_uc((unsigned long)msc_win_block(win, i), 1);
+ set_memory_uc((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
static void msc_buffer_set_wb(struct msc_window *win)
{
+ struct scatterlist *sg_ptr;
int i;
- for (i = 0; i < win->nr_segs; i++)
+ for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
/* Reset the page to write-back */
- set_memory_wb((unsigned long)msc_win_block(win, i), 1);
+ set_memory_wb((unsigned long)sg_virt(sg_ptr),
+ PFN_DOWN(sg_ptr->length));
+ }
}
#else /* !X86 */
static inline void
@@ -843,19 +1066,14 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
if (!nr_blocks)
return 0;
- /*
- * This limitation hold as long as we need random access to the
- * block. When that changes, this can go away.
- */
- if (nr_blocks > SG_MAX_SINGLE_ALLOC)
- return -EINVAL;
-
win = kzalloc(sizeof(*win), GFP_KERNEL);
if (!win)
return -ENOMEM;
win->msc = msc;
win->sgt = &win->_sgt;
+ win->lockout = WIN_READY;
+ spin_lock_init(&win->lo_lock);
if (!list_empty(&msc->win_list)) {
struct msc_window *prev = list_last_entry(&msc->win_list,
@@ -865,8 +1083,13 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
win->pgoff = prev->pgoff + prev->nr_blocks;
}
- ret = __msc_buffer_win_alloc(win, nr_blocks);
- if (ret < 0)
+ if (msc->mbuf && msc->mbuf->alloc_window)
+ ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
+ nr_blocks << PAGE_SHIFT);
+ else
+ ret = __msc_buffer_win_alloc(win, nr_blocks);
+
+ if (ret <= 0)
goto err_nomem;
msc_buffer_set_uc(win, ret);
@@ -875,8 +1098,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
win->nr_blocks = nr_blocks;
if (list_empty(&msc->win_list)) {
- msc->base = msc_win_block(win, 0);
- msc->base_addr = msc_win_baddr(win, 0);
+ msc->base = msc_win_base(win);
+ msc->base_addr = msc_win_base_dma(win);
msc->cur_win = win;
}
@@ -893,14 +1116,15 @@ err_nomem:
static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < win->nr_segs; i++) {
- struct page *page = sg_page(&win->sgt->sgl[i]);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
+ struct page *page = sg_page(sg);
page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- msc_win_block(win, i), msc_win_baddr(win, i));
+ sg_virt(sg), sg_dma_address(sg));
}
sg_free_table(win->sgt);
}
@@ -925,7 +1149,10 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc_buffer_set_wb(win);
- __msc_buffer_win_free(msc, win);
+ if (msc->mbuf && msc->mbuf->free_window)
+ msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
+ else
+ __msc_buffer_win_free(msc, win);
kfree(win);
}
@@ -943,6 +1170,7 @@ static void msc_buffer_relink(struct msc *msc)
/* call with msc::mutex locked */
list_for_each_entry(win, &msc->win_list, entry) {
+ struct scatterlist *sg;
unsigned int blk;
u32 sw_tag = 0;
@@ -958,12 +1186,12 @@ static void msc_buffer_relink(struct msc *msc)
next_win = list_next_entry(win, entry);
}
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct msc_block_desc *bdesc = msc_win_block(win, blk);
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct msc_block_desc *bdesc = sg_virt(sg);
memset(bdesc, 0, sizeof(*bdesc));
- bdesc->next_win = msc_win_bpfn(next_win, 0);
+ bdesc->next_win = msc_win_base_pfn(next_win);
/*
* Similarly to last window, last block should point
@@ -971,13 +1199,15 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (blk == win->nr_segs - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
- bdesc->next_blk = msc_win_bpfn(win, 0);
+ bdesc->next_blk = msc_win_base_pfn(win);
} else {
- bdesc->next_blk = msc_win_bpfn(win, blk + 1);
+ dma_addr_t addr = sg_dma_address(sg_next(sg));
+
+ bdesc->next_blk = PFN_DOWN(addr);
}
bdesc->sw_tag = sw_tag;
- bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64;
+ bdesc->block_sz = sg->length / 64;
}
}
@@ -1136,6 +1366,7 @@ static int msc_buffer_free_unless_used(struct msc *msc)
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
struct msc_window *win;
+ struct scatterlist *sg;
unsigned int blk;
if (msc->mode == MSC_MODE_SINGLE)
@@ -1150,9 +1381,9 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
found:
pgoff -= win->pgoff;
- for (blk = 0; blk < win->nr_segs; blk++) {
- struct page *page = sg_page(&win->sgt->sgl[blk]);
- size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk));
+ for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+ struct page *page = sg_page(sg);
+ size_t pgsz = PFN_DOWN(sg->length);
if (pgoff < pgsz)
return page + pgoff;
@@ -1456,24 +1687,83 @@ static void msc_win_switch(struct msc *msc)
else
msc->cur_win = list_next_entry(msc->cur_win, entry);
- msc->base = msc_win_block(msc->cur_win, 0);
- msc->base_addr = msc_win_baddr(msc->cur_win, 0);
+ msc->base = msc_win_base(msc->cur_win);
+ msc->base_addr = msc_win_base_dma(msc->cur_win);
intel_th_trace_switch(msc->thdev);
}
+/**
+ * intel_th_msc_window_unlock - put the window back in rotation
+ * @dev: MSC device to which this relates
+ * @sgt: buffer's sg_table for the window, does nothing if NULL
+ */
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ struct msc_window *win;
+
+ if (!sgt)
+ return;
+
+ win = msc_find_window(msc, sgt, false);
+ if (!win)
+ return;
+
+ msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+}
+EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
+
+static void msc_work(struct work_struct *work)
+{
+ struct msc *msc = container_of(work, struct msc, work);
+
+ intel_th_msc_deactivate(msc->thdev);
+}
+
static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
struct msc *msc = dev_get_drvdata(&thdev->dev);
u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+ struct msc_window *win, *next_win;
+
+ if (!msc->do_irq || !msc->mbuf)
+ return IRQ_NONE;
+
+ msusts &= mask;
+
+ if (!msusts)
+ return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
+
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
- if (!(msusts & mask)) {
- if (msc->enabled)
- return IRQ_HANDLED;
+ if (!msc->enabled)
return IRQ_NONE;
+
+ /* grab the window before we do the switch */
+ win = msc->cur_win;
+ if (!win)
+ return IRQ_HANDLED;
+ next_win = msc_next_window(win);
+ if (!next_win)
+ return IRQ_HANDLED;
+
+ /* next window: if READY, proceed, if LOCKED, stop the trace */
+ if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
+ schedule_work(&msc->work);
+ return IRQ_HANDLED;
}
+ /* current window: INUSE -> LOCKED */
+ msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+ msc_win_switch(msc);
+
+ if (msc->mbuf && msc->mbuf->ready)
+ msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+ msc_win_total_sz(win));
+
return IRQ_HANDLED;
}
@@ -1511,21 +1801,43 @@ wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR_RW(wrap);
+static void msc_buffer_unassign(struct msc *msc)
+{
+ lockdep_assert_held(&msc->buf_mutex);
+
+ if (!msc->mbuf)
+ return;
+
+ msc->mbuf->unassign(msc->mbuf_priv);
+ msu_buffer_put(msc->mbuf);
+ msc->mbuf_priv = NULL;
+ msc->mbuf = NULL;
+}
+
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct msc *msc = dev_get_drvdata(dev);
+ const char *mode = msc_mode[msc->mode];
+ ssize_t ret;
+
+ mutex_lock(&msc->buf_mutex);
+ if (msc->mbuf)
+ mode = msc->mbuf->name;
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
+ mutex_unlock(&msc->buf_mutex);
- return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
+ return ret;
}
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t size)
{
+ const struct msu_buffer *mbuf = NULL;
struct msc *msc = dev_get_drvdata(dev);
size_t len = size;
- char *cp;
+ char *cp, *mode;
int i, ret;
if (!capable(CAP_SYS_RAWIO))
@@ -1535,17 +1847,59 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
if (cp)
len = cp - buf;
- for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
- if (!strncmp(msc_mode[i], buf, len))
- goto found;
+ mode = kstrndup(buf, len, GFP_KERNEL);
+ if (!mode)
+ 	return -ENOMEM;
+
+ i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
+ if (i >= 0)
+ goto found;
+
+ /* Buffer sinks only work with a usable IRQ */
+ if (!msc->do_irq) {
+ kfree(mode);
+ return -EINVAL;
+ }
+
+ mbuf = msu_buffer_get(mode);
+ kfree(mode);
+ if (mbuf)
+ goto found;
return -EINVAL;
found:
mutex_lock(&msc->buf_mutex);
+ ret = 0;
+
+ /* Same buffer: do nothing */
+ if (mbuf && mbuf == msc->mbuf) {
+ /* put the extra reference we just got */
+ msu_buffer_put(mbuf);
+ goto unlock;
+ }
+
ret = msc_buffer_unlocked_free_unless_used(msc);
- if (!ret)
- msc->mode = i;
+ if (ret)
+ goto unlock;
+
+ if (mbuf) {
+ void *mbuf_priv = mbuf->assign(dev, &i);
+
+ if (!mbuf_priv) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ msc_buffer_unassign(msc);
+ msc->mbuf_priv = mbuf_priv;
+ msc->mbuf = mbuf;
+ } else {
+ msc_buffer_unassign(msc);
+ }
+
+ msc->mode = i;
+
+unlock:
+ if (ret && mbuf)
+ msu_buffer_put(mbuf);
mutex_unlock(&msc->buf_mutex);
return ret ? ret : size;
@@ -1667,7 +2021,12 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&msc->buf_mutex);
- if (msc->mode != MSC_MODE_MULTI)
+ /*
+ * Window switch can only happen in the "multi" mode.
+ * If an external buffer is engaged, it has full control over
+ * window switching.
+ */
+ if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
ret = -ENOTSUPP;
else
msc_win_switch(msc);
@@ -1720,10 +2079,7 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
msc->reg_base = base + msc->index * 0x100;
msc->msu_base = base;
- err = intel_th_msu_init(msc);
- if (err)
- return err;
-
+ INIT_WORK(&msc->work, msc_work);
err = intel_th_msc_init(msc);
if (err)
return err;
@@ -1739,7 +2095,6 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
int ret;
intel_th_msc_deactivate(thdev);
- intel_th_msu_deinit(msc);
/*
* Buffers should not be used at this point except if the
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 574c16004cb2..e771f509bd02 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
*
@@ -44,14 +44,6 @@ enum {
#define M0BLIE BIT(16)
#define M1BLIE BIT(24)
-/* MSC operating modes (MSC_MODE) */
-enum {
- MSC_MODE_SINGLE = 0,
- MSC_MODE_MULTI,
- MSC_MODE_EXI,
- MSC_MODE_DEBUG,
-};
-
/* MSCnSTS bits */
#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */
#define MSCSTS_PLE BIT(2) /* Pipeline Empty */
@@ -93,6 +85,16 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
return bdesc->valid_dw * 4 - MSC_BDESC;
}
+static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->valid_dw * 4;
+}
+
+static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc)
+{
+ return bdesc->block_sz * 64 - MSC_BDESC;
+}
+
static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
{
if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
@@ -104,7 +106,7 @@ static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
{
if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) ||
- (msc_data_sz(bdesc) != DATA_IN_PAGE))
+ (msc_data_sz(bdesc) != msc_block_sz(bdesc)))
return true;
return false;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c0378c3de9a4..91dfeba62485 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)0,
},
{
+ /* Lewisburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Tiger Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381babc84c..7dfc0431333b 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel(R) Trace Hub PTI output data structures
*
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e55b902560de..603b83ac5085 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -89,13 +89,6 @@ static struct class stm_class = {
.dev_groups = stm_groups,
};
-static int stm_dev_match(struct device *dev, const void *data)
-{
- const char *name = data;
-
- return sysfs_streq(name, dev_name(dev));
-}
-
/**
* stm_find_device() - find stm device by name
* @buf: character buffer containing the name
@@ -116,7 +109,7 @@ struct stm_device *stm_find_device(const char *buf)
if (!stm_core_up)
return NULL;
- dev = class_find_device(&stm_class, NULL, buf, stm_dev_match);
+ dev = class_find_device_by_name(&stm_class, buf);
if (!dev)
return NULL;
@@ -1276,7 +1269,6 @@ int stm_source_register_device(struct device *parent,
err:
put_device(&src->dev);
- kfree(src);
return err;
}
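class_find_device_by_name() follows the same pattern as the other find-device conversions in this series; a sketch of what the generic helper relies on:

static inline struct device *class_find_device_by_name(struct class *class,
							const char *name)
{
	/* device_match_name() does a sysfs_streq() against dev_name(dev) */
	return class_find_device(class, NULL, name, device_match_name);
}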
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index abedd55a1264..1474e57ecafc 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -54,7 +54,7 @@ config I2C_CHARDEV
Say Y here to use i2c-* device files, usually found in the /dev
directory on your system. They make it possible to have user-space
programs use the I2C bus. Information on how to do this is
- contained in the file <file:Documentation/i2c/dev-interface>.
+ contained in the file <file:Documentation/i2c/dev-interface.rst>.
This support is also available as a module. If so, the module
will be called i2c-dev.
@@ -107,7 +107,7 @@ config I2C_STUB
especially for certain kinds of sensor chips.
If you do build this module, be sure to read the notes and warnings
- in <file:Documentation/i2c/i2c-stub>.
+ in <file:Documentation/i2c/i2c-stub.rst>.
If you don't know what to do here, definitely say N.
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 09367fc014c3..a362245cc558 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -684,7 +684,7 @@ config I2C_IMX_LPI2C
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
- depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IXP4XX || ARCH_IOP13XX
+ depends on ARCH_IOP32X || ARCH_IXP4XX
help
Say Y here if you want to use the IIC bus controller on
the Intel IOPx3xx I/O Processors or IXP4xx Network Processors.
@@ -1206,7 +1206,7 @@ config I2C_PARPORT
and makes it easier to add support for new devices.
An adapter type parameter is now mandatory. Please read the file
- Documentation/i2c/busses/i2c-parport for details.
+ Documentation/i2c/busses/i2c-parport.rst for details.
Another driver exists, named i2c-parport-light, which doesn't depend
on the parport driver. This is meant for embedded systems. Don't say
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index c7fe3b44a860..5e4800d72e00 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -457,18 +457,12 @@ static struct pci_driver amd_mp2_pci_driver = {
};
module_pci_driver(amd_mp2_pci_driver);
-static int amd_mp2_device_match(struct device *dev, const void *data)
-{
- return 1;
-}
-
struct amd_mp2_dev *amd_mp2_find_device(void)
{
struct device *dev;
struct pci_dev *pci_dev;
- dev = driver_find_device(&amd_mp2_pci_driver.driver, NULL, NULL,
- amd_mp2_device_match);
+ dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL);
if (!dev)
return NULL;
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 8d55cdd69ff4..435c7d7377a3 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = {
static struct at91_twi_pdata sama5d2_config = {
.clk_max_div = 7,
- .clk_offset = 4,
+ .clk_offset = 3,
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index e87232f2e708..a3fcc35ffd3b 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
/* send stop when last byte has been written */
- if (--dev->buf_len == 0)
+ if (--dev->buf_len == 0) {
if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
+ }
dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
} else {
at91_twi_write_next_byte(dev);
at91_twi_write(dev, AT91_TWI_IER,
- AT91_TWI_TXCOMP |
- AT91_TWI_NACK |
- AT91_TWI_TXRDY);
+ AT91_TWI_TXCOMP | AT91_TWI_NACK |
+ (dev->buf_len ? AT91_TWI_TXRDY : 0));
}
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 2c7f145a036e..19ef2b0c682a 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
+ uint32_t val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
- if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
- & M_FIFO_RX_CNT_MASK))
+ val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
+
+ /* rx fifo empty */
+ if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
break;
msg->buf[iproc_i2c->rx_bytes] =
- (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
- M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+ (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
iproc_i2c->rx_bytes++;
}
}
@@ -788,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
- u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ u32 val;
+
+ /* We do not support the SMBUS Quick command */
+ val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
if (adap->algo->reg_slave)
val |= I2C_FUNC_SLAVE;
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305b2dd9..f5f001738df5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
dev->disable_int(dev);
dev->disable(dev);
+ synchronize_irq(dev->irq);
dev->slave = NULL;
pm_runtime_put(dev->dev);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d983e0..959d4912ec0d 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
struct completion msg_done;
struct clk *sclk;
struct i2c_client *slave;
+ int irq;
};
static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
writeb(0, priv->base + I2C_OFS_SVA0);
+ /*
+ * Wait for interrupt to finish. New slave irqs cannot happen because we
+ * cleared the slave address and, thus, only extension codes will be
+ * detected which do not use the slave ptr.
+ */
+ synchronize_irq(priv->irq);
priv->slave = NULL;
return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
{
struct em_i2c_device *priv;
struct resource *r;
- int irq, ret;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+ priv->irq);
return 0;
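
The designware, emev2 and (below) rcar slave fixes share one pattern: keep the requested IRQ number in the driver's private data and call synchronize_irq() after disabling the hardware but before clearing the slave pointer, so an in-flight handler on another CPU cannot dereference a stale pointer. A hedged sketch of that shape, with invented driver names:

#include <linux/i2c.h>
#include <linux/interrupt.h>

struct example_i2c_priv {
	struct i2c_client *slave;
	int irq;				/* saved at probe time */
};

static int example_i2c_unreg_slave(struct i2c_client *slave)
{
	struct example_i2c_priv *priv = i2c_get_adapdata(slave->adapter);

	example_hw_disable_slave_irqs(priv);	/* hypothetical: mask slave events */
	synchronize_irq(priv->irq);		/* wait out any running handler */
	priv->slave = NULL;			/* only now is the pointer safe to drop */

	return 0;
}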
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f2956936c3f2..36e9559f880c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -77,7 +77,7 @@
* SMBus Host Notify yes
* Interrupt processing yes
*
- * See the file Documentation/i2c/busses/i2c-i801 for details.
+ * See the file Documentation/i2c/busses/i2c-i801.rst for details.
*/
#include <linux/interrupt.h>
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
int i;
status = acpi_get_object_info(obj_handle, &info);
- if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID))
+ if (ACPI_FAILURE(status))
return AE_OK;
+ if (!(info->valid & ACPI_VALID_HID))
+ goto smo88xx_not_found;
+
hid = info->hardware_id.string;
if (!hid)
- return AE_OK;
+ goto smo88xx_not_found;
i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
if (i < 0)
- return AE_OK;
+ goto smo88xx_not_found;
+
+ kfree(info);
*((bool *)return_value) = true;
return AE_CTRL_TERMINATE;
+
+smo88xx_not_found:
+ kfree(info);
+ return AE_OK;
}
static bool is_dell_system_with_lis3lv02d(void)
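
The i2c-i801 hunk is purely about memory ownership: acpi_get_object_info() allocates the returned buffer, so every path reached after a successful call must free it. A stripped-down sketch of the resulting shape for a namespace-walk callback (only the free/goto structure is the point here):

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status example_walk_cb(acpi_handle handle, u32 level,
				   void *context, void **return_value)
{
	struct acpi_device_info *info;
	acpi_status status;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status))
		return AE_OK;			/* nothing was allocated */

	if (!(info->valid & ACPI_VALID_HID))
		goto out;			/* allocated: must free below */

	/* ... inspect info->hardware_id.string, set *return_value ... */

out:
	kfree(info);				/* single owner of the free */
	return AE_OK;
}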
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b1b8b938d7f4..15f6cde6452f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
}
/* Functions for DMA support */
-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return -ENOMEM;
+ return;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return 0;
+ return;
fail_rx:
dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
- /* return successfully if there is no dma support */
- return ret == -ENODEV ? 0 : ret;
}
static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
i2c_imx->adapter.name);
+ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
/* Init DMA config if supported */
- ret = i2c_imx_dma_request(i2c_imx, phy_addr);
- if (ret < 0)
- goto del_adapter;
+ i2c_imx_dma_request(i2c_imx, phy_addr);
- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
-del_adapter:
- i2c_del_adapter(&i2c_imx->adapter);
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
rpm_disable:
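
The i2c-imx change treats DMA strictly as an optional accelerator: if channel setup fails, the driver logs and falls back to PIO instead of failing probe, so the setup helper no longer needs a return value. A rough sketch of that stance, with invented names and the probe-deferral subtlety deliberately glossed over:

#include <linux/dmaengine.h>
#include <linux/err.h>

struct example_dev { struct dma_chan *chan_tx; };	/* invented driver state */

static void example_setup_dma(struct example_dev *edev, struct device *dev)
{
	edev->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(edev->chan_tx)) {
		dev_dbg(dev, "no TX DMA channel, staying on PIO\n");
		edev->chan_tx = NULL;
		return;
	}
	/* ... request the RX channel and slave config the same way ... */
}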
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 252edb433fdf..29eae1bf4f86 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
.max_num_msgs = 255,
};
+static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
static const struct mtk_i2c_compatible mt2712_compat = {
.regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
};
static const struct mtk_i2c_compatible mt8183_compat = {
+ .quirks = &mt8183_i2c_quirks,
.regs = mt_i2c_regs_v2,
.pmic_i2c = 0,
.dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+ return I2C_FUNC_I2C |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ else
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm mtk_i2c_algorithm = {
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index cfc76b5de726..5a1235fd86bb 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
/*
* We need gpu_i2c_suspend() even if it is stub, for runtime pm to work
* correctly. Without it, lspci shows runtime pm status as "D0" for the card.
- * Documentation/power/pci.txt also insists for driver to provide this.
+ * Documentation/power/pci.rst also insists for driver to provide this.
*/
static __maybe_unused int gpu_i2c_suspend(struct device *dev)
{
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c46c4bddc7ca..cba325eb852f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -91,7 +91,7 @@
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
- switch (PIIX4_dev->device) {
- case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+ (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ PIIX4_dev->revision >= 0x1F)) {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
- break;
- case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
- default:
+ } else {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
- break;
}
} else {
if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d39a4606f72d..531c01100b56 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
enum dma_data_direction dma_direction;
struct reset_control *rstc;
+ int irq;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
WARN_ON(!priv->slave);
+ /* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
+ synchronize_irq(priv->irq);
priv->slave = NULL;
pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
struct i2c_timings i2c_t;
- int irq, ret;
+ int ret;
/* Otherwise logic will break because some bytes must always use PIO */
BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
pm_runtime_put(dev);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
if (ret < 0) {
- dev_err(dev, "cannot get irq %d\n", irq);
+ dev_err(dev, "cannot get irq %d\n", priv->irq);
goto out_pm_disable;
}
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d97fb857b0ea..c98ef4c4a0c9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* fall through to the write state, as we will need to
* send a byte as well
*/
+ /* Fall through */
case STATE_WRITE:
/*
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index 868755f82f88..2c21893905a3 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* i2c-stm32.h
*
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index c82e78f57386..37347c93e8e0 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -125,7 +125,7 @@ static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/*
* Voluntarily dropping error code of kstrtou8 since all
* error code that it could return are invalid according
- * to Documentation/i2c/fault-codes.
+ * to Documentation/i2c/fault-codes.rst.
*/
if (kstrtou8(p + 1, 16, &data->byte))
return -EPROTO;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4dbbc9a35f65..9cb2aa1e20ef 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -354,17 +354,13 @@ static int i2c_acpi_find_match_adapter(struct device *dev, const void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int i2c_acpi_find_match_device(struct device *dev, const void *data)
-{
- return ACPI_COMPANION(dev) == data;
-}
-
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
i2c_acpi_find_match_adapter);
+
return dev ? i2c_verify_adapter(dev) : NULL;
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
@@ -373,8 +369,7 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev,
- i2c_acpi_find_match_device);
+ dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
return dev ? i2c_verify_client(dev) : NULL;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f26ed495d384..72b300174cb8 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
- if (!client)
+ if (IS_ERR_OR_NULL(client))
return;
if (client->dev.of_node) {
@@ -2206,7 +2206,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_warn(&adapter->dev,
"This adapter will soon drop class based instantiation of devices. "
"Please make sure client 0x%02x gets instantiated by other means. "
- "Check 'Documentation/i2c/instantiating-devices' for details.\n",
+ "Check 'Documentation/i2c/instantiating-devices.rst' for details.\n",
info.addr);
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
@@ -2236,7 +2236,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
if (adapter->class == I2C_CLASS_DEPRECATED) {
dev_dbg(&adapter->dev,
"This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. "
- "If you need it, check 'Documentation/i2c/instantiating-devices' for alternatives.\n",
+ "If you need it, check 'Documentation/i2c/instantiating-devices.rst' for alternatives.\n",
driver->driver.name);
return 0;
}
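
Accepting IS_ERR_OR_NULL() in i2c_unregister_device() lets callers store whatever an i2c_new_*_device() variant returned (NULL, an ERR_PTR or a real client) and run teardown unconditionally. Hypothetical caller shape:

#include <linux/i2c.h>

static void example_teardown(struct i2c_client *client)
{
	/* Safe whether 'client' is NULL, an ERR_PTR or a live client. */
	i2c_unregister_device(client);
}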
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index d1c48dec7118..6f632d543fcc 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -113,11 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap)
of_node_put(bus);
}
-static int of_dev_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
static int of_dev_or_parent_node_match(struct device *dev, const void *data)
{
if (dev->of_node == data)
@@ -135,7 +130,7 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
struct device *dev;
struct i2c_client *client;
- dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
+ dev = bus_find_device_by_of_node(&i2c_bus_type, node);
if (!dev)
return NULL;
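
Both core hunks above replace open-coded match callbacks with the generic driver-core lookups (bus_find_device_by_acpi_dev(), bus_find_device_by_of_node()). One thing the helpers do not change: the returned device is referenced and must be put. A small hypothetical user:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/of.h>

/* Hypothetical check: does an i2c client for this DT node exist right now? */
static bool example_i2c_node_has_client(struct device_node *np)
{
	struct device *dev = bus_find_device_by_of_node(&i2c_bus_type, np);

	if (!dev)
		return false;

	put_device(dev);	/* the lookup took a reference for us */
	return true;
}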
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index 69cc040c3a1c..9e2e1406f85e 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -201,6 +201,59 @@ struct i3c_device *dev_to_i3cdev(struct device *dev)
EXPORT_SYMBOL_GPL(dev_to_i3cdev);
/**
+ * i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
+ * @i3cdev: I3C device
+ * @id_table: I3C device match table
+ *
+ * Return: a pointer to an i3c_device_id object or NULL if there's no match.
+ */
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table)
+{
+ struct i3c_device_info devinfo;
+ const struct i3c_device_id *id;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+
+ /*
+ * The lower 32bits of the provisional ID is just filled with a random
+ * value, try to match using DCR info.
+ */
+ if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
+ u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ u16 part = I3C_PID_PART_ID(devinfo.pid);
+ u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ /* First try to match by manufacturer/part ID. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
+ I3C_MATCH_MANUF_AND_PART)
+ continue;
+
+ if (manuf != id->manuf_id || part != id->part_id)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+ ext_info != id->extra_info)
+ continue;
+
+ return id;
+ }
+ }
+
+ /* Fallback to DCR match. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_DCR) &&
+ id->dcr == devinfo.dcr)
+ return id;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_device_match_id);
+
+/**
* i3c_driver_register_with_owner() - register an I3C device driver
*
* @drv: driver to register
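
With i3c_device_match_id() exported, an I3C device driver can recover the table entry that matched it at probe time, typically to pick per-part data. A hypothetical user (IDs and data are placeholders):

#include <linux/i3c/device.h>

struct example_chip_data { int whoami; };	/* invented per-chip data */

static const struct example_chip_data example_chip_a = { .whoami = 0x2a };

static const struct i3c_device_id example_ids[] = {
	I3C_DEVICE(0x123, 0x456, &example_chip_a),	/* placeholder IDs */
	{ /* sentinel */ },
};

static int example_probe(struct i3c_device *i3cdev)
{
	const struct i3c_device_id *id = i3c_device_match_id(i3cdev, example_ids);

	if (!id)
		return -ENODEV;

	/* id->data points at example_chip_a for this entry. */
	return 0;
}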
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d6f8b038a896..5c051dba32a5 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -123,7 +123,7 @@ static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
if (dev->type == &i3c_device_type)
return dev_to_i3cdev(dev)->desc;
- master = container_of(dev, struct i3c_master_controller, dev);
+ master = dev_to_i3cmaster(dev);
return master->this;
}
@@ -276,51 +276,6 @@ static const struct device_type i3c_device_type = {
.uevent = i3c_device_uevent,
};
-static const struct i3c_device_id *
-i3c_device_match_id(struct i3c_device *i3cdev,
- const struct i3c_device_id *id_table)
-{
- struct i3c_device_info devinfo;
- const struct i3c_device_id *id;
-
- i3c_device_get_info(i3cdev, &devinfo);
-
- /*
- * The lower 32bits of the provisional ID is just filled with a random
- * value, try to match using DCR info.
- */
- if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
- u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
- u16 part = I3C_PID_PART_ID(devinfo.pid);
- u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
-
- /* First try to match by manufacturer/part ID. */
- for (id = id_table; id->match_flags != 0; id++) {
- if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
- I3C_MATCH_MANUF_AND_PART)
- continue;
-
- if (manuf != id->manuf_id || part != id->part_id)
- continue;
-
- if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
- ext_info != id->extra_info)
- continue;
-
- return id;
- }
- }
-
- /* Fallback to DCR match. */
- for (id = id_table; id->match_flags != 0; id++) {
- if ((id->match_flags & I3C_MATCH_DCR) &&
- id->dcr == devinfo.dcr)
- return id;
- }
-
- return NULL;
-}
-
static int i3c_device_match(struct device *dev, struct device_driver *drv)
{
struct i3c_device *i3cdev;
@@ -645,6 +600,8 @@ i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
dev->common.master = master;
dev->boardinfo = boardinfo;
+ dev->addr = boardinfo->base.addr;
+ dev->lvr = boardinfo->lvr;
return dev;
}
@@ -963,8 +920,8 @@ int i3c_master_defslvs_locked(struct i3c_master_controller *master)
desc = defslvs->slaves;
i3c_bus_for_each_i2cdev(bus, i2cdev) {
- desc->lvr = i2cdev->boardinfo->lvr;
- desc->static_addr = i2cdev->boardinfo->base.addr << 1;
+ desc->lvr = i2cdev->lvr;
+ desc->static_addr = i2cdev->addr << 1;
desc++;
}
@@ -1084,8 +1041,10 @@ static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
if (ret)
goto out;
- if (dest.payload.len != sizeof(*mwl))
- return -EIO;
+ if (dest.payload.len != sizeof(*mwl)) {
+ ret = -EIO;
+ goto out;
+ }
info->max_write_len = be16_to_cpu(mwl->len);
@@ -1631,8 +1590,8 @@ static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
common.node) {
i3c_master_detach_i2c_dev(i2cdev);
i3c_bus_set_addr_slot_status(&master->bus,
- i2cdev->boardinfo->base.addr,
- I3C_ADDR_SLOT_FREE);
+ i2cdev->addr,
+ I3C_ADDR_SLOT_FREE);
i3c_master_free_i2c_dev(i2cdev);
}
}
@@ -2093,8 +2052,10 @@ static int of_populate_i3c_bus(struct i3c_master_controller *master)
for_each_available_child_of_node(i3cbus_np, node) {
ret = of_i3c_master_add_dev(master, node);
- if (ret)
+ if (ret) {
+ of_node_put(node);
return ret;
+ }
}
/*
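
One detail in the of_populate_i3c_bus() hunk above: for_each_available_child_of_node() keeps a reference on the node it hands to the loop body, so any early return must drop it with of_node_put(). The pattern in isolation, with a placeholder per-node handler:

#include <linux/of.h>

static int example_handle_child(struct device_node *child)
{
	return 0;	/* placeholder for per-node setup */
}

static int example_scan_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = example_handle_child(child);
		if (ret) {
			of_node_put(child);	/* the iterator won't drop it on break */
			return ret;
		}
	}

	return 0;
}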
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 09912d75c6d5..b0ff0e12d84c 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1033,12 +1033,12 @@ static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
return -ENOMEM;
data->index = pos;
- master->addrs[pos] = dev->boardinfo->base.addr;
+ master->addrs[pos] = dev->addr;
master->free_pos &= ~BIT(pos);
i2c_dev_set_master_data(dev, data);
writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
- DEV_ADDR_TABLE_STATIC_ADDR(dev->boardinfo->base.addr),
+ DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 237f24adddc6..10db0bf0655a 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -903,7 +903,8 @@ static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
u8 dyn_addr)
{
- u32 activedevs, rr;
+ unsigned long activedevs;
+ u32 rr;
int i;
if (!dyn_addr) {
@@ -913,13 +914,10 @@ static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
return ffs(master->free_rr_slots) - 1;
}
- activedevs = readl(master->regs + DEVS_CTRL) &
- DEVS_CTRL_DEVS_ACTIVE_MASK;
-
- for (i = 1; i <= master->maxdevs; i++) {
- if (!(BIT(i) & activedevs))
- continue;
+ activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ activedevs &= ~BIT(0);
+ for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
rr = readl(master->regs + DEV_ID_RR0(i));
if (!(rr & DEV_ID_RR0_IS_I3C) ||
DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
@@ -1005,9 +1003,9 @@ static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
master->free_rr_slots &= ~BIT(slot);
i2c_dev_set_master_data(dev, data);
- writel(prepare_rr0_dev_address(dev->boardinfo->base.addr),
+ writel(prepare_rr0_dev_address(dev->addr),
master->regs + DEV_ID_RR0(data->id));
- writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
+ writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
writel(readl(master->regs + DEVS_CTRL) |
DEVS_CTRL_DEV_ACTIVE(data->id),
master->regs + DEVS_CTRL);
@@ -1126,18 +1124,16 @@ static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
- u32 olddevs, newdevs;
+ unsigned long olddevs, newdevs;
int ret, slot;
u8 addrs[MAX_DEVS] = { };
u8 last_addr = 0;
olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ olddevs |= BIT(0);
/* Prepare RR slots before launching DAA. */
- for (slot = 1; slot <= master->maxdevs; slot++) {
- if (olddevs & BIT(slot))
- continue;
-
+ for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
ret = i3c_master_get_free_addr(m, last_addr + 1);
if (ret < 0)
return -ENOSPC;
@@ -1161,10 +1157,8 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
* Clear all retaining registers filled during DAA. We already
* have the addressed assigned to them in the addrs array.
*/
- for (slot = 1; slot <= master->maxdevs; slot++) {
- if (newdevs & BIT(slot))
- i3c_master_add_i3c_dev_locked(m, addrs[slot]);
- }
+ for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
+ i3c_master_add_i3c_dev_locked(m, addrs[slot]);
/*
* Clear slots that ended up not being used. Can be caused by I3C
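
The Cadence hunks swap hand-rolled BIT() loops for the bitmap iterators; the type change from u32 to unsigned long exists only because for_each_set_bit()/for_each_clear_bit() take a pointer to unsigned long. A small illustration of the idiom, with a made-up register and the slot-0 handling reduced to a comment:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/printk.h>

static void example_walk_slots(void __iomem *devs_ctrl, unsigned int maxdevs)
{
	unsigned long active = readl(devs_ctrl);	/* must be unsigned long */
	unsigned int i;

	active &= ~BIT(0);	/* slot 0 is reserved in this sketch */

	for_each_set_bit(i, &active, maxdevs + 1)
		pr_debug("slot %u: in use\n", i);

	for_each_clear_bit(i, &active, maxdevs + 1)
		pr_debug("slot %u: free\n", i);
}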
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 9eada392df15..1c227ea8ecd3 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -567,15 +567,6 @@ config BLK_DEV_SVWKS
This driver adds PIO/(U)DMA support for the ServerWorks OSB4/CSB5
chipsets.
-config BLK_DEV_SGIIOC4
- tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support"
- depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4
- select BLK_DEV_IDEDMA_PCI
- help
- This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4
- chipset, which has one channel and can support two devices.
- Please say Y here if you have an Altix System from SGI.
-
config BLK_DEV_SIIMAGE
tristate "Silicon Image chipset support"
select BLK_DEV_IDEDMA_PCI
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 9f617a77970f..cac02db4098d 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
-obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
deleted file mode 100644
index 2d35e9f7516f..000000000000
--- a/drivers/ide/sgiioc4.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (C) 2008-2009 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/ioc4.h>
-#include <linux/io.h>
-#include <linux/ide.h>
-
-#define DRV_NAME "SGIIOC4"
-
-/* IOC4 Specific Definitions */
-#define IOC4_CMD_OFFSET 0x100
-#define IOC4_CTRL_OFFSET 0x120
-#define IOC4_DMA_OFFSET 0x140
-#define IOC4_INTR_OFFSET 0x0
-
-#define IOC4_TIMING 0x00
-#define IOC4_DMA_PTR_L 0x01
-#define IOC4_DMA_PTR_H 0x02
-#define IOC4_DMA_ADDR_L 0x03
-#define IOC4_DMA_ADDR_H 0x04
-#define IOC4_BC_DEV 0x05
-#define IOC4_BC_MEM 0x06
-#define IOC4_DMA_CTRL 0x07
-#define IOC4_DMA_END_ADDR 0x08
-
-/* Bits in the IOC4 Control/Status Register */
-#define IOC4_S_DMA_START 0x01
-#define IOC4_S_DMA_STOP 0x02
-#define IOC4_S_DMA_DIR 0x04
-#define IOC4_S_DMA_ACTIVE 0x08
-#define IOC4_S_DMA_ERROR 0x10
-#define IOC4_ATA_MEMERR 0x02
-
-/* Read/Write Directions */
-#define IOC4_DMA_WRITE 0x04
-#define IOC4_DMA_READ 0x00
-
-/* Interrupt Register Offsets */
-#define IOC4_INTR_REG 0x03
-#define IOC4_INTR_SET 0x05
-#define IOC4_INTR_CLEAR 0x07
-
-#define IOC4_IDE_CACHELINE_SIZE 128
-#define IOC4_CMD_CTL_BLK_SIZE 0x20
-#define IOC4_SUPPORTED_FIRMWARE_REV 46
-
-struct ioc4_dma_regs {
- u32 timing_reg0;
- u32 timing_reg1;
- u32 low_mem_ptr;
- u32 high_mem_ptr;
- u32 low_mem_addr;
- u32 high_mem_addr;
- u32 dev_byte_count;
- u32 mem_byte_count;
- u32 status;
-};
-
-/* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */
-/* IOC4 has only 1 IDE channel */
-#define IOC4_PRD_BYTES 16
-#define IOC4_PRD_ENTRIES (PAGE_SIZE / (4 * IOC4_PRD_BYTES))
-
-
-static void sgiioc4_init_hwif_ports(struct ide_hw *hw,
- unsigned long data_port,
- unsigned long ctrl_port,
- unsigned long irq_port)
-{
- unsigned long reg = data_port;
- int i;
-
- /* Registers are word (32 bit) aligned */
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = reg + i * 4;
-
- hw->io_ports.ctl_addr = ctrl_port;
- hw->io_ports.irq_addr = irq_port;
-}
-
-static int sgiioc4_checkirq(ide_hwif_t *hwif)
-{
- unsigned long intr_addr = hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
-
- if (readl((void __iomem *)intr_addr) & 0x03)
- return 1;
-
- return 0;
-}
-
-static u8 sgiioc4_read_status(ide_hwif_t *);
-
-static int sgiioc4_clearirq(ide_drive_t *drive)
-{
- u32 intr_reg;
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
-
- /* Code to check for PCI error conditions */
- intr_reg = readl((void __iomem *)other_ir);
- if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
- /*
- * Using sgiioc4_read_status to read the Status register has a
- * side effect of clearing the interrupt. The first read should
- * clear it if it is set. The second read should return
- * a "clear" status if it got cleared. If not, then spin
- * for a bit trying to clear it.
- */
- u8 stat = sgiioc4_read_status(hwif);
- int count = 0;
-
- stat = sgiioc4_read_status(hwif);
- while ((stat & ATA_BUSY) && (count++ < 100)) {
- udelay(1);
- stat = sgiioc4_read_status(hwif);
- }
-
- if (intr_reg & 0x02) {
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- /* Error when transferring DMA data on PCI bus */
- u32 pci_err_addr_low, pci_err_addr_high,
- pci_stat_cmd_reg;
-
- pci_err_addr_low =
- readl((void __iomem *)io_ports->irq_addr);
- pci_err_addr_high =
- readl((void __iomem *)(io_ports->irq_addr + 4));
- pci_read_config_dword(dev, PCI_COMMAND,
- &pci_stat_cmd_reg);
- printk(KERN_ERR "%s(%s): PCI Bus Error when doing DMA: "
- "status-cmd reg is 0x%x\n",
- __func__, drive->name, pci_stat_cmd_reg);
- printk(KERN_ERR "%s(%s): PCI Error Address is 0x%x%x\n",
- __func__, drive->name,
- pci_err_addr_high, pci_err_addr_low);
- /* Clear the PCI Error indicator */
- pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
- }
-
- /* Clear the Interrupt, Error bits on the IOC4 */
- writel(0x03, (void __iomem *)other_ir);
-
- intr_reg = readl((void __iomem *)other_ir);
- }
-
- return intr_reg & 3;
-}
-
-static void sgiioc4_dma_start(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
- unsigned int reg = readl((void __iomem *)ioc4_dma_addr);
- unsigned int temp_reg = reg | IOC4_S_DMA_START;
-
- writel(temp_reg, (void __iomem *)ioc4_dma_addr);
-}
-
-static u32 sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
-{
- unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
- u32 ioc4_dma;
- int count;
-
- count = 0;
- ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
- while ((ioc4_dma & IOC4_S_DMA_STOP) && (count++ < 200)) {
- udelay(1);
- ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
- }
- return ioc4_dma;
-}
-
-/* Stops the IOC4 DMA Engine */
-static int sgiioc4_dma_end(ide_drive_t *drive)
-{
- u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
- ide_hwif_t *hwif = drive->hwif;
- unsigned long dma_base = hwif->dma_base;
- int dma_stat = 0;
- unsigned long *ending_dma = ide_get_hwifdata(hwif);
-
- writel(IOC4_S_DMA_STOP, (void __iomem *)(dma_base + IOC4_DMA_CTRL * 4));
-
- ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
-
- if (ioc4_dma & IOC4_S_DMA_STOP) {
- printk(KERN_ERR
- "%s(%s): IOC4 DMA STOP bit is still 1 :"
- "ioc4_dma_reg 0x%x\n",
- __func__, drive->name, ioc4_dma);
- dma_stat = 1;
- }
-
- /*
- * The IOC4 will DMA 1's to the ending DMA area to indicate that
- * previous data DMA is complete. This is necessary because of relaxed
- * ordering between register reads and DMA writes on the Altix.
- */
- while ((cnt++ < 200) && (!valid)) {
- for (num = 0; num < 16; num++) {
- if (ending_dma[num]) {
- valid = 1;
- break;
- }
- }
- udelay(1);
- }
- if (!valid) {
- printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
- drive->name);
- dma_stat = 1;
- }
-
- bc_dev = readl((void __iomem *)(dma_base + IOC4_BC_DEV * 4));
- bc_mem = readl((void __iomem *)(dma_base + IOC4_BC_MEM * 4));
-
- if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
- if (bc_dev > bc_mem + 8) {
- printk(KERN_ERR
- "%s(%s): WARNING!! byte_count_dev %d "
- "!= byte_count_mem %d\n",
- __func__, drive->name, bc_dev, bc_mem);
- }
- }
-
- return dma_stat;
-}
-
-static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-}
-
-/* Returns 1 if DMA IRQ issued, 0 otherwise */
-static int sgiioc4_dma_test_irq(ide_drive_t *drive)
-{
- return sgiioc4_checkirq(drive->hwif);
-}
-
-static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
-{
- if (!on)
- sgiioc4_clearirq(drive);
-}
-
-static void sgiioc4_resetproc(ide_drive_t *drive)
-{
- struct ide_cmd *cmd = &drive->hwif->cmd;
-
- sgiioc4_dma_end(drive);
- ide_dma_unmap_sg(drive, cmd);
- sgiioc4_clearirq(drive);
-}
-
-static void sgiioc4_dma_lost_irq(ide_drive_t *drive)
-{
- sgiioc4_resetproc(drive);
-
- ide_dma_lost_irq(drive);
-}
-
-static u8 sgiioc4_read_status(ide_hwif_t *hwif)
-{
- unsigned long port = hwif->io_ports.status_addr;
- u8 reg = (u8) readb((void __iomem *) port);
-
- if (!(reg & ATA_BUSY)) { /* Not busy... check for interrupt */
- unsigned long other_ir = port - 0x110;
- unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);
-
- /* Clear the Interrupt, Error bits on the IOC4 */
- if (intr_reg & 0x03) {
- writel(0x03, (void __iomem *) other_ir);
- intr_reg = (u32) readl((void __iomem *) other_ir);
- }
- }
-
- return reg;
-}
-
-/* Creates a DMA map for the scatter-gather list entries */
-static int ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
- int num_ports = sizeof(struct ioc4_dma_regs);
- void *pad;
-
- printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
-
- if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
- printk(KERN_ERR "%s(%s) -- ERROR: addresses 0x%08lx to 0x%08lx "
- "already in use\n", __func__, hwif->name,
- dma_base, dma_base + num_ports - 1);
- return -1;
- }
-
- hwif->dma_base = (unsigned long)hwif->io_ports.irq_addr +
- IOC4_DMA_OFFSET;
-
- hwif->sg_max_nents = IOC4_PRD_ENTRIES;
-
- hwif->prd_max_nents = IOC4_PRD_ENTRIES;
- hwif->prd_ent_size = IOC4_PRD_BYTES;
-
- if (ide_allocate_dma_engine(hwif))
- goto dma_pci_alloc_failure;
-
- pad = dma_alloc_coherent(&dev->dev, IOC4_IDE_CACHELINE_SIZE,
- (dma_addr_t *)&hwif->extra_base, GFP_KERNEL);
- if (pad) {
- ide_set_hwifdata(hwif, pad);
- return 0;
- }
-
- ide_release_dma_engine(hwif);
-
- printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
- __func__, hwif->name);
- printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
-
-dma_pci_alloc_failure:
- release_mem_region(dma_base, num_ports);
-
- return -1;
-}
-
-/* Initializes the IOC4 DMA Engine */
-static void sgiioc4_configure_for_dma(int dma_direction, ide_drive_t *drive)
-{
- u32 ioc4_dma;
- ide_hwif_t *hwif = drive->hwif;
- unsigned long dma_base = hwif->dma_base;
- unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
- u32 dma_addr, ending_dma_addr;
-
- ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
-
- if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
- printk(KERN_WARNING "%s(%s): Warning!! DMA from previous "
- "transfer was still active\n", __func__, drive->name);
- writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
- ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
-
- if (ioc4_dma & IOC4_S_DMA_STOP)
- printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is "
- "still 1\n", __func__, drive->name);
- }
-
- ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
- if (ioc4_dma & IOC4_S_DMA_ERROR) {
- printk(KERN_WARNING "%s(%s): Warning!! DMA Error during "
- "previous transfer, status 0x%x\n",
- __func__, drive->name, ioc4_dma);
- writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
- ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
-
- if (ioc4_dma & IOC4_S_DMA_STOP)
- printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is "
- "still 1\n", __func__, drive->name);
- }
-
- /* Address of the Scatter Gather List */
- dma_addr = cpu_to_le32(hwif->dmatable_dma);
- writel(dma_addr, (void __iomem *)(dma_base + IOC4_DMA_PTR_L * 4));
-
- /* Address of the Ending DMA */
- memset(ide_get_hwifdata(hwif), 0, IOC4_IDE_CACHELINE_SIZE);
- ending_dma_addr = cpu_to_le32(hwif->extra_base);
- writel(ending_dma_addr, (void __iomem *)(dma_base +
- IOC4_DMA_END_ADDR * 4));
-
- writel(dma_direction, (void __iomem *)ioc4_dma_addr);
-}
-
-/* IOC4 Scatter Gather list Format */
-/* 128 Bit entries to support 64 bit addresses in the future */
-/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
-/* --------------------------------------------------------------------- */
-/* | Upper 32 bits - Zero | Lower 32 bits- address | */
-/* --------------------------------------------------------------------- */
-/* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */
-/* --------------------------------------------------------------------- */
-/* Creates the scatter gather list, DMA Table */
-
-static int sgiioc4_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- ide_hwif_t *hwif = drive->hwif;
- unsigned int *table = hwif->dmatable_cpu;
- unsigned int count = 0, i = cmd->sg_nents;
- struct scatterlist *sg = hwif->sg_table;
-
- while (i && sg_dma_len(sg)) {
- dma_addr_t cur_addr;
- int cur_len;
- cur_addr = sg_dma_address(sg);
- cur_len = sg_dma_len(sg);
-
- while (cur_len) {
- if (count++ >= IOC4_PRD_ENTRIES) {
- printk(KERN_WARNING
- "%s: DMA table too small\n",
- drive->name);
- return 0;
- } else {
- u32 bcount =
- 0x10000 - (cur_addr & 0xffff);
-
- if (bcount > cur_len)
- bcount = cur_len;
-
- /*
- * Put the address, length in
- * the IOC4 dma-table format
- */
- *table = 0x0;
- table++;
- *table = cpu_to_be32(cur_addr);
- table++;
- *table = 0x0;
- table++;
-
- *table = cpu_to_be32(bcount);
- table++;
-
- cur_addr += bcount;
- cur_len -= bcount;
- }
- }
-
- sg = sg_next(sg);
- i--;
- }
-
- if (count) {
- table--;
- *table |= cpu_to_be32(0x80000000);
- return count;
- }
-
- return 0; /* revert to PIO for this request */
-}
-
-static int sgiioc4_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
- int ddir;
- u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
-
- if (sgiioc4_build_dmatable(drive, cmd) == 0)
- /* try PIO instead of DMA */
- return 1;
-
- if (write)
- /* Writes TO the IOC4 FROM Main Memory */
- ddir = IOC4_DMA_READ;
- else
- /* Writes FROM the IOC4 TO Main Memory */
- ddir = IOC4_DMA_WRITE;
-
- sgiioc4_configure_for_dma(ddir, drive);
-
- return 0;
-}
-
-static const struct ide_tp_ops sgiioc4_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = sgiioc4_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
-static const struct ide_port_ops sgiioc4_port_ops = {
- .set_dma_mode = sgiioc4_set_dma_mode,
- /* reset DMA engine, clear IRQs */
- .resetproc = sgiioc4_resetproc,
-};
-
-static const struct ide_dma_ops sgiioc4_dma_ops = {
- .dma_host_set = sgiioc4_dma_host_set,
- .dma_setup = sgiioc4_dma_setup,
- .dma_start = sgiioc4_dma_start,
- .dma_end = sgiioc4_dma_end,
- .dma_test_irq = sgiioc4_dma_test_irq,
- .dma_lost_irq = sgiioc4_dma_lost_irq,
-};
-
-static const struct ide_port_info sgiioc4_port_info = {
- .name = DRV_NAME,
- .chipset = ide_pci,
- .init_dma = ide_dma_sgiioc4,
- .tp_ops = &sgiioc4_tp_ops,
- .port_ops = &sgiioc4_port_ops,
- .dma_ops = &sgiioc4_dma_ops,
- .host_flags = IDE_HFLAG_MMIO,
- .irq_flags = IRQF_SHARED,
- .mwdma_mask = ATA_MWDMA2_ONLY,
-};
-
-static int sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
-{
- unsigned long cmd_base, irqport;
- unsigned long bar0, cmd_phys_base, ctl;
- void __iomem *virt_base;
- struct ide_hw hw, *hws[] = { &hw };
- int rc;
-
- /* Get the CmdBlk and CtrlBlk base registers */
- bar0 = pci_resource_start(dev, 0);
- virt_base = pci_ioremap_bar(dev, 0);
- if (virt_base == NULL) {
- printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
- DRV_NAME, bar0);
- return -ENOMEM;
- }
- cmd_base = (unsigned long)virt_base + IOC4_CMD_OFFSET;
- ctl = (unsigned long)virt_base + IOC4_CTRL_OFFSET;
- irqport = (unsigned long)virt_base + IOC4_INTR_OFFSET;
-
- cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
- if (request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
- DRV_NAME) == NULL) {
- printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx "
- "already in use\n", DRV_NAME, pci_name(dev),
- cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
- rc = -EBUSY;
- goto req_mem_rgn_err;
- }
-
- /* Initialize the IO registers */
- memset(&hw, 0, sizeof(hw));
- sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
- hw.irq = dev->irq;
- hw.dev = &dev->dev;
-
- /* Initialize chipset IRQ registers */
- writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
-
- rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL);
- if (!rc)
- return 0;
-
- release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
-req_mem_rgn_err:
- iounmap(virt_base);
- return rc;
-}
-
-static unsigned int pci_init_sgiioc4(struct pci_dev *dev)
-{
- int ret;
-
- printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n",
- DRV_NAME, pci_name(dev), dev->revision);
-
- if (dev->revision < IOC4_SUPPORTED_FIRMWARE_REV) {
- printk(KERN_ERR "Skipping %s IDE controller in slot %s: "
- "firmware is obsolete - please upgrade to "
- "revision46 or higher\n",
- DRV_NAME, pci_name(dev));
- ret = -EAGAIN;
- goto out;
- }
- ret = sgiioc4_ide_setup_pci_device(dev);
-out:
- return ret;
-}
-
-static int ioc4_ide_attach_one(struct ioc4_driver_data *idd)
-{
- /*
- * PCI-RT does not bring out IDE connection.
- * Do not attach to this particular IOC4.
- */
- if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
- return 0;
-
- return pci_init_sgiioc4(idd->idd_pdev);
-}
-
-static struct ioc4_submodule ioc4_ide_submodule = {
- .is_name = "IOC4_ide",
- .is_owner = THIS_MODULE,
- .is_probe = ioc4_ide_attach_one,
-};
-
-static int __init ioc4_ide_init(void)
-{
- return ioc4_register_submodule(&ioc4_ide_submodule);
-}
-
-late_initcall(ioc4_ide_init); /* Call only after IDE init is done */
-
-MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
-MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
-MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index fa5ff77b8fe4..347b08b56042 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1072,26 +1072,26 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht),
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
- INTEL_CPU_FAM6(HASWELL_CORE, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_ULT, idle_cpu_hsw),
- INTEL_CPU_FAM6(HASWELL_GT3E, idle_cpu_hsw),
- INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn),
- INTEL_CPU_FAM6(BROADWELL_CORE, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_GT3E, idle_cpu_bdw),
+ INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
+ INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
- INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw),
- INTEL_CPU_FAM6(SKYLAKE_MOBILE, idle_cpu_skl),
- INTEL_CPU_FAM6(SKYLAKE_DESKTOP, idle_cpu_skl),
- INTEL_CPU_FAM6(KABYLAKE_MOBILE, idle_cpu_skl),
- INTEL_CPU_FAM6(KABYLAKE_DESKTOP, idle_cpu_skl),
+ INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
+ INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
+ INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
+ INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx),
INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl),
INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl),
INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv),
- INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv),
+ INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv),
{}
};
@@ -1311,7 +1311,7 @@ static void intel_idle_state_table_update(void)
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
bxt_idle_state_table_update();
break;
- case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_SKYLAKE:
sklh_idle_state_table_update();
break;
}
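
The intel_idle hunks are mechanical renames for the new Intel model naming; the table remains an ordinary x86_cpu_id match table consumed through x86_match_cpu(). A hedged, minimal example of that pattern (the params structure is invented):

#include <linux/init.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

struct example_params { int max_cstate; };
static const struct example_params example_skl_params = { .max_cstate = 8 };

static const struct x86_cpu_id example_ids[] __initconst = {
	INTEL_CPU_FAM6(SKYLAKE_L, example_skl_params),
	INTEL_CPU_FAM6(SKYLAKE, example_skl_params),
	{}
};

static int __init example_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_ids);

	if (!id)
		return -ENODEV;
	/* id->driver_data holds a pointer to example_skl_params here. */
	return 0;
}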
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 9b9656ce37e6..d4ef35aeb579 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -202,9 +202,7 @@ config HID_SENSOR_ACCEL_3D
config IIO_CROS_EC_ACCEL_LEGACY
tristate "ChromeOS EC Legacy Accelerometer Sensor"
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- select CROS_EC_LPC_REGISTER_DEVICE
+ depends on IIO_CROS_EC_SENSORS_CORE
help
Say yes here to get support for accelerometers on Chromebook using
legacy EC firmware.
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 46bb2e421bb9..39002cb5605d 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -5,13 +5,14 @@
* Copyright 2017 Google, Inc
*
* This driver uses the memory mapper cros-ec interface to communicate
- * with the Chrome OS EC about accelerometer data.
+ * with the Chrome OS EC about accelerometer data or older commands.
* Accelerometer access is presented through iio sysfs.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
@@ -25,160 +26,41 @@
#define DRV_NAME "cros-ec-accel-legacy"
+#define CROS_EC_SENSOR_LEGACY_NUM 2
/*
* Sensor scale hard coded at 10 bits per g, computed as:
* g / (2^10 - 1) = 0.009586168; with g = 9.80665 m.s^-2
*/
#define ACCEL_LEGACY_NSCALE 9586168
-/* Indices for EC sensor values. */
-enum {
- X,
- Y,
- Z,
- MAX_AXIS,
-};
-
-/* State data for cros_ec_accel_legacy iio driver. */
-struct cros_ec_accel_legacy_state {
- struct cros_ec_device *ec;
-
- /*
- * Array holding data from a single capture. 2 bytes per channel
- * for the 3 channels plus the timestamp which is always last and
- * 8-bytes aligned.
- */
- s16 capture_data[8];
- s8 sign[MAX_AXIS];
- u8 sensor_num;
-};
-
-static int ec_cmd_read_u8(struct cros_ec_device *ec, unsigned int offset,
- u8 *dest)
-{
- return ec->cmd_readmem(ec, offset, 1, dest);
-}
-
-static int ec_cmd_read_u16(struct cros_ec_device *ec, unsigned int offset,
- u16 *dest)
-{
- __le16 tmp;
- int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
-
- *dest = le16_to_cpu(tmp);
-
- return ret;
-}
-
-/**
- * read_ec_until_not_busy() - Read from EC status byte until it reads not busy.
- * @st: Pointer to state information for device.
- *
- * This function reads EC status until its busy bit gets cleared. It does not
- * wait indefinitely and returns -EIO if the EC status is still busy after a
- * few hundreds milliseconds.
- *
- * Return: 8-bit status if ok, -EIO on error
- */
-static int read_ec_until_not_busy(struct cros_ec_accel_legacy_state *st)
-{
- struct cros_ec_device *ec = st->ec;
- u8 status;
- int attempts = 0;
-
- ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
- while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
- /* Give up after enough attempts, return error. */
- if (attempts++ >= 50)
- return -EIO;
-
- /* Small delay every so often. */
- if (attempts % 5 == 0)
- msleep(25);
-
- ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
- }
-
- return status;
-}
-
-/**
- * read_ec_accel_data_unsafe() - Read acceleration data from EC shared memory.
- * @st: Pointer to state information for device.
- * @scan_mask: Bitmap of the sensor indices to scan.
- * @data: Location to store data.
- *
- * This is the unsafe function for reading the EC data. It does not guarantee
- * that the EC will not modify the data as it is being read in.
- */
-static void read_ec_accel_data_unsafe(struct cros_ec_accel_legacy_state *st,
- unsigned long scan_mask, s16 *data)
-{
- int i = 0;
- int num_enabled = bitmap_weight(&scan_mask, MAX_AXIS);
-
- /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
- while (num_enabled--) {
- i = find_next_bit(&scan_mask, MAX_AXIS, i);
- ec_cmd_read_u16(st->ec,
- EC_MEMMAP_ACC_DATA +
- sizeof(s16) *
- (1 + i + st->sensor_num * MAX_AXIS),
- data);
- *data *= st->sign[i];
- i++;
- data++;
- }
-}
-
-/**
- * read_ec_accel_data() - Read acceleration data from EC shared memory.
- * @st: Pointer to state information for device.
- * @scan_mask: Bitmap of the sensor indices to scan.
- * @data: Location to store data.
- *
- * This is the safe function for reading the EC data. It guarantees that
- * the data sampled was not modified by the EC while being read.
- *
- * Return: 0 if ok, -ve on error
- */
-static int read_ec_accel_data(struct cros_ec_accel_legacy_state *st,
- unsigned long scan_mask, s16 *data)
+static int cros_ec_accel_legacy_read_cmd(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data)
{
- u8 samp_id = 0xff;
- u8 status = 0;
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
int ret;
- int attempts = 0;
+ unsigned int i;
+ u8 sensor_num;
/*
- * Continually read all data from EC until the status byte after
- * all reads reflects that the EC is not busy and the sample id
- * matches the sample id from before all reads. This guarantees
- * that data read in was not modified by the EC while reading.
+ * Read all sensor data through a command.
+ * Save sensor_num, it is assumed to stay.
*/
- while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
- EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
- /* If we have tried to read too many times, return error. */
- if (attempts++ >= 5)
- return -EIO;
-
- /* Read status byte until EC is not busy. */
- ret = read_ec_until_not_busy(st);
- if (ret < 0)
- return ret;
- status = ret;
-
- /*
- * Store the current sample id so that we can compare to the
- * sample id after reading the data.
- */
- samp_id = status & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
-
- /* Read all EC data, format it, and store it into data. */
- read_ec_accel_data_unsafe(st, scan_mask, data);
+ sensor_num = st->param.info.sensor_num;
+ st->param.cmd = MOTIONSENSE_CMD_DUMP;
+ st->param.dump.max_sensor_count = CROS_EC_SENSOR_LEGACY_NUM;
+ ret = cros_ec_motion_send_host_cmd(st,
+ sizeof(st->resp->dump) + CROS_EC_SENSOR_LEGACY_NUM *
+ sizeof(struct ec_response_motion_sensor_data));
+ st->param.info.sensor_num = sensor_num;
+ if (ret != 0) {
+ dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
+ return ret;
+ }
- /* Read status byte. */
- ec_cmd_read_u8(st->ec, EC_MEMMAP_ACC_STATUS, &status);
+ for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+ *data = st->resp->dump.sensor[sensor_num].data[i] *
+ st->sign[i];
+ data++;
}
return 0;
@@ -188,28 +70,40 @@ static int cros_ec_accel_legacy_read(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
s16 data = 0;
- int ret = IIO_VAL_INT;
+ int ret;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->cmd_lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = read_ec_accel_data(st, (1 << chan->scan_index), &data);
- if (ret)
- return ret;
+ ret = st->read_ec_sensors_data(indio_dev, 1 << idx, &data);
+ if (ret < 0)
+ break;
+ ret = IIO_VAL_INT;
*val = data;
- return IIO_VAL_INT;
+ break;
case IIO_CHAN_INFO_SCALE:
+ WARN_ON(st->type != MOTIONSENSE_TYPE_ACCEL);
*val = 0;
*val2 = ACCEL_LEGACY_NSCALE;
- return IIO_VAL_INT_PLUS_NANO;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
/* Calibration not supported. */
*val = 0;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ break;
default:
- return -EINVAL;
+ ret = cros_ec_sensors_core_read(st, chan, val, val2,
+ mask);
+ break;
}
+ mutex_unlock(&st->cmd_lock);
+
+ return ret;
}
static int cros_ec_accel_legacy_write(struct iio_dev *indio_dev,
@@ -231,86 +125,14 @@ static const struct iio_info cros_ec_accel_legacy_info = {
.write_raw = &cros_ec_accel_legacy_write,
};
-/**
- * cros_ec_accel_legacy_capture() - The trigger handler function
- * @irq: The interrupt number.
- * @p: Private data - always a pointer to the poll func.
- *
- * On a trigger event occurring, if the pollfunc is attached then this
- * handler is called as a threaded interrupt (and hence may sleep). It
- * is responsible for grabbing data from the device and pushing it into
- * the associated buffer.
- *
- * Return: IRQ_HANDLED
+/*
+ * Present the channel using HTML5 standard:
+ * need to invert X and Y and invert some lid axis.
*/
-static irqreturn_t cros_ec_accel_legacy_capture(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
-
- /* Clear capture data. */
- memset(st->capture_data, 0, sizeof(st->capture_data));
-
- /*
- * Read data based on which channels are enabled in scan mask. Note
- * that on a capture we are always reading the calibrated data.
- */
- read_ec_accel_data(st, *indio_dev->active_scan_mask, st->capture_data);
-
- iio_push_to_buffers_with_timestamp(indio_dev, (void *)st->capture_data,
- iio_get_time_ns(indio_dev));
-
- /*
- * Tell the core we are done with this trigger and ready for the
- * next one.
- */
- iio_trigger_notify_done(indio_dev->trig);
-
- return IRQ_HANDLED;
-}
-
-static char *cros_ec_accel_legacy_loc_strings[] = {
- [MOTIONSENSE_LOC_BASE] = "base",
- [MOTIONSENSE_LOC_LID] = "lid",
- [MOTIONSENSE_LOC_MAX] = "unknown",
-};
-
-static ssize_t cros_ec_accel_legacy_loc(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- char *buf)
-{
- struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%s\n",
- cros_ec_accel_legacy_loc_strings[st->sensor_num +
- MOTIONSENSE_LOC_BASE]);
-}
-
-static ssize_t cros_ec_accel_legacy_id(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- char *buf)
-{
- struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
-
- return sprintf(buf, "%d\n", st->sensor_num);
-}
-
-static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
- {
- .name = "id",
- .shared = IIO_SHARED_BY_ALL,
- .read = cros_ec_accel_legacy_id,
- },
- {
- .name = "location",
- .shared = IIO_SHARED_BY_ALL,
- .read = cros_ec_accel_legacy_loc,
- },
- { }
-};
+#define CROS_EC_ACCEL_ROTATE_AXIS(_axis) \
+ ((_axis) == CROS_EC_SENSOR_Z ? CROS_EC_SENSOR_Z : \
+ ((_axis) == CROS_EC_SENSOR_X ? CROS_EC_SENSOR_Y : \
+ CROS_EC_SENSOR_X))
#define CROS_EC_ACCEL_LEGACY_CHAN(_axis) \
{ \
@@ -319,31 +141,30 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
.modified = 1, \
.info_mask_separate = \
BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
- .ext_info = cros_ec_accel_legacy_ext_info, \
+ .ext_info = cros_ec_sensors_ext_info, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
+ .realbits = CROS_EC_SENSOR_BITS, \
+ .storagebits = CROS_EC_SENSOR_BITS, \
}, \
+ .scan_index = CROS_EC_ACCEL_ROTATE_AXIS(_axis), \
} \
-static struct iio_chan_spec ec_accel_channels[] = {
- CROS_EC_ACCEL_LEGACY_CHAN(X),
- CROS_EC_ACCEL_LEGACY_CHAN(Y),
- CROS_EC_ACCEL_LEGACY_CHAN(Z),
- IIO_CHAN_SOFT_TIMESTAMP(MAX_AXIS)
+static const struct iio_chan_spec cros_ec_accel_legacy_channels[] = {
+ CROS_EC_ACCEL_LEGACY_CHAN(CROS_EC_SENSOR_X),
+ CROS_EC_ACCEL_LEGACY_CHAN(CROS_EC_SENSOR_Y),
+ CROS_EC_ACCEL_LEGACY_CHAN(CROS_EC_SENSOR_Z),
+ IIO_CHAN_SOFT_TIMESTAMP(CROS_EC_SENSOR_MAX_AXIS)
};
static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
- struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
struct iio_dev *indio_dev;
- struct cros_ec_accel_legacy_state *state;
+ struct cros_ec_sensors_core_state *state;
int ret;
if (!ec || !ec->ec_dev) {
@@ -351,46 +172,32 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!ec->ec_dev->cmd_readmem) {
- dev_warn(&pdev->dev, "EC does not support direct reads.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
- platform_set_drvdata(pdev, indio_dev);
- state = iio_priv(indio_dev);
- state->ec = ec->ec_dev;
- state->sensor_num = sensor_platform->sensor_num;
-
- indio_dev->dev.parent = dev;
- indio_dev->name = pdev->name;
- indio_dev->channels = ec_accel_channels;
- /*
- * Present the channel using HTML5 standard:
- * need to invert X and Y and invert some lid axis.
- */
- ec_accel_channels[X].scan_index = Y;
- ec_accel_channels[Y].scan_index = X;
- ec_accel_channels[Z].scan_index = Z;
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ if (ret)
+ return ret;
- state->sign[Y] = 1;
+ indio_dev->info = &cros_ec_accel_legacy_info;
+ state = iio_priv(indio_dev);
- if (state->sensor_num == MOTIONSENSE_LOC_LID)
- state->sign[X] = state->sign[Z] = -1;
+ if (state->ec->cmd_readmem != NULL)
+ state->read_ec_sensors_data = cros_ec_sensors_read_lpc;
else
- state->sign[X] = state->sign[Z] = 1;
-
- indio_dev->num_channels = ARRAY_SIZE(ec_accel_channels);
- indio_dev->dev.parent = &pdev->dev;
- indio_dev->info = &cros_ec_accel_legacy_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
+ state->read_ec_sensors_data = cros_ec_accel_legacy_read_cmd;
+
+ indio_dev->channels = cros_ec_accel_legacy_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cros_ec_accel_legacy_channels);
+ /* The lid sensor needs to be presented inverted. */
+ if (state->loc == MOTIONSENSE_LOC_LID) {
+ state->sign[CROS_EC_SENSOR_X] = -1;
+ state->sign[CROS_EC_SENSOR_Z] = -1;
+ }
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_accel_legacy_capture,
- NULL);
+ cros_ec_sensors_capture, NULL);
if (ret)
return ret;
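
For the ACCEL_LEGACY_NSCALE value kept above: the legacy EC reports 10-bit counts per g, so one LSB is 9.80665 / (2^10 - 1) ≈ 0.009586168 m/s², which IIO reports as INT_PLUS_NANO with val = 0 and val2 = 9586168. A quick stand-alone check of that arithmetic (plain user-space C, nothing driver-specific):

#include <stdio.h>

int main(void)
{
	double g = 9.80665;			/* m/s^2 */
	double lsb = g / ((1 << 10) - 1);	/* 10 bits per g */

	printf("scale = %.9f m/s^2 per LSB (~%ld nano)\n",
	       lsb, (long)(lsb * 1e9));
	return 0;
}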
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 6645771aa349..fee535d6e45b 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1486,8 +1486,8 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KIOX0008", KXCJ91008},
{"KIOX0009", KXTJ21009},
{"KIOX000A", KXCJ91008},
- {"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
- {"KIOX020A", KXCJ91008},
+ {"KIOX010A", KXCJ91008}, /* KXCJ91008 in the display of a yoga 2-in-1 */
+ {"KIOX020A", KXCJ91008}, /* KXCJ91008 in the base of a yoga 2-in-1 */
{"KXTJ1009", KXTJ21009},
{"KXJ2109", KXTJ21009},
{"SMO8500", KXCJ91008},
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 637e6e676061..3d5bea651923 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -424,7 +424,7 @@ static int mxc4005_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mxc4005_info;
- ret = iio_triggered_buffer_setup(indio_dev,
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
iio_pollfunc_store_time,
mxc4005_trigger_handler,
NULL);
@@ -452,7 +452,7 @@ static int mxc4005_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev,
"failed to init threaded irq\n");
- goto err_buffer_cleanup;
+ return ret;
}
data->dready_trig->dev.parent = &client->dev;
@@ -460,43 +460,16 @@ static int mxc4005_probe(struct i2c_client *client,
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
indio_dev->trig = data->dready_trig;
iio_trigger_get(indio_dev->trig);
- ret = iio_trigger_register(data->dready_trig);
+ ret = devm_iio_trigger_register(&client->dev,
+ data->dready_trig);
if (ret) {
dev_err(&client->dev,
"failed to register trigger\n");
- goto err_trigger_unregister;
+ return ret;
}
}
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev,
- "unable to register iio device %d\n", ret);
- goto err_buffer_cleanup;
- }
-
- return 0;
-
-err_trigger_unregister:
- iio_trigger_unregister(data->dready_trig);
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int mxc4005_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct mxc4005_data *data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
- if (data->dready_trig)
- iio_trigger_unregister(data->dready_trig);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct acpi_device_id mxc4005_acpi_match[] = {
@@ -517,7 +490,6 @@ static struct i2c_driver mxc4005_driver = {
.acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
},
.probe = mxc4005_probe,
- .remove = mxc4005_remove,
.id_table = mxc4005_id,
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index a923f90f6e80..66d768d971e1 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -111,7 +111,7 @@
/* Currently unsupported */
#define SCA3000_MD_CTRL_AND_Y BIT(3)
#define SCA3000_MD_CTRL_AND_X BIT(4)
-#define SAC3000_MD_CTRL_AND_Z BIT(5)
+#define SCA3000_MD_CTRL_AND_Z BIT(5)
/*
* Some control registers of complex access methods requiring this register to
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 979ab9679b99..af09943f38c9 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -68,6 +68,7 @@ static const struct st_sensors_platform_data default_accel_pdata = {
.drdy_int_pin = 1,
};
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
int st_accel_common_probe(struct iio_dev *indio_dev);
void st_accel_common_remove(struct iio_dev *indio_dev);
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index 0205c0167cdd..9f2b40474b8e 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -29,65 +29,51 @@ int st_accel_trig_set_state(struct iio_trigger *trig, bool state)
return st_sensors_set_dataready_irq(indio_dev, state);
}
-static int st_accel_buffer_preenable(struct iio_dev *indio_dev)
-{
- return st_sensors_set_enable(indio_dev, true);
-}
-
static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *adata = iio_priv(indio_dev);
-
- adata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (adata->buffer_data == NULL) {
- err = -ENOMEM;
- goto allocate_memory_error;
- }
err = iio_triggered_buffer_postenable(indio_dev);
if (err < 0)
- goto st_accel_buffer_postenable_error;
+ return err;
err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ (u8)indio_dev->active_scan_mask[0]);
if (err < 0)
- goto st_sensors_set_axis_enable_error;
+ goto st_accel_buffer_predisable;
- return err;
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_accel_buffer_enable_all_axis;
-st_sensors_set_axis_enable_error:
+ return 0;
+
+st_accel_buffer_enable_all_axis:
+ st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+st_accel_buffer_predisable:
iio_triggered_buffer_predisable(indio_dev);
-st_accel_buffer_postenable_error:
- kfree(adata->buffer_data);
-allocate_memory_error:
return err;
}
static int st_accel_buffer_predisable(struct iio_dev *indio_dev)
{
int err, err2;
- struct st_sensor_data *adata = iio_priv(indio_dev);
-
- err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
- if (err < 0)
- goto st_accel_buffer_predisable_error;
err = st_sensors_set_enable(indio_dev, false);
if (err < 0)
- goto st_accel_buffer_predisable_error;
+ goto st_accel_buffer_predisable;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
-st_accel_buffer_predisable_error:
+st_accel_buffer_predisable:
err2 = iio_triggered_buffer_predisable(indio_dev);
if (!err)
err = err2;
- kfree(adata->buffer_data);
return err;
}
static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
- .preenable = &st_accel_buffer_preenable,
.postenable = &st_accel_buffer_postenable,
.predisable = &st_accel_buffer_predisable,
};
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index e02b79931979..2e37f8a6d8cf 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -13,7 +13,6 @@
#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -1147,32 +1146,45 @@ out:
#endif
}
+/*
+ * st_accel_get_settings() - get sensor settings from device name
+ * @name: device name buffer reference.
+ *
+ * Return: valid reference on success, NULL otherwise.
+ */
+const struct st_sensor_settings *st_accel_get_settings(const char *name)
+{
+ int index = st_sensors_get_settings_index(name,
+ st_accel_sensors_settings,
+ ARRAY_SIZE(st_accel_sensors_settings));
+ if (index < 0)
+ return NULL;
+
+ return &st_accel_sensors_settings[index];
+}
+EXPORT_SYMBOL(st_accel_get_settings);
+
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
struct st_sensors_platform_data *pdata =
(struct st_sensors_platform_data *)adata->dev->platform_data;
- int irq = adata->get_irq_data_ready(indio_dev);
struct iio_chan_spec *channels;
size_t channels_size;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &accel_info;
- mutex_init(&adata->tb.buf_lock);
err = st_sensors_power_enable(indio_dev);
if (err)
return err;
- err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_accel_sensors_settings),
- st_accel_sensors_settings);
+ err = st_sensors_verify_id(indio_dev);
if (err < 0)
goto st_accel_power_off;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
- adata->multiread_bit = adata->sensor_settings->multi_read_bit;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
channels_size = indio_dev->num_channels * sizeof(struct iio_chan_spec);
@@ -1204,7 +1216,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
if (err < 0)
goto st_accel_power_off;
- if (irq > 0) {
+ if (adata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_ACCEL_TRIGGER_OPS);
if (err < 0)
@@ -1221,7 +1233,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
return 0;
st_accel_device_register_error:
- if (irq > 0)
+ if (adata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_accel_probe_trigger_error:
st_accel_deallocate_ring(indio_dev);
@@ -1239,7 +1251,7 @@ void st_accel_common_remove(struct iio_dev *indio_dev)
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (adata->get_irq_data_ready(indio_dev) > 0)
+ if (adata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_accel_deallocate_ring(indio_dev);
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index c3c8f2e73c2a..50fa0fc32baa 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -150,22 +150,33 @@ MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
static int st_accel_i2c_probe(struct i2c_client *client)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *adata;
+ struct iio_dev *indio_dev;
const char *match;
int ret;
+ match = device_get_match_data(&client->dev);
+ if (match)
+ strlcpy(client->name, match, sizeof(client->name));
+
+ settings = st_accel_get_settings(client->name);
+ if (!settings) {
+ dev_err(&client->dev, "device name %s not recognized.\n",
+ client->name);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adata));
if (!indio_dev)
return -ENOMEM;
adata = iio_priv(indio_dev);
+ adata->sensor_settings = (struct st_sensor_settings *)settings;
- match = device_get_match_data(&client->dev);
- if (match)
- strlcpy(client->name, match, sizeof(client->name));
-
- st_sensors_i2c_configure(indio_dev, client, adata);
+ ret = st_sensors_i2c_configure(indio_dev, client);
+ if (ret < 0)
+ return ret;
ret = st_accel_common_probe(indio_dev);
if (ret < 0)
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 474742e35d92..8af7027d5598 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -102,19 +102,31 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
static int st_accel_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *adata;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
+ spi->modalias, sizeof(spi->modalias));
+
+ settings = st_accel_get_settings(spi->modalias);
+ if (!settings) {
+ dev_err(&spi->dev, "device name %s not recognized.\n",
+ spi->modalias);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adata));
if (!indio_dev)
return -ENOMEM;
adata = iio_priv(indio_dev);
+ adata->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
- spi->modalias, sizeof(spi->modalias));
- st_sensors_spi_configure(indio_dev, spi, adata);
+ err = st_sensors_spi_configure(indio_dev, spi);
+ if (err < 0)
+ return err;
err = st_accel_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7e3286265a38..f0af3a42f53c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -958,7 +958,7 @@ config TI_ADC161S626
config TI_ADS1015
tristate "Texas Instruments ADS1015 ADC"
- depends on I2C && !SENSORS_ADS1015
+ depends on I2C
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index aba0fd123a51..f5ba94c03a8d 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -35,6 +35,11 @@ static const unsigned int ad7606_scale_avail[2] = {
152588, 305176
};
+
+static const unsigned int ad7616_sw_scale_avail[3] = {
+ 76293, 152588, 305176
+};
+
static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
@@ -55,6 +60,29 @@ static int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
+static int ad7606_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval) {
+ ret = st->bops->reg_read(st, reg);
+ if (ret < 0)
+ goto err_unlock;
+ *readval = ret;
+ ret = 0;
+ } else {
+ ret = st->bops->reg_write(st, reg, writeval);
+ }
+err_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
static int ad7606_read_samples(struct ad7606_state *st)
{
unsigned int num = st->chip_info->num_channels;
@@ -308,29 +336,6 @@ static const struct attribute_group ad7606_attribute_group_range = {
.attrs = ad7606_attributes_range,
};
-#define AD760X_CHANNEL(num, mask) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = num, \
- .address = num, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
- .info_mask_shared_by_all = mask, \
- .scan_index = num, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 16, \
- .storagebits = 16, \
- .endianness = IIO_CPU, \
- }, \
-}
-
-#define AD7605_CHANNEL(num) \
- AD760X_CHANNEL(num, 0)
-
-#define AD7606_CHANNEL(num) \
- AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO))
-
static const struct iio_chan_spec ad7605_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(4),
AD7605_CHANNEL(0),
@@ -405,12 +410,19 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
},
+ [ID_AD7606B] = {
+ .channels = ad7606_channels,
+ .num_channels = 9,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
+ },
[ID_AD7616] = {
.channels = ad7616_channels,
.num_channels = 17,
.oversampling_avail = ad7616_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
.os_req_reset = true,
+ .init_delay_ms = 15,
},
};
@@ -519,6 +531,14 @@ static const struct iio_info ad7606_info_os_and_range = {
.validate_trigger = &ad7606_validate_trigger,
};
+static const struct iio_info ad7606_info_os_range_and_debug = {
+ .read_raw = &ad7606_read_raw,
+ .write_raw = &ad7606_write_raw,
+ .debugfs_reg_access = &ad7606_reg_access,
+ .attrs = &ad7606_attribute_group_os_and_range,
+ .validate_trigger = &ad7606_validate_trigger,
+};
+
static const struct iio_info ad7606_info_os = {
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
@@ -617,35 +637,29 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
if (ret)
dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
+ /* AD7616 requires at least 15ms to reconfigure after a reset */
+ if (st->chip_info->init_delay_ms) {
+ if (msleep_interruptible(st->chip_info->init_delay_ms))
+ return -ERESTARTSYS;
+ }
+
st->write_scale = ad7606_write_scale_hw;
st->write_os = ad7606_write_os_hw;
- if (st->chip_info->sw_mode_config)
+ if (st->bops->sw_mode_config)
st->sw_mode_en = device_property_present(st->dev,
"adi,sw-mode");
if (st->sw_mode_en) {
+ /* Scale of 0.076293 is only available in sw mode */
+ st->scale_avail = ad7616_sw_scale_avail;
+ st->num_scales = ARRAY_SIZE(ad7616_sw_scale_avail);
+
/* After reset, in software mode, ±10 V is set by default */
memset32(st->range, 2, ARRAY_SIZE(st->range));
- indio_dev->info = &ad7606_info_os_and_range;
-
- /*
- * In software mode, the range gpio has no longer its function.
- * Instead, the scale can be configured individually for each
- * channel from the range registers.
- */
- if (st->chip_info->write_scale_sw)
- st->write_scale = st->chip_info->write_scale_sw;
-
- /*
- * In software mode, the oversampling is no longer configured
- * with GPIO pins. Instead, the oversampling can be configured
- * in configuratiion register.
- */
- if (st->chip_info->write_os_sw)
- st->write_os = st->chip_info->write_os_sw;
-
- ret = st->chip_info->sw_mode_config(indio_dev);
+ indio_dev->info = &ad7606_info_os_range_and_debug;
+
+ ret = st->bops->sw_mode_config(indio_dev);
if (ret < 0)
return ret;
}
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index d8a509c2c428..9350ef1f63b5 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -8,6 +8,36 @@
#ifndef IIO_ADC_AD7606_H_
#define IIO_ADC_AD7606_H_
+#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = num, \
+ .address = num, \
+ .info_mask_separate = mask_sep, \
+ .info_mask_shared_by_type = mask_type, \
+ .info_mask_shared_by_all = mask_all, \
+ .scan_index = num, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define AD7605_CHANNEL(num) \
+ AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
+ BIT(IIO_CHAN_INFO_SCALE), 0)
+
+#define AD7606_CHANNEL(num) \
+ AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO))
+
+#define AD7616_CHANNEL(num) \
+ AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),\
+ 0, BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO))
+
/**
* struct ad7606_chip_info - chip specific information
* @channels: channel specification
@@ -16,12 +46,8 @@
* oversampling ratios.
* @oversampling_num number of elements stored in oversampling_avail array
* @os_req_reset some devices require a reset to update oversampling
- * @write_scale_sw pointer to the function which writes the scale via spi
- in software mode
- * @write_os_sw pointer to the function which writes the os via spi
- in software mode
- * @sw_mode_config: pointer to a function which configured the device
- * for software mode
+ * @init_delay_ms required delay in milliseconds for initialization
+ * after a restart
*/
struct ad7606_chip_info {
const struct iio_chan_spec *channels;
@@ -29,9 +55,7 @@ struct ad7606_chip_info {
const unsigned int *oversampling_avail;
unsigned int oversampling_num;
bool os_req_reset;
- int (*write_scale_sw)(struct iio_dev *indio_dev, int ch, int val);
- int (*write_os_sw)(struct iio_dev *indio_dev, int val);
- int (*sw_mode_config)(struct iio_dev *indio_dev);
+ unsigned long init_delay_ms;
};
/**
@@ -63,6 +87,7 @@ struct ad7606_chip_info {
* @complete completion to indicate end of conversion
* @trig The IIO trigger associated with the device.
* @data buffer for reading data from the device
+ * @d16 be16 buffer for reading data from the device
*/
struct ad7606_state {
struct device *dev;
@@ -96,15 +121,32 @@ struct ad7606_state {
* 16 * 16-bit samples + 64-bit timestamp
*/
unsigned short data[20] ____cacheline_aligned;
+ __be16 d16[2];
};
/**
* struct ad7606_bus_ops - driver bus operations
* @read_block function pointer for reading blocks of data
+ * @sw_mode_config: pointer to a function which configures the device
+ * for software mode
+ * @reg_read function pointer for reading spi register
+ * @reg_write function pointer for writing spi register
+ * @write_mask function pointer for writing spi register with mask
+ * @rd_wr_cmd pointer to the function which calculates the spi address
*/
struct ad7606_bus_ops {
/* more methods added in future? */
int (*read_block)(struct device *dev, int num, void *data);
+ int (*sw_mode_config)(struct iio_dev *indio_dev);
+ int (*reg_read)(struct ad7606_state *st, unsigned int addr);
+ int (*reg_write)(struct ad7606_state *st,
+ unsigned int addr,
+ unsigned int val);
+ int (*write_mask)(struct ad7606_state *st,
+ unsigned int addr,
+ unsigned long mask,
+ unsigned int val);
+ u16 (*rd_wr_cmd)(int addr, char isWriteOp);
};
int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
@@ -116,6 +158,7 @@ enum ad7606_supported_device_ids {
ID_AD7606_8,
ID_AD7606_6,
ID_AD7606_4,
+ ID_AD7606B,
ID_AD7616,
};
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 1b08028facde..f732b3ac7878 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -53,10 +53,8 @@ static int ad7606_par_probe(struct platform_device *pdev)
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index b7faef69a58f..29945ad07dca 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -15,6 +15,91 @@
#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
+#define AD7616_CONFIGURATION_REGISTER 0x02
+#define AD7616_OS_MASK GENMASK(4, 2)
+#define AD7616_BURST_MODE BIT(6)
+#define AD7616_SEQEN_MODE BIT(5)
+#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
+#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
+/*
+ * The ranges of the channels in a group are stored in 2 registers:
+ * channels 0, 1, 2, 3 in the first register, channels 4, 5, 6, 7 in the
+ * second register. For channels of the second group (8-15) the order is
+ * the same, only with an offset of 2 in the register address.
+ */
+#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
+/* The range of the channel is stored in 2 bits */
+#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
+#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
+
+#define AD7606_CONFIGURATION_REGISTER 0x02
+#define AD7606_SINGLE_DOUT 0x00
+
+/*
+ * Ranges for the AD7606B channels are stored in registers starting at address 0x3.
+ * Each register stores the ranges of 2 channels (4 bits per channel).
+ */
+#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_MODE(ch, mode) \
+ ((GENMASK(3, 0) & mode) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
+#define AD7606_OS_MODE 0x08
+
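The two range-packing schemes described in the comments above are easier to see with concrete numbers. A minimal standalone sketch, with the macros re-declared so it builds outside the kernel (the channels and range codes below are arbitrary examples):

#include <stdio.h>

#define AD7616_RANGE_CH_ADDR(ch)	((ch) >> 2)
#define AD7616_RANGE_CH_MSK(ch)		(0x3 << (((ch) & 0x3) * 2))
#define AD7616_RANGE_CH_MODE(ch, mode)	((mode) << (((ch) & 0x3) * 2))

#define AD7606_RANGE_CH_ADDR(ch)	(0x03 + ((ch) >> 1))
#define AD7606_RANGE_CH_MSK(ch)		(0xf << (4 * ((ch) & 0x1)))
#define AD7606_RANGE_CH_MODE(ch, mode)	((0xf & (mode)) << (4 * ((ch) & 0x1)))

int main(void)
{
	/* AD7616: channel 6 of a group sits in the second register of that
	 * group (offset 1), bits [5:4]; range code 0x2 selects +/-5V. */
	printf("ad7616 ch6: reg offset 0x%x mask 0x%02x mode 0x%02x\n",
	       AD7616_RANGE_CH_ADDR(6), AD7616_RANGE_CH_MSK(6),
	       AD7616_RANGE_CH_MODE(6, 0x2));

	/* AD7606B: channel 5 shares register 0x05 with channel 4 and uses
	 * the upper nibble. */
	printf("ad7606b ch5: reg 0x%02x mask 0x%02x mode 0x%02x\n",
	       AD7606_RANGE_CH_ADDR(5), AD7606_RANGE_CH_MSK(5),
	       AD7606_RANGE_CH_MODE(5, 0x3));
	return 0;
}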
+static const struct iio_chan_spec ad7616_sw_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+ AD7616_CHANNEL(0),
+ AD7616_CHANNEL(1),
+ AD7616_CHANNEL(2),
+ AD7616_CHANNEL(3),
+ AD7616_CHANNEL(4),
+ AD7616_CHANNEL(5),
+ AD7616_CHANNEL(6),
+ AD7616_CHANNEL(7),
+ AD7616_CHANNEL(8),
+ AD7616_CHANNEL(9),
+ AD7616_CHANNEL(10),
+ AD7616_CHANNEL(11),
+ AD7616_CHANNEL(12),
+ AD7616_CHANNEL(13),
+ AD7616_CHANNEL(14),
+ AD7616_CHANNEL(15),
+};
+
+static const struct iio_chan_spec ad7606b_sw_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+ AD7616_CHANNEL(0),
+ AD7616_CHANNEL(1),
+ AD7616_CHANNEL(2),
+ AD7616_CHANNEL(3),
+ AD7616_CHANNEL(4),
+ AD7616_CHANNEL(5),
+ AD7616_CHANNEL(6),
+ AD7616_CHANNEL(7),
+};
+
+static const unsigned int ad7606B_oversampling_avail[9] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256
+};
+
+static u16 ad7616_spi_rd_wr_cmd(int addr, char isWriteOp)
+{
+ /*
+ * The register address consists of one w/r bit,
+ * 6 bits of address, followed by one reserved bit.
+ */
+ return ((addr & 0x7F) << 1) | ((isWriteOp & 0x1) << 7);
+}
+
+static u16 ad7606B_spi_rd_wr_cmd(int addr, char is_write_op)
+{
+ /*
+ * The command word consists of one bit, placed in bit 6,
+ * which specifies a read command, followed by 6 bits of
+ * register address.
+ */
+ return (addr & 0x3F) | (((~is_write_op) & 0x1) << 6);
+}
+
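To make the two encodings above concrete, a small standalone sketch (not driver code; the register addresses are arbitrary examples) that prints the command byte each helper would produce; this is the value later placed in the high byte of st->d16[0]:

#include <stdio.h>

static unsigned int ad7616_cmd(int addr, int wr)
{
	return ((addr & 0x7f) << 1) | ((wr & 0x1) << 7);
}

static unsigned int ad7606b_cmd(int addr, int wr)
{
	return (addr & 0x3f) | (((~wr) & 0x1) << 6);
}

int main(void)
{
	/* AD7616 group A range register (0x04): read 0x08, write 0x88 */
	printf("ad7616  addr 0x04: rd 0x%02x wr 0x%02x\n",
	       ad7616_cmd(0x04, 0), ad7616_cmd(0x04, 1));
	/* AD7606B range register 0x03: read 0x43 (read flag in bit 6), write 0x03 */
	printf("ad7606b addr 0x03: rd 0x%02x wr 0x%02x\n",
	       ad7606b_cmd(0x03, 0), ad7606b_cmd(0x03, 1));
	return 0;
}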
static int ad7606_spi_read_block(struct device *dev,
int count, void *buf)
{
@@ -35,17 +120,210 @@ static int ad7606_spi_read_block(struct device *dev,
return 0;
}
+static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d16[0],
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = &st->d16[1],
+ .len = 2,
+ },
+ };
+ int ret;
+
+ st->d16[0] = cpu_to_be16(st->bops->rd_wr_cmd(addr, 0) << 8);
+
+ ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16[1]);
+}
+
+static int ad7606_spi_reg_write(struct ad7606_state *st,
+ unsigned int addr,
+ unsigned int val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+
+ st->d16[0] = cpu_to_be16((st->bops->rd_wr_cmd(addr, 1) << 8) |
+ (val & 0x1FF));
+
+ return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
+}
+
+static int ad7606_spi_write_mask(struct ad7606_state *st,
+ unsigned int addr,
+ unsigned long mask,
+ unsigned int val)
+{
+ int readval;
+
+ readval = st->bops->reg_read(st, addr);
+ if (readval < 0)
+ return readval;
+
+ readval &= ~mask;
+ readval |= val;
+
+ return st->bops->reg_write(st, addr, readval);
+}
+
+static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr, mode, ch_index;
+
+ /*
+ * The AD7616 has 16 channels divided into group A and group B.
+ * The ranges of group A channels are stored in registers starting at
+ * address 4, while those of group B channels start at address 6.
+ * The last bit of the channel number determines whether it is from group A or B
+ * because the order of channels in iio is 0A, 0B, 1A, 1B...
+ */
+ ch_index = ch >> 1;
+
+ ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
+
+ if ((ch & 0x1) == 0) /* channel A */
+ ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
+ else /* channel B */
+ ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
+
+ /* 0b01 for 2.5v, 0b10 for 5v and 0b11 for 10v */
+ mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
+ return st->bops->write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
+ mode);
+}
+
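A worked example of the mapping above (the channel and scale index are arbitrary): IIO channel 5 is physical channel 2 of group B, so

	ch_index = 5 >> 1 = 2
	ch_addr  = AD7616_RANGE_CH_ADDR(2) + AD7616_RANGE_CH_B_ADDR_OFF = 0x00 + 0x06 = 0x06
	mask     = AD7616_RANGE_CH_MSK(2) = 0x30   (bits [5:4])
	val      = 1   (middle entry of ad7616_sw_scale_avail, the +/-5V range)
	mode     = AD7616_RANGE_CH_MODE(2, (1 + 1) & 0x3) = 0x20

and the driver ends up calling write_mask(st, 0x06, 0x30, 0x20).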
+static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return st->bops->write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_OS_MASK, val << 2);
+}
+
+static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_spi_write_mask(st,
+ AD7606_RANGE_CH_ADDR(ch),
+ AD7606_RANGE_CH_MSK(ch),
+ AD7606_RANGE_CH_MODE(ch, val));
+}
+
+static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_spi_reg_write(st, AD7606_OS_MODE, val);
+}
+
+static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ /*
+ * Scale can be configured individually for each channel
+ * in software mode.
+ */
+ indio_dev->channels = ad7616_sw_channels;
+
+ st->write_scale = ad7616_write_scale_sw;
+ st->write_os = &ad7616_write_os_sw;
+
+ /* Activate Burst mode and SEQEN MODE */
+ return st->bops->write_mask(st,
+ AD7616_CONFIGURATION_REGISTER,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+}
+
+static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned long os[3] = {1};
+
+ /*
+ * Software mode is enabled when all three oversampling
+ * pins are set to high. If oversampling gpios are defined
+ * in the device tree, then they need to be set to high,
+ * otherwise, they must be hardwired to VDD.
+ */
+ if (st->gpio_os) {
+ gpiod_set_array_value(ARRAY_SIZE(os),
+ st->gpio_os->desc, st->gpio_os->info, os);
+ }
+ /* OS of 128 and 256 are available only in software mode */
+ st->oversampling_avail = ad7606B_oversampling_avail;
+ st->num_os_ratios = ARRAY_SIZE(ad7606B_oversampling_avail);
+
+ st->write_scale = ad7606_write_scale_sw;
+ st->write_os = &ad7606_write_os_sw;
+
+ /* Configure device spi to output on a single channel */
+ st->bops->reg_write(st,
+ AD7606_CONFIGURATION_REGISTER,
+ AD7606_SINGLE_DOUT);
+
+ /*
+ * Scale can be configured individually for each channel
+ * in software mode.
+ */
+ indio_dev->channels = ad7606b_sw_channels;
+
+ return 0;
+}
+
static const struct ad7606_bus_ops ad7606_spi_bops = {
.read_block = ad7606_spi_read_block,
};
+static const struct ad7606_bus_ops ad7616_spi_bops = {
+ .read_block = ad7606_spi_read_block,
+ .reg_read = ad7606_spi_reg_read,
+ .reg_write = ad7606_spi_reg_write,
+ .write_mask = ad7606_spi_write_mask,
+ .rd_wr_cmd = ad7616_spi_rd_wr_cmd,
+ .sw_mode_config = ad7616_sw_mode_config,
+};
+
+static const struct ad7606_bus_ops ad7606B_spi_bops = {
+ .read_block = ad7606_spi_read_block,
+ .reg_read = ad7606_spi_reg_read,
+ .reg_write = ad7606_spi_reg_write,
+ .write_mask = ad7606_spi_write_mask,
+ .rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
+ .sw_mode_config = ad7606B_sw_mode_config,
+};
+
static int ad7606_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct ad7606_bus_ops *bops;
+
+ switch (id->driver_data) {
+ case ID_AD7616:
+ bops = &ad7616_spi_bops;
+ break;
+ case ID_AD7606B:
+ bops = &ad7606B_spi_bops;
+ break;
+ default:
+ bops = &ad7606_spi_bops;
+ break;
+ }
return ad7606_probe(&spi->dev, spi->irq, NULL,
id->name, id->driver_data,
- &ad7606_spi_bops);
+ bops);
}
static const struct spi_device_id ad7606_id_table[] = {
@@ -53,6 +331,7 @@ static const struct spi_device_id ad7606_id_table[] = {
{ "ad7606-4", ID_AD7606_4 },
{ "ad7606-6", ID_AD7606_6 },
{ "ad7606-8", ID_AD7606_8 },
+ { "ad7606b", ID_AD7606B },
{ "ad7616", ID_AD7616 },
{}
};
@@ -63,6 +342,7 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-4" },
{ .compatible = "adi,ad7606-6" },
{ .compatible = "adi,ad7606-8" },
+ { .compatible = "adi,ad7606b" },
{ .compatible = "adi,ad7616" },
{ },
};
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 32f1c4a33b20..abe99856c823 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1179,10 +1179,8 @@ static int at91_adc_probe(struct platform_device *pdev)
idev->info = &at91_adc_info;
st->irq = platform_get_irq(pdev, 0);
- if (st->irq < 0) {
- dev_err(&pdev->dev, "No IRQ ID is designated\n");
+ if (st->irq < 0)
return -ENODEV;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 31d51bcc5f2c..adc9cf7a075d 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -225,10 +225,8 @@ static int axp288_adc_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->irq = platform_get_irq(pdev, 0);
- if (info->irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (info->irq < 0)
return info->irq;
- }
platform_set_drvdata(pdev, indio_dev);
info->regmap = axp20x->regmap;
/*
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index c46c0aa15376..646ebdc0a8b4 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -540,11 +540,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
}
adc_priv->irqno = platform_get_irq(pdev, 0);
- if (adc_priv->irqno <= 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
- ret = -ENODEV;
- return ret;
- }
+ if (adc_priv->irqno <= 0)
+ return -ENODEV;
ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
IPROC_ADC_AUXIN_SCAN_ENA, 0);
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index 354433996101..ae8bcc32f63d 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -337,10 +337,8 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
init_completion(&gpadc->complete);
irq = platform_get_irq_byname(pdev, "GPADC");
- if (irq < 0) {
- dev_err(dev, "Failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL, da9150_gpadc_irq,
IRQF_ONESHOT, "GPADC", gpadc);
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index 2f2b563c1162..28f3d6758eb5 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -357,11 +357,8 @@ static int envelope_detector_probe(struct platform_device *pdev)
}
env->comp_irq = platform_get_irq_byname(pdev, "comp");
- if (env->comp_irq < 0) {
- if (env->comp_irq != -EPROBE_DEFER)
- dev_err(dev, "failed to get compare interrupt\n");
+ if (env->comp_irq < 0)
return env->comp_irq;
- }
ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
0, "envelope-detector", env);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index d4c3ece21679..42a3ced11fbd 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -805,10 +805,8 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (irq < 0)
return irq;
- }
info->irq = irq;
irq = platform_get_irq(pdev, 1);
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index df19ecae52f7..fa71489195c6 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -340,7 +340,6 @@ static int mx25_gcq_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq <= 0) {
- dev_err(dev, "Failed to get IRQ\n");
ret = priv->irq;
if (!ret)
ret = -ENXIO;
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 35951c47004e..8da45bf36d36 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -456,6 +456,11 @@ err_read:
return IRQ_HANDLED;
}
+static void hi8435_triggered_event_cleanup(void *data)
+{
+ iio_triggered_event_cleanup(data);
+}
+
static int hi8435_probe(struct spi_device *spi)
{
struct iio_dev *idev;
@@ -477,7 +482,7 @@ static int hi8435_probe(struct spi_device *spi)
hi8435_writeb(priv, HI8435_CTRL_REG, 0);
} else {
udelay(5);
- gpiod_set_value(reset_gpio, 1);
+ gpiod_set_value_cansleep(reset_gpio, 1);
}
spi_set_drvdata(spi, idev);
@@ -513,27 +518,13 @@ static int hi8435_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = iio_device_register(idev);
- if (ret < 0) {
- dev_err(&spi->dev, "unable to register device\n");
- goto unregister_triggered_event;
- }
-
- return 0;
-
-unregister_triggered_event:
- iio_triggered_event_cleanup(idev);
- return ret;
-}
-
-static int hi8435_remove(struct spi_device *spi)
-{
- struct iio_dev *idev = spi_get_drvdata(spi);
-
- iio_device_unregister(idev);
- iio_triggered_event_cleanup(idev);
+ ret = devm_add_action_or_reset(&spi->dev,
+ hi8435_triggered_event_cleanup,
+ idev);
+ if (ret)
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, idev);
}
static const struct of_device_id hi8435_dt_ids[] = {
@@ -554,7 +545,6 @@ static struct spi_driver hi8435_driver = {
.of_match_table = of_match_ptr(hi8435_dt_ids),
},
.probe = hi8435_probe,
- .remove = hi8435_remove,
.id_table = hi8435_id,
};
module_spi_driver(hi8435_driver);
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 26a7bbe4d534..2a2fbf788e95 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -492,10 +492,8 @@ static int imx7d_adc_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "No irq resource?\n");
+ if (irq < 0)
return irq;
- }
info->clk = devm_clk_get(dev, "adc");
if (IS_ERR(info->clk)) {
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index 92b1d5037ac9..e234970b7150 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -11,6 +11,7 @@
#include <linux/iio/iio.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -22,8 +23,11 @@
#define JZ_ADC_REG_ADTCH 0x18
#define JZ_ADC_REG_ADBDAT 0x1c
#define JZ_ADC_REG_ADSDAT 0x20
+#define JZ_ADC_REG_ADCLK 0x28
#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
+#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
+#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
#define JZ_ADC_AUX_VREF 3300
#define JZ_ADC_AUX_VREF_BITS 12
@@ -34,6 +38,8 @@
#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+struct ingenic_adc;
+
struct ingenic_adc_soc_data {
unsigned int battery_high_vref;
unsigned int battery_high_vref_bits;
@@ -41,6 +47,7 @@ struct ingenic_adc_soc_data {
size_t battery_raw_avail_size;
const int *battery_scale_avail;
size_t battery_scale_avail_size;
+ int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
};
struct ingenic_adc {
@@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = {
JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
};
+static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
+{
+ struct clk *parent_clk;
+ unsigned long parent_rate, rate;
+ unsigned int div_main, div_10us;
+
+ parent_clk = clk_get_parent(adc->clk);
+ if (!parent_clk) {
+ dev_err(dev, "ADC clock has no parent\n");
+ return -ENODEV;
+ }
+ parent_rate = clk_get_rate(parent_clk);
+
+ /*
+ * The JZ4725B ADC works at 500 kHz to 8 MHz.
+ * We pick the highest rate possible.
+ * In practice we typically get 6 MHz, half of the 12 MHz EXT clock.
+ */
+ div_main = DIV_ROUND_UP(parent_rate, 8000000);
+ div_main = clamp(div_main, 1u, 64u);
+ rate = parent_rate / div_main;
+ if (rate < 500000 || rate > 8000000) {
+ dev_err(dev, "No valid divider for ADC main clock\n");
+ return -EINVAL;
+ }
+
+ /* We also need a divider that produces a 10us clock. */
+ div_10us = DIV_ROUND_UP(rate, 100000);
+
+ writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
+ adc->base + JZ_ADC_REG_ADCLK);
+
+ return 0;
+}
+
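A standalone sketch of the divider arithmetic above for the 12 MHz EXT parent clock mentioned in the comment (illustrative only, not kernel code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent_rate = 12000000;	/* 12 MHz EXT clock */
	unsigned long rate;
	unsigned int div_main, div_10us;

	div_main = DIV_ROUND_UP(parent_rate, 8000000);	/* 2 */
	rate = parent_rate / div_main;			/* 6 MHz */
	div_10us = DIV_ROUND_UP(rate, 100000);		/* 60 -> one tick per 10 us */

	/* ADCLK = ((div_10us - 1) << 16) | (div_main - 1) = 0x003b0001 */
	printf("div_main=%u rate=%lu div_10us=%u ADCLK=0x%08lx\n",
	       div_main, rate, div_10us,
	       ((unsigned long)(div_10us - 1) << 16) | (div_main - 1));
	return 0;
}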
static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
.battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
.battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
@@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
.battery_scale_avail = jz4725b_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
+ .init_clk_div = jz4725b_adc_init_clk_div,
};
static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
@@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
.battery_scale_avail = jz4740_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
+ .init_clk_div = NULL, /* no ADCLK register on JZ4740 */
};
static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
@@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev)
return ret;
}
+ /* Set clock dividers. */
+ if (soc_data->init_clk_div) {
+ ret = soc_data->init_clk_div(dev, adc);
+ if (ret) {
+ clk_disable_unprepare(adc->clk);
+ return ret;
+ }
+ }
+
/* Put hardware in a known passive state. */
writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index a6ee1c3a9064..b896f7ff4572 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -172,10 +172,8 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "failed getting interrupt resource\n");
+ if (irq <= 0)
return -ENXIO;
- }
retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
LPC32XXAD_NAME, st);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index da84adfdb819..214883458582 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -427,8 +427,9 @@ static int max1027_probe(struct spi_device *spi)
return -ENOMEM;
}
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- &max1027_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &max1027_trigger_handler, NULL);
if (ret < 0) {
dev_err(&indio_dev->dev, "Failed to setup buffer\n");
return ret;
@@ -439,7 +440,7 @@ static int max1027_probe(struct spi_device *spi)
if (st->trig == NULL) {
ret = -ENOMEM;
dev_err(&indio_dev->dev, "Failed to allocate iio trigger\n");
- goto fail_trigger_alloc;
+ return ret;
}
st->trig->ops = &max1027_trigger_ops;
@@ -454,7 +455,7 @@ static int max1027_probe(struct spi_device *spi)
spi->dev.driver->name, st->trig);
if (ret < 0) {
dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
- goto fail_dev_register;
+ return ret;
}
/* Disable averaging */
@@ -462,34 +463,10 @@ static int max1027_probe(struct spi_device *spi)
ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
dev_err(&indio_dev->dev, "Failed to configure averaging register\n");
- goto fail_dev_register;
- }
-
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to register iio device\n");
- goto fail_dev_register;
+ return ret;
}
- return 0;
-
-fail_dev_register:
-fail_trigger_alloc:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int max1027_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- pr_debug("%s: remove(spi = 0x%p)\n", __func__, spi);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver max1027_driver = {
@@ -498,7 +475,6 @@ static struct spi_driver max1027_driver = {
.of_match_table = of_match_ptr(max1027_adc_dt_ids),
},
.probe = max1027_probe,
- .remove = max1027_remove,
.id_table = max1027_id,
};
module_spi_driver(max1027_driver);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 917223d5ff5b..da073d72f649 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -83,7 +83,7 @@
#define MAX9611_TEMP_MAX_POS 0x7f80
#define MAX9611_TEMP_MAX_NEG 0xff80
#define MAX9611_TEMP_MIN_NEG 0xd980
-#define MAX9611_TEMP_MASK GENMASK(7, 15)
+#define MAX9611_TEMP_MASK GENMASK(15, 7)
#define MAX9611_TEMP_SHIFT 0x07
#define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
#define MAX9611_TEMP_SCALE_NUM 1000000
@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
if (ret)
return ret;
- regval = ret & MAX9611_TEMP_MASK;
+ regval &= MAX9611_TEMP_MASK;
if ((regval > MAX9611_TEMP_MAX_POS &&
regval < MAX9611_TEMP_MIN_NEG) ||
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 193b3b81de4d..910f3585fa54 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -225,7 +225,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
goto err_disable_clk;
}
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 2d685730f867..c37f201294b2 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Only %i channels supported with %pOFn, but reg = <%i>.\n",
num_channels, child, reg);
- return ret;
+ return -EINVAL;
}
}
@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Channel %i uses different ADC mode than the rest.\n",
reg);
- return ret;
+ return -EINVAL;
}
/* Channel is valid, grab the regulator. */
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index dd8299831e09..582ba047c4a6 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -244,10 +244,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
init_completion(&info->completion);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, rockchip_saradc_isr,
0, dev_name(&pdev->dev), info);
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index f7f7a18904b4..a6c046575ec3 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -3,7 +3,6 @@
#include <linux/hwspinlock.h>
#include <linux/iio/iio.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
@@ -46,14 +45,18 @@
/* Bits definitions for SC27XX_ADC_INT_CLR registers */
#define SC27XX_ADC_IRQ_CLR BIT(0)
+/* Bits definitions for SC27XX_ADC_INT_RAW registers */
+#define SC27XX_ADC_IRQ_RAW BIT(0)
+
/* Mask definition for SC27XX_ADC_DATA register */
#define SC27XX_ADC_DATA_MASK GENMASK(11, 0)
/* Timeout (ms) for the trylock of hardware spinlocks */
#define SC27XX_ADC_HWLOCK_TIMEOUT 5000
-/* Timeout (ms) for ADC data conversion according to ADC datasheet */
-#define SC27XX_ADC_RDY_TIMEOUT 100
+/* Timeout (us) for ADC data conversion according to ADC datasheet */
+#define SC27XX_ADC_RDY_TIMEOUT 1000000
+#define SC27XX_ADC_POLL_RAW_STATUS 500
/* Maximum ADC channel number */
#define SC27XX_ADC_CHANNEL_MAX 32
@@ -72,10 +75,8 @@ struct sc27xx_adc_data {
* subsystems which will access the unique ADC controller.
*/
struct hwspinlock *hwlock;
- struct completion completion;
int channel_scale[SC27XX_ADC_CHANNEL_MAX];
u32 base;
- int value;
int irq;
};
@@ -188,9 +189,7 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
int scale, int *val)
{
int ret;
- u32 tmp;
-
- reinit_completion(&data->completion);
+ u32 tmp, value, status;
ret = hwspin_lock_timeout_raw(data->hwlock, SC27XX_ADC_HWLOCK_TIMEOUT);
if (ret) {
@@ -203,6 +202,11 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
if (ret)
goto unlock_adc;
+ ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
+ SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
+ if (ret)
+ goto disable_adc;
+
/* Configure the channel id and scale */
tmp = (scale << SC27XX_ADC_SCALE_SHIFT) & SC27XX_ADC_SCALE_MASK;
tmp |= channel & SC27XX_ADC_CHN_ID_MASK;
@@ -226,15 +230,22 @@ static int sc27xx_adc_read(struct sc27xx_adc_data *data, int channel,
if (ret)
goto disable_adc;
- ret = wait_for_completion_timeout(&data->completion,
- msecs_to_jiffies(SC27XX_ADC_RDY_TIMEOUT));
- if (!ret) {
- dev_err(data->dev, "read ADC data timeout\n");
- ret = -ETIMEDOUT;
- } else {
- ret = 0;
+ ret = regmap_read_poll_timeout(data->regmap,
+ data->base + SC27XX_ADC_INT_RAW,
+ status, (status & SC27XX_ADC_IRQ_RAW),
+ SC27XX_ADC_POLL_RAW_STATUS,
+ SC27XX_ADC_RDY_TIMEOUT);
+ if (ret) {
+ dev_err(data->dev, "read adc timeout, status = 0x%x\n", status);
+ goto disable_adc;
}
+ ret = regmap_read(data->regmap, data->base + SC27XX_ADC_DATA, &value);
+ if (ret)
+ goto disable_adc;
+
+ value &= SC27XX_ADC_DATA_MASK;
+
disable_adc:
regmap_update_bits(data->regmap, data->base + SC27XX_ADC_CTL,
SC27XX_ADC_EN, 0);
@@ -242,32 +253,11 @@ unlock_adc:
hwspin_unlock_raw(data->hwlock);
if (!ret)
- *val = data->value;
+ *val = value;
return ret;
}
-static irqreturn_t sc27xx_adc_isr(int irq, void *dev_id)
-{
- struct sc27xx_adc_data *data = dev_id;
- int ret;
-
- ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_CLR,
- SC27XX_ADC_IRQ_CLR, SC27XX_ADC_IRQ_CLR);
- if (ret)
- return IRQ_RETVAL(ret);
-
- ret = regmap_read(data->regmap, data->base + SC27XX_ADC_DATA,
- &data->value);
- if (ret)
- return IRQ_RETVAL(ret);
-
- data->value &= SC27XX_ADC_DATA_MASK;
- complete(&data->completion);
-
- return IRQ_HANDLED;
-}
-
static void sc27xx_adc_volt_ratio(struct sc27xx_adc_data *data,
int channel, int scale,
u32 *div_numerator, u32 *div_denominator)
@@ -454,11 +444,6 @@ static int sc27xx_adc_enable(struct sc27xx_adc_data *data)
if (ret)
goto disable_adc;
- ret = regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
- SC27XX_ADC_IRQ_EN, SC27XX_ADC_IRQ_EN);
- if (ret)
- goto disable_clk;
-
/* ADC channel scales' calibration from nvmem device */
ret = sc27xx_adc_scale_calibration(data, true);
if (ret)
@@ -484,9 +469,6 @@ static void sc27xx_adc_disable(void *_data)
{
struct sc27xx_adc_data *data = _data;
- regmap_update_bits(data->regmap, data->base + SC27XX_ADC_INT_EN,
- SC27XX_ADC_IRQ_EN, 0);
-
/* Disable ADC work clock and controller clock */
regmap_update_bits(data->regmap, SC27XX_ARM_CLK_EN,
SC27XX_CLK_ADC_EN | SC27XX_CLK_ADC_CLK_EN, 0);
@@ -504,88 +486,76 @@ static void sc27xx_adc_free_hwlock(void *_data)
static int sc27xx_adc_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct sc27xx_adc_data *sc27xx_data;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*sc27xx_data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*sc27xx_data));
if (!indio_dev)
return -ENOMEM;
sc27xx_data = iio_priv(indio_dev);
- sc27xx_data->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ sc27xx_data->regmap = dev_get_regmap(dev->parent, NULL);
if (!sc27xx_data->regmap) {
- dev_err(&pdev->dev, "failed to get ADC regmap\n");
+ dev_err(dev, "failed to get ADC regmap\n");
return -ENODEV;
}
ret = of_property_read_u32(np, "reg", &sc27xx_data->base);
if (ret) {
- dev_err(&pdev->dev, "failed to get ADC base address\n");
+ dev_err(dev, "failed to get ADC base address\n");
return ret;
}
sc27xx_data->irq = platform_get_irq(pdev, 0);
- if (sc27xx_data->irq < 0) {
- dev_err(&pdev->dev, "failed to get ADC irq number\n");
+ if (sc27xx_data->irq < 0)
return sc27xx_data->irq;
- }
ret = of_hwspin_lock_get_id(np, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ dev_err(dev, "failed to get hwspinlock id\n");
return ret;
}
sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
if (!sc27xx_data->hwlock) {
- dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ dev_err(dev, "failed to request hwspinlock\n");
return -ENXIO;
}
- ret = devm_add_action(&pdev->dev, sc27xx_adc_free_hwlock,
+ ret = devm_add_action_or_reset(dev, sc27xx_adc_free_hwlock,
sc27xx_data->hwlock);
if (ret) {
- sc27xx_adc_free_hwlock(sc27xx_data->hwlock);
- dev_err(&pdev->dev, "failed to add hwspinlock action\n");
+ dev_err(dev, "failed to add hwspinlock action\n");
return ret;
}
- init_completion(&sc27xx_data->completion);
- sc27xx_data->dev = &pdev->dev;
+ sc27xx_data->dev = dev;
ret = sc27xx_adc_enable(sc27xx_data);
if (ret) {
- dev_err(&pdev->dev, "failed to enable ADC module\n");
- return ret;
- }
-
- ret = devm_add_action(&pdev->dev, sc27xx_adc_disable, sc27xx_data);
- if (ret) {
- sc27xx_adc_disable(sc27xx_data);
- dev_err(&pdev->dev, "failed to add ADC disable action\n");
+ dev_err(dev, "failed to enable ADC module\n");
return ret;
}
- ret = devm_request_threaded_irq(&pdev->dev, sc27xx_data->irq, NULL,
- sc27xx_adc_isr, IRQF_ONESHOT,
- pdev->name, sc27xx_data);
+ ret = devm_add_action_or_reset(dev, sc27xx_adc_disable, sc27xx_data);
if (ret) {
- dev_err(&pdev->dev, "failed to request ADC irq\n");
+ dev_err(dev, "failed to add ADC disable action\n");
return ret;
}
- indio_dev->dev.parent = &pdev->dev;
- indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->name = dev_name(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &sc27xx_info;
indio_dev->channels = sc27xx_channels;
indio_dev->num_channels = ARRAY_SIZE(sc27xx_channels);
- ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
- dev_err(&pdev->dev, "could not register iio (ADC)");
+ dev_err(dev, "could not register iio (ADC)");
return ret;
}
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index a33d0a4cc088..592b97c464da 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -301,7 +301,6 @@ static int spear_adc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
goto errout2;
}
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 1f7ce5186dfc..9b85fefc0a96 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -14,9 +14,11 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -51,6 +53,17 @@
#define STM32_ADC_CORE_SLEEP_DELAY_MS 2000
+/* SYSCFG registers */
+#define STM32MP1_SYSCFG_PMCSETR 0x04
+#define STM32MP1_SYSCFG_PMCCLRR 0x44
+
+/* SYSCFG bit fields */
+#define STM32MP1_SYSCFG_ANASWVDD_MASK BIT(9)
+
+/* SYSCFG capability flags */
+#define HAS_VBOOSTER BIT(0)
+#define HAS_ANASWVDD BIT(1)
+
/**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data
* @csr: common status register offset
@@ -74,11 +87,13 @@ struct stm32_adc_priv;
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
+ * @has_syscfg: SYSCFG capability flags
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
+ unsigned int has_syscfg;
};
/**
@@ -87,22 +102,32 @@ struct stm32_adc_priv_cfg {
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
+ * @booster: booster supply reference
+ * @vdd: vdd supply reference
* @vdda: vdda analog supply reference
* @vref: regulator reference
+ * @vdd_uv: vdd supply voltage (microvolts)
+ * @vdda_uv: vdda supply voltage (microvolts)
* @cfg: compatible configuration data
* @common: common data for all ADC instances
* @ccr_bak: backup CCR in low power mode
+ * @syscfg: reference to syscon, system control registers
*/
struct stm32_adc_priv {
int irq[STM32_ADC_MAX_ADCS];
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
+ struct regulator *booster;
+ struct regulator *vdd;
struct regulator *vdda;
struct regulator *vref;
+ int vdd_uv;
+ int vdda_uv;
const struct stm32_adc_priv_cfg *cfg;
struct stm32_adc_common common;
u32 ccr_bak;
+ struct regmap *syscfg;
};
static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
@@ -349,7 +374,6 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
*/
if (i && priv->irq[i] == -ENXIO)
continue;
- dev_err(&pdev->dev, "failed to get irq\n");
return priv->irq[i];
}
@@ -390,6 +414,82 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
}
}
+static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv,
+ struct device *dev)
+{
+ int ret;
+
+ /*
+ * On STM32H7 and STM32MP1, the ADC inputs are multiplexed with analog
+ * switches (via PCSEL) which have reduced performance when their
+ * supply is below 2.7V (vdda by default):
+ * - Voltage booster can be used to get full ADC performance
+ * (increases power consumption).
+ * - Vdd can be used to supply them, if above 2.7V (STM32MP1 only).
+ *
+ * Recommended settings for ANASWVDD and EN_BOOSTER:
+ * - vdda < 2.7V but vdd > 2.7V: ANASWVDD = 1, EN_BOOSTER = 0 (stm32mp1)
+ * - vdda < 2.7V and vdd < 2.7V: ANASWVDD = 0, EN_BOOSTER = 1
+ * - vdda >= 2.7V: ANASWVDD = 0, EN_BOOSTER = 0 (default)
+ */
+ if (priv->vdda_uv < 2700000) {
+ if (priv->syscfg && priv->vdd_uv > 2700000) {
+ ret = regulator_enable(priv->vdd);
+ if (ret < 0) {
+ dev_err(dev, "vdd enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(priv->syscfg,
+ STM32MP1_SYSCFG_PMCSETR,
+ STM32MP1_SYSCFG_ANASWVDD_MASK);
+ if (ret < 0) {
+ regulator_disable(priv->vdd);
+ dev_err(dev, "vdd select failed, %d\n", ret);
+ return ret;
+ }
+ dev_dbg(dev, "analog switches supplied by vdd\n");
+
+ return 0;
+ }
+
+ if (priv->booster) {
+ /*
+ * This is optional, as this is a trade-off between
+ * analog performance and power consumption.
+ */
+ ret = regulator_enable(priv->booster);
+ if (ret < 0) {
+ dev_err(dev, "booster enable failed %d\n", ret);
+ return ret;
+ }
+ dev_dbg(dev, "analog switches supplied by booster\n");
+
+ return 0;
+ }
+ }
+
+ /* Fallback using vdda (default), nothing to do */
+ dev_dbg(dev, "analog switches supplied by vdda (%d uV)\n",
+ priv->vdda_uv);
+
+ return 0;
+}
+
+static void stm32_adc_core_switches_supply_dis(struct stm32_adc_priv *priv)
+{
+ if (priv->vdda_uv < 2700000) {
+ if (priv->syscfg && priv->vdd_uv > 2700000) {
+ regmap_write(priv->syscfg, STM32MP1_SYSCFG_PMCCLRR,
+ STM32MP1_SYSCFG_ANASWVDD_MASK);
+ regulator_disable(priv->vdd);
+ return;
+ }
+ if (priv->booster)
+ regulator_disable(priv->booster);
+ }
+}
+
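A standalone sketch of the selection policy implemented above (illustrative only; the helper is not part of the driver and the example voltages are made up):

#include <stdbool.h>
#include <stdio.h>

static const char *switches_supply(int vdda_uv, int vdd_uv,
				   bool has_syscfg, bool has_booster)
{
	if (vdda_uv >= 2700000)
		return "vdda (default)";
	if (has_syscfg && vdd_uv > 2700000)
		return "vdd, selected via SYSCFG ANASWVDD";
	if (has_booster)
		return "embedded booster";
	return "vdda (reduced analog switch performance)";
}

int main(void)
{
	/* e.g. vdda = 1.8 V and vdd = 3.3 V on an stm32mp1 board */
	printf("%s\n", switches_supply(1800000, 3300000, true, false));
	return 0;
}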
static int stm32_adc_core_hw_start(struct device *dev)
{
struct stm32_adc_common *common = dev_get_drvdata(dev);
@@ -402,10 +502,21 @@ static int stm32_adc_core_hw_start(struct device *dev)
return ret;
}
+ ret = regulator_get_voltage(priv->vdda);
+ if (ret < 0) {
+ dev_err(dev, "vdda get voltage failed, %d\n", ret);
+ goto err_vdda_disable;
+ }
+ priv->vdda_uv = ret;
+
+ ret = stm32_adc_core_switches_supply_en(priv, dev);
+ if (ret < 0)
+ goto err_vdda_disable;
+
ret = regulator_enable(priv->vref);
if (ret < 0) {
dev_err(dev, "vref enable failed\n");
- goto err_vdda_disable;
+ goto err_switches_dis;
}
if (priv->bclk) {
@@ -433,6 +544,8 @@ err_bclk_disable:
clk_disable_unprepare(priv->bclk);
err_regulator_disable:
regulator_disable(priv->vref);
+err_switches_dis:
+ stm32_adc_core_switches_supply_dis(priv);
err_vdda_disable:
regulator_disable(priv->vdda);
@@ -451,9 +564,80 @@ static void stm32_adc_core_hw_stop(struct device *dev)
if (priv->bclk)
clk_disable_unprepare(priv->bclk);
regulator_disable(priv->vref);
+ stm32_adc_core_switches_supply_dis(priv);
regulator_disable(priv->vdda);
}
+static int stm32_adc_core_switches_probe(struct device *dev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /* Analog switches supply can be controlled by syscfg (optional) */
+ priv->syscfg = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(priv->syscfg)) {
+ ret = PTR_ERR(priv->syscfg);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Can't probe syscfg: %d\n", ret);
+ return ret;
+ }
+ priv->syscfg = NULL;
+ }
+
+ /* Booster can be used to supply analog switches (optional) */
+ if (priv->cfg->has_syscfg & HAS_VBOOSTER &&
+ of_property_read_bool(np, "booster-supply")) {
+ priv->booster = devm_regulator_get_optional(dev, "booster");
+ if (IS_ERR(priv->booster)) {
+ ret = PTR_ERR(priv->booster);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "can't get booster %d\n",
+ ret);
+ return ret;
+ }
+ priv->booster = NULL;
+ }
+ }
+
+ /* Vdd can be used to supply analog switches (optional) */
+ if (priv->cfg->has_syscfg & HAS_ANASWVDD &&
+ of_property_read_bool(np, "vdd-supply")) {
+ priv->vdd = devm_regulator_get_optional(dev, "vdd");
+ if (IS_ERR(priv->vdd)) {
+ ret = PTR_ERR(priv->vdd);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "can't get vdd %d\n", ret);
+ return ret;
+ }
+ priv->vdd = NULL;
+ }
+ }
+
+ if (priv->vdd) {
+ ret = regulator_enable(priv->vdd);
+ if (ret < 0) {
+ dev_err(dev, "vdd enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_get_voltage(priv->vdd);
+ if (ret < 0) {
+ dev_err(dev, "vdd get voltage failed %d\n", ret);
+ regulator_disable(priv->vdd);
+ return ret;
+ }
+ priv->vdd_uv = ret;
+
+ regulator_disable(priv->vdd);
+ }
+
+ return 0;
+}
+
static int stm32_adc_probe(struct platform_device *pdev)
{
struct stm32_adc_priv *priv;
@@ -514,6 +698,10 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->bclk = NULL;
}
+ ret = stm32_adc_core_switches_probe(dev, priv);
+ if (ret)
+ return ret;
+
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_set_autosuspend_delay(dev, STM32_ADC_CORE_SLEEP_DELAY_MS);
@@ -611,12 +799,14 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.regs = &stm32h7_adc_common_regs,
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
+ .has_syscfg = HAS_VBOOSTER,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.regs = &stm32h7_adc_common_regs,
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 40000000,
+ .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 205e1699f954..6a7dd08b1e0b 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1919,10 +1919,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
}
adc->irq = platform_get_irq(pdev, 0);
- if (adc->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ if (adc->irq < 0)
return adc->irq;
- }
ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
0, pdev->name, adc);
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index ee1e0569d0e1..e493242c266e 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1601,11 +1601,8 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
* So IRQ associated to filter instance 0 is dedicated to the Filter 0.
*/
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "Failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
0, pdev->name, adc);
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index f13c6248a662..176e1cb4abb1 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -460,10 +460,8 @@ static int sun4i_irq_init(struct platform_device *pdev, const char *name,
atomic_set(atomic, 1);
ret = platform_get_irq_byname(pdev, name);
- if (ret < 0) {
- dev_err(&pdev->dev, "no %s interrupt registered\n", name);
+ if (ret < 0)
return ret;
- }
ret = regmap_irq_get_virq(mfd_dev->regmap_irqc, ret);
if (ret < 0) {
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index 2fa6ec83bb13..f24148bd15de 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -905,10 +905,8 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL,
twl6030_gpadc_irq_handler,
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 41d3621c4787..98b30475bbc6 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -821,10 +821,8 @@ static int vf610_adc_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(info->dev, irq,
vf610_adc_isr, 0,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 17af4e0fd5f8..6a4919cd83c3 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -63,10 +63,35 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
/* Save values */
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
- st->core.calib[i] =
+ st->core.calib[i].offset =
st->core.resp->sensor_offset.offset[i];
ret = IIO_VAL_INT;
- *val = st->core.calib[idx];
+ *val = st->core.calib[idx].offset;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_SCALE;
+ st->core.param.sensor_offset.flags = 0;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret == -EPROTO) {
+ /* Reading calibscale is not supported on older EC. */
+ *val = 1;
+ *val2 = 0;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ } else if (ret) {
+ break;
+ }
+
+ /* Save values */
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.calib[i].scale =
+ st->core.resp->sensor_scale.scale[i];
+
+ *val = st->core.calib[idx].scale >> 15;
+ *val2 = ((st->core.calib[idx].scale & 0x7FFF) * 1000000LL) /
+ MOTION_SENSE_DEFAULT_SCALE;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
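/*
 * The CALIBSCALE values exchanged with the EC above are unsigned 1.15
 * fixed-point numbers: the integer part is scale >> 15 and the low 15 bits
 * are scaled to micro units. Worked example, assuming
 * MOTION_SENSE_DEFAULT_SCALE is 1 << 15 (as the shift by 15 implies):
 *
 *   scale = 0xC000: val = 1, val2 = (0x4000 * 1000000) / 32768 = 500000,
 *                   i.e. reported as 1.500000
 *   scale = 0x8000: reported as 1.000000, the default returned when the EC
 *                   does not implement MOTIONSENSE_CMD_SENSOR_SCALE
 */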
@@ -134,7 +159,7 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
- st->core.calib[idx] = val;
+ st->core.calib[idx].offset = val;
/* Send to EC for each axis, even if not complete */
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
@@ -142,12 +167,27 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
MOTION_SENSE_SET_OFFSET;
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
st->core.param.sensor_offset.offset[i] =
- st->core.calib[i];
+ st->core.calib[i].offset;
st->core.param.sensor_offset.temp =
EC_MOTION_SENSE_INVALID_CALIB_TEMP;
ret = cros_ec_motion_send_host_cmd(&st->core, 0);
break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ st->core.calib[idx].scale = val;
+ /* Send to EC for each axis, even if not complete */
+
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_SCALE;
+ st->core.param.sensor_offset.flags =
+ MOTION_SENSE_SET_OFFSET;
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ st->core.param.sensor_scale.scale[i] =
+ st->core.calib[i].scale;
+ st->core.param.sensor_scale.temp =
+ EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ break;
case IIO_CHAN_INFO_SCALE:
if (st->core.type == MOTIONSENSE_TYPE_MAG) {
ret = -EINVAL;
@@ -175,6 +215,7 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
static const struct iio_info ec_sensors_info = {
.read_raw = &cros_ec_sensors_read,
.write_raw = &cros_ec_sensors_write,
+ .read_avail = &cros_ec_sensors_core_read_avail,
};
static int cros_ec_sensors_probe(struct platform_device *pdev)
@@ -206,11 +247,14 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
/* Common part */
channel->info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBBIAS);
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE);
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_FREQUENCY) |
BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ channel->info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
channel->scan_index = i;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 130362ca421b..d44ae126f457 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -25,6 +25,62 @@ static char *cros_ec_loc[] = {
[MOTIONSENSE_LOC_MAX] = "unknown",
};
+static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
+ u16 cmd_offset, u16 cmd, u32 *mask)
+{
+ int ret;
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_params_get_cmd_versions params;
+ struct ec_response_get_cmd_versions resp;
+ };
+ } __packed buf = {
+ .msg = {
+ .command = EC_CMD_GET_CMD_VERSIONS + cmd_offset,
+ .insize = sizeof(struct ec_response_get_cmd_versions),
+ .outsize = sizeof(struct ec_params_get_cmd_versions)
+ },
+ .params = {.cmd = cmd}
+ };
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
+ if (ret >= 0)
+ *mask = buf.resp.version_mask;
+ return ret;
+}
+
+static void get_default_min_max_freq(enum motionsensor_type type,
+ u32 *min_freq,
+ u32 *max_freq)
+{
+ switch (type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ case MOTIONSENSE_TYPE_GYRO:
+ *min_freq = 12500;
+ *max_freq = 100000;
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ *min_freq = 5000;
+ *max_freq = 25000;
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ case MOTIONSENSE_TYPE_LIGHT:
+ *min_freq = 100;
+ *max_freq = 50000;
+ break;
+ case MOTIONSENSE_TYPE_BARO:
+ *min_freq = 250;
+ *max_freq = 20000;
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ default:
+ *min_freq = 0;
+ *max_freq = 0;
+ break;
+ }
+}
+
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
bool physical_device)
@@ -33,6 +89,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+ u32 ver_mask;
+ int ret, i;
platform_set_drvdata(pdev, indio_dev);
@@ -47,8 +105,15 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
mutex_init(&state->cmd_lock);
+ ret = cros_ec_get_host_cmd_version_mask(state->ec,
+ ec->cmd_offset,
+ EC_CMD_MOTION_SENSE_CMD,
+ &ver_mask);
+ if (ret < 0)
+ return ret;
+
/* Set up the host command structure. */
- state->msg->version = 2;
+ state->msg->version = fls(ver_mask) - 1;
state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
state->msg->outsize = sizeof(struct ec_params_motion_sense);
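/*
 * EC_CMD_GET_CMD_VERSIONS returns a bitmap of supported command versions,
 * so fls(ver_mask) - 1 above picks the newest one instead of the previously
 * hard-coded version 2. A minimal sketch with an illustrative mask value
 * (the helper name is hypothetical):
 */
static u8 cros_ec_newest_cmd_version(u32 ver_mask)
{
	/* e.g. ver_mask = 0x7: versions 0, 1 and 2 supported -> use 2 */
	return fls(ver_mask) - 1;
}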
@@ -60,12 +125,32 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
state->param.cmd = MOTIONSENSE_CMD_INFO;
state->param.info.sensor_num = sensor_platform->sensor_num;
- if (cros_ec_motion_send_host_cmd(state, 0)) {
+ ret = cros_ec_motion_send_host_cmd(state, 0);
+ if (ret) {
dev_warn(dev, "Can not access sensor info\n");
- return -EIO;
+ return ret;
}
state->type = state->resp->info.type;
state->loc = state->resp->info.location;
+
+ /* Set sign vector, only used for backward compatibility. */
+ memset(state->sign, 1, CROS_EC_SENSOR_MAX_AXIS);
+
+ for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+ state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE;
+
+ /* 0 is a correct value used to stop the device */
+ state->frequencies[0] = 0;
+ if (state->msg->version < 3) {
+ get_default_min_max_freq(state->resp->info.type,
+ &state->frequencies[1],
+ &state->frequencies[2]);
+ } else {
+ state->frequencies[1] =
+ state->resp->info_3.min_frequency;
+ state->frequencies[2] =
+ state->resp->info_3.max_frequency;
+ }
}
return 0;
@@ -86,7 +171,7 @@ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
if (ret < 0)
- return -EIO;
+ return ret;
if (ret &&
state->resp != (struct ec_response_motion_sense *)state->msg->data)
@@ -118,7 +203,7 @@ static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
} else {
/* Save values */
for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
- st->calib[i] = st->resp->perform_calib.offset[i];
+ st->calib[i].offset = st->resp->perform_calib.offset[i];
}
mutex_unlock(&st->cmd_lock);
@@ -268,6 +353,7 @@ static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
+ *data *= st->sign[i];
data++;
}
@@ -396,7 +482,7 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret = IIO_VAL_INT;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -404,22 +490,27 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
st->param.ec_rate.data =
EC_MOTION_SENSE_NO_VALUE;
- if (cros_ec_motion_send_host_cmd(st, 0))
- ret = -EIO;
- else
- *val = st->resp->ec_rate.ret;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret)
+ break;
+
+ *val = st->resp->ec_rate.ret;
+ ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_FREQUENCY:
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
st->param.sensor_odr.data =
EC_MOTION_SENSE_NO_VALUE;
- if (cros_ec_motion_send_host_cmd(st, 0))
- ret = -EIO;
- else
- *val = st->resp->sensor_odr.ret;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret)
+ break;
+
+ *val = st->resp->sensor_odr.ret;
+ ret = IIO_VAL_INT;
break;
default:
+ ret = -EINVAL;
break;
}
@@ -427,11 +518,32 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long mask)
+{
+ struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *length = ARRAY_SIZE(state->frequencies);
+ *vals = (const int *)&state->frequencies;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail);
+
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- int ret = 0;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_FREQUENCY:
@@ -441,17 +553,16 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
/* Always roundup, so caller gets at least what it asks for. */
st->param.sensor_odr.roundup = 1;
- if (cros_ec_motion_send_host_cmd(st, 0))
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
st->param.ec_rate.data = val;
- if (cros_ec_motion_send_host_cmd(st, 0))
- ret = -EIO;
- else
- st->curr_sampl_freq = val;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret)
+ break;
+ st->curr_sampl_freq = val;
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index a8a3fe428d8d..442ff787f7af 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -8,11 +8,16 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/time.h>
+
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#define HZ_PER_MHZ 1000000L
+
static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
@@ -68,16 +73,6 @@ static struct {
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
};
-static int pow_10(unsigned power)
-{
- int i;
- int ret = 1;
- for (i = 0; i < power; ++i)
- ret = ret * 10;
-
- return ret;
-}
-
static void simple_div(int dividend, int divisor, int *whole,
int *micro_frac)
{
@@ -96,14 +91,16 @@ static void simple_div(int dividend, int divisor, int *whole,
rem *= 10;
exp++;
}
- *micro_frac = (rem / divisor) * pow_10(6-exp);
+ *micro_frac = (rem / divisor) * int_pow(10, 6 - exp);
}
}
static void split_micro_fraction(unsigned int no, int exp, int *val1, int *val2)
{
- *val1 = no/pow_10(exp);
- *val2 = no%pow_10(exp) * pow_10(6-exp);
+ int divisor = int_pow(10, exp);
+
+ *val1 = no / divisor;
+ *val2 = no % divisor * int_pow(10, 6 - exp);
}
/*
@@ -125,7 +122,7 @@ static void convert_from_vtf_format(u32 value, int size, int exp,
}
exp = hid_sensor_convert_exponent(exp);
if (exp >= 0) {
- *val1 = sign * value * pow_10(exp);
+ *val1 = sign * value * int_pow(10, exp);
*val2 = 0;
} else {
split_micro_fraction(value, -exp, val1, val2);
@@ -138,6 +135,7 @@ static void convert_from_vtf_format(u32 value, int size, int exp,
static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
{
+ int divisor;
u32 value;
int sign = 1;
@@ -145,10 +143,13 @@ static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
sign = -1;
exp = hid_sensor_convert_exponent(exp);
if (exp < 0) {
- value = abs(val1) * pow_10(-exp);
- value += abs(val2) / pow_10(6+exp);
- } else
- value = abs(val1) / pow_10(exp);
+ divisor = int_pow(10, 6 + exp);
+ value = abs(val1) * int_pow(10, -exp);
+ value += abs(val2) / divisor;
+ } else {
+ divisor = int_pow(10, exp);
+ value = abs(val1) / divisor;
+ }
if (sign < 0)
value = ((1LL << (size * 8)) - value);
@@ -211,12 +212,12 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
if (val1 < 0 || val2 < 0)
return -EINVAL;
- value = val1 * pow_10(6) + val2;
+ value = val1 * HZ_PER_MHZ + val2;
if (value) {
if (st->poll.units == HID_USAGE_SENSOR_UNITS_MILLISECOND)
- value = pow_10(9)/value;
+ value = NSEC_PER_SEC / value;
else if (st->poll.units == HID_USAGE_SENSOR_UNITS_SECOND)
- value = pow_10(6)/value;
+ value = USEC_PER_SEC / value;
else
value = 0;
}
@@ -305,40 +306,44 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
static void adjust_exponent_nano(int *val0, int *val1, int scale0,
int scale1, int exp)
{
+ int divisor;
int i;
int x;
int res;
int rem;
if (exp > 0) {
- *val0 = scale0 * pow_10(exp);
+ *val0 = scale0 * int_pow(10, exp);
res = 0;
if (exp > 9) {
*val1 = 0;
return;
}
for (i = 0; i < exp; ++i) {
- x = scale1 / pow_10(8 - i);
- res += (pow_10(exp - 1 - i) * x);
- scale1 = scale1 % pow_10(8 - i);
+ divisor = int_pow(10, 8 - i);
+ x = scale1 / divisor;
+ res += int_pow(10, exp - 1 - i) * x;
+ scale1 = scale1 % divisor;
}
*val0 += res;
- *val1 = scale1 * pow_10(exp);
+ *val1 = scale1 * int_pow(10, exp);
} else if (exp < 0) {
exp = abs(exp);
if (exp > 9) {
*val0 = *val1 = 0;
return;
}
- *val0 = scale0 / pow_10(exp);
- rem = scale0 % pow_10(exp);
+ divisor = int_pow(10, exp);
+ *val0 = scale0 / divisor;
+ rem = scale0 % divisor;
res = 0;
for (i = 0; i < (9 - exp); ++i) {
- x = scale1 / pow_10(8 - i);
- res += (pow_10(8 - exp - i) * x);
- scale1 = scale1 % pow_10(8 - i);
+ divisor = int_pow(10, 8 - i);
+ x = scale1 / divisor;
+ res += int_pow(10, 8 - exp - i) * x;
+ scale1 = scale1 % divisor;
}
- *val1 = rem * pow_10(9 - exp) + res;
+ *val1 = rem * int_pow(10, 9 - exp) + res;
} else {
*val0 = scale0;
*val1 = scale1;
diff --git a/drivers/iio/common/st_sensors/Kconfig b/drivers/iio/common/st_sensors/Kconfig
index 91b98e152d75..9364ec7a811f 100644
--- a/drivers/iio/common/st_sensors/Kconfig
+++ b/drivers/iio/common/st_sensors/Kconfig
@@ -5,9 +5,11 @@
config IIO_ST_SENSORS_I2C
tristate
+ select REGMAP_I2C
config IIO_ST_SENSORS_SPI
tristate
+ select REGMAP_SPI
config IIO_ST_SENSORS_CORE
tristate
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index 4a68669dc555..eee30130ae23 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -17,15 +17,16 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/irqreturn.h>
+#include <linux/regmap.h>
#include <linux/iio/common/st_sensors.h>
static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
{
- int i;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned int num_data_channels = sdata->num_data_channels;
+ int i;
for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
const struct iio_chan_spec *channel = &indio_dev->channels[i];
@@ -36,11 +37,8 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
channel->scan_type.storagebits >> 3;
buf = PTR_ALIGN(buf, storage_bytes);
- if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- channel->address,
- bytes_to_read, buf,
- sdata->multiread_bit) <
- bytes_to_read)
+ if (regmap_bulk_read(sdata->regmap, channel->address,
+ buf, bytes_to_read) < 0)
return -EIO;
/* Advance the buffer pointer */
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8b22dc241482..4a3064fb6cd9 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -15,6 +15,7 @@
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
@@ -28,19 +29,10 @@ static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
u8 reg_addr, u8 mask, u8 data)
{
- int err;
- u8 new_data;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data);
- if (err < 0)
- goto st_sensors_write_data_with_mask_error;
-
- new_data = ((new_data & (~mask)) | ((data << __ffs(mask)) & mask));
- err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data);
-
-st_sensors_write_data_with_mask_error:
- return err;
+ return regmap_update_bits(sdata->regmap,
+ reg_addr, mask, data << __ffs(mask));
}
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
@@ -48,19 +40,15 @@ int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned *readval)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
- u8 readdata;
int err;
if (!readval)
- return sdata->tf->write_byte(&sdata->tb, sdata->dev,
- (u8)reg, (u8)writeval);
+ return regmap_write(sdata->regmap, reg, writeval);
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev, (u8)reg, &readdata);
+ err = regmap_read(sdata->regmap, reg, readval);
if (err < 0)
return err;
- *readval = (unsigned)readdata;
-
return 0;
}
EXPORT_SYMBOL(st_sensors_debugfs_reg_access);
@@ -545,7 +533,7 @@ st_sensors_match_scale_error:
EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain);
static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
- struct iio_chan_spec const *ch, int *data)
+ struct iio_chan_spec const *ch, int *data)
{
int err;
u8 *outdata;
@@ -554,13 +542,12 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits +
ch->scan_type.shift, 8);
- outdata = kmalloc(byte_for_channel, GFP_KERNEL);
+ outdata = kmalloc(byte_for_channel, GFP_DMA | GFP_KERNEL);
if (!outdata)
return -ENOMEM;
- err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- ch->address, byte_for_channel,
- outdata, sdata->multiread_bit);
+ err = regmap_bulk_read(sdata->regmap, ch->address,
+ outdata, byte_for_channel);
if (err < 0)
goto st_sensors_free_memory;
@@ -608,69 +595,55 @@ out:
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
-static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
- const struct st_sensor_settings *sensor_settings)
+/*
+ * st_sensors_get_settings_index() - get index of the sensor settings for a
+ * specific device from list of settings
+ * @name: device name buffer reference.
+ * @list: sensor settings list.
+ * @list_length: length of sensor settings list.
+ *
+ * Return: non negative number on success (valid index),
+ * negative error code otherwise.
+ */
+int st_sensors_get_settings_index(const char *name,
+ const struct st_sensor_settings *list,
+ const int list_length)
{
- struct st_sensor_data *sdata = iio_priv(indio_dev);
- struct device_node *np = sdata->dev->of_node;
- struct st_sensors_platform_data *pdata;
+ int i, n;
- pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
- if (((np && of_property_read_bool(np, "spi-3wire")) ||
- (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
- int err;
-
- err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
- sensor_settings->sim.addr,
- sensor_settings->sim.value);
- if (err < 0) {
- dev_err(&indio_dev->dev,
- "failed to init interface mode\n");
- return err;
+ for (i = 0; i < list_length; i++) {
+ for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) {
+ if (strcmp(name, list[i].sensors_supported[n]) == 0)
+ return i;
}
}
- return 0;
+ return -ENODEV;
}
+EXPORT_SYMBOL(st_sensors_get_settings_index);
-int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list,
- const struct st_sensor_settings *sensor_settings)
+/*
+ * st_sensors_verify_id() - verify that the sensor ID (WhoAmI) matches the
+ * expected value
+ * @indio_dev: IIO device reference.
+ *
+ * Return: 0 on success (valid sensor ID), else a negative error code.
+ */
+int st_sensors_verify_id(struct iio_dev *indio_dev)
{
- int i, n, err = 0;
- u8 wai;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int wai, err;
- for (i = 0; i < num_sensors_list; i++) {
- for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) {
- if (strcmp(indio_dev->name,
- sensor_settings[i].sensors_supported[n]) == 0) {
- break;
- }
- }
- if (n < ST_SENSORS_MAX_4WAI)
- break;
- }
- if (i == num_sensors_list) {
- dev_err(&indio_dev->dev, "device name %s not recognized.\n",
- indio_dev->name);
- return -ENODEV;
- }
-
- err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
- if (err < 0)
- return err;
-
- if (sensor_settings[i].wai_addr) {
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sensor_settings[i].wai_addr, &wai);
+ if (sdata->sensor_settings->wai_addr) {
+ err = regmap_read(sdata->regmap,
+ sdata->sensor_settings->wai_addr, &wai);
if (err < 0) {
dev_err(&indio_dev->dev,
"failed to read Who-Am-I register.\n");
return err;
}
- if (sensor_settings[i].wai != wai) {
+ if (sdata->sensor_settings->wai != wai) {
dev_err(&indio_dev->dev,
"%s: WhoAmI mismatch (0x%x).\n",
indio_dev->name, wai);
@@ -678,12 +651,9 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
}
}
- sdata->sensor_settings =
- (struct st_sensor_settings *)&sensor_settings[i];
-
- return i;
+ return 0;
}
-EXPORT_SYMBOL(st_sensors_check_device_support);
+EXPORT_SYMBOL(st_sensors_verify_id);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index b1c9812407e7..aa89d54a7c59 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -13,68 +13,58 @@
#include <linux/iio/iio.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
+#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_i2c.h>
#define ST_SENSORS_I2C_MULTIREAD 0x80
-static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *sdata = iio_priv(indio_dev);
-
- return to_i2c_client(sdata->dev)->irq;
-}
-
-static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 *res_byte)
-{
- int err;
-
- err = i2c_smbus_read_byte_data(to_i2c_client(dev), reg_addr);
- if (err < 0)
- goto st_accel_i2c_read_byte_error;
-
- *res_byte = err & 0xff;
-
-st_accel_i2c_read_byte_error:
- return err < 0 ? err : 0;
-}
-
-static int st_sensors_i2c_read_multiple_byte(
- struct st_sensor_transfer_buffer *tb, struct device *dev,
- u8 reg_addr, int len, u8 *data, bool multiread_bit)
-{
- if (multiread_bit)
- reg_addr |= ST_SENSORS_I2C_MULTIREAD;
-
- return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev),
- reg_addr, len, data);
-}
-
-static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 data)
-{
- return i2c_smbus_write_byte_data(to_i2c_client(dev), reg_addr, data);
-}
+static const struct regmap_config st_sensors_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
-static const struct st_sensor_transfer_function st_sensors_tf_i2c = {
- .read_byte = st_sensors_i2c_read_byte,
- .write_byte = st_sensors_i2c_write_byte,
- .read_multiple_byte = st_sensors_i2c_read_multiple_byte,
+static const struct regmap_config st_sensors_i2c_regmap_multiread_bit_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = ST_SENSORS_I2C_MULTIREAD,
};
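/*
 * Note on the regmap conversion above: regmap ORs read_flag_mask into the
 * register address on every read, which replicates the auto-increment
 * ("multiread") bit that the removed read_multiple_byte() helpers set by
 * hand. For example (illustrative register address), a bulk read starting
 * at 0x28 goes on the I2C bus as 0x28 | 0x80 = 0xA8; the SPI variant below
 * does the same with ST_SENSORS_SPI_MULTIREAD (0xc0).
 */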
-void st_sensors_i2c_configure(struct iio_dev *indio_dev,
- struct i2c_client *client, struct st_sensor_data *sdata)
+/*
+ * st_sensors_i2c_configure() - configure I2C interface
+ * @indio_dev: IIO device reference.
+ * @client: i2c client reference.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client)
{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ const struct regmap_config *config;
+
+ if (sdata->sensor_settings->multi_read_bit)
+ config = &st_sensors_i2c_regmap_multiread_bit_config;
+ else
+ config = &st_sensors_i2c_regmap_config;
+
+ sdata->regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(sdata->regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap (%d)\n",
+ (int)PTR_ERR(sdata->regmap));
+ return PTR_ERR(sdata->regmap);
+ }
+
i2c_set_clientdata(client, indio_dev);
indio_dev->dev.parent = &client->dev;
indio_dev->name = client->name;
sdata->dev = &client->dev;
- sdata->tf = &st_sensors_tf_i2c;
- sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
+ sdata->irq = client->irq;
+
+ return 0;
}
EXPORT_SYMBOL(st_sensors_i2c_configure);
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 2213843f02cb..2262f81b07c2 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -11,108 +11,108 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
+#include <linux/regmap.h>
#include <linux/iio/common/st_sensors_spi.h>
-
+#include "st_sensors_core.h"
#define ST_SENSORS_SPI_MULTIREAD 0xc0
-#define ST_SENSORS_SPI_READ 0x80
-static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev)
-{
- struct st_sensor_data *sdata = iio_priv(indio_dev);
+static const struct regmap_config st_sensors_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
- return to_spi_device(sdata->dev)->irq;
-}
+static const struct regmap_config st_sensors_spi_regmap_multiread_bit_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = ST_SENSORS_SPI_MULTIREAD,
+};
-static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit)
+/*
+ * st_sensors_is_spi_3_wire() - check if SPI 3-wire mode has been selected
+ * @spi: spi device reference.
+ *
+ * Return: true if SPI 3-wire mode is selected, false otherwise.
+ */
+static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
{
- int err;
-
- struct spi_transfer xfers[] = {
- {
- .tx_buf = tb->tx_buf,
- .bits_per_word = 8,
- .len = 1,
- },
- {
- .rx_buf = tb->rx_buf,
- .bits_per_word = 8,
- .len = len,
- }
- };
-
- mutex_lock(&tb->buf_lock);
- if ((multiread_bit) && (len > 1))
- tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD;
- else
- tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ;
+ struct device_node *np = spi->dev.of_node;
+ struct st_sensors_platform_data *pdata;
- err = spi_sync_transfer(to_spi_device(dev), xfers, ARRAY_SIZE(xfers));
- if (err)
- goto acc_spi_read_error;
+ pdata = (struct st_sensors_platform_data *)spi->dev.platform_data;
+ if ((np && of_property_read_bool(np, "spi-3wire")) ||
+ (pdata && pdata->spi_3wire)) {
+ return true;
+ }
- memcpy(data, tb->rx_buf, len);
- mutex_unlock(&tb->buf_lock);
- return len;
-
-acc_spi_read_error:
- mutex_unlock(&tb->buf_lock);
- return err;
+ return false;
}
-static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 *res_byte)
+/*
+ * st_sensors_configure_spi_3_wire() - configure SPI 3-wire if needed
+ * @spi: spi device reference.
+ * @settings: sensor specific settings reference.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+static int st_sensors_configure_spi_3_wire(struct spi_device *spi,
+ struct st_sensor_settings *settings)
{
- return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false);
-}
+ if (settings->sim.addr) {
+ u8 buffer[] = {
+ settings->sim.addr,
+ settings->sim.value
+ };
-static int st_sensors_spi_read_multiple_byte(
- struct st_sensor_transfer_buffer *tb, struct device *dev,
- u8 reg_addr, int len, u8 *data, bool multiread_bit)
-{
- return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit);
+ return spi_write(spi, buffer, 2);
+ }
+
+ return 0;
}
-static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 data)
+/*
+ * st_sensors_spi_configure() - configure SPI interface
+ * @indio_dev: IIO device reference.
+ * @spi: spi device reference.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi)
{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ const struct regmap_config *config;
int err;
- struct spi_transfer xfers = {
- .tx_buf = tb->tx_buf,
- .bits_per_word = 8,
- .len = 2,
- };
+ if (st_sensors_is_spi_3_wire(spi)) {
+ err = st_sensors_configure_spi_3_wire(spi,
+ sdata->sensor_settings);
+ if (err < 0)
+ return err;
+ }
- mutex_lock(&tb->buf_lock);
- tb->tx_buf[0] = reg_addr;
- tb->tx_buf[1] = data;
-
- err = spi_sync_transfer(to_spi_device(dev), &xfers, 1);
- mutex_unlock(&tb->buf_lock);
+ if (sdata->sensor_settings->multi_read_bit)
+ config = &st_sensors_spi_regmap_multiread_bit_config;
+ else
+ config = &st_sensors_spi_regmap_config;
- return err;
-}
+ sdata->regmap = devm_regmap_init_spi(spi, config);
+ if (IS_ERR(sdata->regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap (%d)\n",
+ (int)PTR_ERR(sdata->regmap));
+ return PTR_ERR(sdata->regmap);
+ }
-static const struct st_sensor_transfer_function st_sensors_tf_spi = {
- .read_byte = st_sensors_spi_read_byte,
- .write_byte = st_sensors_spi_write_byte,
- .read_multiple_byte = st_sensors_spi_read_multiple_byte,
-};
-
-void st_sensors_spi_configure(struct iio_dev *indio_dev,
- struct spi_device *spi, struct st_sensor_data *sdata)
-{
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi->modalias;
sdata->dev = &spi->dev;
- sdata->tf = &st_sensors_tf_spi;
- sdata->get_irq_data_ready = st_sensors_spi_get_irq;
+ sdata->irq = spi->irq;
+
+ return 0;
}
EXPORT_SYMBOL(st_sensors_spi_configure);
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 630c8cb35e8b..4a2efa00f7f2 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -13,6 +13,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/iio/common/st_sensors.h>
#include "st_sensors_core.h"
@@ -26,8 +27,7 @@
static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
struct st_sensor_data *sdata)
{
- u8 status;
- int ret;
+ int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
@@ -37,9 +37,9 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
if (!indio_dev->active_scan_mask)
return 0;
- ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.stat_drdy.addr,
- &status);
+ ret = regmap_read(sdata->regmap,
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr,
+ &status);
if (ret < 0) {
dev_err(sdata->dev,
"error checking samples available\n");
@@ -121,9 +121,9 @@ static irqreturn_t st_sensors_irq_thread(int irq, void *p)
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
- int err, irq;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned long irq_trig;
+ int err;
sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
if (sdata->trig == NULL) {
@@ -135,8 +135,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->trig->ops = trigger_ops;
sdata->trig->dev.parent = sdata->dev;
- irq = sdata->get_irq_data_ready(indio_dev);
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(sdata->irq));
/*
* If the IRQ is triggered on falling edge, we need to mark the
* interrupt as active low, if the hardware supports this.
@@ -206,12 +205,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr)
irq_trig |= IRQF_SHARED;
- err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
- st_sensors_irq_handler,
- st_sensors_irq_thread,
- irq_trig,
- sdata->trig->name,
- sdata->trig);
+ err = request_threaded_irq(sdata->irq,
+ st_sensors_irq_handler,
+ st_sensors_irq_thread,
+ irq_trig,
+ sdata->trig->name,
+ sdata->trig);
if (err) {
dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
goto iio_trigger_free;
@@ -227,7 +226,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
return 0;
iio_trigger_register_error:
- free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+ free_irq(sdata->irq, sdata->trig);
iio_trigger_free:
iio_trigger_free(sdata->trig);
return err;
@@ -239,7 +238,7 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
struct st_sensor_data *sdata = iio_priv(indio_dev);
iio_trigger_unregister(sdata->trig);
- free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+ free_irq(sdata->irq, sdata->trig);
iio_trigger_free(sdata->trig);
}
EXPORT_SYMBOL(st_sensors_deallocate_trigger);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 4335214800d2..2ebe08326048 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -220,7 +220,7 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
*val >>= chan->scan_type.shift;
- val -= (1 << chan->scan_type.realbits) / 2;
+ *val -= (1 << chan->scan_type.realbits) / 2;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 2 * st->vref;
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index 8f99c005458a..6cb02299a215 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -693,9 +693,9 @@ static int iio_dummy_remove(struct iio_sw_device *swd)
* Varies depending on bus type of the device. As there is no device
* here, call probe directly. For information on device registration
* i2c:
- * Documentation/i2c/writing-clients
+ * Documentation/i2c/writing-clients.rst
* spi:
- * Documentation/spi/spi-summary
+ * Documentation/spi/spi-summary.rst
*/
static const struct iio_sw_device_ops iio_dummy_device_ops = {
.probe = iio_dummy_probe,
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index e48f15cc9ab5..ff82863cbf42 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
st->buf[0] = st->integer >> 8;
st->buf[1] = 0x40; /* REG12 default */
st->buf[2] = 0x00;
- st->buf[3] = st->fract2 & 0xFF;
- st->buf[4] = st->fract2 >> 7;
- st->buf[5] = st->fract2 >> 15;
+ st->buf[3] = st->fract1 & 0xFF;
+ st->buf[4] = st->fract1 >> 8;
+ st->buf[5] = st->fract1 >> 16;
st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
- ADF4371_FRAC1WORD(st->fract1 >> 23);
+ ADF4371_FRAC1WORD(st->fract1 >> 24);
st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
st->buf[8] = st->mod2 & 0xFF;
st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 460d190be4a4..592f6b34e987 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -32,6 +32,7 @@ static const struct st_sensors_platform_data gyro_pdata = {
.drdy_int_pin = 2,
};
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
int st_gyro_common_probe(struct iio_dev *indio_dev);
void st_gyro_common_remove(struct iio_dev *indio_dev);
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index 6e362f735e92..7465ad62391c 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -29,61 +29,51 @@ int st_gyro_trig_set_state(struct iio_trigger *trig, bool state)
return st_sensors_set_dataready_irq(indio_dev, state);
}
-static int st_gyro_buffer_preenable(struct iio_dev *indio_dev)
-{
- return st_sensors_set_enable(indio_dev, true);
-}
-
static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *gdata = iio_priv(indio_dev);
- gdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (gdata->buffer_data == NULL) {
- err = -ENOMEM;
- goto allocate_memory_error;
- }
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ return err;
err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ (u8)indio_dev->active_scan_mask[0]);
if (err < 0)
- goto st_gyro_buffer_postenable_error;
+ goto st_gyro_buffer_predisable;
- err = iio_triggered_buffer_postenable(indio_dev);
+ err = st_sensors_set_enable(indio_dev, true);
if (err < 0)
- goto st_gyro_buffer_postenable_error;
+ goto st_gyro_buffer_enable_all_axis;
- return err;
+ return 0;
-st_gyro_buffer_postenable_error:
- kfree(gdata->buffer_data);
-allocate_memory_error:
+st_gyro_buffer_enable_all_axis:
+ st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+st_gyro_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
return err;
}
static int st_gyro_buffer_predisable(struct iio_dev *indio_dev)
{
- int err;
- struct st_sensor_data *gdata = iio_priv(indio_dev);
+ int err, err2;
- err = iio_triggered_buffer_predisable(indio_dev);
+ err = st_sensors_set_enable(indio_dev, false);
if (err < 0)
- goto st_gyro_buffer_predisable_error;
+ goto st_gyro_buffer_predisable;
err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
- if (err < 0)
- goto st_gyro_buffer_predisable_error;
- err = st_sensors_set_enable(indio_dev, false);
+st_gyro_buffer_predisable:
+ err2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!err)
+ err = err2;
-st_gyro_buffer_predisable_error:
- kfree(gdata->buffer_data);
return err;
}
static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
- .preenable = &st_gyro_buffer_preenable,
.postenable = &st_gyro_buffer_postenable,
.predisable = &st_gyro_buffer_predisable,
};
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index e995dc77e30e..c0acbb5d2ffb 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -368,28 +367,41 @@ static const struct iio_trigger_ops st_gyro_trigger_ops = {
#define ST_GYRO_TRIGGER_OPS NULL
#endif
+/*
+ * st_gyro_get_settings() - get sensor settings from device name
+ * @name: device name buffer reference.
+ *
+ * Return: valid reference on success, NULL otherwise.
+ */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name)
+{
+ int index = st_sensors_get_settings_index(name,
+ st_gyro_sensors_settings,
+ ARRAY_SIZE(st_gyro_sensors_settings));
+ if (index < 0)
+ return NULL;
+
+ return &st_gyro_sensors_settings[index];
+}
+EXPORT_SYMBOL(st_gyro_get_settings);
+
int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
- int irq = gdata->get_irq_data_ready(indio_dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &gyro_info;
- mutex_init(&gdata->tb.buf_lock);
err = st_sensors_power_enable(indio_dev);
if (err)
return err;
- err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_gyro_sensors_settings),
- st_gyro_sensors_settings);
+ err = st_sensors_verify_id(indio_dev);
if (err < 0)
goto st_gyro_power_off;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
- gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
@@ -406,7 +418,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
if (err < 0)
goto st_gyro_power_off;
- if (irq > 0) {
+ if (gdata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_GYRO_TRIGGER_OPS);
if (err < 0)
@@ -423,7 +435,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
return 0;
st_gyro_device_register_error:
- if (irq > 0)
+ if (gdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_gyro_probe_trigger_error:
st_gyro_deallocate_ring(indio_dev);
@@ -441,7 +453,7 @@ void st_gyro_common_remove(struct iio_dev *indio_dev)
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (gdata->get_irq_data_ready(indio_dev) > 0)
+ if (gdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_gyro_deallocate_ring(indio_dev);
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index f2a8683db7d9..05a1a0874bd5 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -63,21 +63,33 @@ MODULE_DEVICE_TABLE(of, st_gyro_of_match);
#endif
static int st_gyro_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *gdata;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
+ client->name, sizeof(client->name));
+
+ settings = st_gyro_get_settings(client->name);
+ if (!settings) {
+ dev_err(&client->dev, "device name %s not recognized.\n",
+ client->name);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*gdata));
if (!indio_dev)
return -ENOMEM;
gdata = iio_priv(indio_dev);
- st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
- client->name, sizeof(client->name));
+ gdata->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_i2c_configure(indio_dev, client, gdata);
+ err = st_sensors_i2c_configure(indio_dev, client);
+ if (err < 0)
+ return err;
err = st_gyro_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 93c48248bea6..b5c624251231 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -69,19 +69,31 @@ MODULE_DEVICE_TABLE(of, st_gyro_of_match);
static int st_gyro_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *gdata;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
+ spi->modalias, sizeof(spi->modalias));
+
+ settings = st_gyro_get_settings(spi->modalias);
+ if (!settings) {
+ dev_err(&spi->dev, "device name %s not recognized.\n",
+ spi->modalias);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*gdata));
if (!indio_dev)
return -ENOMEM;
gdata = iio_priv(indio_dev);
+ gdata->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
- spi->modalias, sizeof(spi->modalias));
- st_sensors_spi_configure(indio_dev, spi, gdata);
+ err = st_sensors_spi_configure(indio_dev, spi);
+ if (err < 0)
+ return err;
err = st_gyro_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index f18da7859229..3bac98e731d9 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -240,32 +240,15 @@ static int am2315_probe(struct i2c_client *client,
indio_dev->channels = am2315_channels;
indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
- ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ ret = devm_iio_triggered_buffer_setup(&client->dev,
+ indio_dev, iio_pollfunc_store_time,
am2315_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto err_buffer_cleanup;
-
- return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
- return ret;
-}
-
-static int am2315_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id am2315_i2c_id[] = {
@@ -287,7 +270,6 @@ static struct i2c_driver am2315_driver = {
.acpi_match_table = ACPI_PTR(am2315_acpi_id),
},
.probe = am2315_probe,
- .remove = am2315_remove,
.id_table = am2315_i2c_id,
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 066e05f92081..bfe1cdb16846 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -385,28 +385,16 @@ static int hdc100x_probe(struct i2c_client *client,
hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(&client->dev,
+ indio_dev, NULL,
hdc100x_trigger_handler,
&hdc_buffer_setup_ops);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- iio_triggered_buffer_cleanup(indio_dev);
- return ret;
-}
-
-static int hdc100x_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id hdc100x_id[] = {
@@ -436,7 +424,6 @@ static struct i2c_driver hdc100x_driver = {
.of_match_table = of_match_ptr(hdc100x_dt_ids),
},
.probe = hdc100x_probe,
- .remove = hdc100x_remove,
.id_table = hdc100x_id,
};
module_i2c_driver(hdc100x_driver);
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 4957e6df447e..f3c7282321a8 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -17,6 +17,18 @@ config ADIS16400
adis16365, adis16400 and adis16405 triaxial inertial sensors
(adis16400 series also have magnetometers).
+config ADIS16460
+ tristate "Analog Devices ADIS16460 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16460 inertial
+ sensor.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16460.
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 9e452fce1aaf..4a6958865504 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16400) += adis16400.o
+obj-$(CONFIG_ADIS16460) += adis16460.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 30281e91dbf9..1631c255deab 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -39,18 +39,24 @@ int adis_write_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
@@ -133,12 +139,16 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->write_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.rx_buf = adis->rx,
@@ -146,6 +156,8 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
.len = 2,
.cs_change = 1,
.delay_usecs = adis->data->read_delay,
+ .cs_change_delay = adis->data->cs_change_delay,
+ .cs_change_delay_unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
new file mode 100644
index 000000000000..6aed9e84abbf
--- /dev/null
+++ b/drivers/iio/imu/adis16460.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADIS16460 IMU driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/debugfs.h>
+
+#define ADIS16460_REG_FLASH_CNT 0x00
+#define ADIS16460_REG_DIAG_STAT 0x02
+#define ADIS16460_REG_X_GYRO_LOW 0x04
+#define ADIS16460_REG_X_GYRO_OUT 0x06
+#define ADIS16460_REG_Y_GYRO_LOW 0x08
+#define ADIS16460_REG_Y_GYRO_OUT 0x0A
+#define ADIS16460_REG_Z_GYRO_LOW 0x0C
+#define ADIS16460_REG_Z_GYRO_OUT 0x0E
+#define ADIS16460_REG_X_ACCL_LOW 0x10
+#define ADIS16460_REG_X_ACCL_OUT 0x12
+#define ADIS16460_REG_Y_ACCL_LOW 0x14
+#define ADIS16460_REG_Y_ACCL_OUT 0x16
+#define ADIS16460_REG_Z_ACCL_LOW 0x18
+#define ADIS16460_REG_Z_ACCL_OUT 0x1A
+#define ADIS16460_REG_SMPL_CNTR 0x1C
+#define ADIS16460_REG_TEMP_OUT 0x1E
+#define ADIS16460_REG_X_DELT_ANG 0x24
+#define ADIS16460_REG_Y_DELT_ANG 0x26
+#define ADIS16460_REG_Z_DELT_ANG 0x28
+#define ADIS16460_REG_X_DELT_VEL 0x2A
+#define ADIS16460_REG_Y_DELT_VEL 0x2C
+#define ADIS16460_REG_Z_DELT_VEL 0x2E
+#define ADIS16460_REG_MSC_CTRL 0x32
+#define ADIS16460_REG_SYNC_SCAL 0x34
+#define ADIS16460_REG_DEC_RATE 0x36
+#define ADIS16460_REG_FLTR_CTRL 0x38
+#define ADIS16460_REG_GLOB_CMD 0x3E
+#define ADIS16460_REG_X_GYRO_OFF 0x40
+#define ADIS16460_REG_Y_GYRO_OFF 0x42
+#define ADIS16460_REG_Z_GYRO_OFF 0x44
+#define ADIS16460_REG_X_ACCL_OFF 0x46
+#define ADIS16460_REG_Y_ACCL_OFF 0x48
+#define ADIS16460_REG_Z_ACCL_OFF 0x4A
+#define ADIS16460_REG_LOT_ID1 0x52
+#define ADIS16460_REG_LOT_ID2 0x54
+#define ADIS16460_REG_PROD_ID 0x56
+#define ADIS16460_REG_SERIAL_NUM 0x58
+#define ADIS16460_REG_CAL_SGNTR 0x60
+#define ADIS16460_REG_CAL_CRC 0x62
+#define ADIS16460_REG_CODE_SGNTR 0x64
+#define ADIS16460_REG_CODE_CRC 0x66
+
+struct adis16460_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+ unsigned int gyro_max_val;
+ unsigned int gyro_max_scale;
+ unsigned int accel_max_val;
+ unsigned int accel_max_scale;
+};
+
+struct adis16460 {
+ const struct adis16460_chip_info *chip_info;
+ struct adis adis;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int adis16460_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16460 *adis16460 = arg;
+ u16 serial;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_SERIAL_NUM,
+ &serial);
+ if (ret < 0)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16460_serial_number_fops,
+ adis16460_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16460_show_product_id(void *arg, u64 *val)
+{
+ struct adis16460 *adis16460 = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_PROD_ID,
+ &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16460_product_id_fops,
+ adis16460_show_product_id, NULL, "%llu\n");
+
+static int adis16460_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16460 *adis16460 = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&adis16460->adis, ADIS16460_REG_FLASH_CNT,
+ &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16460_flash_count_fops,
+ adis16460_show_flash_count, NULL, "%lld\n");
+
+static int adis16460_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16460 *adis16460 = iio_priv(indio_dev);
+
+ debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
+ adis16460, &adis16460_serial_number_fops);
+ debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+ adis16460, &adis16460_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ adis16460, &adis16460_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+static int adis16460_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
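+/*
+ * The output data rate is 2048 / (DEC_RATE + 1) SPS. The two helpers below
+ * convert between that register value and IIO's val/val2 (Hz + micro-Hz)
+ * pair. For example, requesting 512 Hz gives t = 2048000 / 512000 = 4, so
+ * DEC_RATE is written as 3 and the device outputs 2048 / (3 + 1) = 512 SPS.
+ */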
+static int adis16460_set_freq(struct iio_dev *indio_dev, int val, int val2)
+{
+ struct adis16460 *st = iio_priv(indio_dev);
+ int t;
+
+ t = val * 1000 + val2 / 1000;
+ if (t <= 0)
+ return -EINVAL;
+
+ t = 2048000 / t;
+ if (t > 2048)
+ t = 2048;
+
+ if (t != 0)
+ t--;
+
+ return adis_write_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, t);
+}
+
+static int adis16460_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+{
+ struct adis16460 *st = iio_priv(indio_dev);
+ uint16_t t;
+ int ret;
+ unsigned int freq;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t);
+ if (ret < 0)
+ return ret;
+
+ freq = 2048000 / (t + 1);
+ *val = freq / 1000;
+ *val2 = (freq % 1000) * 1000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int adis16460_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct adis16460 *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->chip_info->gyro_max_scale;
+ *val2 = st->chip_info->gyro_max_val;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->chip_info->accel_max_scale;
+ *val2 = st->chip_info->accel_max_val;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = 50; /* 50 milli degrees Celsius/LSB */
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 500; /* 25 degrees Celsius = 0x0000 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return adis16460_get_freq(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16460_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return adis16460_set_freq(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+enum {
+ ADIS16460_SCAN_GYRO_X,
+ ADIS16460_SCAN_GYRO_Y,
+ ADIS16460_SCAN_GYRO_Z,
+ ADIS16460_SCAN_ACCEL_X,
+ ADIS16460_SCAN_ACCEL_Y,
+ ADIS16460_SCAN_ACCEL_Z,
+ ADIS16460_SCAN_TEMP,
+};
+
+#define ADIS16460_MOD_CHANNEL(_type, _mod, _address, _si, _bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_bits), \
+ .storagebits = (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16460_GYRO_CHANNEL(_mod) \
+ ADIS16460_MOD_CHANNEL(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16460_REG_ ## _mod ## _GYRO_LOW, ADIS16460_SCAN_GYRO_ ## _mod, \
+ 32)
+
+#define ADIS16460_ACCEL_CHANNEL(_mod) \
+ ADIS16460_MOD_CHANNEL(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16460_REG_ ## _mod ## _ACCL_LOW, ADIS16460_SCAN_ACCEL_ ## _mod, \
+ 32)
+
+#define ADIS16460_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ADIS16460_REG_TEMP_OUT, \
+ .scan_index = ADIS16460_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16460_channels[] = {
+ ADIS16460_GYRO_CHANNEL(X),
+ ADIS16460_GYRO_CHANNEL(Y),
+ ADIS16460_GYRO_CHANNEL(Z),
+ ADIS16460_ACCEL_CHANNEL(X),
+ ADIS16460_ACCEL_CHANNEL(Y),
+ ADIS16460_ACCEL_CHANNEL(Z),
+ ADIS16460_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+static const struct adis16460_chip_info adis16460_chip_info = {
+ .channels = adis16460_channels,
+ .num_channels = ARRAY_SIZE(adis16460_channels),
+ /*
+ * storing the value in rad/degree and the scale in degree
+ * gives us the result in rad and better precision than
+ * storing the scale directly in rad.
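+ * For example, the gyroscope scale exposed below evaluates to
+ * 1 / IIO_RAD_TO_DEGREE(200 << 16) rad/s per 32-bit LSB, i.e.
+ * 1/200 (0.005) degree/s per 16-bit word.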
+ */
+ .gyro_max_val = IIO_RAD_TO_DEGREE(200 << 16),
+ .gyro_max_scale = 1,
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
+ .accel_max_scale = 5,
+};
+
+static const struct iio_info adis16460_info = {
+ .read_raw = &adis16460_read_raw,
+ .write_raw = &adis16460_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static int adis16460_enable_irq(struct adis *adis, bool enable)
+{
+ /*
+ * There is no way to gate the data-ready signal internally inside the
+ * ADIS16460 :(
+ */
+ if (enable)
+ enable_irq(adis->spi->irq);
+ else
+ disable_irq(adis->spi->irq);
+
+ return 0;
+}
+
+static int adis16460_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16460 *st = iio_priv(indio_dev);
+ uint16_t prod_id;
+ unsigned int device_id;
+ int ret;
+
+ adis_reset(&st->adis);
+ msleep(222);
+
+ ret = adis_write_reg_16(&st->adis, ADIS16460_REG_GLOB_CMD, BIT(1));
+ if (ret)
+ return ret;
+ msleep(75);
+
+ ret = adis_check_status(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16460_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.\n",
+ device_id, prod_id);
+
+ return 0;
+}
+
+#define ADIS16460_DIAG_STAT_IN_CLK_OOS 7
+#define ADIS16460_DIAG_STAT_FLASH_MEM 6
+#define ADIS16460_DIAG_STAT_SELF_TEST 5
+#define ADIS16460_DIAG_STAT_OVERRANGE 4
+#define ADIS16460_DIAG_STAT_SPI_COMM 3
+#define ADIS16460_DIAG_STAT_FLASH_UPT 2
+
+static const char * const adis16460_status_error_msgs[] = {
+ [ADIS16460_DIAG_STAT_IN_CLK_OOS] = "Input clock out of sync",
+ [ADIS16460_DIAG_STAT_FLASH_MEM] = "Flash memory failure",
+ [ADIS16460_DIAG_STAT_SELF_TEST] = "Self test diagnostic failure",
+ [ADIS16460_DIAG_STAT_OVERRANGE] = "Sensor overrange",
+ [ADIS16460_DIAG_STAT_SPI_COMM] = "SPI communication failure",
+ [ADIS16460_DIAG_STAT_FLASH_UPT] = "Flash update failure",
+};
+
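+/*
+ * The read/write/cs_change delays below are SPI stall times in microseconds,
+ * as defined by struct adis_data in the common adis library.
+ */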
+static const struct adis_data adis16460_data = {
+ .diag_stat_reg = ADIS16460_REG_DIAG_STAT,
+ .glob_cmd_reg = ADIS16460_REG_GLOB_CMD,
+ .has_paging = false,
+ .read_delay = 5,
+ .write_delay = 5,
+ .cs_change_delay = 16,
+ .status_error_msgs = adis16460_status_error_msgs,
+ .status_error_mask = BIT(ADIS16460_DIAG_STAT_IN_CLK_OOS) |
+ BIT(ADIS16460_DIAG_STAT_FLASH_MEM) |
+ BIT(ADIS16460_DIAG_STAT_SELF_TEST) |
+ BIT(ADIS16460_DIAG_STAT_OVERRANGE) |
+ BIT(ADIS16460_DIAG_STAT_SPI_COMM) |
+ BIT(ADIS16460_DIAG_STAT_FLASH_UPT),
+ .enable_irq = adis16460_enable_irq,
+};
+
+static int adis16460_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis16460 *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st = iio_priv(indio_dev);
+
+ st->chip_info = &adis16460_chip_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->info = &adis16460_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16460_data);
+ if (ret)
+ return ret;
+
+ ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ adis16460_enable_irq(&st->adis, 0);
+
+ ret = adis16460_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ adis16460_debugfs_init(indio_dev);
+
+ return 0;
+
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+ return ret;
+}
+
+static int adis16460_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16460 *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16460_ids[] = {
+ { "adis16460", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adis16460_ids);
+
+static const struct of_device_id adis16460_of_match[] = {
+ { .compatible = "adi,adis16460" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adis16460_of_match);
+
+static struct spi_driver adis16460_driver = {
+ .driver = {
+ .name = "adis16460",
+ .of_match_table = adis16460_of_match,
+ },
+ .id_table = adis16460_ids,
+ .probe = adis16460_probe,
+ .remove = adis16460_remove,
+};
+module_spi_driver(adis16460_driver);
+
+MODULE_AUTHOR("Dragos Bogdan <dragos.bogdan@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16460 IMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 395f3bd7de0a..e4c4c12236a7 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -14,8 +14,9 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050/6500/9150 and
- ICM20608/20602 motion tracking devices over I2C.
+ This driver supports the Invensense MPU6000/6050/6500/6515,
+ MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
+ over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -25,7 +26,8 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6050/6500/9150 and
- ICM20608/20602 motion tracking devices over SPI.
+ This driver supports the Invensense MPU6000/6050/6500/6515,
+ MPU9150/9250/9255 and ICM20608/20602 motion tracking devices
+ over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 53a59957cc54..b17f060b52fc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
};
+static const unsigned long inv_mpu_scan_masks[] = {
+ /* 3-axis accel */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ /* 3-axis gyro */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 6-axis accel + gyro */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ 0,
+};
+
static const struct iio_chan_spec inv_icm20602_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
{
@@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = {
INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
};
+static const unsigned long inv_icm20602_scan_masks[] = {
+ /* 3-axis accel + temp (mandatory) */
+ BIT(INV_ICM20602_SCAN_ACCL_X)
+ | BIT(INV_ICM20602_SCAN_ACCL_Y)
+ | BIT(INV_ICM20602_SCAN_ACCL_Z)
+ | BIT(INV_ICM20602_SCAN_TEMP),
+ /* 3-axis gyro + temp (mandatory) */
+ BIT(INV_ICM20602_SCAN_GYRO_X)
+ | BIT(INV_ICM20602_SCAN_GYRO_Y)
+ | BIT(INV_ICM20602_SCAN_GYRO_Z)
+ | BIT(INV_ICM20602_SCAN_TEMP),
+ /* 6-axis accel + gyro + temp (mandatory) */
+ BIT(INV_ICM20602_SCAN_ACCL_X)
+ | BIT(INV_ICM20602_SCAN_ACCL_Y)
+ | BIT(INV_ICM20602_SCAN_ACCL_Z)
+ | BIT(INV_ICM20602_SCAN_GYRO_X)
+ | BIT(INV_ICM20602_SCAN_GYRO_Y)
+ | BIT(INV_ICM20602_SCAN_GYRO_Z)
+ | BIT(INV_ICM20602_SCAN_TEMP),
+ 0,
+};
+
/*
* The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
* INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
@@ -1096,10 +1137,9 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
if (result)
return result;
- result = devm_add_action(dev, inv_mpu_core_disable_regulator_action,
+ result = devm_add_action_or_reset(dev, inv_mpu_core_disable_regulator_action,
st);
if (result) {
- inv_mpu_core_disable_regulator_action(st);
dev_err(dev, "Failed to setup regulator cleanup action %d\n",
result);
return result;
@@ -1130,9 +1170,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
if (chip_type == INV_ICM20602) {
indio_dev->channels = inv_icm20602_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
+ indio_dev->available_scan_masks = inv_icm20602_scan_masks;
} else {
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
}
indio_dev->info = &mpu_info;
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 002a423eae52..77aa0e77212d 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -2,15 +2,17 @@
config IIO_ST_LSM6DSX
tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors"
- depends on (I2C || SPI)
+ depends on (I2C || SPI || I3C)
select IIO_BUFFER
select IIO_KFIFO_BUF
select IIO_ST_LSM6DSX_I2C if (I2C)
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
+ select IIO_ST_LSM6DSX_I3C if (I3C)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr
+ ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
+ ism330dhcx and the accelerometer/gyroscope of lsm9ds1.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
@@ -24,3 +26,8 @@ config IIO_ST_LSM6DSX_SPI
tristate
depends on IIO_ST_LSM6DSX
select REGMAP_SPI
+
+config IIO_ST_LSM6DSX_I3C
+ tristate
+ depends on IIO_ST_LSM6DSX
+ select REGMAP_I3C
diff --git a/drivers/iio/imu/st_lsm6dsx/Makefile b/drivers/iio/imu/st_lsm6dsx/Makefile
index 28cc67399d94..57cbcd67d64f 100644
--- a/drivers/iio/imu/st_lsm6dsx/Makefile
+++ b/drivers/iio/imu/st_lsm6dsx/Makefile
@@ -5,3 +5,4 @@ st_lsm6dsx-y := st_lsm6dsx_core.o st_lsm6dsx_buffer.o \
obj-$(CONFIG_IIO_ST_LSM6DSX) += st_lsm6dsx.o
obj-$(CONFIG_IIO_ST_LSM6DSX_I2C) += st_lsm6dsx_i2c.o
obj-$(CONFIG_IIO_ST_LSM6DSX_SPI) += st_lsm6dsx_spi.o
+obj-$(CONFIG_IIO_ST_LSM6DSX_I3C) += st_lsm6dsx_i3c.o
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index c14bf533b66b..80e42c7dbcbe 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -22,6 +22,9 @@
#define ST_ASM330LHH_DEV_NAME "asm330lhh"
#define ST_LSM6DSOX_DEV_NAME "lsm6dsox"
#define ST_LSM6DSR_DEV_NAME "lsm6dsr"
+#define ST_LSM6DS3TRC_DEV_NAME "lsm6ds3tr-c"
+#define ST_ISM330DHCX_DEV_NAME "ism330dhcx"
+#define ST_LSM9DS1_DEV_NAME "lsm9ds1-imu"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -33,6 +36,9 @@ enum st_lsm6dsx_hw_id {
ST_ASM330LHH_ID,
ST_LSM6DSOX_ID,
ST_LSM6DSR_ID,
+ ST_LSM6DS3TRC_ID,
+ ST_ISM330DHCX_ID,
+ ST_LSM9DS1_ID,
ST_LSM6DSX_MAX_ID,
};
@@ -54,8 +60,8 @@ enum st_lsm6dsx_hw_id {
.address = addr, \
.modified = 1, \
.channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = scan_idx, \
.scan_type = { \
@@ -71,6 +77,7 @@ struct st_lsm6dsx_reg {
u8 mask;
};
+struct st_lsm6dsx_sensor;
struct st_lsm6dsx_hw;
struct st_lsm6dsx_odr {
@@ -97,12 +104,14 @@ struct st_lsm6dsx_fs_table_entry {
/**
* struct st_lsm6dsx_fifo_ops - ST IMU FIFO settings
+ * @update_fifo: Update FIFO configuration callback.
* @read_fifo: Read FIFO callback.
* @fifo_th: FIFO threshold register info (addr + mask).
* @fifo_diff: FIFO diff status register info (addr + mask).
* @th_wl: FIFO threshold word length.
*/
struct st_lsm6dsx_fifo_ops {
+ int (*update_fifo)(struct st_lsm6dsx_sensor *sensor, bool enable);
int (*read_fifo)(struct st_lsm6dsx_hw *hw);
struct {
u8 addr;
@@ -196,8 +205,14 @@ struct st_lsm6dsx_ext_dev_settings {
/**
* struct st_lsm6dsx_settings - ST IMU sensor settings
* @wai: Sensor WhoAmI default value.
+ * @int1_addr: Control register address for INT1.
+ * @int2_addr: Control register address for INT2.
+ * @reset_addr: Register address for reset/reboot.
* @max_fifo_size: Sensor max fifo length in FIFO words.
* @id: List of hw id/device name supported by the driver configuration.
+ * @channels: IIO channels supported by the device.
+ * @odr_table: Hw sensors odr table (Hz + val).
+ * @fs_table: Hw sensors gain table (gain + val).
* @decimator: List of decimator register info (addr + mask).
* @batch: List of FIFO batching register info (addr + mask).
* @fifo_ops: Sensor hw FIFO parameters.
@@ -206,11 +221,20 @@ struct st_lsm6dsx_ext_dev_settings {
*/
struct st_lsm6dsx_settings {
u8 wai;
+ u8 int1_addr;
+ u8 int2_addr;
+ u8 reset_addr;
u16 max_fifo_size;
struct {
enum st_lsm6dsx_hw_id hw_id;
const char *name;
} id[ST_LSM6DSX_MAX_ID];
+ struct {
+ const struct iio_chan_spec *chan;
+ int len;
+ } channels[2];
+ struct st_lsm6dsx_odr_table_entry odr_table[2];
+ struct st_lsm6dsx_fs_table_entry fs_table[2];
struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
struct st_lsm6dsx_reg batch[ST_LSM6DSX_MAX_ID];
struct st_lsm6dsx_fifo_ops fifo_ops;
@@ -314,6 +338,7 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw);
int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val);
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor,
u16 watermark);
+int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable);
int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw);
int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index e4d8a79d557d..b0f3da1976e4 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -2,10 +2,10 @@
/*
* STMicroelectronics st_lsm6dsx FIFO buffer library driver
*
- * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC: The FIFO buffer can be
- * configured to store data from gyroscope and accelerometer. Samples are
- * queued without any tag according to a specific pattern based on
- * 'FIFO data sets' (6 bytes each):
+ * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+ * The FIFO buffer can be configured to store data from gyroscope and
+ * accelerometer. Samples are queued without any tag according to a
+ * specific pattern based on 'FIFO data sets' (6 bytes each):
* - 1st data set is reserved for gyroscope data
* - 2nd data set is reserved for accelerometer data
* The FIFO pattern changes depending on the ODRs and decimation factors
@@ -14,9 +14,10 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR: The FIFO buffer can be configured to
- * store data from gyroscope and accelerometer. Each sample is queued with
- * a tag (1B) indicating data source (gyroscope, accelerometer, hw timer).
+ * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX: The FIFO buffer can be
+ * configured to store data from gyroscope and accelerometer. Each sample
+ * is queued with a tag (1B) indicating data source (gyroscope, accelerometer,
+ * hw timer).
*
* FIFO supported modes:
* - BYPASS: FIFO disabled
@@ -601,9 +602,8 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
return err;
}
-static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
+int st_lsm6dsx_update_fifo(struct st_lsm6dsx_sensor *sensor, bool enable)
{
- struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
int err;
@@ -670,17 +670,29 @@ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
count = hw->settings->fifo_ops.read_fifo(hw);
mutex_unlock(&hw->fifo_lock);
- return !count ? IRQ_NONE : IRQ_HANDLED;
+ return count ? IRQ_HANDLED : IRQ_NONE;
}
static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
{
- return st_lsm6dsx_update_fifo(iio_dev, true);
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (!hw->settings->fifo_ops.update_fifo)
+ return -ENOTSUPP;
+
+ return hw->settings->fifo_ops.update_fifo(sensor, true);
}
static int st_lsm6dsx_buffer_postdisable(struct iio_dev *iio_dev)
{
- return st_lsm6dsx_update_fifo(iio_dev, false);
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (!hw->settings->fifo_ops.update_fifo)
+ return -ENOTSUPP;
+
+ return hw->settings->fifo_ops.update_fifo(sensor, false);
}
static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index a6702a74570e..2d3495560136 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -10,6 +10,8 @@
* +-125/+-245/+-500/+-1000/+-2000 dps
* LSM6DSx series has an integrated First-In-First-Out (FIFO) buffer
* allowing dynamic batching of sensor data.
+ * LSM9DSx series is similar but includes an additional magnetometer, handled
+ * by a different driver.
*
* Supported sensors:
* - LSM6DS3:
@@ -18,18 +20,25 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
- * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC:
+ * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
* - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX:
* - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
+ * - LSM9DS1:
+ * - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
+ * - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
+ * - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
+ * - Gyroscope supported full-scale [dps]: +-245/+-500/+-2000
+ * - FIFO size: 32
+ *
* Copyright 2016 STMicroelectronics Inc.
*
* Lorenzo Bianconi <lorenzo.bianconi@st.com>
@@ -49,79 +58,110 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_INT1_ADDR 0x0d
-#define ST_LSM6DSX_REG_INT2_ADDR 0x0e
#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
#define ST_LSM6DSX_REG_WHOAMI_ADDR 0x0f
-#define ST_LSM6DSX_REG_RESET_ADDR 0x12
#define ST_LSM6DSX_REG_RESET_MASK BIT(0)
#define ST_LSM6DSX_REG_BOOT_MASK BIT(7)
#define ST_LSM6DSX_REG_BDU_ADDR 0x12
#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
-#define ST_LSM6DSX_REG_INT2_ON_INT1_ADDR 0x13
-#define ST_LSM6DSX_REG_INT2_ON_INT1_MASK BIT(5)
-
-#define ST_LSM6DSX_REG_ACC_OUT_X_L_ADDR 0x28
-#define ST_LSM6DSX_REG_ACC_OUT_Y_L_ADDR 0x2a
-#define ST_LSM6DSX_REG_ACC_OUT_Z_L_ADDR 0x2c
-
-#define ST_LSM6DSX_REG_GYRO_OUT_X_L_ADDR 0x22
-#define ST_LSM6DSX_REG_GYRO_OUT_Y_L_ADDR 0x24
-#define ST_LSM6DSX_REG_GYRO_OUT_Z_L_ADDR 0x26
-
-static const struct st_lsm6dsx_odr_table_entry st_lsm6dsx_odr_table[] = {
- [ST_LSM6DSX_ID_ACC] = {
- .reg = {
- .addr = 0x10,
- .mask = GENMASK(7, 4),
- },
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
- },
- [ST_LSM6DSX_ID_GYRO] = {
- .reg = {
- .addr = 0x11,
- .mask = GENMASK(7, 4),
- },
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
- }
+
+static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
+ ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
-static const struct st_lsm6dsx_fs_table_entry st_lsm6dsx_fs_table[] = {
- [ST_LSM6DSX_ID_ACC] = {
- .reg = {
- .addr = 0x10,
- .mask = GENMASK(3, 2),
- },
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
- },
- [ST_LSM6DSX_ID_GYRO] = {
- .reg = {
- .addr = 0x11,
- .mask = GENMASK(3, 2),
- },
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
- }
+static const struct iio_chan_spec st_lsm6dsx_gyro_channels[] = {
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x22, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x24, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x26, IIO_MOD_Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static const struct iio_chan_spec st_lsm6ds0_gyro_channels[] = {
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x18, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x1a, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, 0x1c, IIO_MOD_Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
+ .wai = 0x68,
+ .int1_addr = 0x0c,
+ .int2_addr = 0x0d,
+ .reset_addr = 0x22,
+ .max_fifo_size = 32,
+ .id = {
+ {
+ .hw_id = ST_LSM9DS1_ID,
+ .name = ST_LSM9DS1_DEV_NAME,
+ },
+ },
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6ds0_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6ds0_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x20,
+ .mask = GENMASK(7, 5),
+ },
+ .odr_avl[0] = { 10, 0x01 },
+ .odr_avl[1] = { 50, 0x02 },
+ .odr_avl[2] = { 119, 0x03 },
+ .odr_avl[3] = { 238, 0x04 },
+ .odr_avl[4] = { 476, 0x05 },
+ .odr_avl[5] = { 952, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 5),
+ },
+ .odr_avl[0] = { 15, 0x01 },
+ .odr_avl[1] = { 60, 0x02 },
+ .odr_avl[2] = { 119, 0x03 },
+ .odr_avl[3] = { 238, 0x04 },
+ .odr_avl[4] = { 476, 0x05 },
+ .odr_avl[5] = { 952, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x20,
+ .mask = GENMASK(4, 3),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(4, 3),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+ },
+ },
+ },
+ {
.wai = 0x69,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 1365,
.id = {
{
@@ -129,6 +169,64 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_LSM6DS3_DEV_NAME,
},
},
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -140,6 +238,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_fifo,
.fifo_th = {
.addr = 0x06,
@@ -172,6 +271,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
{
.wai = 0x69,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 682,
.id = {
{
@@ -179,6 +281,64 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_LSM6DS3H_DEV_NAME,
},
},
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -190,6 +350,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_fifo,
.fifo_th = {
.addr = 0x06,
@@ -222,6 +383,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
{
.wai = 0x6a,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 682,
.id = {
{
@@ -233,6 +397,67 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
}, {
.hw_id = ST_ISM330DLC_ID,
.name = ST_ISM330DLC_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DS3TRC_ID,
+ .name = ST_LSM6DS3TRC_DEV_NAME,
+ },
+ },
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
},
},
.decimator = {
@@ -246,6 +471,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_fifo,
.fifo_th = {
.addr = 0x06,
@@ -278,6 +504,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
{
.wai = 0x6c,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 512,
.id = {
{
@@ -288,6 +517,64 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_LSM6DSOX_DEV_NAME,
},
},
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -299,6 +586,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_tagged_fifo,
.fifo_th = {
.addr = 0x07,
@@ -306,7 +594,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
.fifo_diff = {
.addr = 0x3a,
- .mask = GENMASK(8, 0),
+ .mask = GENMASK(9, 0),
},
.th_wl = 1,
},
@@ -349,6 +637,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
{
.wai = 0x6b,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 512,
.id = {
{
@@ -356,6 +647,64 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_ASM330LHH_DEV_NAME,
},
},
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -367,6 +716,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_tagged_fifo,
.fifo_th = {
.addr = 0x07,
@@ -374,7 +724,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
.fifo_diff = {
.addr = 0x3a,
- .mask = GENMASK(8, 0),
+ .mask = GENMASK(9, 0),
},
.th_wl = 1,
},
@@ -391,11 +741,75 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
{
.wai = 0x6b,
+ .int1_addr = 0x0d,
+ .int2_addr = 0x0e,
+ .reset_addr = 0x12,
.max_fifo_size = 512,
.id = {
{
.hw_id = ST_LSM6DSR_ID,
.name = ST_LSM6DSR_DEV_NAME,
+ }, {
+ .hw_id = ST_ISM330DHCX_ID,
+ .name = ST_ISM330DHCX_DEV_NAME,
+ },
+ },
+ .channels = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .chan = st_lsm6dsx_acc_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_acc_channels),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .chan = st_lsm6dsx_gyro_channels,
+ .len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
+ },
+ },
+ .odr_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(7, 4),
+ },
+ .odr_avl[0] = { 13, 0x01 },
+ .odr_avl[1] = { 26, 0x02 },
+ .odr_avl[2] = { 52, 0x03 },
+ .odr_avl[3] = { 104, 0x04 },
+ .odr_avl[4] = { 208, 0x05 },
+ .odr_avl[5] = { 416, 0x06 },
+ },
+ },
+ .fs_table = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .reg = {
+ .addr = 0x10,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .reg = {
+ .addr = 0x11,
+ .mask = GENMASK(3, 2),
+ },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
},
},
.batch = {
@@ -409,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
},
.fifo_ops = {
+ .update_fifo = st_lsm6dsx_update_fifo,
.read_fifo = st_lsm6dsx_read_tagged_fifo,
.fifo_th = {
.addr = 0x07,
@@ -416,7 +831,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
.fifo_diff = {
.addr = 0x3a,
- .mask = GENMASK(8, 0),
+ .mask = GENMASK(9, 0),
},
.th_wl = 1,
},
@@ -459,26 +874,6 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
},
};
-static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, ST_LSM6DSX_REG_ACC_OUT_X_L_ADDR,
- IIO_MOD_X, 0),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, ST_LSM6DSX_REG_ACC_OUT_Y_L_ADDR,
- IIO_MOD_Y, 1),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, ST_LSM6DSX_REG_ACC_OUT_Z_L_ADDR,
- IIO_MOD_Z, 2),
- IIO_CHAN_SOFT_TIMESTAMP(3),
-};
-
-static const struct iio_chan_spec st_lsm6dsx_gyro_channels[] = {
- ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, ST_LSM6DSX_REG_GYRO_OUT_X_L_ADDR,
- IIO_MOD_X, 0),
- ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, ST_LSM6DSX_REG_GYRO_OUT_Y_L_ADDR,
- IIO_MOD_Y, 1),
- ST_LSM6DSX_CHANNEL(IIO_ANGL_VEL, ST_LSM6DSX_REG_GYRO_OUT_Z_L_ADDR,
- IIO_MOD_Z, 2),
- IIO_CHAN_SOFT_TIMESTAMP(3),
-};
-
int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable)
{
const struct st_lsm6dsx_shub_settings *hub_settings;
@@ -533,23 +928,22 @@ static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id,
static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
u32 gain)
{
- struct st_lsm6dsx_hw *hw = sensor->hw;
- const struct st_lsm6dsx_reg *reg;
+ const struct st_lsm6dsx_fs_table_entry *fs_table;
unsigned int data;
int i, err;
- u8 val;
+ fs_table = &sensor->hw->settings->fs_table[sensor->id];
for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
- if (st_lsm6dsx_fs_table[sensor->id].fs_avl[i].gain == gain)
+ if (fs_table->fs_avl[i].gain == gain)
break;
if (i == ST_LSM6DSX_FS_LIST_SIZE)
return -EINVAL;
- val = st_lsm6dsx_fs_table[sensor->id].fs_avl[i].val;
- reg = &st_lsm6dsx_fs_table[sensor->id].reg;
- data = ST_LSM6DSX_SHIFT_VAL(val, reg->mask);
- err = st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask, data);
+ data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val,
+ fs_table->reg.mask);
+ err = st_lsm6dsx_update_bits_locked(sensor->hw, fs_table->reg.addr,
+ fs_table->reg.mask, data);
if (err < 0)
return err;
@@ -560,20 +954,22 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val)
{
+ const struct st_lsm6dsx_odr_table_entry *odr_table;
int i;
+ odr_table = &sensor->hw->settings->odr_table[sensor->id];
for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
/*
* ext devices can run at different odr respect to
* accel sensor
*/
- if (st_lsm6dsx_odr_table[sensor->id].odr_avl[i].hz >= odr)
+ if (odr_table->odr_avl[i].hz >= odr)
break;
if (i == ST_LSM6DSX_ODR_LIST_SIZE)
return -EINVAL;
- *val = st_lsm6dsx_odr_table[sensor->id].odr_avl[i].val;
+ *val = odr_table->odr_avl[i].val;
return 0;
}
@@ -638,7 +1034,7 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
return err;
}
- reg = &st_lsm6dsx_odr_table[ref_sensor->id].reg;
+ reg = &hw->settings->odr_table[ref_sensor->id].reg;
data = ST_LSM6DSX_SHIFT_VAL(val, reg->mask);
return st_lsm6dsx_update_bits_locked(hw, reg->addr, reg->mask, data);
}
@@ -783,11 +1179,12 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
enum st_lsm6dsx_sensor_id id = sensor->id;
+ struct st_lsm6dsx_hw *hw = sensor->hw;
int i, len = 0;
for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- st_lsm6dsx_odr_table[id].odr_avl[i].hz);
+ hw->settings->odr_table[id].odr_avl[i].hz);
buf[len - 1] = '\n';
return len;
@@ -798,12 +1195,19 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
char *buf)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
+ const struct st_lsm6dsx_fs_table_entry *fs_table;
enum st_lsm6dsx_sensor_id id = sensor->id;
+ struct st_lsm6dsx_hw *hw = sensor->hw;
int i, len = 0;
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
+ fs_table = &hw->settings->fs_table[id];
+ for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
+ if (!fs_table->fs_avl[i].gain)
+ break;
+
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- st_lsm6dsx_fs_table[id].fs_avl[i].gain);
+ fs_table->fs_avl[i].gain);
+ }
buf[len - 1] = '\n';
return len;
@@ -873,10 +1277,10 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
switch (drdy_pin) {
case 1:
- *drdy_reg = ST_LSM6DSX_REG_INT1_ADDR;
+ *drdy_reg = hw->settings->int1_addr;
break;
case 2:
- *drdy_reg = ST_LSM6DSX_REG_INT2_ADDR;
+ *drdy_reg = hw->settings->int2_addr;
break;
default:
dev_err(hw->dev, "unsupported data ready pin\n");
@@ -976,7 +1380,7 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
int err;
/* device sw reset */
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_RESET_ADDR,
+ err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
ST_LSM6DSX_REG_RESET_MASK,
FIELD_PREP(ST_LSM6DSX_REG_RESET_MASK, 1));
if (err < 0)
@@ -985,7 +1389,7 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
msleep(50);
/* reload trimming parameter */
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_RESET_ADDR,
+ err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
ST_LSM6DSX_REG_BOOT_MASK,
FIELD_PREP(ST_LSM6DSX_REG_BOOT_MASK, 1));
if (err < 0)
@@ -1033,28 +1437,24 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
iio_dev->modes = INDIO_DIRECT_MODE;
iio_dev->dev.parent = hw->dev;
iio_dev->available_scan_masks = st_lsm6dsx_available_scan_masks;
+ iio_dev->channels = hw->settings->channels[id].chan;
+ iio_dev->num_channels = hw->settings->channels[id].len;
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = st_lsm6dsx_odr_table[id].odr_avl[0].hz;
- sensor->gain = st_lsm6dsx_fs_table[id].fs_avl[0].gain;
+ sensor->odr = hw->settings->odr_table[id].odr_avl[0].hz;
+ sensor->gain = hw->settings->fs_table[id].fs_avl[0].gain;
sensor->watermark = 1;
switch (id) {
case ST_LSM6DSX_ID_ACC:
- iio_dev->channels = st_lsm6dsx_acc_channels;
- iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_acc_channels);
iio_dev->info = &st_lsm6dsx_acc_info;
-
scnprintf(sensor->name, sizeof(sensor->name), "%s_accel",
name);
break;
case ST_LSM6DSX_ID_GYRO:
- iio_dev->channels = st_lsm6dsx_gyro_channels;
- iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_gyro_channels);
iio_dev->info = &st_lsm6dsx_gyro_info;
-
scnprintf(sensor->name, sizeof(sensor->name), "%s_gyro",
name);
break;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index b3211e0ac07b..f52511059545 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -75,6 +75,18 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm6dsr",
.data = (void *)ST_LSM6DSR_ID,
},
+ {
+ .compatible = "st,lsm6ds3tr-c",
+ .data = (void *)ST_LSM6DS3TRC_ID,
+ },
+ {
+ .compatible = "st,ism330dhcx",
+ .data = (void *)ST_ISM330DHCX_ID,
+ },
+ {
+ .compatible = "st,lsm9ds1-imu",
+ .data = (void *)ST_LSM9DS1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -89,6 +101,9 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_ASM330LHH_DEV_NAME, ST_ASM330LHH_ID },
{ ST_LSM6DSOX_DEV_NAME, ST_LSM6DSOX_ID },
{ ST_LSM6DSR_DEV_NAME, ST_LSM6DSR_ID },
+ { ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
+ { ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
+ { ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
new file mode 100644
index 000000000000..57e633121bdc
--- /dev/null
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <vitor.soares@synopsys.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "st_lsm6dsx.h"
+
+static const struct i3c_device_id st_lsm6dsx_i3c_ids[] = {
+ I3C_DEVICE(0x0104, 0x006C, (void *)ST_LSM6DSO_ID),
+ I3C_DEVICE(0x0104, 0x006B, (void *)ST_LSM6DSR_ID),
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(i3c, st_lsm6dsx_i3c_ids);
+
+static int st_lsm6dsx_i3c_probe(struct i3c_device *i3cdev)
+{
+ struct regmap_config st_lsm6dsx_i3c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ const struct i3c_device_id *id = i3c_device_match_id(i3cdev,
+ st_lsm6dsx_i3c_ids);
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i3c(i3cdev, &st_lsm6dsx_i3c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&i3cdev->dev, "Failed to register i3c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return st_lsm6dsx_probe(&i3cdev->dev, 0, (uintptr_t)id->data, regmap);
+}
+
+static struct i3c_driver st_lsm6dsx_driver = {
+ .driver = {
+ .name = "st_lsm6dsx_i3c",
+ .pm = &st_lsm6dsx_pm_ops,
+ },
+ .probe = st_lsm6dsx_i3c_probe,
+ .id_table = st_lsm6dsx_i3c_ids,
+};
+module_i3c_driver(st_lsm6dsx_driver);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("STMicroelectronics st_lsm6dsx i3c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index c9d3c4711018..344b28dddebb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -75,6 +75,18 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm6dsr",
.data = (void *)ST_LSM6DSR_ID,
},
+ {
+ .compatible = "st,lsm6ds3tr-c",
+ .data = (void *)ST_LSM6DS3TRC_ID,
+ },
+ {
+ .compatible = "st,ism330dhcx",
+ .data = (void *)ST_ISM330DHCX_ID,
+ },
+ {
+ .compatible = "st,lsm9ds1-imu",
+ .data = (void *)ST_LSM9DS1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -89,6 +101,9 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_ASM330LHH_DEV_NAME, ST_ASM330LHH_ID },
{ ST_LSM6DSOX_DEV_NAME, ST_LSM6DSOX_ID },
{ ST_LSM6DSR_DEV_NAME, ST_LSM6DSR_ID },
+ { ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
+ { ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
+ { ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e3fd00b595d0..08d7e1ef2186 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -303,6 +303,7 @@ config MAX44000
config MAX44009
tristate "MAX44009 Ambient Light Sensor"
depends on I2C
+ select REGMAP_I2C
help
Say Y here if you want to build support for Maxim Integrated's
MAX44009 ambient light sensor device.
@@ -310,6 +311,16 @@ config MAX44009
To compile this driver as a module, choose M here:
the module will be called max44009.
+config NOA1305
+ tristate "ON Semiconductor NOA1305 ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build support for the ON Semiconductor
+ NOA1305 ambient light sensor.
+
+ To compile this driver as a module, choose M here:
+ the module will be called noa1305.
+
config OPT3001
tristate "Texas Instruments OPT3001 Light Sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index e40794fbb435..00d1f9b98f39 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_LV0104CS) += lv0104cs.o
obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_MAX44009) += max44009.o
+obj-$(CONFIG_NOA1305) += noa1305.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b09b8b60bd83..c5dfb9a6b5a1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1135,5 +1135,5 @@ static struct i2c_driver apds9960_driver = {
module_i2c_driver(apds9960_driver);
MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
-MODULE_DESCRIPTION("ADPS9960 Gesture/RGB/ALS/Proximity sensor");
+MODULE_DESCRIPTION("APDS9960 Gesture/RGB/ALS/Proximity sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 50f3438c2b49..0443861ba1ec 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -101,15 +101,16 @@ static int cm3323_init(struct iio_dev *indio_dev)
return 0;
}
-static void cm3323_disable(struct iio_dev *indio_dev)
+static void cm3323_disable(void *data)
{
int ret;
- struct cm3323_data *data = iio_priv(indio_dev);
+ struct iio_dev *indio_dev = data;
+ struct cm3323_data *cm_data = iio_priv(indio_dev);
- ret = i2c_smbus_write_word_data(data->client, CM3323_CMD_CONF,
+ ret = i2c_smbus_write_word_data(cm_data->client, CM3323_CMD_CONF,
CM3323_CONF_SD_BIT);
if (ret < 0)
- dev_err(&data->client->dev, "Error writing reg_conf\n");
+ dev_err(&cm_data->client->dev, "Error writing reg_conf\n");
}
static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
@@ -243,26 +244,11 @@ static int cm3323_probe(struct i2c_client *client,
return ret;
}
- ret = iio_device_register(indio_dev);
- if (ret < 0) {
- dev_err(&client->dev, "failed to register iio dev\n");
- goto err_init;
- }
-
- return 0;
-err_init:
- cm3323_disable(indio_dev);
- return ret;
-}
-
-static int cm3323_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- cm3323_disable(indio_dev);
+ ret = devm_add_action_or_reset(&client->dev, cm3323_disable, indio_dev);
+ if (ret < 0)
+ return ret;
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id cm3323_id[] = {
@@ -276,7 +262,6 @@ static struct i2c_driver cm3323_driver = {
.name = CM3323_DRV_NAME,
},
.probe = cm3323_probe,
- .remove = cm3323_remove,
.id_table = cm3323_id,
};
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 7702c2bcbcfa..1019d625adb1 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -646,18 +646,18 @@ static int cm36651_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
cm36651->client = client;
- cm36651->ps_client = i2c_new_dummy(client->adapter,
+ cm36651->ps_client = i2c_new_dummy_device(client->adapter,
CM36651_I2C_ADDR_PS);
- if (!cm36651->ps_client) {
+ if (IS_ERR(cm36651->ps_client)) {
dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(cm36651->ps_client);
goto error_disable_reg;
}
- cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
- if (!cm36651->ara_client) {
+ cm36651->ara_client = i2c_new_dummy_device(client->adapter, CM36651_ARA);
+ if (IS_ERR(cm36651->ara_client)) {
dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
- ret = -ENODEV;
+ ret = PTR_ERR(cm36651->ara_client);
goto error_i2c_unregister_ps;
}
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 308ee6ff2e22..44313a009928 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -42,7 +42,7 @@ static int cros_ec_light_prox_read(struct iio_dev *indio_dev,
struct cros_ec_light_prox_state *st = iio_priv(indio_dev);
u16 data = 0;
s64 val64;
- int ret = IIO_VAL_INT;
+ int ret;
int idx = chan->scan_index;
mutex_lock(&st->core.cmd_lock);
@@ -50,23 +50,22 @@ static int cros_ec_light_prox_read(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->type == IIO_PROXIMITY) {
- if (cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
- (s16 *)&data) < 0) {
- ret = -EIO;
+ ret = cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
+ (s16 *)&data);
+ if (ret)
break;
- }
*val = data;
+ ret = IIO_VAL_INT;
} else {
ret = -EINVAL;
}
break;
case IIO_CHAN_INFO_PROCESSED:
if (chan->type == IIO_LIGHT) {
- if (cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
- (s16 *)&data) < 0) {
- ret = -EIO;
+ ret = cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
+ (s16 *)&data);
+ if (ret)
break;
- }
/*
* The data coming from the light sensor is
* pre-processed and represents the ambient light
@@ -82,15 +81,16 @@ static int cros_ec_light_prox_read(struct iio_dev *indio_dev,
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
st->core.param.sensor_offset.flags = 0;
- if (cros_ec_motion_send_host_cmd(&st->core, 0)) {
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret)
break;
- }
/* Save values */
- st->core.calib[0] = st->core.resp->sensor_offset.offset[0];
+ st->core.calib[0].offset =
+ st->core.resp->sensor_offset.offset[0];
- *val = st->core.calib[idx];
+ *val = st->core.calib[idx].offset;
+ ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_CALIBSCALE:
/*
@@ -101,10 +101,9 @@ static int cros_ec_light_prox_read(struct iio_dev *indio_dev,
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
- if (cros_ec_motion_send_host_cmd(&st->core, 0)) {
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret)
break;
- }
val64 = st->core.resp->sensor_range.ret;
*val = val64 >> 16;
@@ -127,28 +126,27 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct cros_ec_light_prox_state *st = iio_priv(indio_dev);
- int ret = 0;
+ int ret;
int idx = chan->scan_index;
mutex_lock(&st->core.cmd_lock);
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
- st->core.calib[idx] = val;
+ st->core.calib[idx].offset = val;
/* Send to EC for each axis, even if not complete */
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
st->core.param.sensor_offset.flags = MOTION_SENSE_SET_OFFSET;
- st->core.param.sensor_offset.offset[0] = st->core.calib[0];
+ st->core.param.sensor_offset.offset[0] =
+ st->core.calib[0].offset;
st->core.param.sensor_offset.temp =
EC_MOTION_SENSE_INVALID_CALIB_TEMP;
- if (cros_ec_motion_send_host_cmd(&st->core, 0))
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
break;
case IIO_CHAN_INFO_CALIBSCALE:
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
st->core.param.sensor_range.data = (val << 16) | (val2 / 100);
- if (cros_ec_motion_send_host_cmd(&st->core, 0))
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
break;
default:
ret = cros_ec_sensors_core_write(&st->core, chan, val, val2,
@@ -164,6 +162,7 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_light_prox_info = {
.read_raw = &cros_ec_light_prox_read,
.write_raw = &cros_ec_light_prox_write,
+ .read_avail = &cros_ec_sensors_core_read_avail,
};
static int cros_ec_light_prox_probe(struct platform_device *pdev)
@@ -198,6 +197,8 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_FREQUENCY);
+ channel->info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
channel->scan_type.shift = 0;
@@ -205,8 +206,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
channel->ext_info = cros_ec_sensors_ext_info;
channel->scan_type.sign = 'u';
- state->core.calib[0] = 0;
-
/* Sensor specific */
switch (state->core.type) {
case MOTIONSENSE_TYPE_LIGHT:
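
The cros_ec_light_prox hunks wire up .read_avail and advertise the available sampling frequencies through info_mask_shared_by_all_available. The real implementation lives in the shared cros_ec sensors core; the sketch below only illustrates the generic IIO read_avail contract, with hypothetical foo_* names and made-up frequency values:

#include <linux/iio/iio.h>

static int foo_read_avail(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals, int *type, int *length,
			  long mask)
{
	/* illustrative list only; real values come from the EC */
	static const int foo_sample_freqs[] = { 0, 10, 50, 100 };

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		*vals = foo_sample_freqs;
		*type = IIO_VAL_INT;
		*length = ARRAY_SIZE(foo_sample_freqs);
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}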
diff --git a/drivers/iio/light/noa1305.c b/drivers/iio/light/noa1305.c
new file mode 100644
index 000000000000..5ebfbc52f541
--- /dev/null
+++ b/drivers/iio/light/noa1305.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for ON Semiconductor NOA1305 ambient light sensor
+ *
+ * Copyright (C) 2016 Emcraft Systems
+ * Copyright (C) 2019 Collabora Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define NOA1305_REG_POWER_CONTROL 0x0
+#define NOA1305_POWER_CONTROL_DOWN 0x00
+#define NOA1305_POWER_CONTROL_ON 0x08
+#define NOA1305_REG_RESET 0x1
+#define NOA1305_RESET_RESET 0x10
+#define NOA1305_REG_INTEGRATION_TIME 0x2
+#define NOA1305_INTEGR_TIME_800MS 0x00
+#define NOA1305_INTEGR_TIME_400MS 0x01
+#define NOA1305_INTEGR_TIME_200MS 0x02
+#define NOA1305_INTEGR_TIME_100MS 0x03
+#define NOA1305_INTEGR_TIME_50MS 0x04
+#define NOA1305_INTEGR_TIME_25MS 0x05
+#define NOA1305_INTEGR_TIME_12_5MS 0x06
+#define NOA1305_INTEGR_TIME_6_25MS 0x07
+#define NOA1305_REG_INT_SELECT 0x3
+#define NOA1305_INT_SEL_ACTIVE_HIGH 0x01
+#define NOA1305_INT_SEL_ACTIVE_LOW 0x02
+#define NOA1305_INT_SEL_INACTIVE 0x03
+#define NOA1305_REG_INT_THRESH_LSB 0x4
+#define NOA1305_REG_INT_THRESH_MSB 0x5
+#define NOA1305_REG_ALS_DATA_LSB 0x6
+#define NOA1305_REG_ALS_DATA_MSB 0x7
+#define NOA1305_REG_DEVICE_ID_LSB 0x8
+#define NOA1305_REG_DEVICE_ID_MSB 0x9
+
+#define NOA1305_DEVICE_ID 0x0519
+#define NOA1305_DRIVER_NAME "noa1305"
+
+struct noa1305_priv {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator *vin_reg;
+};
+
+static int noa1305_measure(struct noa1305_priv *priv)
+{
+ __le16 data;
+ int ret;
+
+ ret = regmap_bulk_read(priv->regmap, NOA1305_REG_ALS_DATA_LSB, &data,
+ 2);
+ if (ret < 0)
+ return ret;
+
+ return le16_to_cpu(data);
+}
+
+static int noa1305_scale(struct noa1305_priv *priv, int *val, int *val2)
+{
+ int data;
+ int ret;
+
+ ret = regmap_read(priv->regmap, NOA1305_REG_INTEGRATION_TIME, &data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Lux = count / (<Integration Constant> * <Integration Time>)
+ *
+ * Integration Constant = 7.7
+ * Integration Time in Seconds
+ */
+ switch (data) {
+ case NOA1305_INTEGR_TIME_800MS:
+ *val = 100;
+ *val2 = 77 * 8;
+ break;
+ case NOA1305_INTEGR_TIME_400MS:
+ *val = 100;
+ *val2 = 77 * 4;
+ break;
+ case NOA1305_INTEGR_TIME_200MS:
+ *val = 100;
+ *val2 = 77 * 2;
+ break;
+ case NOA1305_INTEGR_TIME_100MS:
+ *val = 100;
+ *val2 = 77;
+ break;
+ case NOA1305_INTEGR_TIME_50MS:
+ *val = 1000;
+ *val2 = 77 * 5;
+ break;
+ case NOA1305_INTEGR_TIME_25MS:
+ *val = 10000;
+ *val2 = 77 * 25;
+ break;
+ case NOA1305_INTEGR_TIME_12_5MS:
+ *val = 100000;
+ *val2 = 77 * 125;
+ break;
+ case NOA1305_INTEGR_TIME_6_25MS:
+ *val = 1000000;
+ *val2 = 77 * 625;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static const struct iio_chan_spec noa1305_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static int noa1305_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret = -EINVAL;
+ struct noa1305_priv *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = noa1305_measure(priv);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return noa1305_scale(priv, val, val2);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_info noa1305_info = {
+ .read_raw = noa1305_read_raw,
+};
+
+static bool noa1305_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NOA1305_REG_POWER_CONTROL:
+ case NOA1305_REG_RESET:
+ case NOA1305_REG_INTEGRATION_TIME:
+ case NOA1305_REG_INT_SELECT:
+ case NOA1305_REG_INT_THRESH_LSB:
+ case NOA1305_REG_INT_THRESH_MSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config noa1305_regmap_config = {
+ .name = NOA1305_DRIVER_NAME,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NOA1305_REG_DEVICE_ID_MSB,
+ .writeable_reg = noa1305_writable_reg,
+};
+
+static void noa1305_reg_remove(void *data)
+{
+ struct noa1305_priv *priv = data;
+
+ regulator_disable(priv->vin_reg);
+}
+
+static int noa1305_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct noa1305_priv *priv;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ __le16 data;
+ unsigned int dev_id;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &noa1305_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Regmap initialization failed.\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv = iio_priv(indio_dev);
+
+ priv->vin_reg = devm_regulator_get(&client->dev, "vin");
+ if (IS_ERR(priv->vin_reg)) {
+ dev_err(&client->dev, "get regulator vin failed\n");
+ return PTR_ERR(priv->vin_reg);
+ }
+
+ ret = regulator_enable(priv->vin_reg);
+ if (ret) {
+ dev_err(&client->dev, "enable regulator vin failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&client->dev, noa1305_reg_remove, priv);
+ if (ret) {
+ dev_err(&client->dev, "addition of devm action failed\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ priv->client = client;
+ priv->regmap = regmap;
+
+ ret = regmap_bulk_read(regmap, NOA1305_REG_DEVICE_ID_LSB, &data, 2);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID reading failed: %d\n", ret);
+ return ret;
+ }
+
+ dev_id = le16_to_cpu(data);
+ if (dev_id != NOA1305_DEVICE_ID) {
+ dev_err(&client->dev, "Unknown device ID: 0x%x\n", dev_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_write(regmap, NOA1305_REG_POWER_CONTROL,
+ NOA1305_POWER_CONTROL_ON);
+ if (ret < 0) {
+ dev_err(&client->dev, "Enabling power control failed\n");
+ return ret;
+ }
+
+ ret = regmap_write(regmap, NOA1305_REG_RESET, NOA1305_RESET_RESET);
+ if (ret < 0) {
+ dev_err(&client->dev, "Device reset failed\n");
+ return ret;
+ }
+
+ ret = regmap_write(regmap, NOA1305_REG_INTEGRATION_TIME,
+ NOA1305_INTEGR_TIME_800MS);
+ if (ret < 0) {
+ dev_err(&client->dev, "Setting integration time failed\n");
+ return ret;
+ }
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &noa1305_info;
+ indio_dev->channels = noa1305_channels;
+ indio_dev->num_channels = ARRAY_SIZE(noa1305_channels);
+ indio_dev->name = NOA1305_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret)
+ dev_err(&client->dev, "registering device failed\n");
+
+ return ret;
+}
+
+static const struct of_device_id noa1305_of_match[] = {
+ { .compatible = "onnn,noa1305" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, noa1305_of_match);
+
+static const struct i2c_device_id noa1305_ids[] = {
+ { "noa1305", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, noa1305_ids);
+
+static struct i2c_driver noa1305_driver = {
+ .driver = {
+ .name = NOA1305_DRIVER_NAME,
+ .of_match_table = noa1305_of_match,
+ },
+ .probe = noa1305_probe,
+ .id_table = noa1305_ids,
+};
+
+module_i2c_driver(noa1305_driver);
+
+MODULE_AUTHOR("Sergei Miroshnichenko <sergeimir@emcraft.com>");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@collabora.com");
+MODULE_DESCRIPTION("ON Semiconductor NOA1305 ambient light sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 6579d2418814..982bba0c54e7 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -1261,7 +1261,7 @@ static int si1145_probe_trigger(struct iio_dev *indio_dev)
return ret;
}
- ret = iio_trigger_register(trig);
+ ret = devm_iio_trigger_register(&client->dev, trig);
if (ret)
return ret;
@@ -1271,16 +1271,6 @@ static int si1145_probe_trigger(struct iio_dev *indio_dev)
return 0;
}
-static void si1145_remove_trigger(struct iio_dev *indio_dev)
-{
- struct si1145_data *data = iio_priv(indio_dev);
-
- if (data->trig) {
- iio_trigger_unregister(data->trig);
- data->trig = NULL;
- }
-}
-
static int si1145_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1332,7 +1322,8 @@ static int si1145_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(&client->dev,
+ indio_dev, NULL,
si1145_trigger_handler, &si1145_buffer_setup_ops);
if (ret < 0)
return ret;
@@ -1340,23 +1331,12 @@ static int si1145_probe(struct i2c_client *client,
if (client->irq) {
ret = si1145_probe_trigger(indio_dev);
if (ret < 0)
- goto error_free_buffer;
+ return ret;
} else {
dev_info(&client->dev, "no irq, using polling\n");
}
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_free_trigger;
-
- return 0;
-
-error_free_trigger:
- si1145_remove_trigger(indio_dev);
-error_free_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id si1145_ids[] = {
@@ -1371,23 +1351,11 @@ static const struct i2c_device_id si1145_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, si1145_ids);
-static int si1145_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- si1145_remove_trigger(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
-}
-
static struct i2c_driver si1145_driver = {
.driver = {
.name = "si1145",
},
.probe = si1145_probe,
- .remove = si1145_remove,
.id_table = si1145_ids,
};
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index b955183edfe8..185c24a75ae6 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -679,9 +679,18 @@ static const struct acpi_device_id stk3310_acpi_id[] = {
MODULE_DEVICE_TABLE(acpi, stk3310_acpi_id);
+static const struct of_device_id stk3310_of_match[] = {
+ { .compatible = "sensortek,stk3310", },
+ { .compatible = "sensortek,stk3311", },
+ { .compatible = "sensortek,stk3335", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stk3310_of_match);
+
static struct i2c_driver stk3310_driver = {
.driver = {
.name = "stk3310",
+ .of_match_table = stk3310_of_match,
.pm = STK3310_PM_OPS,
.acpi_match_table = ACPI_PTR(stk3310_acpi_id),
},
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 83cece921843..be37fcbd4654 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -134,6 +134,12 @@ enum {
TSL2772_CHIP_SUSPENDED = 2
};
+enum {
+ TSL2772_SUPPLY_VDD = 0,
+ TSL2772_SUPPLY_VDDIO = 1,
+ TSL2772_NUM_SUPPLIES = 2
+};
+
/* Per-device data */
struct tsl2772_als_info {
u16 als_ch0;
@@ -161,8 +167,7 @@ struct tsl2772_chip {
struct mutex prox_mutex;
struct mutex als_mutex;
struct i2c_client *client;
- struct regulator *vdd_supply;
- struct regulator *vddio_supply;
+ struct regulator_bulk_data supplies[TSL2772_NUM_SUPPLIES];
u16 prox_data;
struct tsl2772_als_info als_cur_info;
struct tsl2772_settings settings;
@@ -697,46 +702,7 @@ static void tsl2772_disable_regulators_action(void *_data)
{
struct tsl2772_chip *chip = _data;
- regulator_disable(chip->vdd_supply);
- regulator_disable(chip->vddio_supply);
-}
-
-static int tsl2772_enable_regulator(struct tsl2772_chip *chip,
- struct regulator *regulator)
-{
- int ret;
-
- ret = regulator_enable(regulator);
- if (ret < 0) {
- dev_err(&chip->client->dev, "Failed to enable regulator: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static struct regulator *tsl2772_get_regulator(struct tsl2772_chip *chip,
- char *name)
-{
- struct regulator *regulator;
- int ret;
-
- regulator = devm_regulator_get(&chip->client->dev, name);
- if (IS_ERR(regulator)) {
- if (PTR_ERR(regulator) != -EPROBE_DEFER)
- dev_err(&chip->client->dev,
- "Failed to get %s regulator %d\n",
- name, (int)PTR_ERR(regulator));
-
- return regulator;
- }
-
- ret = tsl2772_enable_regulator(chip, regulator);
- if (ret < 0)
- return ERR_PTR(ret);
-
- return regulator;
+ regulator_bulk_disable(ARRAY_SIZE(chip->supplies), chip->supplies);
}
static int tsl2772_chip_on(struct iio_dev *indio_dev)
@@ -860,6 +826,13 @@ static int tsl2772_chip_off(struct iio_dev *indio_dev)
return tsl2772_write_control_reg(chip, 0x00);
}
+static void tsl2772_chip_off_action(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ tsl2772_chip_off(indio_dev);
+}
+
/**
* tsl2772_invoke_change - power cycle the device to implement the user
* parameters
@@ -1797,20 +1770,32 @@ static int tsl2772_probe(struct i2c_client *clientp,
chip->client = clientp;
i2c_set_clientdata(clientp, indio_dev);
- chip->vddio_supply = tsl2772_get_regulator(chip, "vddio");
- if (IS_ERR(chip->vddio_supply))
- return PTR_ERR(chip->vddio_supply);
+ chip->supplies[TSL2772_SUPPLY_VDD].supply = "vdd";
+ chip->supplies[TSL2772_SUPPLY_VDDIO].supply = "vddio";
+
+ ret = devm_regulator_bulk_get(&clientp->dev,
+ ARRAY_SIZE(chip->supplies),
+ chip->supplies);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&clientp->dev,
+ "Failed to get regulators: %d\n",
+ ret);
+
+ return ret;
+ }
- chip->vdd_supply = tsl2772_get_regulator(chip, "vdd");
- if (IS_ERR(chip->vdd_supply)) {
- regulator_disable(chip->vddio_supply);
- return PTR_ERR(chip->vdd_supply);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->supplies), chip->supplies);
+ if (ret < 0) {
+ dev_err(&clientp->dev, "Failed to enable regulators: %d\n",
+ ret);
+ return ret;
}
- ret = devm_add_action(&clientp->dev, tsl2772_disable_regulators_action,
- chip);
+ ret = devm_add_action_or_reset(&clientp->dev,
+ tsl2772_disable_regulators_action,
+ chip);
if (ret < 0) {
- tsl2772_disable_regulators_action(chip);
dev_err(&clientp->dev, "Failed to setup regulator cleanup action %d\n",
ret);
return ret;
@@ -1877,15 +1862,13 @@ static int tsl2772_probe(struct i2c_client *clientp,
if (ret < 0)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret) {
- tsl2772_chip_off(indio_dev);
- dev_err(&clientp->dev,
- "%s: iio registration failed\n", __func__);
+ ret = devm_add_action_or_reset(&clientp->dev,
+ tsl2772_chip_off_action,
+ indio_dev);
+ if (ret < 0)
return ret;
- }
- return 0;
+ return devm_iio_device_register(&clientp->dev, indio_dev);
}
static int tsl2772_suspend(struct device *dev)
@@ -1895,8 +1878,7 @@ static int tsl2772_suspend(struct device *dev)
int ret;
ret = tsl2772_chip_off(indio_dev);
- regulator_disable(chip->vdd_supply);
- regulator_disable(chip->vddio_supply);
+ regulator_bulk_disable(ARRAY_SIZE(chip->supplies), chip->supplies);
return ret;
}
@@ -1907,32 +1889,15 @@ static int tsl2772_resume(struct device *dev)
struct tsl2772_chip *chip = iio_priv(indio_dev);
int ret;
- ret = tsl2772_enable_regulator(chip, chip->vddio_supply);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->supplies), chip->supplies);
if (ret < 0)
return ret;
- ret = tsl2772_enable_regulator(chip, chip->vdd_supply);
- if (ret < 0) {
- regulator_disable(chip->vddio_supply);
- return ret;
- }
-
usleep_range(TSL2772_BOOT_MIN_SLEEP_TIME, TSL2772_BOOT_MAX_SLEEP_TIME);
return tsl2772_chip_on(indio_dev);
}
-static int tsl2772_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- tsl2772_chip_off(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static const struct i2c_device_id tsl2772_idtable[] = {
{ "tsl2571", tsl2571 },
{ "tsl2671", tsl2671 },
@@ -1979,7 +1944,6 @@ static struct i2c_driver tsl2772_driver = {
},
.id_table = tsl2772_idtable,
.probe = tsl2772_probe,
- .remove = tsl2772_remove,
};
module_i2c_driver(tsl2772_driver);
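
The tsl2772 hunks replace two individually managed regulators with a regulator_bulk_data array, so get, enable and disable each become a single call. A minimal sketch of that flow with hypothetical foo_* names (the supply names must still match the device-tree bindings):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

#define FOO_NUM_SUPPLIES	2

struct foo_chip {
	struct regulator_bulk_data supplies[FOO_NUM_SUPPLIES];
};

static int foo_power_on(struct device *dev, struct foo_chip *chip)
{
	int ret;

	chip->supplies[0].supply = "vdd";
	chip->supplies[1].supply = "vddio";

	ret = devm_regulator_bulk_get(dev, FOO_NUM_SUPPLIES, chip->supplies);
	if (ret)
		return ret;		/* may be -EPROBE_DEFER */

	/* enables in order; on failure, already-enabled supplies are undone */
	return regulator_bulk_enable(FOO_NUM_SUPPLIES, chip->supplies);
}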
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index a3138e1b5803..0be553ad5989 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -158,10 +158,10 @@ static int veml6070_probe(struct i2c_client *client,
indio_dev->name = VEML6070_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->client2 = i2c_new_dummy(client->adapter, VEML6070_ADDR_DATA_LSB);
- if (!data->client2) {
+ data->client2 = i2c_new_dummy_device(client->adapter, VEML6070_ADDR_DATA_LSB);
+ if (IS_ERR(data->client2)) {
dev_err(&client->dev, "i2c device for second chip address failed\n");
- return -ENODEV;
+ return PTR_ERR(data->client2);
}
data->config = VEML6070_IT_10 | VEML6070_COMMAND_RSRVD |
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 7de10281ad9e..425cdd07b4e5 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -53,7 +53,7 @@
#define MMC35240_CTRL1_BW_SHIFT 0
#define MMC35240_WAIT_CHARGE_PUMP 50000 /* us */
-#define MMC53240_WAIT_SET_RESET 1000 /* us */
+#define MMC35240_WAIT_SET_RESET 1000 /* us */
/*
* Memsic OTP process code piece is put here for reference:
@@ -225,7 +225,7 @@ static int mmc35240_init(struct mmc35240_data *data)
ret = mmc35240_hw_set(data, true);
if (ret < 0)
return ret;
- usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
+ usleep_range(MMC35240_WAIT_SET_RESET, MMC35240_WAIT_SET_RESET + 1);
ret = mmc35240_hw_set(data, false);
if (ret < 0)
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index d69ef9b2a731..204b285725c8 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -22,6 +22,7 @@
#define LIS2MDL_MAGN_DEV_NAME "lis2mdl"
#define LSM9DS1_MAGN_DEV_NAME "lsm9ds1_magn"
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
int st_magn_common_probe(struct iio_dev *indio_dev);
void st_magn_common_remove(struct iio_dev *indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 11d7806655bc..bb425c167a96 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -32,39 +32,32 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *mdata = iio_priv(indio_dev);
-
- mdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (mdata->buffer_data == NULL) {
- err = -ENOMEM;
- goto allocate_memory_error;
- }
err = iio_triggered_buffer_postenable(indio_dev);
if (err < 0)
- goto st_magn_buffer_postenable_error;
+ return err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_magn_buffer_predisable;
- return st_sensors_set_enable(indio_dev, true);
+ return 0;
-st_magn_buffer_postenable_error:
- kfree(mdata->buffer_data);
-allocate_memory_error:
+st_magn_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
return err;
}
static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
{
- int err;
- struct st_sensor_data *mdata = iio_priv(indio_dev);
+ int err, err2;
err = st_sensors_set_enable(indio_dev, false);
- if (err < 0)
- goto st_magn_buffer_predisable_error;
- err = iio_triggered_buffer_predisable(indio_dev);
+ err2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!err)
+ err = err2;
-st_magn_buffer_predisable_error:
- kfree(mdata->buffer_data);
return err;
}
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 2f7a1dbcdeb3..a3a268ee2896 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -470,28 +469,41 @@ static const struct iio_trigger_ops st_magn_trigger_ops = {
#define ST_MAGN_TRIGGER_OPS NULL
#endif
+/*
+ * st_magn_get_settings() - get sensor settings from device name
+ * @name: device name buffer reference.
+ *
+ * Return: valid reference on success, NULL otherwise.
+ */
+const struct st_sensor_settings *st_magn_get_settings(const char *name)
+{
+ int index = st_sensors_get_settings_index(name,
+ st_magn_sensors_settings,
+ ARRAY_SIZE(st_magn_sensors_settings));
+ if (index < 0)
+ return NULL;
+
+ return &st_magn_sensors_settings[index];
+}
+EXPORT_SYMBOL(st_magn_get_settings);
+
int st_magn_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *mdata = iio_priv(indio_dev);
- int irq = mdata->get_irq_data_ready(indio_dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &magn_info;
- mutex_init(&mdata->tb.buf_lock);
err = st_sensors_power_enable(indio_dev);
if (err)
return err;
- err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_magn_sensors_settings),
- st_magn_sensors_settings);
+ err = st_sensors_verify_id(indio_dev);
if (err < 0)
goto st_magn_power_off;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
- mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
@@ -507,7 +519,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
if (err < 0)
goto st_magn_power_off;
- if (irq > 0) {
+ if (mdata->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_MAGN_TRIGGER_OPS);
if (err < 0)
@@ -524,7 +536,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
return 0;
st_magn_device_register_error:
- if (irq > 0)
+ if (mdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_magn_probe_trigger_error:
st_magn_deallocate_ring(indio_dev);
@@ -542,7 +554,7 @@ void st_magn_common_remove(struct iio_dev *indio_dev)
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (mdata->get_irq_data_ready(indio_dev) > 0)
+ if (mdata->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_magn_deallocate_ring(indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 4d014fd1aeb0..fdba480a12be 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -55,21 +55,33 @@ MODULE_DEVICE_TABLE(of, st_magn_of_match);
#endif
static int st_magn_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *mdata;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&client->dev, st_magn_of_match,
+ client->name, sizeof(client->name));
+
+ settings = st_magn_get_settings(client->name);
+ if (!settings) {
+ dev_err(&client->dev, "device name %s not recognized.\n",
+ client->name);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mdata));
if (!indio_dev)
return -ENOMEM;
mdata = iio_priv(indio_dev);
- st_sensors_of_name_probe(&client->dev, st_magn_of_match,
- client->name, sizeof(client->name));
+ mdata->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_i2c_configure(indio_dev, client, mdata);
+ err = st_sensors_i2c_configure(indio_dev, client);
+ if (err < 0)
+ return err;
err = st_magn_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 0d47070611b1..fbf909bde841 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -51,19 +51,31 @@ MODULE_DEVICE_TABLE(of, st_magn_of_match);
static int st_magn_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *mdata;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
+ spi->modalias, sizeof(spi->modalias));
+
+ settings = st_magn_get_settings(spi->modalias);
+ if (!settings) {
+ dev_err(&spi->dev, "device name %s not recognized.\n",
+ spi->modalias);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*mdata));
if (!indio_dev)
return -ENOMEM;
mdata = iio_priv(indio_dev);
+ mdata->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
- spi->modalias, sizeof(spi->modalias));
- st_sensors_spi_configure(indio_dev, spi, mdata);
+ err = st_sensors_spi_configure(indio_dev, spi);
+ if (err < 0)
+ return err;
err = st_magn_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index ebc7c72a5e36..4cac0173db8b 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -26,6 +26,17 @@ config DS1803
To compile this driver as a module, choose M here: the
module will be called ds1803.
+config MAX5432
+ tristate "Maxim MAX5432-MAX5435 Digital Potentiometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Maxim
+ MAX5432, MAX5433, MAX5434 and MAX5435 digital
+ potentiometer chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max5432.
+
config MAX5481
tristate "Maxim MAX5481-MAX5484 Digital Potentiometer driver"
depends on SPI
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 8ff55138cf12..091adf3cdd0b 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD5272) += ad5272.o
obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MAX5432) += max5432.o
obj-$(CONFIG_MAX5481) += max5481.o
obj-$(CONFIG_MAX5487) += max5487.o
obj-$(CONFIG_MCP4018) += mcp4018.o
diff --git a/drivers/iio/potentiometer/max5432.c b/drivers/iio/potentiometer/max5432.c
new file mode 100644
index 000000000000..641b1821fdf6
--- /dev/null
+++ b/drivers/iio/potentiometer/max5432.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Maxim Integrated MAX5432-MAX5435 digital potentiometer driver
+ * Copyright (C) 2019 Martin Kaiser <martin@kaiser.cx>
+ *
+ * Datasheet:
+ * https://datasheets.maximintegrated.com/en/ds/MAX5432-MAX5435.pdf
+ */
+
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+/* All chip variants have 32 wiper positions. */
+#define MAX5432_MAX_POS 31
+
+#define MAX5432_OHM_50K (50 * 1000)
+#define MAX5432_OHM_100K (100 * 1000)
+
+/* Update the volatile (currently active) setting. */
+#define MAX5432_CMD_VREG 0x11
+
+struct max5432_data {
+ struct i2c_client *client;
+ unsigned long ohm;
+};
+
+static const struct iio_chan_spec max5432_channels[] = {
+ {
+ .type = IIO_RESISTANCE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .address = MAX5432_CMD_VREG,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static int max5432_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max5432_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ if (unlikely(data->ohm > INT_MAX))
+ return -ERANGE;
+
+ *val = data->ohm;
+ *val2 = MAX5432_MAX_POS;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int max5432_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max5432_data *data = iio_priv(indio_dev);
+ u8 data_byte;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val < 0 || val > MAX5432_MAX_POS)
+ return -EINVAL;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ /* Wiper position is in bits D7-D3. (D2-D0 are don't care bits.) */
+ data_byte = val << 3;
+ return i2c_smbus_write_byte_data(data->client, chan->address,
+ data_byte);
+}
+
+static const struct iio_info max5432_info = {
+ .read_raw = max5432_read_raw,
+ .write_raw = max5432_write_raw,
+};
+
+static int max5432_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct max5432_data *data;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(struct max5432_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->ohm = (unsigned long)of_device_get_match_data(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &max5432_info;
+ indio_dev->channels = max5432_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max5432_channels);
+ indio_dev->name = client->name;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id max5432_dt_ids[] = {
+ { .compatible = "maxim,max5432", .data = (void *)MAX5432_OHM_50K },
+ { .compatible = "maxim,max5433", .data = (void *)MAX5432_OHM_100K },
+ { .compatible = "maxim,max5434", .data = (void *)MAX5432_OHM_50K },
+ { .compatible = "maxim,max5435", .data = (void *)MAX5432_OHM_100K },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max5432_dt_ids);
+
+static struct i2c_driver max5432_driver = {
+ .driver = {
+ .name = "max5432",
+ .of_match_table = of_match_ptr(max5432_dt_ids),
+ },
+ .probe = max5432_probe,
+};
+
+module_i2c_driver(max5432_driver);
+
+MODULE_AUTHOR("Martin Kaiser <martin@kaiser.cx>");
+MODULE_DESCRIPTION("max5432-max5435 digital potentiometers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 034ce98d6e97..70148624db64 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -39,26 +39,29 @@ static int cros_ec_baro_read(struct iio_dev *indio_dev,
{
struct cros_ec_baro_state *st = iio_priv(indio_dev);
u16 data = 0;
- int ret = IIO_VAL_INT;
+ int ret;
int idx = chan->scan_index;
mutex_lock(&st->core.cmd_lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
- (s16 *)&data) < 0)
- ret = -EIO;
+ ret = cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
+ (s16 *)&data);
+ if (ret)
+ break;
+
*val = data;
+ ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
- if (cros_ec_motion_send_host_cmd(&st->core, 0)) {
- ret = -EIO;
+ ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+ if (ret)
break;
- }
+
*val = st->core.resp->sensor_range.ret;
/* scale * in_pressure_raw --> kPa */
@@ -152,8 +155,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
channel->ext_info = cros_ec_sensors_ext_info;
channel->scan_type.sign = 'u';
- state->core.calib[0] = 0;
-
/* Sensor specific */
switch (state->core.type) {
case MOTIONSENSE_TYPE_BARO:
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index f00102577fd5..026ba15ef68f 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -243,10 +243,10 @@ static int hp03_probe(struct i2c_client *client,
 * which has its dedicated I2C address and contains
* the calibration constants for the sensor.
*/
- priv->eeprom_client = i2c_new_dummy(client->adapter, HP03_EEPROM_ADDR);
- if (!priv->eeprom_client) {
+ priv->eeprom_client = i2c_new_dummy_device(client->adapter, HP03_EEPROM_ADDR);
+ if (IS_ERR(priv->eeprom_client)) {
dev_err(dev, "New EEPROM I2C device failed\n");
- return -ENODEV;
+ return PTR_ERR(priv->eeprom_client);
}
priv->eeprom_regmap = regmap_init_i2c(priv->eeprom_client,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 6a720cfb5686..c2e47a6c3118 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -41,6 +41,7 @@ static const struct st_sensors_platform_data default_press_pdata = {
.drdy_int_pin = 1,
};
+const struct st_sensor_settings *st_press_get_settings(const char *name);
int st_press_common_probe(struct iio_dev *indio_dev);
void st_press_common_remove(struct iio_dev *indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index 4566e08a64a1..418dbf9e6e1e 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -29,52 +29,39 @@ int st_press_trig_set_state(struct iio_trigger *trig, bool state)
return st_sensors_set_dataready_irq(indio_dev, state);
}
-static int st_press_buffer_preenable(struct iio_dev *indio_dev)
-{
- return st_sensors_set_enable(indio_dev, true);
-}
-
static int st_press_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
- struct st_sensor_data *press_data = iio_priv(indio_dev);
-
- press_data->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (press_data->buffer_data == NULL) {
- err = -ENOMEM;
- goto allocate_memory_error;
- }
err = iio_triggered_buffer_postenable(indio_dev);
if (err < 0)
- goto st_press_buffer_postenable_error;
+ return err;
- return err;
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_press_buffer_predisable;
-st_press_buffer_postenable_error:
- kfree(press_data->buffer_data);
-allocate_memory_error:
+ return 0;
+
+st_press_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
return err;
}
static int st_press_buffer_predisable(struct iio_dev *indio_dev)
{
- int err;
- struct st_sensor_data *press_data = iio_priv(indio_dev);
-
- err = iio_triggered_buffer_predisable(indio_dev);
- if (err < 0)
- goto st_press_buffer_predisable_error;
+ int err, err2;
err = st_sensors_set_enable(indio_dev, false);
-st_press_buffer_predisable_error:
- kfree(press_data->buffer_data);
+ err2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!err)
+ err = err2;
+
return err;
}
static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
- .preenable = &st_press_buffer_preenable,
.postenable = &st_press_buffer_postenable,
.predisable = &st_press_buffer_predisable,
};
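
Both the st_magn and st_press predisable paths now run every teardown step even when an earlier one fails, and report the first error. A small sketch of that idiom with hypothetical helpers standing in for the real st_* calls:

static int foo_disable_sensor(struct iio_dev *indio_dev);
static int foo_disable_buffer(struct iio_dev *indio_dev);

static int foo_predisable(struct iio_dev *indio_dev)
{
	int err, err2;

	err = foo_disable_sensor(indio_dev);
	err2 = foo_disable_buffer(indio_dev);	/* always runs */
	if (!err)
		err = err2;		/* keep the first failure */

	return err;
}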
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index b960e76f7dfd..ca6863b32a5f 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -664,25 +663,39 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
#define ST_PRESS_TRIGGER_OPS NULL
#endif
+/*
+ * st_press_get_settings() - get sensor settings from device name
+ * @name: device name buffer reference.
+ *
+ * Return: valid reference on success, NULL otherwise.
+ */
+const struct st_sensor_settings *st_press_get_settings(const char *name)
+{
+ int index = st_sensors_get_settings_index(name,
+ st_press_sensors_settings,
+ ARRAY_SIZE(st_press_sensors_settings));
+ if (index < 0)
+ return NULL;
+
+ return &st_press_sensors_settings[index];
+}
+EXPORT_SYMBOL(st_press_get_settings);
+
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
struct st_sensors_platform_data *pdata =
(struct st_sensors_platform_data *)press_data->dev->platform_data;
- int irq = press_data->get_irq_data_ready(indio_dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &press_info;
- mutex_init(&press_data->tb.buf_lock);
err = st_sensors_power_enable(indio_dev);
if (err)
return err;
- err = st_sensors_check_device_support(indio_dev,
- ARRAY_SIZE(st_press_sensors_settings),
- st_press_sensors_settings);
+ err = st_sensors_verify_id(indio_dev);
if (err < 0)
goto st_press_power_off;
@@ -693,7 +706,6 @@ int st_press_common_probe(struct iio_dev *indio_dev)
* element.
*/
press_data->num_data_channels = press_data->sensor_settings->num_ch - 1;
- press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
@@ -716,7 +728,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
if (err < 0)
goto st_press_power_off;
- if (irq > 0) {
+ if (press_data->irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
ST_PRESS_TRIGGER_OPS);
if (err < 0)
@@ -733,7 +745,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
return err;
st_press_device_register_error:
- if (irq > 0)
+ if (press_data->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_press_probe_trigger_error:
st_press_deallocate_ring(indio_dev);
@@ -751,7 +763,7 @@ void st_press_common_remove(struct iio_dev *indio_dev)
st_sensors_power_disable(indio_dev);
iio_device_unregister(indio_dev);
- if (press_data->get_irq_data_ready(indio_dev) > 0)
+ if (press_data->irq > 0)
st_sensors_deallocate_trigger(indio_dev);
st_press_deallocate_ring(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index b7d9ba706abc..71d2ed6b4948 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -78,18 +78,13 @@ static const struct i2c_device_id st_press_id_table[] = {
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
static int st_press_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *press_data;
+ struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data));
- if (!indio_dev)
- return -ENOMEM;
-
- press_data = iio_priv(indio_dev);
-
if (client->dev.of_node) {
st_sensors_of_name_probe(&client->dev, st_press_of_match,
client->name, sizeof(client->name));
@@ -99,11 +94,27 @@ static int st_press_i2c_probe(struct i2c_client *client,
return -ENODEV;
strlcpy(client->name, st_press_id_table[ret].name,
- sizeof(client->name));
+ sizeof(client->name));
} else if (!id)
return -ENODEV;
- st_sensors_i2c_configure(indio_dev, client, press_data);
+ settings = st_press_get_settings(client->name);
+ if (!settings) {
+ dev_err(&client->dev, "device name %s not recognized.\n",
+ client->name);
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ press_data = iio_priv(indio_dev);
+ press_data->sensor_settings = (struct st_sensor_settings *)settings;
+
+ ret = st_sensors_i2c_configure(indio_dev, client);
+ if (ret < 0)
+ return ret;
ret = st_press_common_probe(indio_dev);
if (ret < 0)
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index ef61401c41d3..7c8b70221e70 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -61,19 +61,31 @@ MODULE_DEVICE_TABLE(of, st_press_of_match);
static int st_press_spi_probe(struct spi_device *spi)
{
- struct iio_dev *indio_dev;
+ const struct st_sensor_settings *settings;
struct st_sensor_data *press_data;
+ struct iio_dev *indio_dev;
int err;
+ st_sensors_of_name_probe(&spi->dev, st_press_of_match,
+ spi->modalias, sizeof(spi->modalias));
+
+ settings = st_press_get_settings(spi->modalias);
+ if (!settings) {
+ dev_err(&spi->dev, "device name %s not recognized.\n",
+ spi->modalias);
+ return -ENODEV;
+ }
+
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*press_data));
- if (indio_dev == NULL)
+ if (!indio_dev)
return -ENOMEM;
press_data = iio_priv(indio_dev);
+ press_data->sensor_settings = (struct st_sensor_settings *)settings;
- st_sensors_of_name_probe(&spi->dev, st_press_of_match,
- spi->modalias, sizeof(spi->modalias));
- st_sensors_spi_configure(indio_dev, spi, press_data);
+ err = st_sensors_spi_configure(indio_dev, spi);
+ if (err < 0)
+ return err;
err = st_press_common_probe(indio_dev);
if (err < 0)
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 6b5cce6f1a7b..d53601447da4 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -62,7 +62,7 @@ config RFD77402
tristate "RFD77402 ToF sensor"
depends on I2C
help
- Say Y to build a driver for the RFD77420 Time-of-Flight (distance)
+ Say Y to build a driver for the RFD77402 Time-of-Flight (distance)
sensor module with I2C interface.
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index c613a64c017f..2ab68282d0b6 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -230,31 +230,13 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
data->spi = spi;
data->chip = chip;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(&spi->dev,
+ indio_dev, NULL,
maxim_thermocouple_trigger_handler, NULL);
if (ret)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_buffer;
-
- return 0;
-
-error_unreg_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int maxim_thermocouple_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id maxim_thermocouple_id[] = {
@@ -277,7 +259,6 @@ static struct spi_driver maxim_thermocouple_driver = {
.of_match_table = maxim_thermocouple_of_match,
},
.probe = maxim_thermocouple_probe,
- .remove = maxim_thermocouple_remove,
.id_table = maxim_thermocouple_id,
};
module_spi_driver(maxim_thermocouple_driver);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index ccf1ce653b25..a5dfe65cd9b9 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -608,86 +608,6 @@ static const struct iio_enum stm32_enable_mode_enum = {
.get = stm32_get_enable_mode
};
-static const char *const stm32_quadrature_modes[] = {
- "channel_A",
- "channel_B",
- "quadrature",
-};
-
-static int stm32_set_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- unsigned int mode)
-{
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
-
- regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, mode + 1);
-
- return 0;
-}
-
-static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
- u32 smcr;
- int mode;
-
- regmap_read(priv->regmap, TIM_SMCR, &smcr);
- mode = (smcr & TIM_SMCR_SMS) - 1;
- if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes)))
- return -EINVAL;
-
- return mode;
-}
-
-static const struct iio_enum stm32_quadrature_mode_enum = {
- .items = stm32_quadrature_modes,
- .num_items = ARRAY_SIZE(stm32_quadrature_modes),
- .set = stm32_set_quadrature_mode,
- .get = stm32_get_quadrature_mode
-};
-
-static const char *const stm32_count_direction_states[] = {
- "up",
- "down"
-};
-
-static int stm32_set_count_direction(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- unsigned int dir)
-{
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
- u32 val;
- int mode;
-
- /* In encoder mode, direction is RO (given by TI1/TI2 signals) */
- regmap_read(priv->regmap, TIM_SMCR, &val);
- mode = (val & TIM_SMCR_SMS) - 1;
- if ((mode >= 0) || (mode < ARRAY_SIZE(stm32_quadrature_modes)))
- return -EBUSY;
-
- return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
- dir ? TIM_CR1_DIR : 0);
-}
-
-static int stm32_get_count_direction(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
-{
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
- u32 cr1;
-
- regmap_read(priv->regmap, TIM_CR1, &cr1);
-
- return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
-}
-
-static const struct iio_enum stm32_count_direction_enum = {
- .items = stm32_count_direction_states,
- .num_items = ARRAY_SIZE(stm32_count_direction_states),
- .set = stm32_set_count_direction,
- .get = stm32_get_count_direction
-};
-
static ssize_t stm32_count_get_preset(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
@@ -728,10 +648,6 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
.read = stm32_count_get_preset,
.write = stm32_count_set_preset
},
- IIO_ENUM("count_direction", IIO_SEPARATE, &stm32_count_direction_enum),
- IIO_ENUM_AVAILABLE("count_direction", &stm32_count_direction_enum),
- IIO_ENUM("quadrature_mode", IIO_SEPARATE, &stm32_quadrature_mode_enum),
- IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_quadrature_mode_enum),
IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum),
IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
if (ret)
goto err;
- cma_configfs_init();
+ ret = cma_configfs_init();
+ if (ret)
+ goto err_ib;
return 0;
+err_ib:
+ ib_unregister_client(&cma_client);
err:
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 888d89ce81df..beee7b7e0d9a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_udata *udata,
struct ib_uobject *uobj)
{
+ enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
+ bool is_xrc;
if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
 * and more importantly they are created internally by the driver,
* see mlx5 create_dev_resources() as an example.
*/
- if (attr->qp_type < IB_QPT_XRC_INI) {
+ is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+ if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
qp->res.type = RDMA_RESTRACK_QP;
if (uobj)
rdma_restrack_uadd(&qp->res);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
int ret;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
mutex_lock(&port_counter->lock);
if (on) {
ret = __counter_set_mode(&port_counter->mode,
@@ -146,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+ if (!rdma_is_visible_in_pid_ns(&qp->res))
return false;
- /* Ensure that counter belong to right PID */
- if (!rdma_is_kernel_res(&counter->res) &&
- !rdma_is_kernel_res(&qp->res) &&
- (task_pid_vnr(counter->res.task) != current->pid))
+ /* Ensure that counter belongs to the right PID */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -393,6 +394,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
u64 sum;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return 0;
+
sum = get_running_counters_hwstat_sum(dev, port, index);
sum += port_counter->hstats->value[index];
@@ -418,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
return qp;
err:
- rdma_restrack_put(&qp->res);
+ rdma_restrack_put(res);
return NULL;
}
@@ -506,6 +510,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
+ if (!dev->port_data[port].port_counter.hstats)
+ return -EOPNOTSUPP;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
@@ -594,7 +601,7 @@ void rdma_counter_init(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats || !dev->port_data)
+ if (!dev->port_data)
return;
rdma_for_each_port(dev, port) {
@@ -602,6 +609,9 @@ void rdma_counter_init(struct ib_device *dev)
port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
mutex_init(&port_counter->lock);
+ if (!dev->ops.alloc_hw_stats)
+ continue;
+
port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
if (!port_counter->hstats)
goto fail;
@@ -624,9 +634,6 @@ void rdma_counter_release(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats)
- return;
-
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9773145dee09..ea8661a00651 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1
-static LIST_HEAD(client_list);
+static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);
+static void ib_client_put(struct ib_client *client)
+{
+ if (refcount_dec_and_test(&client->uses))
+ complete(&client->uses_zero);
+}
+
/*
* If client_data is registered then the corresponding client must also still
* be registered.
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device,
down_write(&device->client_data_rwsem);
/*
+ * So long as the client is registered hold both the client and device
+ * unregistration locks.
+ */
+ if (!refcount_inc_not_zero(&client->uses))
+ goto out_unlock;
+ refcount_inc(&device->refcount);
+
+ /*
* Another caller to add_client_context got here first and has already
* completely initialized context.
*/
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
return 0;
out:
+ ib_device_put(device);
+ ib_client_put(client);
+out_unlock:
up_write(&device->client_data_rwsem);
return ret;
}
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
client_data = xa_load(&device->client_data, client_id);
xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
client = xa_load(&clients, client_id);
- downgrade_write(&device->client_data_rwsem);
+ up_write(&device->client_data_rwsem);
/*
* Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
*
* For this reason clients and drivers should not call the
 * unregistration functions while holding any locks.
- *
- * It tempting to drop the client_data_rwsem too, but this is required
- * to ensure that unregister_client does not return until all clients
- * are completely unregistered, which is required to avoid module
- * unloading races.
*/
if (client->remove)
client->remove(device, client_data);
xa_erase(&device->client_data, client_id);
- up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
}
static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
static void disable_device(struct ib_device *device)
{
- struct ib_client *client;
+ u32 cid;
WARN_ON(!refcount_read(&device->refcount));
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
up_write(&devices_rwsem);
+ /*
+ * Remove clients in LIFO order, see assign_client_id. This could be
+ * more efficient if xarray learns to reverse iterate. Since no new
+ * clients can be added to this ib_device past this point we only need
+ * the maximum possible client_id value here.
+ */
down_read(&clients_rwsem);
- list_for_each_entry_reverse(client, &client_list, list)
- remove_client_context(device, client->client_id);
+ cid = highest_client_id;
up_read(&clients_rwsem);
+ while (cid) {
+ cid--;
+ remove_client_context(device, cid);
+ }
/* Pairs with refcount_set in enable_device */
ib_device_put(device);
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
- * registration order, and retain a linked list we can reverse iterate
- * to get the LIFO order. The extra linked list can go away if xarray
- * learns to reverse iterate.
+ * registration order.
*/
- if (list_empty(&client_list)) {
- client->client_id = 0;
- } else {
- struct ib_client *last;
-
- last = list_last_entry(&client_list, struct ib_client, list);
- client->client_id = last->client_id + 1;
- }
+ client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
goto out;
+ highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
- list_add_tail(&client->list, &client_list);
out:
up_write(&clients_rwsem);
return ret;
}
+static void remove_client_id(struct ib_client *client)
+{
+ down_write(&clients_rwsem);
+ xa_erase(&clients, client->client_id);
+ for (; highest_client_id; highest_client_id--)
+ if (xa_load(&clients, highest_client_id - 1))
+ break;
+ up_write(&clients_rwsem);
+}
+
/**
* ib_register_client - Register an IB client
* @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
unsigned long index;
int ret;
+ refcount_set(&client->uses, 1);
+ init_completion(&client->uses_zero);
ret = assign_client_id(client);
if (ret)
return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
unsigned long index;
down_write(&clients_rwsem);
+ ib_client_put(client);
xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&clients_rwsem);
- /*
- * Every device still known must be serialized to make sure we are
- * done with the client callbacks before we return.
- */
- down_read(&devices_rwsem);
- xa_for_each (&devices, index, device)
+
+ /* We do not want to have locks while calling client->remove() */
+ rcu_read_lock();
+ xa_for_each (&devices, index, device) {
+ if (!ib_device_try_get(device))
+ continue;
+ rcu_read_unlock();
+
remove_client_context(device, client->client_id);
- up_read(&devices_rwsem);
- down_write(&clients_rwsem);
- list_del(&client->list);
- xa_erase(&clients, client->client_id);
- up_write(&clients_rwsem);
+ ib_device_put(device);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ /*
+ * remove_client_context() is not a fence, it can return even though a
+ * removal is ongoing. Wait until all removals are completed.
+ */
+ wait_for_completion(&client->uses_zero);
+ remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);
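The device.c changes above replace the reverse-iterated client list with a monotonically increasing highest_client_id: registration hands out ids in FIFO order, and teardown walks the ids backwards to get the required LIFO order. Below is a minimal userspace sketch of that pattern, using a plain array as a stand-in for the xarray; all names and sizes here are illustrative, not the kernel's.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CLIENTS 8

/* Stand-in for the xarray keyed by client_id; true means "registered". */
static bool clients[MAX_CLIENTS];
static unsigned int highest_client_id;

/* Register in FIFO order: ids are handed out in increasing order. */
static int register_client(void)
{
	if (highest_client_id >= MAX_CLIENTS)
		return -1;
	clients[highest_client_id] = true;
	return highest_client_id++;
}

/* Tear down in LIFO order by counting down from the highest id ever used;
 * unused slots (already-unregistered clients) are simply skipped.
 */
static void remove_all_clients(void)
{
	unsigned int cid = highest_client_id;

	while (cid) {
		cid--;
		if (!clients[cid])
			continue;
		printf("removing client %u\n", cid);
		clients[cid] = false;
	}
}

int main(void)
{
	register_client();	/* id 0 */
	register_client();	/* id 1 */
	register_client();	/* id 2 */
	remove_all_clients();	/* prints 2, 1, 0 */
	return 0;
}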
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cc99479b2c09..9947d16edef2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
+ port_priv->pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(port_priv->pd)) {
+ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+ ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
- port_priv->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(port_priv->pd)) {
- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
- ret = PTR_ERR(port_priv->pd);
goto error4;
}
@@ -3278,11 +3278,11 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dealloc_pd(port_priv->pd);
ib_free_cq(port_priv->cq);
+ ib_dealloc_pd(port_priv->pd);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
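The mad.c hunk allocates the PD before the CQ and reorders the error labels so resources are always released in the reverse order of acquisition. A small sketch of that goto-unwind idiom, with hypothetical res_alloc()/res_free() placeholders standing in for ib_alloc_pd()/ib_alloc_cq():

#include <stdio.h>
#include <stdlib.h>

/* Placeholder resources standing in for the PD and CQ. */
struct res { int id; };

static struct res *res_alloc(int id)
{
	struct res *r = malloc(sizeof(*r));

	if (r)
		r->id = id;
	return r;
}

static void res_free(struct res *r)
{
	printf("freeing resource %d\n", r->id);
	free(r);
}

/* Acquire A then B; on failure unwind only what was already acquired,
 * in reverse order, mirroring the error3/error4 label reordering above.
 */
static int setup(void)
{
	struct res *a, *b;
	int ret;

	a = res_alloc(1);
	if (!a) {
		ret = -1;
		goto err;
	}

	b = res_alloc(2);
	if (!b) {
		ret = -1;
		goto err_free_a;
	}

	/* ... use a and b ... */
	res_free(b);
	res_free(a);
	return 0;

err_free_a:
	res_free(a);
err:
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}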
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i])
continue;
- curr = rdma_restrack_count(device, i,
- task_active_pid_ns(current));
+ curr = rdma_restrack_count(device, i);
ret = fill_res_info_entry(msg, names[i], curr);
if (ret)
goto err;
@@ -1952,12 +1951,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
if ((mode == RDMA_COUNTER_MODE_AUTO) &&
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+ ret = -EMSGSIZE;
goto err_msg;
+ }
nlmsg_end(msg, nlh);
ib_device_put(device);
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
 * rdma_restrack_count() - the current usage of a specific object type
 * @dev: IB device
 * @type: actual type of object to operate on
- * @ns: PID namespace
*/
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
- struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
{
struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
xa_lock(&rt->xa);
xas_for_each(&xas, e, U32_MAX) {
- if (ns == &init_pid_ns ||
- (!rdma_is_kernel_res(e) &&
- ns == task_active_pid_ns(e->task)))
- cnt++;
+ if (!rdma_is_visible_in_pid_ns(e))
+ continue;
+ cnt++;
}
xa_unlock(&rt->xa);
return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
*/
if (rdma_is_kernel_res(res))
return task_active_pid_ns(current) == &init_pid_ns;
- return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+ /* PID 0 means that resource is not found in current namespace */
+ return task_pid_vnr(res->task);
}
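The restrack change uses a virtual PID of 0 to mean "the owning task is not visible in the caller's PID namespace", so counting and dumping both filter through the same predicate. A small sketch of counting only visible entries, with owner_vpid standing in for what task_pid_vnr() would return (names here are illustrative):

#include <stdio.h>

struct res_entry {
	int owner_vpid;	/* owner's PID as seen from the caller's namespace;
			 * 0 means "not visible there" */
};

/* Count only entries whose owner is visible in the caller's namespace,
 * mirroring the filtering rdma_restrack_count() now delegates to
 * rdma_is_visible_in_pid_ns().
 */
static int count_visible(const struct res_entry *res, int n)
{
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		if (!res[i].owner_vpid)
			continue;
		cnt++;
	}
	return cnt;
}

int main(void)
{
	struct res_entry res[] = { { 100 }, { 0 }, { 42 } };

	printf("%d\n", count_visible(res, 3));	/* 2 */
	return 0;
}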
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
int ib_umem_page_count(struct ib_umem *umem)
{
- int i;
- int n;
+ int i, n = 0;
struct scatterlist *sg;
- if (umem->is_odp)
- return ib_umem_num_pages(umem);
-
- n = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
n += sg_dma_len(sg) >> PAGE_SHIFT;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
* prevent any further fault handling on this MR.
*/
ib_umem_notifier_start_account(umem_odp);
- umem_odp->dying = 1;
- /* Make sure that the fact the umem is dying is out before we release
- * all pending page faults. */
- smp_wmb();
complete_all(&umem_odp->notifier_completion);
umem_odp->umem.context->invalidate_range(
umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 9f8a48016b41..ffdeaf6e0b68 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
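The user_mad.c change validates the user-supplied agent id before taking any locks and then clamps it with array_index_nospec() so the later table lookup cannot be abused for speculative out-of-bounds reads. A userspace sketch of that check-then-clamp ordering follows; index_nospec() here is only a trivial stand-in for the real helper in <linux/nospec.h>, which clamps without a branch.

#include <stdio.h>

#define MAX_AGENTS 32

static int agents[MAX_AGENTS];

/* Stand-in for the kernel's array_index_nospec(): once the bounds check
 * has passed, clamp the index so even a mispredicted path cannot index
 * past the array.
 */
static unsigned int index_nospec(unsigned int idx, unsigned int size)
{
	return idx < size ? idx : 0;
}

static int lookup_agent(unsigned int id)
{
	if (id >= MAX_AGENTS)		/* reject before any use */
		return -1;

	id = index_nospec(id, MAX_AGENTS);
	return agents[id];
}

int main(void)
{
	agents[3] = 42;
	printf("%d\n", lookup_agent(3));	/* 42 */
	printf("%d\n", lookup_agent(99));	/* -1, rejected */
	return 0;
}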
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
/* Delete the entry from the hardware */
ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
- gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
/* DEL_GID is called in WQ context (netdevice_event_work_handler)
* or via the ib_unregister_device path. In the former case QP1
* may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
}
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EBUSY;
}
+
+ size = req->cmd_size;
+ /* Change cmd_size to the number of 16-byte cmdq units;
+ * req->cmd_size is modified here.
+ */
+ bnxt_qplib_set_cmd_slots(req);
+
memset(resp, 0, sizeof(*resp));
crsqe->resp = (struct creq_qp_event *)resp;
crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
- size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
do { \
memset(&(req), 0, sizeof((req))); \
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
- (req).cmd_size = (sizeof((req)) + \
- BNXT_QPLIB_CMDQE_UNITS - 1) / \
- BNXT_QPLIB_CMDQE_UNITS; \
+ (req).cmd_size = sizeof((req)); \
(req).flags = cpu_to_le16(cmd_flags); \
} while (0)
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
BNXT_QPLIB_CMDQE_UNITS);
}
+/* Convert cmd_size from bytes to the number of CMDQE units, rounded up */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+}
+
#define MAX_CMDQ_IDX(depth) ((depth) - 1)
static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
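bnxt_qplib_set_cmd_slots() above converts a command size in bytes into the number of 16-byte CMDQE slots it occupies, rounding up. A short sketch of the same round-up division; the 16-byte unit size is taken from the driver's comment, and the names are illustrative:

#include <stdio.h>

#define CMDQE_UNITS 16	/* illustrative 16-byte slot size */

/* Round a byte count up to the number of fixed-size slots it occupies. */
static unsigned int bytes_to_slots(unsigned int bytes)
{
	return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	printf("%u\n", bytes_to_slots(16));	/* 1 */
	printf("%u\n", bytes_to_slots(17));	/* 2 */
	printf("%u\n", bytes_to_slots(0));	/* 0 */
	return 0;
}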
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl,
u16 max)
{
- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
if (!sgid_tbl->tbl)
return -ENOMEM;
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
for (i = 0; i < sgid_tbl->max; i++) {
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
}
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct net_device *netdev)
{
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
};
struct bnxt_qplib_sgid_tbl {
- struct bnxt_qplib_gid *tbl;
+ struct bnxt_qplib_gid_info *tbl;
u16 *hw_id;
u16 max;
u16 active;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
index, sgid_tbl->max);
return -EINVAL;
}
- memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update)
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
- if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
break;
}
if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
if (rc)
return rc;
}
- memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
- if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
dev_dbg(&res->pdev->dev,
"SGID entry already exist in entry %d!\n", i);
*index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
sgid_tbl->active++;
if (vlan_id != 0xFFFF)
sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
u8 data[16];
};
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update);
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
write_csr(dd, RCV_ERR_MASK, ~0ull);
rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
+ return 0;
}
static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
goto bail_cleanup;
/* set initial RXE CSRs */
- init_rxe(dd);
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* set initial TXE CSRs */
init_txe(dd);
/* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
if (!data)
return -ENOMEM;
copy = min(len, datalen - 1);
- if (copy_from_user(data, buf, copy))
- return -EFAULT;
+ if (copy_from_user(data, buf, copy)) {
+ ret = -EFAULT;
+ goto free_data;
+ }
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
ptr = data;
token = ptr;
for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
ret = len;
debugfs_file_put(file->f_path.dentry);
+free_data:
kfree(data);
return ret;
}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
return -ENOMEM;
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
bit = find_first_bit(fault->opcodes, bitsize);
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
data[size - 1] = '\n';
data[size] = '\0';
ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
kfree(data);
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
flows[i].req = req;
flows[i].npagesets = 0;
flows[i].pagesets[0].mapped = 0;
+ flows[i].resync_npkts = 0;
}
req->flows = flows;
return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
return NULL;
}
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
- u32 psn, u16 *fidx)
-{
- for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
- tail = CIRC_NEXT(tail, MAX_FLOWS)) {
- struct tid_rdma_flow *flow = &req->flows[tail];
- u32 spsn, lpsn;
-
- spsn = full_flow_psn(flow, flow->flow_state.spsn);
- lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
- if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
- if (fidx)
- *fidx = tail;
- return flow;
- }
- }
- return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
- u32 psn, u16 *fidx)
-{
- return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
- fidx);
-}
-
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
@@ -2601,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
hfi1_kern_clear_hw_flow(priv->rcd, qp);
}
-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet, u8 rcv_type,
- u8 opcode)
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
{
struct rvt_qp *qp = packet->qp;
- struct hfi1_qp_priv *qpriv = qp->priv;
- u32 ipsn;
- struct ib_other_headers *ohdr = packet->ohdr;
- struct rvt_ack_entry *e;
- struct tid_rdma_request *req;
- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
- u32 i;
if (rcv_type >= RHF_RCV_TYPE_IB)
goto done;
@@ -2629,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
if (rcv_type == RHF_RCV_TYPE_EAGER) {
hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
- goto done_unlock;
- }
-
- /*
- * For TID READ response, error out QP after freeing the tid
- * resources.
- */
- if (opcode == TID_OP(READ_RESP)) {
- ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
- if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
- cmp_psn(ipsn, qp->s_psn) < 0) {
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto done;
- }
- goto done_unlock;
}
- /*
- * Error out the qp for TID RDMA WRITE
- */
- hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
- for (i = 0; i < rvt_max_atomic(rdi); i++) {
- e = &qp->s_ack_queue[i];
- if (e->opcode == TID_OP(WRITE_REQ)) {
- req = ack_to_tid_req(e);
- hfi1_kern_exp_rcv_clear_all(req);
- }
- }
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
- goto done;
-
-done_unlock:
+ /* Since no payload is delivered, just drop the packet */
spin_unlock(&qp->s_lock);
done:
return true;
@@ -2714,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
u32 fpsn;
lockdep_assert_held(&qp->r_lock);
+ spin_lock(&qp->s_lock);
/* If the psn is out of valid range, drop the packet */
if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
cmp_psn(ibpsn, qp->s_psn) > 0)
- return ret;
+ goto s_unlock;
- spin_lock(&qp->s_lock);
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
@@ -2767,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
wqe = do_rc_completion(qp, wqe, ibp);
if (qp->s_acked == qp->s_tail)
- break;
+ goto s_unlock;
}
+ if (qp->s_acked == qp->s_tail)
+ goto s_unlock;
+
/* Handle the eflags for the request */
if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
goto s_unlock;
@@ -2788,19 +2723,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* to prevent continuous Flow Sequence errors for any
* packets that could be still in the fabric.
*/
- flow = find_flow(req, psn, NULL);
- if (!flow) {
- /*
- * We can't find the IB PSN matching the
- * received KDETH PSN. The only thing we can
- * do at this point is report the error to
- * the QP.
- */
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return ret;
- }
+ flow = &req->flows[req->clear_tail];
if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
@@ -2961,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
if (lnh == HFI1_LRH_GRH)
goto r_unlock;
- if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+ if (tid_rdma_tid_err(packet, rcv_type))
goto r_unlock;
}
@@ -2981,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
*/
spin_lock(&qp->s_lock);
qpriv = qp->priv;
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+ qpriv->r_tid_tail == qpriv->r_tid_head)
+ goto unlock;
e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto unlock;
req = ack_to_tid_req(e);
+ if (req->comp_seg == req->cur_seg)
+ goto unlock;
flow = &req->flows[req->clear_tail];
trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4548,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
struct rvt_swqe *wqe;
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
- u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
+ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
unsigned long flags;
u16 fidx;
@@ -4577,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
ack_kpsn--;
}
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_op_err;
+
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4589,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
/* Drop stale ACK/NAK */
- if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
goto ack_op_err;
while (cmp_psn(ack_kpsn,
@@ -4751,7 +4685,12 @@ done:
switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
IB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
+ if (!req->flows)
+ break;
flow = &req->flows[req->acked_tail];
+ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+ if (cmp_psn(psn, flpsn) > 0)
+ break;
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
flow);
req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c4b243f50c76..646f61545ed6 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -54,6 +54,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>
+#include <linux/nospec.h>
#include "hfi.h"
#include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
+ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 8bf847bcd8d3..54782197c717 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
+ bool "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
To compile HIP06 or HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
+ tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
module will be called hns-roce-hw-v1
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
+ tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..449a2d81319d 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db)
{
+ unsigned long page_addr = virt & PAGE_MASK;
struct hns_roce_user_db_page *page;
+ unsigned int offset;
int ret = 0;
mutex_lock(&context->page_mutex);
list_for_each_entry(page, &context->page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if (page->user_virt == page_addr)
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
}
refcount_set(&page->refcount, 1);
- page->user_virt = (virt & PAGE_MASK);
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->user_virt = page_addr;
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
list_add(&page->list, &context->page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
- (virt & ~PAGE_MASK);
- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+ offset = virt - page_addr;
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
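The hns_roce_db_map_user() change keys the pinned page by its page-aligned address and derives both the DMA and kernel virtual addresses from that base plus the in-page offset. A sketch of the address split, assuming a 4 KiB page size (the constants and sample address are illustrative):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t virt = 0x345678f0UL;		/* arbitrary user address */
	uintptr_t page_addr = virt & PAGE_MASK;	/* base used as the lookup key */
	uintptr_t offset = virt - page_addr;	/* added to dma and virt_addr */

	printf("page base 0x%lx, offset 0x%lx\n",
	       (unsigned long)page_addr, (unsigned long)offset);
	return 0;
}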
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 81e6dedb1e02..141205e76314 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd)
+ if (!pd) {
+ ret = -ENOMEM;
goto alloc_mem_failed;
+ }
pd->device = ibdev;
ret = hns_roce_alloc_pd(pd, NULL);
@@ -4499,19 +4501,13 @@ static const struct acpi_device_id hns_roce_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-static int hns_roce_node_match(struct device *dev, const void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
struct device *dev;
/* get the 'device' corresponding to the matching 'fwnode' */
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_roce_node_match);
+ dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
/* get the platform device */
return dev ? to_platform_device(dev) : NULL;
}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
i = MLX4_NUM_TUNNEL_BUFS;
err:
while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
event_sub->eventfd =
eventfd_ctx_fdget(redirect_fd);
- if (IS_ERR(event_sub)) {
+ if (IS_ERR(event_sub->eventfd)) {
err = PTR_ERR(event_sub->eventfd);
event_sub->eventfd = NULL;
goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub, *event_sub_tmp;
struct devx_async_event_data *entry, *tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
- mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
/* delete the subscriptions which are related to this FD */
list_for_each_entry_safe(event_sub, event_sub_tmp,
&ev_file->subscribed_events_list, file_list) {
- devx_cleanup_subscription(ev_file->dev, event_sub);
+ devx_cleanup_subscription(dev, event_sub);
if (event_sub->eventfd)
eventfd_ctx_put(event_sub->eventfd);
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
kfree_rcu(event_sub, rcu);
}
- mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
/* free the pending events allocation */
if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
}
uverbs_close_fd(filp);
- put_device(&ev_file->dev->ib_dev.dev);
+ put_device(&dev->ib_dev.dev);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2a5780cb394..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- if (MLX5_CAP_GEN(mdev, pg))
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
}
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
return;
}
- if (mpi->mdev_events.notifier_call)
- mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
- mpi->mdev_events.notifier_call = NULL;
-
mpi->ibdev = NULL;
spin_unlock(&port->mp.mpi_lock);
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
mlx5_remove_netdev_notifier(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
@@ -6140,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
+ mlx5_ib_internal_fill_odp_caps(dev);
+
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -6564,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_internal_fill_odp_caps(dev);
-
return mlx5_ib_odp_init_one(dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
int entry;
if (umem->is_odp) {
- unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+ struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+ unsigned int page_shift = odp->page_shift;
- *ncont = ib_umem_page_count(umem);
+ *ncont = ib_umem_odp_num_pages(odp);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c482f19958b3..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
u64 length;
int access_flags;
u32 mkey;
+ u8 ignore_free_state:1;
};
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
@@ -1474,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
bool dyn_bfreg);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+ bool do_modify_atomic)
+{
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ if (do_modify_atomic &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ return true;
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 20ece6e0b2fc..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
- return;
- }
+ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr))
+ if (unreg_umr(dev, mr)) {
+ mr->allocated_from_cache = false;
+ destroy_mkey(dev, mr);
+ ent = &cache->ent[c];
+ if (ent->cur < ent->limit)
+ queue_work(cache->wq, &ent->work);
return;
+ }
ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
struct ib_umem *umem;
int page_shift;
int npages;
@@ -1300,29 +1293,28 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
+ use_umr = mlx5_ib_can_use_umr(dev, true);
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
- populate_mtts = false;
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
- populate_mtts = true;
+ use_umr = false;
}
if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1338,7 +1330,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
- if (!populate_mtts) {
+ if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1365,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.pd = dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
+ umrwr.ignore_free_state = 1;
return mlx5_ib_post_send_wait(dev, &umrwr);
}
@@ -1452,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ if (!mlx5_ib_can_use_umr(dev, true) ||
+ (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
*/
@@ -1577,10 +1572,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- mlx5_free_priv_descs(mr);
-
- if (!allocated_from_cache)
+ if (!allocated_from_cache) {
destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+ }
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5b642d81e617..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 * overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
-
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -300,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
memset(caps, 0, sizeof(*caps));
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+ !mlx5_ib_can_use_umr(dev, true))
return;
caps->general_caps = IB_ODP_SUPPORT;
@@ -354,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
- MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
return;
@@ -578,7 +581,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u32 flags)
{
int npages = 0, current_seq, page_shift, ret, np;
- bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -593,7 +595,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
- implicit = true;
} else {
odp = odp_mr;
}
@@ -681,19 +682,15 @@ next_mr:
out:
if (ret == -EAGAIN) {
- if (implicit || !odp->dying) {
- unsigned long timeout =
- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(
- &odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq, odp->notifiers_count);
- }
- } else {
- /* The MR is being killed, kill the QP as well. */
- ret = -EFAULT;
+ unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+ if (!wait_for_completion_timeout(&odp->notifier_completion,
+ timeout)) {
+ mlx5_ib_warn(
+ dev,
+ "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+ current_seq, odp->notifiers_seq,
+ odp->notifiers_count);
}
}
@@ -1627,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret = 0;
- if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
- ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return ret;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1638,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- if (!MLX5_CAP_GEN(dev->mdev, pg))
- return ret;
-
ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
return ret;
@@ -1648,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
@@ -1771,7 +1767,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
w->num_sge, 0);
- kfree(w);
+ kvfree(w);
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1809,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (valid_req)
queue_work(system_unbound_wq, &work->work);
else
- kfree(work);
+ kvfree(work);
srcu_read_unlock(&dev->mr_srcu, srcu_key);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2a97619ed603..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -4163,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
MLX5_IB_UMR_OCTOWORD;
}
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
{
u64 result;
@@ -4176,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_SMALL_FENCE |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
@@ -4205,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags)
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
@@ -4213,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->flags = flags;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask();
+ umr->mkey_mask = frwr_mkey_mask(atomic);
}
static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4295,10 +4296,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
memset(umr, 0, sizeof(*umr));
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
@@ -4808,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
+ if (!mlx5_ib_can_use_umr(dev, atomic)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
"Invalid IB_SEND_INLINE send flag\n");
@@ -4823,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
if (umr_inline)
flags |= MLX5_UMR_INLINE;
- set_reg_umr_seg(*seg, mr, flags);
+ set_reg_umr_seg(*seg, mr, flags, atomic);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 533157a2a3be..f97b3d65b30c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+ return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
+ dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ?
+ "iWARP" : "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+ depends on INET && INFINIBAND && LIBCRC32C
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
};
struct siw_pble {
- u64 addr; /* Address of assigned user buffer */
- u64 size; /* Size of this entry */
- u64 pbl_off; /* Total offset from start of PBL */
+ dma_addr_t addr; /* Address of assigned buffer */
+ unsigned int size; /* Size of this entry */
+ unsigned long pbl_off; /* Total offset from start of PBL */
};
struct siw_pbl {
@@ -214,7 +214,7 @@ struct siw_wqe {
struct siw_cq {
struct ib_cq base_cq;
spinlock_t lock;
- u64 *notify;
+ struct siw_cq_ctrl *notify;
struct siw_cqe *queue;
u32 cq_put;
u32 cq_get;
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index a7cde98e73e8..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
static void siw_cep_set_inuse(struct siw_cep *cep)
{
unsigned long flags;
- int rv;
retry:
spin_lock_irqsave(&cep->lock, flags);
if (cep->in_use) {
spin_unlock_irqrestore(&cep->lock, flags);
- rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
if (signal_pending(current))
flush_signals(current);
goto retry;
@@ -356,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
getname_local(cep->sock, &event.local_addr);
getname_peer(cep->sock, &event.remote_addr);
}
- siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n",
- cep->qp ? qp_id(cep->qp) : -1, id, reason, status);
+ siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
return id->event_handler(id, &event);
}
@@ -948,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_get(new_cep);
new_s->sk->sk_user_data = new_cep;
- siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
-
if (siw_tcp_nagle == false) {
int val = 1;
@@ -1012,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
cep = work->cep;
siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
- cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX,
+ work->type, cep->state);
siw_cep_set_inuse(cep);
@@ -1146,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
}
if (release_cep) {
siw_dbg_cep(cep,
- "release: timer=%s, QP[%u], id 0x%p\n",
+ "release: timer=%s, QP[%u]\n",
cep->mpa_timer ? "y" : "n",
- cep->qp ? qp_id(cep->qp) : -1, cep->cm_id);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX);
siw_cancel_mpatimer(cep);
@@ -1212,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
else
delay = MPAREP_TIMEOUT;
}
- siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n",
- cep->qp ? qp_id(cep->qp) : -1, type, work, delay);
+ siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
+ cep->qp ? qp_id(cep->qp) : -1, type, delay);
queue_delayed_work(siw_cm_wq, &work->work, delay);
@@ -1377,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
}
if (v4)
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
+ pd_len,
&((struct sockaddr_in *)(laddr))->sin_addr,
ntohs(((struct sockaddr_in *)(laddr))->sin_port),
&((struct sockaddr_in *)(raddr))->sin_addr,
ntohs(((struct sockaddr_in *)(raddr))->sin_port));
else
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
+ pd_len,
&((struct sockaddr_in6 *)(laddr))->sin6_addr,
ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
&((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1509,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (rv >= 0) {
rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
if (!rv) {
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id,
- qp_id(qp));
+ siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
siw_cep_set_free(cep);
return 0;
}
}
error:
- siw_dbg_qp(qp, "failed: %d\n", rv);
+ siw_dbg(id->device, "failed: %d\n", rv);
if (cep) {
siw_socket_disassoc(s);
@@ -1541,7 +1538,8 @@ error:
} else if (s) {
sock_release(s);
}
- siw_qp_put(qp);
+ if (qp)
+ siw_qp_put(qp);
return rv;
}
@@ -1581,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep);
@@ -1602,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
up_write(&qp->state_lock);
goto error;
}
- siw_dbg_cep(cep, "id 0x%p\n", id);
+ siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1612,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
params->ird > sdev->attrs.max_ird) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n",
- id, qp_id(qp), params->ord, sdev->attrs.max_ord,
+ "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
+ qp_id(qp), params->ord, sdev->attrs.max_ord,
params->ird, sdev->attrs.max_ird);
rv = -EINVAL;
up_write(&qp->state_lock);
@@ -1625,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (params->private_data_len > max_priv_data) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: private data length: %d (max %d)\n",
- id, qp_id(qp), params->private_data_len, max_priv_data);
+ "[QP %u]: private data length: %d (max %d)\n",
+ qp_id(qp), params->private_data_len, max_priv_data);
rv = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1680,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
qp_attrs.flags = SIW_MPA_CRC;
qp_attrs.state = SIW_QP_STATE_RTS;
- siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp));
+ siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
/* Associate QP with CEP */
siw_cep_get(cep);
@@ -1701,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (rv)
goto error;
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n",
- id, qp_id(qp), params->private_data_len);
+ siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
+ qp_id(qp), params->private_data_len);
rv = siw_send_mpareqrep(cep, params->private_data,
params->private_data_len);
@@ -1760,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep); /* put last reference */
return -ECONNRESET;
}
- siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state,
+ siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
pd_len);
if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1805,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
sizeof(s_val));
if (rv) {
- siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv);
+ siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
if (rv) {
- siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv);
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
}
cep = siw_cep_alloc(sdev);
@@ -1825,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
rv = siw_cm_alloc_work(cep, backlog);
if (rv) {
siw_dbg(id->device,
- "id 0x%p: alloc_work error %d, backlog %d\n", id,
+ "alloc_work error %d, backlog %d\n",
rv, backlog);
goto error;
}
rv = s->ops->listen(s, backlog);
if (rv) {
- siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv);
+ siw_dbg(id->device, "listen error %d\n", rv);
goto error;
}
cep->cm_id = id;
@@ -1915,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
list_del(p);
- siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id,
- cep->state);
+ siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
siw_cep_set_inuse(cep);
@@ -1953,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct net_device *dev = to_siw_dev(id->device)->netdev;
int rv = 0, listeners = 0;
- siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog);
+ siw_dbg(id->device, "backlog %d\n", backlog);
/*
* For each attached address of the interface, create a
@@ -1965,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in s_laddr, *s_raddr;
const struct in_ifaddr *ifa;
+ if (!in_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
s_raddr = (struct sockaddr_in *)&id->remote_addr;
siw_dbg(id->device,
- "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n",
- id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
+ "laddr %pI4:%d, raddr %pI4:%d\n",
+ &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
&s_raddr->sin_addr, ntohs(s_raddr->sin_port));
rtnl_lock();
@@ -1994,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
*s_raddr = &to_sockaddr_in6(id->remote_addr);
+ if (!in6_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
siw_dbg(id->device,
- "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n",
- id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
+ "laddr %pI6:%d, raddr %pI6:%d\n",
+ &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
- read_lock_bh(&in6_dev->lock);
+ rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- struct sockaddr_in6 bind_addr;
-
+ if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+ continue;
if (ipv6_addr_any(&s_laddr->sin6_addr) ||
ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- bind_addr.sin6_family = AF_INET6;
- bind_addr.sin6_port = s_laddr->sin6_port;
- bind_addr.sin6_flowinfo = 0;
- bind_addr.sin6_addr = ifp->addr;
- bind_addr.sin6_scope_id = dev->ifindex;
+ struct sockaddr_in6 bind_addr = {
+ .sin6_family = AF_INET6,
+ .sin6_port = s_laddr->sin6_port,
+ .sin6_flowinfo = 0,
+ .sin6_addr = ifp->addr,
+ .sin6_scope_id = dev->ifindex };
rv = siw_listen_address(id, backlog,
(struct sockaddr *)&bind_addr,
@@ -2018,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
listeners++;
}
}
- read_unlock_bh(&in6_dev->lock);
-
+ rtnl_unlock();
in6_dev_put(in6_dev);
} else {
- return -EAFNOSUPPORT;
+ rv = -EAFNOSUPPORT;
}
+out:
if (listeners)
rv = 0;
else if (!rv)
rv = -EINVAL;
- siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK");
+ siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
return rv;
}
int siw_destroy_listen(struct iw_cm_id *id)
{
- siw_dbg(id->device, "id 0x%p\n", id);
-
if (!id->provider_data) {
- siw_dbg(id->device, "id 0x%p: no cep(s)\n", id);
+ siw_dbg(id->device, "no cep(s)\n");
return 0;
}
siw_drop_listeners(id);
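
The IPv6 branch of siw_create_listen() above now walks the interface's address list under rtnl_lock() and skips tentative or deprecated addresses before binding a listener to each remaining one. A reduced sketch of that walk, assuming hypothetical names and counting usable addresses instead of creating listeners:

#include <linux/if_addr.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/addrconf.h>

static int example_count_usable_ipv6(struct net_device *dev)
{
	struct inet6_dev *in6_dev = in6_dev_get(dev);
	struct inet6_ifaddr *ifp;
	int count = 0;

	if (!in6_dev)
		return -ENODEV;

	rtnl_lock();
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		/* skip addresses that are not (or no longer) usable */
		if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
			continue;
		count++;
	}
	rtnl_unlock();
	in6_dev_put(in6_dev);

	return count;
}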
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->wc_flags = IB_WC_WITH_INVALIDATE;
}
wc->qp = cqe->base_qp;
- siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n",
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%pK\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
- cqe->flags, (void *)cqe->id);
+ cqe->flags, (void *)(uintptr_t)cqe->id);
}
WRITE_ONCE(cqe->flags, 0);
cq->cq_get++;
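
The hunk above replaces raw pointer printing of the u64 cqe->id with %pK and an explicit cast through uintptr_t. A minimal sketch of that pattern, assuming a hypothetical helper and address field:

#include <linux/device.h>
#include <linux/types.h>

static void example_print_addr(struct device *dev, u64 addr)
{
	/*
	 * A u64 cannot be handed to %pK directly: cast through uintptr_t
	 * to a void * first.  %pK respects kptr_restrict, so the raw
	 * value only reaches sufficiently privileged log readers.
	 */
	dev_dbg(dev, "addr: 0x%pK\n", (void *)(uintptr_t)addr);
}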
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index f55c4e80aea4..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
out_err:
siw_cpu_info.num_nodes = 0;
- while (i) {
+ while (--i >= 0)
kfree(siw_cpu_info.tx_valid_cpus[i]);
- siw_cpu_info.tx_valid_cpus[i--] = NULL;
- }
kfree(siw_cpu_info.tx_valid_cpus);
siw_cpu_info.tx_valid_cpus = NULL;
@@ -612,6 +610,7 @@ static __init int siw_init_module(void)
if (!siw_create_tx_threads()) {
pr_info("siw: Could not start any TX thread\n");
+ rv = -ENOMEM;
goto out_error;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n",
- (unsigned long long)addr,
- (unsigned long long)(addr + len));
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n",
- (unsigned long long)mem->va,
- (unsigned long long)(mem->va + mem->len),
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ (void *)(uintptr_t)addr,
+ (void *)(uintptr_t)(addr + len));
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ (void *)(uintptr_t)mem->va,
+ (void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
* Optionally, provides remaining len within current element, and
* current PBL index for later resume at same element.
*/
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
{
int i = idx ? *idx : 0;
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 11383d9f95ef..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
{
struct siw_rx_stream *c_rx = &qp->rx_stream;
struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
- int size = crypto_shash_descsize(siw_crypto_shash) +
- sizeof(struct shash_desc);
+ int size;
if (siw_crypto_shash == NULL)
return -ENOENT;
+ size = crypto_shash_descsize(siw_crypto_shash) +
+ sizeof(struct shash_desc);
+
c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
@@ -947,7 +949,7 @@ skip_irq:
rv = -EINVAL;
goto out;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
wqe->sqe.sge[0].lkey = 0;
wqe->sqe.num_sge = 1;
}
@@ -1011,18 +1013,24 @@ out:
*/
static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
{
- u64 cq_notify;
+ u32 cq_notify;
if (!cq->base_cq.comp_handler)
return false;
- cq_notify = READ_ONCE(*cq->notify);
+ /* Read application shared notification state */
+ cq_notify = READ_ONCE(cq->notify->flags);
if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
((cq_notify & SIW_NOTIFY_SOLICITED) &&
(flags & SIW_WQE_SOLICITED))) {
- /* dis-arm CQ */
- smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+ /*
+ * CQ notification is one-shot: Since the
+ * current CQE causes user notification,
+ * the CQ gets dis-armed and must be re-armed
+ * by the user for a new notification.
+ */
+ WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
return true;
}
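
siw_cq_notify_now() above reads the shared notification flags with READ_ONCE() and clears them with WRITE_ONCE() once a completion raises an event, making CQ arming one-shot. A reduced sketch of that arm/consume protocol, with the siw-specific types replaced by assumed stand-ins:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

enum { EX_NOTIFY_NOT = 0, EX_NOTIFY_SOLICITED = 1, EX_NOTIFY_NEXT = 2 };

struct example_cq_ctrl {
	u32 flags;		/* shared between consumer and provider */
};

/* consumer side: (re-)arm the CQ for the next completion event */
static void example_arm_cq(struct example_cq_ctrl *ctrl)
{
	smp_store_mb(ctrl->flags, EX_NOTIFY_NEXT);
}

/* provider side: decide whether the current completion raises an event */
static bool example_notify_now(struct example_cq_ctrl *ctrl, bool solicited)
{
	u32 flags = READ_ONCE(ctrl->flags);

	if ((flags & EX_NOTIFY_NEXT) ||
	    ((flags & EX_NOTIFY_SOLICITED) && solicited)) {
		/* one-shot: disarm until the consumer re-arms */
		WRITE_ONCE(ctrl->flags, EX_NOTIFY_NOT);
		return true;
	}
	return false;
}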
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
__func__, qp_id(rx_qp(srx)),
- (void *)dest_addr, (void *)umem->fp_addr);
+ (void *)(uintptr_t)dest_addr,
+ (void *)(uintptr_t)umem->fp_addr);
/* siw internal error */
srx->skb_copied += copied;
srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
while (len) {
int bytes;
- u64 buf_addr =
+ dma_addr_t buf_addr =
siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
if (!buf_addr)
break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
mem_p = *mem;
if (mem_p->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(sge->laddr + frx->sge_off),
- sge_bytes);
+ (void *)(uintptr_t)(sge->laddr + frx->sge_off),
+ sge_bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem,
sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
if (mem->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(srx->ddp_to + srx->fpdu_part_rcvd),
- bytes);
+ (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
+ bytes);
else if (!mem->is_pbl)
rv = siw_rx_umem(srx, mem->umem,
srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
bytes = min(srx->fpdu_part_rem, srx->skb_new);
if (mem_p->mem_obj == NULL)
- rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed),
- bytes);
+ rv = siw_rx_kva(srx,
+ (void *)(uintptr_t)(sge->laddr + wqe->processed),
+ bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
{
struct siw_pbl *pbl = mem->pbl;
u64 offset = addr - mem->va;
- u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
/*
* Copy short payload at provided destination payload address
*/
-static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
+static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
{
struct siw_wqe *wqe = &c_tx->wqe_active;
struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
return 0;
if (tx_flags(wqe) & SIW_WQE_INLINE) {
- memcpy((void *)paddr, &wqe->sqe.sge[1], bytes);
+ memcpy(paddr, &wqe->sqe.sge[1], bytes);
} else {
struct siw_mem *mem = wqe->mem[0];
if (!mem->mem_obj) {
/* Kernel client using kva */
- memcpy((void *)paddr, (void *)sge->laddr, bytes);
+ memcpy(paddr,
+ (const void *)(uintptr_t)sge->laddr, bytes);
} else if (c_tx->in_syscall) {
- if (copy_from_user((void *)paddr,
- (const void __user *)sge->laddr,
+ if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
bytes))
return -EFAULT;
} else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
buffer = kmap_atomic(p);
if (likely(PAGE_SIZE - off >= bytes)) {
- memcpy((void *)paddr, buffer + off, bytes);
+ memcpy(paddr, buffer + off, bytes);
kunmap_atomic(buffer);
} else {
unsigned long part = bytes - (PAGE_SIZE - off);
- memcpy((void *)paddr, buffer + off, part);
+ memcpy(paddr, buffer + off, part);
kunmap_atomic(buffer);
if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
return -EFAULT;
buffer = kmap_atomic(p);
- memcpy((void *)(paddr + part), buffer,
+ memcpy(paddr + part, buffer,
bytes - part);
kunmap_atomic(buffer);
}
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_send);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
#define MAX_TRAILER (MPA_CRC_SIZE + 4)
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
{
- if (hdr_len) {
- ++pages;
- --num_maps;
- }
- while (num_maps-- > 0) {
- kunmap(*pages);
- pages++;
+ while (kmap_mask) {
+ if (kmap_mask & BIT(0))
+ kunmap(*pp);
+ pp++;
+ kmap_mask >>= 1;
}
}
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
pbl_idx = c_tx->pbl_idx;
+ unsigned long kmap_mask = 0L;
if (c_tx->state == SIW_SEND_HDR) {
if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
mem = wqe->mem[sge_idx];
- if (!mem->mem_obj)
- is_kva = 1;
+ is_kva = mem->mem_obj == NULL ? 1 : 0;
} else {
is_kva = 1;
}
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
* tx from kernel virtual address: either inline data
* or memory region with assigned kernel buffer
*/
- iov[seg].iov_base = (void *)(sge->laddr + sge_off);
+ iov[seg].iov_base =
+ (void *)(uintptr_t)(sge->laddr + sge_off);
iov[seg].iov_len = sge_len;
if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
p = siw_get_upage(mem->umem,
sge->laddr + sge_off);
if (unlikely(!p)) {
- if (hdr_len)
- seg--;
- if (!c_tx->use_sendpage && seg) {
- siw_unmap_pages(page_array,
- hdr_len, seg);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EFAULT;
goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (!c_tx->use_sendpage) {
iov[seg].iov_base = kmap(p) + fp_off;
iov[seg].iov_len = plen;
+
+ /* Remember for later kunmap() */
+ kmap_mask |= BIT(seg);
+
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
page_address(p) + fp_off,
plen);
} else {
- u64 pa = ((sge->laddr + sge_off) & PAGE_MASK);
+ u64 va = sge->laddr + sge_off;
- page_array[seg] = virt_to_page(pa);
+ page_array[seg] = virt_to_page(va & PAGE_MASK);
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(sge->laddr + sge_off),
+ (void *)(uintptr_t)va,
plen);
}
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (++seg > (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
- if (!is_kva && !c_tx->use_sendpage) {
- siw_unmap_pages(page_array, hdr_len,
- seg - 1);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EMSGSIZE;
goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
} else {
rv = kernel_sendmsg(s, &msg, iov, seg + 1,
hdr_len + data_len + trl_len);
- if (!is_kva)
- siw_unmap_pages(page_array, hdr_len, seg);
+ siw_unmap_pages(page_array, kmap_mask);
}
if (rv < (int)hdr_len) {
/* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
rv = -EINVAL;
goto tx_error;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr =
+ (u64)(uintptr_t)&wqe->sqe.sge[1];
}
}
wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
- struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr;
+ struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
struct siw_device *sdev = to_siw_dev(pd->device);
struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
mem->stag = sqe->rkey;
mem->perms = sqe->access;
- siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n",
- mem->va, base_mr->iova);
+ siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
mem->va = base_mr->iova;
mem->stag_valid = 1;
out:
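
siw_tx_hdt() above now records which iov segments were kmap()ed in a kmap_mask bitmask, and siw_unmap_pages() walks that mask instead of inferring the mapped range from hdr_len and the segment count. A hedged sketch of the bookkeeping, with the array size and helper names chosen arbitrarily:

#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/uio.h>

#define EX_MAX_SEGS 16	/* arbitrary size for the sketch */

static void example_unmap(struct page **pp, unsigned long kmap_mask)
{
	/* only kunmap() the entries that were actually kmap()ed */
	while (kmap_mask) {
		if (kmap_mask & BIT(0))
			kunmap(*pp);
		pp++;
		kmap_mask >>= 1;
	}
}

static void example_map_and_send(struct page *pages[EX_MAX_SEGS],
				 struct kvec iov[EX_MAX_SEGS], int nsegs,
				 size_t seg_len)
{
	unsigned long kmap_mask = 0;
	int seg;

	for (seg = 0; seg < nsegs; seg++) {
		iov[seg].iov_base = kmap(pages[seg]);
		iov[seg].iov_len = seg_len;
		kmap_mask |= BIT(seg);	/* remember for later kunmap() */
	}
	/* ... kernel_sendmsg() of the iov would go here ... */
	example_unmap(pages, kmap_mask);
}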
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
*/
qp->srq = to_siw_srq(attrs->srq);
qp->attrs.rq_size = 0;
- siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n",
- qp->qp_num, qp->srq);
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
} else if (num_rqe) {
if (qp->kernel_verbs)
qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
base_ucontext);
struct siw_qp_attrs qp_attrs;
- siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep);
+ siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
/*
* Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
void *kbuf = &sqe->sge[1];
int num_sge = core_wr->num_sge, bytes = 0;
- sqe->sge[0].laddr = (u64)kbuf;
+ sqe->sge[0].laddr = (uintptr_t)kbuf;
sqe->sge[0].lkey = 0;
while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
break;
case IB_WR_REG_MR:
- sqe->base_mr = (uint64_t)reg_wr(wr)->mr;
+ sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
sqe->rkey = reg_wr(wr)->key;
sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
- sqe->opcode, sqe->flags, (void *)sqe->id);
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ sqe->opcode, sqe->flags,
+ (void *)(uintptr_t)sqe->id);
if (unlikely(rv < 0))
break;
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->lock);
- cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+ cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
if (udata) {
struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- /* CQ event for next solicited completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+ /*
+ * Enable CQ event for next solicited completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
else
- /* CQ event for any signalled completion */
- smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+ /*
+ * Enable CQ event for any signalled completion
+ * and make it visible to all associated producers.
+ */
+ smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
if (flags & IB_CQ_REPORT_MISSED_EVENTS)
return cq->cq_put - cq->cq_get;
@@ -1199,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
int rv;
- siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n",
- (unsigned long long)start, (unsigned long long)rnic_va,
+ siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1357,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
struct siw_mem *mem = mr->mem;
struct siw_pbl *pbl = mem->pbl;
struct siw_pble *pble;
- u64 pbl_size;
+ unsigned long pbl_size;
int i, rv;
if (!pbl) {
@@ -1396,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
pbl_size += sg_dma_len(slp);
}
siw_dbg_mem(mem,
- "sge[%d], size %llu, addr 0x%016llx, total %llu\n",
- i, pble->size, pble->addr, pbl_size);
+ "sge[%d], size %u, addr 0x%p, total %lu\n",
+ i, pble->size, (void *)(uintptr_t)pble->addr,
+ pbl_size);
}
rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
if (rv > 0) {
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%016llx, %u SLE to %u entries\n",
- mem->len, mem->va, num_sle, pbl->num_buf);
+ "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ mem->len, (void *)(uintptr_t)mem->va, num_sle,
+ pbl->num_buf);
}
return rv;
}
@@ -1523,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
}
spin_lock_init(&srq->lock);
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
return 0;
@@ -1644,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
if (unlikely(!srq->kernel_verbs)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: no kernel post_recv for mapped srq\n",
- srq);
+ "[SRQ]: no kernel post_recv for mapped srq\n");
rv = -EINVAL;
goto out;
}
@@ -1667,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
}
if (unlikely(wr->num_sge > srq->max_sge)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: too many sge's: %d\n", srq,
- wr->num_sge);
+ "[SRQ]: too many sge's: %d\n", wr->num_sge);
rv = -EINVAL;
break;
}
@@ -1687,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
spin_unlock_irqrestore(&srq->lock, flags);
out:
if (unlikely(rv < 0)) {
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
*bad_wr = wr;
}
return rv;
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 29abfeeef9a5..6c554c11a7ac 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -201,7 +201,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
return -ENODEV;
epirq = &interface->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(epirq))
+ return -ENODEV;
+
epout = &interface->endpoint[1].desc;
+ if (!usb_endpoint_is_int_out(epout))
+ return -ENODEV;
iforce_usb = kzalloc(sizeof(*iforce_usb), GFP_KERNEL);
if (!iforce_usb)
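
The added checks above reject interfaces whose first two endpoints are not an interrupt-IN and an interrupt-OUT endpoint before either is used, guarding against malformed or malicious descriptors. A condensed sketch of that probe-time validation, assuming the same endpoint layout:

#include <linux/usb.h>

static int example_validate_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	/* make sure both endpoints exist before indexing them */
	if (alt->desc.bNumEndpoints < 2)
		return -ENODEV;

	/* endpoint 0 must be interrupt-IN, endpoint 1 interrupt-OUT */
	if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc))
		return -ENODEV;
	if (!usb_endpoint_is_int_out(&alt->endpoint[1].desc))
		return -ENODEV;

	return 0;
}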
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 8e9c3ea9d5e7..90e8a7f2f07c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -76,6 +76,8 @@ config KEYBOARD_APPLESPI
depends on ACPI && EFI
depends on SPI
depends on X86 || COMPILE_TEST
+ depends on LEDS_CLASS
+ select CRC16
help
Say Y here if you are running Linux on any Apple MacBook8,1 or later,
or any MacBookPro13,* or MacBookPro14,*.
diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
index 548737e7aeda..584289b67fb3 100644
--- a/drivers/input/keyboard/applespi.c
+++ b/drivers/input/keyboard/applespi.c
@@ -134,10 +134,10 @@ struct keyboard_protocol {
* struct tp_finger - single trackpad finger structure, le16-aligned
*
* @origin: zero when switching track finger
- * @abs_x: absolute x coodinate
- * @abs_y: absolute y coodinate
- * @rel_x: relative x coodinate
- * @rel_y: relative y coodinate
+ * @abs_x: absolute x coordinate
+ * @abs_y: absolute y coordinate
+ * @rel_x: relative x coordinate
+ * @rel_y: relative y coordinate
* @tool_major: tool area, major axis
* @tool_minor: tool area, minor axis
* @orientation: 16384 when point, else 15 bit angle
@@ -944,10 +944,14 @@ static inline int le16_to_int(__le16 x)
static void applespi_debug_update_dimensions(struct applespi_data *applespi,
const struct tp_finger *f)
{
- applespi->tp_dim_min_x = min_t(int, applespi->tp_dim_min_x, f->abs_x);
- applespi->tp_dim_max_x = max_t(int, applespi->tp_dim_max_x, f->abs_x);
- applespi->tp_dim_min_y = min_t(int, applespi->tp_dim_min_y, f->abs_y);
- applespi->tp_dim_max_y = max_t(int, applespi->tp_dim_max_y, f->abs_y);
+ applespi->tp_dim_min_x = min(applespi->tp_dim_min_x,
+ le16_to_int(f->abs_x));
+ applespi->tp_dim_max_x = max(applespi->tp_dim_max_x,
+ le16_to_int(f->abs_x));
+ applespi->tp_dim_min_y = min(applespi->tp_dim_min_y,
+ le16_to_int(f->abs_y));
+ applespi->tp_dim_max_y = max(applespi->tp_dim_max_y,
+ le16_to_int(f->abs_y));
}
static int applespi_tp_dim_open(struct inode *inode, struct file *file)
@@ -1490,8 +1494,7 @@ static void applespi_got_data(struct applespi_data *applespi)
size_t tp_len;
tp = &message->touchpad;
- tp_len = sizeof(*tp) +
- tp->number_of_fingers * sizeof(tp->fingers[0]);
+ tp_len = struct_size(tp, fingers, tp->number_of_fingers);
if (le16_to_cpu(message->length) + 2 != tp_len) {
dev_warn_ratelimited(&applespi->spi->dev,
@@ -1611,8 +1614,8 @@ static void applespi_save_bl_level(struct applespi_data *applespi,
efi_attr = EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
- sts = efivar_entry_set_safe(EFI_BL_LEVEL_NAME, efi_guid, efi_attr, true,
- efi_data_len, &efi_data);
+ sts = efivar_entry_set_safe((efi_char16_t *)EFI_BL_LEVEL_NAME, efi_guid,
+ efi_attr, true, efi_data_len, &efi_data);
if (sts)
dev_warn(&applespi->spi->dev,
"Error saving backlight level to EFI vars: %d\n", sts);
@@ -1953,7 +1956,7 @@ static const struct acpi_device_id applespi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, applespi_acpi_match);
-const struct dev_pm_ops applespi_pm_ops = {
+static const struct dev_pm_ops applespi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(applespi_suspend, applespi_resume)
.poweroff_late = applespi_poweroff_late,
};
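
The tp_len hunk above switches from open-coded multiplication to struct_size(), which computes the size of a structure with a trailing flexible array while guarding against integer overflow. A small illustration under assumed struct names:

#include <linux/overflow.h>
#include <linux/types.h>

struct example_touchpad {
	__le16 length;
	u8 number_of_fingers;
	struct {
		__le16 abs_x;
		__le16 abs_y;
	} fingers[];			/* flexible array member */
};

static size_t example_tp_len(const struct example_touchpad *tp)
{
	/*
	 * Equivalent to sizeof(*tp) + n * sizeof(tp->fingers[0]), but
	 * saturates instead of silently wrapping on overflow.
	 */
	return struct_size(tp, fingers, tp->number_of_fingers);
}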
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d8434b7b623..04fe43440a3c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1827,6 +1827,31 @@ static int elantech_create_smbus(struct psmouse *psmouse,
leave_breadcrumbs);
}
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+ struct elantech_device_info *info)
+{
+ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ return true;
+
+ switch (info->bus) {
+ case ETP_BUS_PS2_ONLY:
+ /* expected case */
+ break;
+ case ETP_BUS_SMB_HST_NTFY_ONLY:
+ case ETP_BUS_PS2_SMB_HST_NTFY:
+ /* SMBus implementation has been stable since 2018 */
+ if (dmi_get_bios_year() >= 2018)
+ return true;
+ /* fall through */
+ default:
+ psmouse_dbg(psmouse,
+ "Ignoring SMBus bus provider %d\n", info->bus);
+ break;
+ }
+
+ return false;
+}
+
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate a SMBus InterTouch device.
@@ -1846,7 +1871,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+ if (!elantech_use_host_notify(psmouse, info) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
@@ -1866,34 +1891,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
return 0;
}
-static bool elantech_use_host_notify(struct psmouse *psmouse,
- struct elantech_device_info *info)
-{
- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
- return true;
-
- switch (info->bus) {
- case ETP_BUS_PS2_ONLY:
- /* expected case */
- break;
- case ETP_BUS_SMB_ALERT_ONLY:
- /* fall-through */
- case ETP_BUS_PS2_SMB_ALERT:
- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
- break;
- case ETP_BUS_SMB_HST_NTFY_ONLY:
- /* fall-through */
- case ETP_BUS_PS2_SMB_HST_NTFY:
- return true;
- default:
- psmouse_dbg(psmouse,
- "Ignoring SMBus bus provider %d.\n",
- info->bus);
- }
-
- return false;
-}
-
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b1956ed4c0dd..46bbe99d6511 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN2055", /* E580 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
NULL
};
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index 871e5b5ab129..148245c69be7 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -16,12 +16,12 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/hypervisor.h>
+#include <asm/vmware.h>
#include "psmouse.h"
#include "vmmouse.h"
#define VMMOUSE_PROTO_MAGIC 0x564D5868U
-#define VMMOUSE_PROTO_PORT 0x5658
/*
* Main commands supported by the vmmouse hypervisor port.
@@ -84,7 +84,7 @@ struct vmmouse_data {
#define VMMOUSE_CMD(cmd, in1, out1, out2, out3, out4) \
({ \
unsigned long __dummy1, __dummy2; \
- __asm__ __volatile__ ("inl %%dx" : \
+ __asm__ __volatile__ (VMWARE_HYPERCALL : \
"=a"(out1), \
"=b"(out2), \
"=c"(out3), \
@@ -94,7 +94,7 @@ struct vmmouse_data {
"a"(VMMOUSE_PROTO_MAGIC), \
"b"(in1), \
"c"(VMMOUSE_PROTO_CMD_##cmd), \
- "d"(VMMOUSE_PROTO_PORT) : \
+ "d"(0) : \
"memory"); \
})
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
static void hv_kbd_on_channel_callback(void *context)
{
+ struct vmpacket_descriptor *desc;
struct hv_device *hv_dev = context;
- void *buffer;
- int bufferlen = 0x100; /* Start with sensible size */
u32 bytes_recvd;
u64 req_id;
- int error;
- buffer = kmalloc(bufferlen, GFP_ATOMIC);
- if (!buffer)
- return;
-
- while (1) {
- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
- &bytes_recvd, &req_id);
- switch (error) {
- case 0:
- if (bytes_recvd == 0) {
- kfree(buffer);
- return;
- }
-
- hv_kbd_handle_received_packet(hv_dev, buffer,
- bytes_recvd, req_id);
- break;
+ foreach_vmbus_pkt(desc, hv_dev->channel) {
+ bytes_recvd = desc->len8 * 8;
+ req_id = desc->trans_id;
- case -ENOBUFS:
- kfree(buffer);
- /* Handle large packet */
- bufferlen = bytes_recvd;
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (!buffer)
- return;
- break;
- }
+ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
+ req_id);
}
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 04b85571f41e..aa577898e952 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(endpoint))
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
@@ -155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
usb_fill_int_urb(kbtab->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
kbtab->data, 8,
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 00cb1ba2d364..3fd3e862269b 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -186,7 +186,7 @@ static const struct v4l2_pix_format sur40_pix_format[] = {
.width = SENSOR_RES_X / 2,
.height = SENSOR_RES_Y / 2,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
+ .colorspace = V4L2_COLORSPACE_RAW,
.bytesperline = SENSOR_RES_X / 2,
.sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
},
@@ -195,7 +195,7 @@ static const struct v4l2_pix_format sur40_pix_format[] = {
.width = SENSOR_RES_X / 2,
.height = SENSOR_RES_Y / 2,
.field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
+ .colorspace = V4L2_COLORSPACE_RAW,
.bytesperline = SENSOR_RES_X / 2,
.sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index a2cec6cacf57..16d70201de4a 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!usbtouch || !input_dev)
goto out_free;
+ mutex_init(&usbtouch->pm_mutex);
+
type = &usbtouch_dev_info[id->driver_info];
usbtouch->type = type;
if (!type->process_pkt)
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 871eb4bc4efc..7b971228df38 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -29,6 +29,7 @@ static struct dentry *icc_debugfs_dir;
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
+ * @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
*/
@@ -36,6 +37,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
+ u32 tag;
u32 avg_bw;
u32 peak_bw;
};
@@ -203,8 +205,11 @@ static int aggregate_requests(struct icc_node *node)
node->avg_bw = 0;
node->peak_bw = 0;
+ if (p->pre_aggregate)
+ p->pre_aggregate(node);
+
hlist_for_each_entry(r, &node->req_list, req_node)
- p->aggregate(node, r->avg_bw, r->peak_bw,
+ p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
&node->avg_bw, &node->peak_bw);
return 0;
@@ -386,6 +391,26 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(of_icc_get);
/**
+ * icc_set_tag() - set an optional tag on a path
+ * @path: the path we want to tag
+ * @tag: the tag value
+ *
+ * This function allows consumers to append a tag to the requests associated
+ * with a path, so that a different aggregation could be done based on this tag.
+ */
+void icc_set_tag(struct icc_path *path, u32 tag)
+{
+ int i;
+
+ if (!path)
+ return;
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].tag = tag;
+}
+EXPORT_SYMBOL_GPL(icc_set_tag);
+
+/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
* @path: reference to the path returned by icc_get()
* @avg_bw: average bandwidth in kilobytes per second
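
icc_set_tag() above lets a consumer attach a tag to every request on a path, so that providers can aggregate tagged requests differently (the sdm845 changes later in this series use such tags to select AMC/WAKE/SLEEP buckets). A hedged consumer-side sketch; the path name and tag value are made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

static int example_request_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "example-path");	/* path name is hypothetical */
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* tag semantics are provider specific; 0x3 is only an example */
	icc_set_tag(path, 0x3);

	/* 1 GB/s average, 2 GB/s peak, both expressed in kBps */
	ret = icc_set_bw(path, 1000000, 2000000);

	icc_put(path);
	return ret;
}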
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index d5e70ebc2410..6ab4012a059a 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_QCS404
+ tristate "Qualcomm QCS404 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on qcs404-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -12,3 +21,6 @@ config INTERCONNECT_QCOM_SDM845
help
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
+
+config INTERCONNECT_QCOM_SMD_RPM
+ tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 1c1cea690f92..67dafb783dec 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,5 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
+icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
new file mode 100644
index 000000000000..910081d6ddc0
--- /dev/null
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd
+ */
+
+#include <dt-bindings/interconnect/qcom,qcs404.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+
+enum {
+ QCS404_MASTER_AMPSS_M0 = 1,
+ QCS404_MASTER_GRAPHICS_3D,
+ QCS404_MASTER_MDP_PORT0,
+ QCS404_SNOC_BIMC_1_MAS,
+ QCS404_MASTER_TCU_0,
+ QCS404_MASTER_SPDM,
+ QCS404_MASTER_BLSP_1,
+ QCS404_MASTER_BLSP_2,
+ QCS404_MASTER_XM_USB_HS1,
+ QCS404_MASTER_CRYPTO_CORE0,
+ QCS404_MASTER_SDCC_1,
+ QCS404_MASTER_SDCC_2,
+ QCS404_SNOC_PNOC_MAS,
+ QCS404_MASTER_QPIC,
+ QCS404_MASTER_QDSS_BAM,
+ QCS404_BIMC_SNOC_MAS,
+ QCS404_PNOC_SNOC_MAS,
+ QCS404_MASTER_QDSS_ETR,
+ QCS404_MASTER_EMAC,
+ QCS404_MASTER_PCIE,
+ QCS404_MASTER_USB3,
+ QCS404_PNOC_INT_0,
+ QCS404_PNOC_INT_2,
+ QCS404_PNOC_INT_3,
+ QCS404_PNOC_SLV_0,
+ QCS404_PNOC_SLV_1,
+ QCS404_PNOC_SLV_2,
+ QCS404_PNOC_SLV_3,
+ QCS404_PNOC_SLV_4,
+ QCS404_PNOC_SLV_6,
+ QCS404_PNOC_SLV_7,
+ QCS404_PNOC_SLV_8,
+ QCS404_PNOC_SLV_9,
+ QCS404_PNOC_SLV_10,
+ QCS404_PNOC_SLV_11,
+ QCS404_SNOC_QDSS_INT,
+ QCS404_SNOC_INT_0,
+ QCS404_SNOC_INT_1,
+ QCS404_SNOC_INT_2,
+ QCS404_SLAVE_EBI_CH0,
+ QCS404_BIMC_SNOC_SLV,
+ QCS404_SLAVE_SPDM_WRAPPER,
+ QCS404_SLAVE_PDM,
+ QCS404_SLAVE_PRNG,
+ QCS404_SLAVE_TCSR,
+ QCS404_SLAVE_SNOC_CFG,
+ QCS404_SLAVE_MESSAGE_RAM,
+ QCS404_SLAVE_DISPLAY_CFG,
+ QCS404_SLAVE_GRAPHICS_3D_CFG,
+ QCS404_SLAVE_BLSP_1,
+ QCS404_SLAVE_TLMM_NORTH,
+ QCS404_SLAVE_PCIE_1,
+ QCS404_SLAVE_EMAC_CFG,
+ QCS404_SLAVE_BLSP_2,
+ QCS404_SLAVE_TLMM_EAST,
+ QCS404_SLAVE_TCU,
+ QCS404_SLAVE_PMIC_ARB,
+ QCS404_SLAVE_SDCC_1,
+ QCS404_SLAVE_SDCC_2,
+ QCS404_SLAVE_TLMM_SOUTH,
+ QCS404_SLAVE_USB_HS,
+ QCS404_SLAVE_USB3,
+ QCS404_SLAVE_CRYPTO_0_CFG,
+ QCS404_PNOC_SNOC_SLV,
+ QCS404_SLAVE_APPSS,
+ QCS404_SLAVE_WCSS,
+ QCS404_SNOC_BIMC_1_SLV,
+ QCS404_SLAVE_OCIMEM,
+ QCS404_SNOC_PNOC_SLV,
+ QCS404_SLAVE_QDSS_STM,
+ QCS404_SLAVE_CATS_128,
+ QCS404_SLAVE_OCMEM_64,
+ QCS404_SLAVE_LPASS,
+};
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+static const struct clk_bulk_data bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct qcom_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define QCS404_MAX_LINKS 12
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM id for devices that are bus masters
+ * @slv_rpm_id: RPM id for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct qcom_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[QCS404_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct qcom_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
+DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_blsp_1, QCS404_MASTER_BLSP_1, 4, 41, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_blsp_2, QCS404_MASTER_BLSP_2, 4, 39, -1, QCS404_PNOC_INT_3);
+DEFINE_QNODE(mas_xi_usb_hs1, QCS404_MASTER_XM_USB_HS1, 8, 138, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_crypto, QCS404_MASTER_CRYPTO_CORE0, 8, 23, -1, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
+DEFINE_QNODE(mas_sdcc_1, QCS404_MASTER_SDCC_1, 8, 33, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_sdcc_2, QCS404_MASTER_SDCC_2, 8, 35, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_snoc_pcnoc, QCS404_SNOC_PNOC_MAS, 8, 77, -1, QCS404_PNOC_INT_2);
+DEFINE_QNODE(mas_qpic, QCS404_MASTER_QPIC, 4, -1, -1, QCS404_PNOC_INT_0);
+DEFINE_QNODE(mas_qdss_bam, QCS404_MASTER_QDSS_BAM, 4, -1, -1, QCS404_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_bimc_snoc, QCS404_BIMC_SNOC_MAS, 8, 21, -1, QCS404_SLAVE_OCMEM_64, QCS404_SLAVE_CATS_128, QCS404_SNOC_INT_0, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_pcnoc_snoc, QCS404_PNOC_SNOC_MAS, 8, 29, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_2, QCS404_SNOC_INT_0);
+DEFINE_QNODE(mas_qdss_etr, QCS404_MASTER_QDSS_ETR, 8, -1, -1, QCS404_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_emac, QCS404_MASTER_EMAC, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_pcie, QCS404_MASTER_PCIE, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(mas_usb3, QCS404_MASTER_USB3, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(pcnoc_int_0, QCS404_PNOC_INT_0, 8, 85, 114, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
+DEFINE_QNODE(pcnoc_int_2, QCS404_PNOC_INT_2, 8, 124, 184, QCS404_PNOC_SLV_10, QCS404_SLAVE_TCU, QCS404_PNOC_SLV_11, QCS404_PNOC_SLV_2, QCS404_PNOC_SLV_3, QCS404_PNOC_SLV_0, QCS404_PNOC_SLV_1, QCS404_PNOC_SLV_6, QCS404_PNOC_SLV_7, QCS404_PNOC_SLV_4, QCS404_PNOC_SLV_8, QCS404_PNOC_SLV_9);
+DEFINE_QNODE(pcnoc_int_3, QCS404_PNOC_INT_3, 8, 125, 185, QCS404_PNOC_SNOC_SLV);
+DEFINE_QNODE(pcnoc_s_0, QCS404_PNOC_SLV_0, 4, 89, 118, QCS404_SLAVE_PRNG, QCS404_SLAVE_SPDM_WRAPPER, QCS404_SLAVE_PDM);
+DEFINE_QNODE(pcnoc_s_1, QCS404_PNOC_SLV_1, 4, 90, 119, QCS404_SLAVE_TCSR);
+DEFINE_QNODE(pcnoc_s_2, QCS404_PNOC_SLV_2, 4, -1, -1, QCS404_SLAVE_GRAPHICS_3D_CFG);
+DEFINE_QNODE(pcnoc_s_3, QCS404_PNOC_SLV_3, 4, 92, 121, QCS404_SLAVE_MESSAGE_RAM);
+DEFINE_QNODE(pcnoc_s_4, QCS404_PNOC_SLV_4, 4, 93, 122, QCS404_SLAVE_SNOC_CFG);
+DEFINE_QNODE(pcnoc_s_6, QCS404_PNOC_SLV_6, 4, 94, 123, QCS404_SLAVE_BLSP_1, QCS404_SLAVE_TLMM_NORTH, QCS404_SLAVE_EMAC_CFG);
+DEFINE_QNODE(pcnoc_s_7, QCS404_PNOC_SLV_7, 4, 95, 124, QCS404_SLAVE_TLMM_SOUTH, QCS404_SLAVE_DISPLAY_CFG, QCS404_SLAVE_SDCC_1, QCS404_SLAVE_PCIE_1, QCS404_SLAVE_SDCC_2);
+DEFINE_QNODE(pcnoc_s_8, QCS404_PNOC_SLV_8, 4, 96, 125, QCS404_SLAVE_CRYPTO_0_CFG);
+DEFINE_QNODE(pcnoc_s_9, QCS404_PNOC_SLV_9, 4, 97, 126, QCS404_SLAVE_BLSP_2, QCS404_SLAVE_TLMM_EAST, QCS404_SLAVE_PMIC_ARB);
+DEFINE_QNODE(pcnoc_s_10, QCS404_PNOC_SLV_10, 4, 157, -1, QCS404_SLAVE_USB_HS);
+DEFINE_QNODE(pcnoc_s_11, QCS404_PNOC_SLV_11, 4, 158, 246, QCS404_SLAVE_USB3);
+DEFINE_QNODE(qdss_int, QCS404_SNOC_QDSS_INT, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
+DEFINE_QNODE(snoc_int_0, QCS404_SNOC_INT_0, 8, 99, 130, QCS404_SLAVE_LPASS, QCS404_SLAVE_APPSS, QCS404_SLAVE_WCSS);
+DEFINE_QNODE(snoc_int_1, QCS404_SNOC_INT_1, 8, 100, 131, QCS404_SNOC_PNOC_SLV, QCS404_SNOC_INT_2);
+DEFINE_QNODE(snoc_int_2, QCS404_SNOC_INT_2, 8, 134, 197, QCS404_SLAVE_QDSS_STM, QCS404_SLAVE_OCIMEM);
+DEFINE_QNODE(slv_ebi, QCS404_SLAVE_EBI_CH0, 8, -1, 0, 0);
+DEFINE_QNODE(slv_bimc_snoc, QCS404_BIMC_SNOC_SLV, 8, -1, 2, QCS404_BIMC_SNOC_MAS);
+DEFINE_QNODE(slv_spdm, QCS404_SLAVE_SPDM_WRAPPER, 4, -1, -1, 0);
+DEFINE_QNODE(slv_pdm, QCS404_SLAVE_PDM, 4, -1, 41, 0);
+DEFINE_QNODE(slv_prng, QCS404_SLAVE_PRNG, 4, -1, 44, 0);
+DEFINE_QNODE(slv_tcsr, QCS404_SLAVE_TCSR, 4, -1, 50, 0);
+DEFINE_QNODE(slv_snoc_cfg, QCS404_SLAVE_SNOC_CFG, 4, -1, 70, 0);
+DEFINE_QNODE(slv_message_ram, QCS404_SLAVE_MESSAGE_RAM, 4, -1, 55, 0);
+DEFINE_QNODE(slv_disp_ss_cfg, QCS404_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_gpu_cfg, QCS404_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_1, QCS404_SLAVE_BLSP_1, 4, -1, 39, 0);
+DEFINE_QNODE(slv_tlmm_north, QCS404_SLAVE_TLMM_NORTH, 4, -1, 214, 0);
+DEFINE_QNODE(slv_pcie, QCS404_SLAVE_PCIE_1, 4, -1, -1, 0);
+DEFINE_QNODE(slv_ethernet, QCS404_SLAVE_EMAC_CFG, 4, -1, -1, 0);
+DEFINE_QNODE(slv_blsp_2, QCS404_SLAVE_BLSP_2, 4, -1, 37, 0);
+DEFINE_QNODE(slv_tlmm_east, QCS404_SLAVE_TLMM_EAST, 4, -1, 213, 0);
+DEFINE_QNODE(slv_tcu, QCS404_SLAVE_TCU, 8, -1, -1, 0);
+DEFINE_QNODE(slv_pmic_arb, QCS404_SLAVE_PMIC_ARB, 4, -1, 59, 0);
+DEFINE_QNODE(slv_sdcc_1, QCS404_SLAVE_SDCC_1, 4, -1, 31, 0);
+DEFINE_QNODE(slv_sdcc_2, QCS404_SLAVE_SDCC_2, 4, -1, 33, 0);
+DEFINE_QNODE(slv_tlmm_south, QCS404_SLAVE_TLMM_SOUTH, 4, -1, -1, 0);
+DEFINE_QNODE(slv_usb_hs, QCS404_SLAVE_USB_HS, 4, -1, 40, 0);
+DEFINE_QNODE(slv_usb3, QCS404_SLAVE_USB3, 4, -1, 22, 0);
+DEFINE_QNODE(slv_crypto_0_cfg, QCS404_SLAVE_CRYPTO_0_CFG, 4, -1, 52, 0);
+DEFINE_QNODE(slv_pcnoc_snoc, QCS404_PNOC_SNOC_SLV, 8, -1, 45, QCS404_PNOC_SNOC_MAS);
+DEFINE_QNODE(slv_kpss_ahb, QCS404_SLAVE_APPSS, 4, -1, -1, 0);
+DEFINE_QNODE(slv_wcss, QCS404_SLAVE_WCSS, 4, -1, 23, 0);
+DEFINE_QNODE(slv_snoc_bimc_1, QCS404_SNOC_BIMC_1_SLV, 8, -1, 104, QCS404_SNOC_BIMC_1_MAS);
+DEFINE_QNODE(slv_imem, QCS404_SLAVE_OCIMEM, 8, -1, 26, 0);
+DEFINE_QNODE(slv_snoc_pcnoc, QCS404_SNOC_PNOC_SLV, 8, -1, 28, QCS404_SNOC_PNOC_MAS);
+DEFINE_QNODE(slv_qdss_stm, QCS404_SLAVE_QDSS_STM, 4, -1, 30, 0);
+DEFINE_QNODE(slv_cats_0, QCS404_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, QCS404_SLAVE_OCMEM_64, 8, -1, -1, 0);
+DEFINE_QNODE(slv_lpass, QCS404_SLAVE_LPASS, 4, -1, -1, 0);
+
+static struct qcom_icc_node *qcs404_bimc_nodes[] = {
+ [MASTER_AMPSS_M0] = &mas_apps_proc,
+ [MASTER_OXILI] = &mas_oxili,
+ [MASTER_MDP_PORT0] = &mas_mdp,
+ [MASTER_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MASTER_TCU_0] = &mas_tcu_0,
+ [SLAVE_EBI_CH0] = &slv_ebi,
+ [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static struct qcom_icc_desc qcs404_bimc = {
+ .nodes = qcs404_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
+};
+
+static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
+ [MASTER_SPDM] = &mas_spdm,
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_BLSP_2] = &mas_blsp_2,
+ [MASTER_XI_USB_HS1] = &mas_xi_usb_hs1,
+ [MASTER_CRYPTO] = &mas_crypto,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [MASTER_QPIC] = &mas_qpic,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_2] = &pcnoc_int_2,
+ [PCNOC_INT_3] = &pcnoc_int_3,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_6] = &pcnoc_s_6,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [PCNOC_S_8] = &pcnoc_s_8,
+ [PCNOC_S_9] = &pcnoc_s_9,
+ [PCNOC_S_10] = &pcnoc_s_10,
+ [PCNOC_S_11] = &pcnoc_s_11,
+ [SLAVE_SPDM] = &slv_spdm,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLAVE_GPU_CFG] = &slv_gpu_cfg,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_BLSP_2] = &slv_blsp_2,
+ [SLAVE_TLMM_NORTH] = &slv_tlmm_north,
+ [SLAVE_PCIE] = &slv_pcie,
+ [SLAVE_ETHERNET] = &slv_ethernet,
+ [SLAVE_TLMM_EAST] = &slv_tlmm_east,
+ [SLAVE_TCU] = &slv_tcu,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static struct qcom_icc_desc qcs404_pcnoc = {
+ .nodes = qcs404_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
+};
+
+static struct qcom_icc_node *qcs404_snoc_nodes[] = {
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+ [MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_EMAC] = &mas_emac,
+ [MASTER_PCIE] = &mas_pcie,
+ [MASTER_USB3] = &mas_usb3,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_2] = &snoc_int_2,
+ [SLAVE_KPSS_AHB] = &slv_kpss_ahb,
+ [SLAVE_WCSS] = &slv_wcss,
+ [SLAVE_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_CATS_0] = &slv_cats_0,
+ [SLAVE_CATS_1] = &slv_cats_1,
+ [SLAVE_LPASS] = &slv_lpass,
+};
+
+static struct qcom_icc_desc qcs404_snoc = {
+ .nodes = qcs404_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
+};
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ u64 sum_bw;
+ u64 max_peak_bw;
+ u64 rate;
+ u32 agg_avg = 0;
+ u32 agg_peak = 0;
+ int ret, i;
+
+ qn = src->data;
+ provider = src->provider;
+ qp = to_qcom_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* send bandwidth request message to the RPM processor */
+ if (qn->mas_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_MASTER_REQ,
+ qn->mas_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
+ qn->mas_rpm_id, ret);
+ return ret;
+ }
+ }
+
+ if (qn->slv_rpm_id != -1) {
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
+ RPM_BUS_SLAVE_REQ,
+ qn->slv_rpm_id,
+ sum_bw);
+ if (ret) {
+ pr_err("qcom_icc_rpm_smd_send slv error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, qn->buswidth);
+
+ if (qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ pr_err("%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ return ret;
+ }
+ }
+
+ qn->rate = rate;
+
+ return 0;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
+ GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ icc_provider_del(provider);
+
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qcs404_noc_of_match[] = {
+ { .compatible = "qcom,qcs404-bimc", .data = &qcs404_bimc },
+ { .compatible = "qcom,qcs404-pcnoc", .data = &qcs404_pcnoc },
+ { .compatible = "qcom,qcs404-snoc", .data = &qcs404_snoc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
+
+static struct platform_driver qcs404_noc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-qcs404",
+ .of_match_table = qcs404_noc_of_match,
+ },
+};
+module_platform_driver(qcs404_noc_driver);
+MODULE_DESCRIPTION("Qualcomm QCS404 NoC driver");
+MODULE_LICENSE("GPL v2");
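
qcom_icc_set() in the new qcs404 driver converts the aggregated bandwidth (in kBps) into a bus clock rate by scaling to bytes per second and dividing by the node's bus width. A worked example of that arithmetic, using invented numbers:

#include <linux/kernel.h>
#include <linux/math64.h>

/*
 * Example: an aggregated average of 800000 kBps and a peak of 1200000 kBps
 * on a node with an 8-byte bus width.  icc_units_to_bps() scales kBps to
 * bytes per second, so:
 *   sum_bw      =  800000 * 1000 =   800,000,000 B/s
 *   max_peak_bw = 1200000 * 1000 = 1,200,000,000 B/s
 *   rate        = max(sum_bw, max_peak_bw) / 8 = 150,000,000 Hz (150 MHz)
 */
static u64 example_bus_rate(u64 sum_bw, u64 max_peak_bw, u16 buswidth)
{
	u64 rate = max(sum_bw, max_peak_bw);

	do_div(rate, buswidth);
	return rate;
}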
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 4915b78da673..57955596bb59 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -20,23 +20,6 @@
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
-#define BCM_TCS_CMD_COMMIT_SHFT 30
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHFT 29
-#define BCM_TCS_CMD_VALID_MASK 0x20000000
-#define BCM_TCS_CMD_VOTE_X_SHFT 14
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_Y_SHFT 0
-#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
-
-#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
- (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
- ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
- ((cpu_to_le32(vote_x) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
- ((cpu_to_le32(vote_y) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
-
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
@@ -66,6 +49,22 @@ struct bcm_db {
#define SDM845_MAX_BCM_PER_NODE 2
#define SDM845_MAX_VCD 10
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC 0
+#define QCOM_ICC_BUCKET_WAKE 1
+#define QCOM_ICC_BUCKET_SLEEP 2
+#define QCOM_ICC_NUM_BUCKETS 3
+#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+ QCOM_ICC_TAG_SLEEP)
+
/**
* struct qcom_icc_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
@@ -86,8 +85,8 @@ struct qcom_icc_node {
u16 num_links;
u16 channels;
u16 buswidth;
- u64 sum_avg;
- u64 max_peak;
+ u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
+ u64 max_peak[QCOM_ICC_NUM_BUCKETS];
struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
size_t num_bcms;
};
@@ -112,8 +111,8 @@ struct qcom_icc_bcm {
const char *name;
u32 type;
u32 addr;
- u64 vote_x;
- u64 vote_y;
+ u64 vote_x[QCOM_ICC_NUM_BUCKETS];
+ u64 vote_y[QCOM_ICC_NUM_BUCKETS];
bool dirty;
bool keepalive;
struct bcm_db aux_data;
@@ -555,7 +554,7 @@ inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
cmd->wait = true;
}
-static void tcs_list_gen(struct list_head *bcm_list,
+static void tcs_list_gen(struct list_head *bcm_list, int bucket,
struct tcs_cmd tcs_list[SDM845_MAX_VCD],
int n[SDM845_MAX_VCD])
{
@@ -573,8 +572,8 @@ static void tcs_list_gen(struct list_head *bcm_list,
commit = true;
cur_vcd_size = 0;
}
- tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
- bcm->addr, commit);
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
+ bcm->vote_y[bucket], bcm->addr, commit);
idx++;
n[batch]++;
/*
@@ -595,38 +594,56 @@ static void tcs_list_gen(struct list_head *bcm_list,
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
- size_t i;
- u64 agg_avg = 0;
- u64 agg_peak = 0;
+ size_t i, bucket;
+ u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
+ u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
u64 temp;
- for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
- agg_avg = max(agg_avg, temp);
+ for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg[bucket] = max(agg_avg[bucket], temp);
- temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
- agg_peak = max(agg_peak, temp);
- }
+ temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak[bucket] = max(agg_peak[bucket], temp);
+ }
- temp = agg_avg * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x = temp;
+ temp = agg_avg[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x[bucket] = temp;
- temp = agg_peak * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y = temp;
+ temp = agg_peak[bucket] * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y[bucket] = temp;
+ }
- if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
- bcm->vote_x = 1;
- bcm->vote_y = 1;
+ if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
+ bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
}
bcm->dirty = false;
}
-static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+static void qcom_icc_pre_aggregate(struct icc_node *node)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ qn->sum_avg[i] = 0;
+ qn->max_peak[i] = 0;
+ }
+}
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i;
@@ -634,12 +651,19 @@ static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
qn = node->data;
+ if (!tag)
+ tag = QCOM_ICC_TAG_ALWAYS;
+
+ for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
+ if (tag & BIT(i)) {
+ qn->sum_avg[i] += avg_bw;
+ qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
+ }
+ }
+
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- qn->sum_avg = *agg_avg;
- qn->max_peak = *agg_peak;
-
for (i = 0; i < qn->num_bcms; i++)
qn->bcms[i]->dirty = true;
@@ -675,7 +699,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
* Construct the command list based on a pre ordered list of BCMs
* based on VCD.
*/
- tcs_list_gen(&commit_list, cmds, commit_idx);
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
if (!commit_idx[0])
return ret;
@@ -693,6 +717,41 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
return ret;
}
+ INIT_LIST_HEAD(&commit_list);
+
+ for (i = 0; i < qp->num_bcms; i++) {
+ /*
+ * Only generate WAKE and SLEEP commands if a resource's
+ * requirements change as the execution environment transitions
+ * between different power states.
+ */
+ if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
+ qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
+ qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
+ qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ if (list_empty(&commit_list))
+ return ret;
+
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
+
+ ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
+
+ ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
return ret;
}
@@ -738,6 +797,7 @@ static int qnoc_probe(struct platform_device *pdev)
provider = &qp->provider;
provider->dev = &pdev->dev;
provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
provider->xlate = of_icc_xlate_onecell;
INIT_LIST_HEAD(&provider->nodes);
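A hedged sketch of how the new buckets get exercised from the consumer side, assuming the icc_set_tag() helper from the path-tag support this change builds on. Note that the QCOM_ICC_TAG_* macros above are provider-internal in this patch; they appear below only to illustrate which buckets a tag value selects.

	#include <linux/err.h>
	#include <linux/interconnect.h>

	/* Illustrative only: a vote that should be dropped while asleep. */
	static int example_active_only_vote(struct device *dev)
	{
		struct icc_path *path;

		path = of_icc_get(dev, "gfx-mem");	/* name is illustrative */
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* Tag arrives in qcom_icc_aggregate(): fill AMC and WAKE only. */
		icc_set_tag(path, QCOM_ICC_TAG_ACTIVE_ONLY);

		return icc_set_bw(path, 0, 1000000);	/* peak-only vote, kB/s */
	}

A zero tag keeps today's behaviour: qcom_icc_aggregate() treats it as QCOM_ICC_TAG_ALWAYS, so untagged paths vote into all three buckets.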
diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c
new file mode 100644
index 000000000000..dc8ff8d133a9
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RPM over SMD communication wrapper for interconnects
+ *
+ * Copyright (C) 2019 Linaro Ltd
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "smd-rpm.h"
+
+#define RPM_KEY_BW 0x00007762
+
+static struct qcom_smd_rpm *icc_smd_rpm;
+
+struct icc_rpm_smd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+bool qcom_icc_rpm_smd_available(void)
+{
+ return !!icc_smd_rpm;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_available);
+
+int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val)
+{
+ struct icc_rpm_smd_req req = {
+ .key = cpu_to_le32(RPM_KEY_BW),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(val),
+ };
+
+ return qcom_rpm_smd_write(icc_smd_rpm, ctx, rsc_type, id, &req,
+ sizeof(req));
+}
+EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_send);
+
+static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
+{
+ icc_smd_rpm = NULL;
+
+ return 0;
+}
+
+static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
+{
+ icc_smd_rpm = dev_get_drvdata(pdev->dev.parent);
+
+ if (!icc_smd_rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qcom_interconnect_rpm_smd_driver = {
+ .driver = {
+ .name = "icc_smd_rpm",
+ },
+ .probe = qcom_icc_rpm_smd_probe,
+ .remove = qcom_icc_rpm_smd_remove,
+};
+module_platform_driver(qcom_interconnect_rpm_smd_driver);
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm SMD RPM interconnect proxy driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:icc_smd_rpm");
diff --git a/drivers/interconnect/qcom/smd-rpm.h b/drivers/interconnect/qcom/smd-rpm.h
new file mode 100644
index 000000000000..ca9d0327b8ac
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
+#define __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
+
+#include <linux/soc/qcom/smd-rpm.h>
+
+bool qcom_icc_rpm_smd_available(void);
+int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val);
+
+#endif
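To show how a provider is expected to use this wrapper, a hedged sketch of a ->set()-side helper follows. QCOM_SMD_RPM_ACTIVE_STATE comes from <linux/soc/qcom/smd-rpm.h>; the resource type and RPM id are whatever the individual provider (such as the QCS404 driver above) defines for each node.

	#include <linux/soc/qcom/smd-rpm.h>

	#include "smd-rpm.h"

	/* Forward one node's aggregated bandwidth vote (in kB/s) to the RPM. */
	static int example_send_bw_vote(int rsc_type, int rpm_id, u32 agg_kbps)
	{
		if (rpm_id < 0)
			return 0;	/* node has no RPM resource to vote on */

		return qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					     rsc_type, rpm_id, agg_kbps);
	}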
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e15cdcd8cb3c..e3842eabcfdd 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -177,11 +177,12 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
- depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+ depends on PCI_MSI && ACPI && (X86 || IA64)
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
select DMAR_TABLE
+ select SWIOTLB
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index f13f36ae1af6..4f405f926e73 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,13 +10,14 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b607a92791d3..1ed3b98324ba 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -436,7 +436,7 @@ static int iommu_init_device(struct device *dev)
* invalid address), we ignore the capability for the device so
* it'll be forced to go into translation mode.
*/
- if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+ if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
struct amd_iommu *iommu;
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, 1);
+ iommu_queue_command(iommu, &cmd);
+
+ iommu_completion_wait(iommu);
+}
+
static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
gfp_t gfp)
{
+ unsigned long flags;
u64 *pte;
- if (domain->mode == PAGE_MODE_6_LEVEL)
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
- return false;
+ goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
- return false;
+ goto out;
*pte = PM_LEVEL_PDE(domain->mode,
iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
domain->mode += 1;
domain->updated = true;
- return true;
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return;
}
static u64 *alloc_pte(struct protection_domain *domain,
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
{
u64 pte_root = 0;
u64 flags = 0;
+ u32 old_domid;
if (domain->mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
+ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
amd_iommu_dev_table[devid].data[1] = flags;
amd_iommu_dev_table[devid].data[0] = pte_root;
+
+ /*
+ * A kdump kernel might be replacing a domain ID that was copied from
+ * the previous kernel--if so, it needs to flush the translation cache
+ * entries for the old domain ID that is being overwritten
+ */
+ if (old_domid) {
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ amd_iommu_flush_tlb_domid(iommu, old_domid);
+ }
}
static void clear_dte_entry(u16 devid)
@@ -2226,7 +2256,7 @@ static int amd_iommu_add_device(struct device *dev)
BUG_ON(!dev_data);
- if (iommu_pass_through || dev_data->iommu_v2)
+ if (dev_data->iommu_v2)
iommu_request_dm_for_dev(dev);
/* Domains are initialized for this device - have a look what we ended up with */
@@ -2547,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ ret = iommu_map_page(domain, bus_addr, phys_addr,
+ PAGE_SIZE, prot,
+ GFP_ATOMIC | __GFP_NOWARN);
if (ret)
goto out_unmap;
@@ -2805,7 +2837,7 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
- swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
+ swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
if (amd_iommu_unmap_flush)
@@ -3055,7 +3087,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
}
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- size_t page_size)
+ size_t page_size,
+ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
@@ -3196,9 +3229,10 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
domain_flush_complete(dom);
}
-static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
+ amd_iommu_flush_iotlb_all(domain);
}
const struct iommu_ops amd_iommu_ops = {
@@ -3219,8 +3253,7 @@ const struct iommu_ops amd_iommu_ops = {
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
- .iotlb_range_add = amd_iommu_iotlb_range_add,
- .iotlb_sync = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
};
/*****************************************************************************
@@ -4313,13 +4346,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
+int amd_iommu_activate_guest_mode(void *data)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ !entry || entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ entry->lo.val = 0;
+ entry->hi.val = 0;
+
+ entry->lo.fields_vapic.guest_mode = 1;
+ entry->lo.fields_vapic.ga_log_intr = 1;
+ entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
+ entry->hi.fields.vector = ir_data->ga_vector;
+ entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
+
+ return modify_irte_ga(ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+
+int amd_iommu_deactivate_guest_mode(void *data)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ struct irq_cfg *cfg = ir_data->cfg;
+
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ !entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ entry->lo.val = 0;
+ entry->hi.val = 0;
+
+ entry->lo.fields_remap.dm = apic->irq_dest_mode;
+ entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
+ entry->hi.fields.vector = cfg->vector;
+ entry->lo.fields_remap.destination =
+ APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+ entry->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+
+ return modify_irte_ga(ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry, NULL);
+}
+EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
+ int ret;
struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
- struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
@@ -4330,6 +4412,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
if (!dev_data || !dev_data->use_vapic)
return 0;
+ ir_data->cfg = irqd_cfg(data);
pi_data->ir_data = ir_data;
/* Note:
@@ -4348,37 +4431,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
- /* Setting */
- irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
- irte->hi.fields.vector = vcpu_pi_info->vector;
- irte->lo.fields_vapic.ga_log_intr = 1;
- irte->lo.fields_vapic.guest_mode = 1;
- irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
-
- ir_data->cached_ga_tag = pi_data->ga_tag;
+ ir_data->ga_root_ptr = (pi_data->base >> 12);
+ ir_data->ga_vector = vcpu_pi_info->vector;
+ ir_data->ga_tag = pi_data->ga_tag;
+ ret = amd_iommu_activate_guest_mode(ir_data);
+ if (!ret)
+ ir_data->cached_ga_tag = pi_data->ga_tag;
} else {
- /* Un-Setting */
- struct irq_cfg *cfg = irqd_cfg(data);
-
- irte->hi.val = 0;
- irte->lo.val = 0;
- irte->hi.fields.vector = cfg->vector;
- irte->lo.fields_remap.guest_mode = 0;
- irte->lo.fields_remap.destination =
- APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
- irte->hi.fields.destination =
- APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
- irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
- irte->lo.fields_remap.dm = apic->irq_dest_mode;
+ ret = amd_iommu_deactivate_guest_mode(ir_data);
/*
* This communicates the ga_tag back to the caller
* so that it can do all the necessary clean up.
*/
- ir_data->cached_ga_tag = 0;
+ if (!ret)
+ ir_data->cached_ga_tag = 0;
}
- return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+ return ret;
}
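The amd_iommu_unmap()/amd_iommu_iotlb_sync() changes above adapt the driver to the gather-based TLB invalidation interface. A hedged caller-side sketch, using the iommu core helpers as they exist in this series, shows the flow that ends up in those two callbacks:

	#include <linux/iommu.h>

	/* Unmap a range and flush the IOTLB once at the end, not per page. */
	static size_t example_unmap_and_sync(struct iommu_domain *domain,
					     unsigned long iova, size_t size)
	{
		struct iommu_iotlb_gather gather;
		size_t unmapped;

		iommu_iotlb_gather_init(&gather);
		unmapped = iommu_unmap_fast(domain, iova, size, &gather);
		iommu_tlb_sync(domain, &gather);	/* -> ops->iotlb_sync() */

		return unmapped;
	}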
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
new file mode 100644
index 000000000000..12d540d9b59b
--- /dev/null
+++ b/drivers/iommu/amd_iommu.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef AMD_IOMMU_H
+#define AMD_IOMMU_H
+
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+
+#ifdef CONFIG_DMI
+void amd_iommu_apply_ivrs_quirks(void);
+#else
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
+#endif
+
+#endif
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index eb104c719629..568c52317757 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -23,6 +23,8 @@
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
+#include <asm/apic.h>
+#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
@@ -30,6 +32,7 @@
#include <asm/irq_remapping.h>
#include <linux/crash_dump.h>
+#include "amd_iommu.h"
#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
@@ -1000,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
set_iommu_for_device(iommu, devid);
}
-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
struct devid_map *entry;
struct list_head *list;
@@ -1151,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
if (ret)
return ret;
+ amd_iommu_apply_ivrs_quirks();
+
/*
* First save the recommended feature enable bits from ACPI
*/
@@ -1920,6 +1925,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return 0;
}
+#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
+#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
+#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
+#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
+
+/**
+ * Set up the IntCapXT registers with interrupt routing information
+ * based on the PCI MSI capability block registers, accessed via
+ * MMIO MSI address low/hi and MSI data registers.
+ */
+static void iommu_update_intcapxt(struct amd_iommu *iommu)
+{
+ u64 val;
+ u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
+ u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
+ u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
+ bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+ u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
+
+ if (x2apic_enabled())
+ dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
+
+ val = XT_INT_VEC(data & 0xFF) |
+ XT_INT_DEST_MODE(dm) |
+ XT_INT_DEST_LO(dest) |
+ XT_INT_DEST_HI(dest);
+
+ /**
+ * Current IOMMU implementation uses the same IRQ for all
+ * 3 IOMMU interrupts.
+ */
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void _irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ if (iommu->dev->irq == notify->irq) {
+ iommu_update_intcapxt(iommu);
+ break;
+ }
+ }
+}
+
+static void _irq_notifier_release(struct kref *ref)
+{
+}
+
+static int iommu_init_intcapxt(struct amd_iommu *iommu)
+{
+ int ret;
+ struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
+
+ /**
+ * IntCapXT requires XTSup=1, which can be inferred
+ * from amd_iommu_xt_mode.
+ */
+ if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
+ return 0;
+
+ /**
+ * Also, we need to set up a notifier to update the IntCapXT registers
+ * whenever the irq affinity is changed from user-space.
+ */
+ notify->irq = iommu->dev->irq;
+ notify->notify = _irq_notifier_notify;
+ notify->release = _irq_notifier_release;
+ ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
+ if (ret) {
+ pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
+ iommu->devid, iommu->dev->irq);
+ return ret;
+ }
+
+ iommu_update_intcapxt(iommu);
+ iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+ return ret;
+}
+
static int iommu_init_msi(struct amd_iommu *iommu)
{
int ret;
@@ -1936,6 +2025,10 @@ static int iommu_init_msi(struct amd_iommu *iommu)
return ret;
enable_faults:
+ ret = iommu_init_intcapxt(iommu);
+ if (ret)
+ return ret;
+
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
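As a worked example of the value iommu_update_intcapxt() composes from the MSI registers (illustrative numbers, physical destination mode):

	/*
	 * MSI data 0x45 (vector 0x45), destination mode 0, APIC ID 0x12:
	 *
	 *	XT_INT_VEC(0x45)      = 0x45ULL << 32 = 0x0000004500000000
	 *	XT_INT_DEST_MODE(0)   = 0
	 *	XT_INT_DEST_LO(0x12)  = 0x12ULL << 8  = 0x0000000000001200
	 *	XT_INT_DEST_HI(0x12)  = 0             (ID bits 31:24 are zero)
	 *
	 *	val                   = 0x0000004500001200
	 *
	 * and that value is written to all three IntCapXT routing registers.
	 */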
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
new file mode 100644
index 000000000000..c235f79b7a20
--- /dev/null
+++ b/drivers/iommu/amd_iommu_quirks.c
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * Quirks for AMD IOMMU
+ *
+ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
+ */
+
+#ifdef CONFIG_DMI
+#include <linux/dmi.h>
+
+#include "amd_iommu.h"
+
+#define IVHD_SPECIAL_IOAPIC 1
+
+struct ivrs_quirk_entry {
+ u8 id;
+ u16 devid;
+};
+
+enum {
+ DELL_INSPIRON_7375 = 0,
+ DELL_LATITUDE_5495,
+ LENOVO_IDEAPAD_330S_15ARR,
+};
+
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
+ /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
+ [DELL_INSPIRON_7375] = {
+ { .id = 4, .devid = 0xa0 },
+ { .id = 5, .devid = 0x2 },
+ {}
+ },
+ /* ivrs_ioapic[4]=00:14.0 */
+ [DELL_LATITUDE_5495] = {
+ { .id = 4, .devid = 0xa0 },
+ {}
+ },
+ /* ivrs_ioapic[32]=00:14.0 */
+ [LENOVO_IDEAPAD_330S_15ARR] = {
+ { .id = 32, .devid = 0xa0 },
+ {}
+ },
+ {}
+};
+
+static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
+{
+ const struct ivrs_quirk_entry *i;
+
+ for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
+ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
+
+ return 0;
+}
+
+static const struct dmi_system_id ivrs_quirks[] __initconst = {
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Dell Inspiron 7375",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
+ },
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Dell Latitude 5495",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+ },
+ {
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Lenovo ideapad 330S-15ARR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
+ },
+ {}
+};
+
+void __init amd_iommu_apply_ivrs_quirks(void)
+{
+ dmi_check_system(ivrs_quirks);
+}
+#endif
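For reference, each quirk entry encodes the same information as the ivrs_ioapic= command-line override noted in the table comments; the devid field is the usual PCI bus/device/function packing:

	/*
	 * devid = (bus << 8) | (device << 3) | function
	 *       = (0x00 << 8) | (0x14 << 3) | 0
	 *       = 0xa0
	 *
	 * so { .id = 4, .devid = 0xa0 } matches booting an affected machine
	 * with ivrs_ioapic[4]=00:14.0.
	 */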
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 52c35d557fad..9ac229e92b07 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -60,6 +60,12 @@
#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
+#define MMIO_MSI_ADDR_LO_OFFSET 0x015C
+#define MMIO_MSI_ADDR_HI_OFFSET 0x0160
+#define MMIO_MSI_DATA_OFFSET 0x0164
+#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
+#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
+#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -150,6 +156,7 @@
#define CONTROL_GALOG_EN 0x1CULL
#define CONTROL_GAINT_EN 0x1DULL
#define CONTROL_XT_EN 0x32ULL
+#define CONTROL_INTCAPXT_EN 0x33ULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -592,6 +599,8 @@ struct amd_iommu {
/* DebugFS Info */
struct dentry *debugfs;
#endif
+ /* IRQ notifier for IntCapXT interrupt */
+ struct irq_affinity_notify intcapxt_notify;
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -864,6 +873,15 @@ struct amd_ir_data {
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
void *ref; /* Pointer to the actual irte */
+
+ /**
+ * Store information for activate/de-activate
+ * Guest virtual APIC mode during runtime.
+ */
+ struct irq_cfg *cfg;
+ int ga_vector;
+ int ga_root_ptr;
+ int ga_tag;
};
struct amd_irte_ops {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
new file mode 100644
index 000000000000..5c87a38620c4
--- /dev/null
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Miscellaneous Arm SMMU implementation and integration quirks
+// Copyright (C) 2019 Arm Limited
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/of.h>
+
+#include "arm-smmu.h"
+
+
+static int arm_smmu_gr0_ns(int offset)
+{
+ switch (offset) {
+ case ARM_SMMU_GR0_sCR0:
+ case ARM_SMMU_GR0_sACR:
+ case ARM_SMMU_GR0_sGFSR:
+ case ARM_SMMU_GR0_sGFSYNR0:
+ case ARM_SMMU_GR0_sGFSYNR1:
+ case ARM_SMMU_GR0_sGFSYNR2:
+ return offset + 0x400;
+ default:
+ return offset;
+ }
+}
+
+static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
+ int offset)
+{
+ if (page == ARM_SMMU_GR0)
+ offset = arm_smmu_gr0_ns(offset);
+ return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
+ int offset, u32 val)
+{
+ if (page == ARM_SMMU_GR0)
+ offset = arm_smmu_gr0_ns(offset);
+ writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+/* Since we don't care for sGFAR, we can do without 64-bit accessors */
+static const struct arm_smmu_impl calxeda_impl = {
+ .read_reg = arm_smmu_read_ns,
+ .write_reg = arm_smmu_write_ns,
+};
+
+
+struct cavium_smmu {
+ struct arm_smmu_device smmu;
+ u32 id_base;
+};
+
+static int cavium_cfg_probe(struct arm_smmu_device *smmu)
+{
+ static atomic_t context_count = ATOMIC_INIT(0);
+ struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
+ /*
+ * Cavium CN88xx erratum #27704.
+ * Ensure ASID and VMID allocation is unique across all SMMUs in
+ * the system.
+ */
+ cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
+ dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
+
+ return 0;
+}
+
+static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
+{
+ struct cavium_smmu *cs = container_of(smmu_domain->smmu,
+ struct cavium_smmu, smmu);
+
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+ smmu_domain->cfg.vmid += cs->id_base;
+ else
+ smmu_domain->cfg.asid += cs->id_base;
+
+ return 0;
+}
+
+static const struct arm_smmu_impl cavium_impl = {
+ .cfg_probe = cavium_cfg_probe,
+ .init_context = cavium_init_context,
+};
+
+static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct cavium_smmu *cs;
+
+ cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ cs->smmu = *smmu;
+ cs->smmu.impl = &cavium_impl;
+
+ devm_kfree(smmu->dev, smmu);
+
+ return &cs->smmu;
+}
+
+
+#define ARM_MMU500_ACTLR_CPRE (1 << 1)
+
+#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
+
+static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+{
+ u32 reg, major;
+ int i;
+ /*
+ * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
+ * writes to the context bank ACTLRs will stick. And we just hope that
+ * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
+ */
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
+ major = FIELD_GET(ID7_MAJOR, reg);
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
+
+ /*
+ * Disable MMU-500's not-particularly-beneficial next-page
+ * prefetcher for the sake of errata #841119 and #826419.
+ */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+ reg &= ~ARM_MMU500_ACTLR_CPRE;
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
+ }
+
+ return 0;
+}
+
+static const struct arm_smmu_impl arm_mmu500_impl = {
+ .reset = arm_mmu500_reset,
+};
+
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ /*
+ * We will inevitably have to combine model-specific implementation
+ * quirks with platform-specific integration quirks, but everything
+ * we currently support happens to work out as straightforward
+ * mutually-exclusive assignments.
+ */
+ switch (smmu->model) {
+ case ARM_MMU500:
+ smmu->impl = &arm_mmu500_impl;
+ break;
+ case CAVIUM_SMMUV2:
+ return cavium_smmu_impl_init(smmu);
+ default:
+ break;
+ }
+
+ if (of_property_read_bool(smmu->dev->of_node,
+ "calxeda,smmu-secure-config-access"))
+ smmu->impl = &calxeda_impl;
+
+ return smmu;
+}
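A hedged sketch of how a future model or integration quirk would slot into arm_smmu_impl_init(): define an arm_smmu_impl that overrides only the hooks it needs and assign it from the switch (or an OF property check). The names below are illustrative and not part of this patch.

	/* Illustrative only: an SoC needing extra work around the global reset. */
	static int example_soc_reset(struct arm_smmu_device *smmu)
	{
		/* e.g. poke an SoC-specific register before the generic reset */
		return 0;
	}

	static const struct arm_smmu_impl example_soc_impl = {
		.reset = example_soc_reset,
	};

	/* ...then, in arm_smmu_impl_init(): smmu->impl = &example_soc_impl; */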
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
deleted file mode 100644
index 1c278f7ae888..000000000000
--- a/drivers/iommu/arm-smmu-regs.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * IOMMU API for ARM architected SMMU implementations.
- *
- * Copyright (C) 2013 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#ifndef _ARM_SMMU_REGS_H
-#define _ARM_SMMU_REGS_H
-
-/* Configuration registers */
-#define ARM_SMMU_GR0_sCR0 0x0
-#define sCR0_CLIENTPD (1 << 0)
-#define sCR0_GFRE (1 << 1)
-#define sCR0_GFIE (1 << 2)
-#define sCR0_EXIDENABLE (1 << 3)
-#define sCR0_GCFGFRE (1 << 4)
-#define sCR0_GCFGFIE (1 << 5)
-#define sCR0_USFCFG (1 << 10)
-#define sCR0_VMIDPNE (1 << 11)
-#define sCR0_PTM (1 << 12)
-#define sCR0_FB (1 << 13)
-#define sCR0_VMID16EN (1 << 31)
-#define sCR0_BSU_SHIFT 14
-#define sCR0_BSU_MASK 0x3
-
-/* Auxiliary Configuration register */
-#define ARM_SMMU_GR0_sACR 0x10
-
-/* Identification registers */
-#define ARM_SMMU_GR0_ID0 0x20
-#define ARM_SMMU_GR0_ID1 0x24
-#define ARM_SMMU_GR0_ID2 0x28
-#define ARM_SMMU_GR0_ID3 0x2c
-#define ARM_SMMU_GR0_ID4 0x30
-#define ARM_SMMU_GR0_ID5 0x34
-#define ARM_SMMU_GR0_ID6 0x38
-#define ARM_SMMU_GR0_ID7 0x3c
-#define ARM_SMMU_GR0_sGFSR 0x48
-#define ARM_SMMU_GR0_sGFSYNR0 0x50
-#define ARM_SMMU_GR0_sGFSYNR1 0x54
-#define ARM_SMMU_GR0_sGFSYNR2 0x58
-
-#define ID0_S1TS (1 << 30)
-#define ID0_S2TS (1 << 29)
-#define ID0_NTS (1 << 28)
-#define ID0_SMS (1 << 27)
-#define ID0_ATOSNS (1 << 26)
-#define ID0_PTFS_NO_AARCH32 (1 << 25)
-#define ID0_PTFS_NO_AARCH32S (1 << 24)
-#define ID0_CTTW (1 << 14)
-#define ID0_NUMIRPT_SHIFT 16
-#define ID0_NUMIRPT_MASK 0xff
-#define ID0_NUMSIDB_SHIFT 9
-#define ID0_NUMSIDB_MASK 0xf
-#define ID0_EXIDS (1 << 8)
-#define ID0_NUMSMRG_SHIFT 0
-#define ID0_NUMSMRG_MASK 0xff
-
-#define ID1_PAGESIZE (1 << 31)
-#define ID1_NUMPAGENDXB_SHIFT 28
-#define ID1_NUMPAGENDXB_MASK 7
-#define ID1_NUMS2CB_SHIFT 16
-#define ID1_NUMS2CB_MASK 0xff
-#define ID1_NUMCB_SHIFT 0
-#define ID1_NUMCB_MASK 0xff
-
-#define ID2_OAS_SHIFT 4
-#define ID2_OAS_MASK 0xf
-#define ID2_IAS_SHIFT 0
-#define ID2_IAS_MASK 0xf
-#define ID2_UBS_SHIFT 8
-#define ID2_UBS_MASK 0xf
-#define ID2_PTFS_4K (1 << 12)
-#define ID2_PTFS_16K (1 << 13)
-#define ID2_PTFS_64K (1 << 14)
-#define ID2_VMID16 (1 << 15)
-
-#define ID7_MAJOR_SHIFT 4
-#define ID7_MAJOR_MASK 0xf
-
-/* Global TLB invalidation */
-#define ARM_SMMU_GR0_TLBIVMID 0x64
-#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
-#define ARM_SMMU_GR0_TLBIALLH 0x6c
-#define ARM_SMMU_GR0_sTLBGSYNC 0x70
-#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
-#define sTLBGSTATUS_GSACTIVE (1 << 0)
-
-/* Stream mapping registers */
-#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
-#define SMR_VALID (1 << 31)
-#define SMR_MASK_SHIFT 16
-#define SMR_ID_SHIFT 0
-
-#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
-#define S2CR_CBNDX_SHIFT 0
-#define S2CR_CBNDX_MASK 0xff
-#define S2CR_EXIDVALID (1 << 10)
-#define S2CR_TYPE_SHIFT 16
-#define S2CR_TYPE_MASK 0x3
-enum arm_smmu_s2cr_type {
- S2CR_TYPE_TRANS,
- S2CR_TYPE_BYPASS,
- S2CR_TYPE_FAULT,
-};
-
-#define S2CR_PRIVCFG_SHIFT 24
-#define S2CR_PRIVCFG_MASK 0x3
-enum arm_smmu_s2cr_privcfg {
- S2CR_PRIVCFG_DEFAULT,
- S2CR_PRIVCFG_DIPAN,
- S2CR_PRIVCFG_UNPRIV,
- S2CR_PRIVCFG_PRIV,
-};
-
-/* Context bank attribute registers */
-#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
-#define CBAR_VMID_SHIFT 0
-#define CBAR_VMID_MASK 0xff
-#define CBAR_S1_BPSHCFG_SHIFT 8
-#define CBAR_S1_BPSHCFG_MASK 3
-#define CBAR_S1_BPSHCFG_NSH 3
-#define CBAR_S1_MEMATTR_SHIFT 12
-#define CBAR_S1_MEMATTR_MASK 0xf
-#define CBAR_S1_MEMATTR_WB 0xf
-#define CBAR_TYPE_SHIFT 16
-#define CBAR_TYPE_MASK 0x3
-#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
-#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
-#define CBAR_IRPTNDX_SHIFT 24
-#define CBAR_IRPTNDX_MASK 0xff
-
-#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
-
-#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
-#define CBA2R_RW64_32BIT (0 << 0)
-#define CBA2R_RW64_64BIT (1 << 0)
-#define CBA2R_VMID_SHIFT 16
-#define CBA2R_VMID_MASK 0xffff
-
-#define ARM_SMMU_CB_SCTLR 0x0
-#define ARM_SMMU_CB_ACTLR 0x4
-#define ARM_SMMU_CB_RESUME 0x8
-#define ARM_SMMU_CB_TTBCR2 0x10
-#define ARM_SMMU_CB_TTBR0 0x20
-#define ARM_SMMU_CB_TTBR1 0x28
-#define ARM_SMMU_CB_TTBCR 0x30
-#define ARM_SMMU_CB_CONTEXTIDR 0x34
-#define ARM_SMMU_CB_S1_MAIR0 0x38
-#define ARM_SMMU_CB_S1_MAIR1 0x3c
-#define ARM_SMMU_CB_PAR 0x50
-#define ARM_SMMU_CB_FSR 0x58
-#define ARM_SMMU_CB_FAR 0x60
-#define ARM_SMMU_CB_FSYNR0 0x68
-#define ARM_SMMU_CB_S1_TLBIVA 0x600
-#define ARM_SMMU_CB_S1_TLBIASID 0x610
-#define ARM_SMMU_CB_S1_TLBIVAL 0x620
-#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
-#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
-#define ARM_SMMU_CB_TLBSYNC 0x7f0
-#define ARM_SMMU_CB_TLBSTATUS 0x7f4
-#define ARM_SMMU_CB_ATS1PR 0x800
-#define ARM_SMMU_CB_ATSR 0x8f0
-
-#define SCTLR_S1_ASIDPNE (1 << 12)
-#define SCTLR_CFCFG (1 << 7)
-#define SCTLR_CFIE (1 << 6)
-#define SCTLR_CFRE (1 << 5)
-#define SCTLR_E (1 << 4)
-#define SCTLR_AFE (1 << 2)
-#define SCTLR_TRE (1 << 1)
-#define SCTLR_M (1 << 0)
-
-#define CB_PAR_F (1 << 0)
-
-#define ATSR_ACTIVE (1 << 0)
-
-#define RESUME_RETRY (0 << 0)
-#define RESUME_TERMINATE (1 << 0)
-
-#define TTBCR2_SEP_SHIFT 15
-#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-#define TTBCR2_AS (1 << 4)
-
-#define TTBRn_ASID_SHIFT 48
-
-#define FSR_MULTI (1 << 31)
-#define FSR_SS (1 << 30)
-#define FSR_UUT (1 << 8)
-#define FSR_ASF (1 << 7)
-#define FSR_TLBLKF (1 << 6)
-#define FSR_TLBMCF (1 << 5)
-#define FSR_EF (1 << 4)
-#define FSR_PF (1 << 3)
-#define FSR_AFF (1 << 2)
-#define FSR_TF (1 << 1)
-
-#define FSR_IGN (FSR_AFF | FSR_ASF | \
- FSR_TLBMCF | FSR_TLBLKF)
-#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
- FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
-
-#define FSYNR0_WNR (1 << 4)
-
-#endif /* _ARM_SMMU_REGS_H */
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..8da93e730d6f 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -181,12 +181,13 @@
#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
#define ARM_SMMU_MEMATTR_OIWB 0xf
-#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
-#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
-#define Q_OVERFLOW_FLAG (1 << 31)
-#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
+#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
+#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
+#define Q_OVERFLOW_FLAG (1U << 31)
+#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p) ((q)->base + \
- Q_IDX(q, p) * (q)->ent_dwords)
+ Q_IDX(&((q)->llq), p) * \
+ (q)->ent_dwords)
#define Q_BASE_RWA (1UL << 62)
#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
@@ -306,6 +307,15 @@
#define CMDQ_ERR_CERROR_ABT_IDX 2
#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
+#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG
+
+/*
+ * This is used to size the command queue and therefore must be at least
+ * BITS_PER_LONG so that the valid_map works correctly (it relies on the
+ * total number of queue entries being a multiple of BITS_PER_LONG).
+ */
+#define CMDQ_BATCH_ENTRIES BITS_PER_LONG
+
#define CMDQ_0_OP GENMASK_ULL(7, 0)
#define CMDQ_0_SSV (1UL << 11)
@@ -368,9 +378,8 @@
#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
/* High-level queue structures */
-#define ARM_SMMU_POLL_TIMEOUT_US 100
-#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */
-#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10
+#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_POLL_SPIN_COUNT 10
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -472,13 +481,29 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_CMD_SYNC 0x46
struct {
- u32 msidata;
u64 msiaddr;
} sync;
};
};
+struct arm_smmu_ll_queue {
+ union {
+ u64 val;
+ struct {
+ u32 prod;
+ u32 cons;
+ };
+ struct {
+ atomic_t prod;
+ atomic_t cons;
+ } atomic;
+ u8 __pad[SMP_CACHE_BYTES];
+ } ____cacheline_aligned_in_smp;
+ u32 max_n_shift;
+};
+
struct arm_smmu_queue {
+ struct arm_smmu_ll_queue llq;
int irq; /* Wired interrupt */
__le64 *base;
@@ -486,17 +511,23 @@ struct arm_smmu_queue {
u64 q_base;
size_t ent_dwords;
- u32 max_n_shift;
- u32 prod;
- u32 cons;
u32 __iomem *prod_reg;
u32 __iomem *cons_reg;
};
+struct arm_smmu_queue_poll {
+ ktime_t timeout;
+ unsigned int delay;
+ unsigned int spin_cnt;
+ bool wfe;
+};
+
struct arm_smmu_cmdq {
struct arm_smmu_queue q;
- spinlock_t lock;
+ atomic_long_t *valid_map;
+ atomic_t owner_prod;
+ atomic_t lock;
};
struct arm_smmu_evtq {
@@ -576,8 +607,6 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
- u32 sync_nr;
- u8 prev_cmd_opcode;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -596,12 +625,6 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg;
- /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
- union {
- u32 sync_count;
- u64 padding;
- };
-
/* IOMMU core code handle */
struct iommu_device iommu;
};
@@ -614,7 +637,7 @@ struct arm_smmu_master {
struct list_head domain_head;
u32 *sids;
unsigned int num_sids;
- bool ats_enabled :1;
+ bool ats_enabled;
};
/* SMMU private data for an IOMMU domain */
@@ -631,6 +654,7 @@ struct arm_smmu_domain {
struct io_pgtable_ops *pgtbl_ops;
bool non_strict;
+ atomic_t nr_ats_masters;
enum arm_smmu_domain_stage stage;
union {
@@ -685,85 +709,97 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
}
/* Low-level queue manipulation functions */
-static bool queue_full(struct arm_smmu_queue *q)
+static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
+{
+ u32 space, prod, cons;
+
+ prod = Q_IDX(q, q->prod);
+ cons = Q_IDX(q, q->cons);
+
+ if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
+ space = (1 << q->max_n_shift) - (prod - cons);
+ else
+ space = cons - prod;
+
+ return space >= n;
+}
+
+static bool queue_full(struct arm_smmu_ll_queue *q)
{
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}
-static bool queue_empty(struct arm_smmu_queue *q)
+static bool queue_empty(struct arm_smmu_ll_queue *q)
{
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
-static void queue_sync_cons(struct arm_smmu_queue *q)
+static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
{
- q->cons = readl_relaxed(q->cons_reg);
+ return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
+ (Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
+ ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
+ (Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
}
-static void queue_inc_cons(struct arm_smmu_queue *q)
+static void queue_sync_cons_out(struct arm_smmu_queue *q)
{
- u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
-
- q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
-
/*
* Ensure that all CPU accesses (reads and writes) to the queue
* are complete before we update the cons pointer.
*/
mb();
- writel_relaxed(q->cons, q->cons_reg);
+ writel_relaxed(q->llq.cons, q->cons_reg);
}
-static int queue_sync_prod(struct arm_smmu_queue *q)
+static void queue_inc_cons(struct arm_smmu_ll_queue *q)
+{
+ u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
+ q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+}
+
+static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
int ret = 0;
u32 prod = readl_relaxed(q->prod_reg);
- if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
+ if (Q_OVF(prod) != Q_OVF(q->llq.prod))
ret = -EOVERFLOW;
- q->prod = prod;
+ q->llq.prod = prod;
return ret;
}
-static void queue_inc_prod(struct arm_smmu_queue *q)
+static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
{
- u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
-
- q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
- writel(q->prod, q->prod_reg);
+ u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
+ return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
}
-/*
- * Wait for the SMMU to consume items. If sync is true, wait until the queue
- * is empty. Otherwise, wait until there is at least one free slot.
- */
-static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
+static void queue_poll_init(struct arm_smmu_device *smmu,
+ struct arm_smmu_queue_poll *qp)
{
- ktime_t timeout;
- unsigned int delay = 1, spin_cnt = 0;
-
- /* Wait longer if it's a CMD_SYNC */
- timeout = ktime_add_us(ktime_get(), sync ?
- ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
- ARM_SMMU_POLL_TIMEOUT_US);
+ qp->delay = 1;
+ qp->spin_cnt = 0;
+ qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
+}
- while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
- if (ktime_compare(ktime_get(), timeout) > 0)
- return -ETIMEDOUT;
+static int queue_poll(struct arm_smmu_queue_poll *qp)
+{
+ if (ktime_compare(ktime_get(), qp->timeout) > 0)
+ return -ETIMEDOUT;
- if (wfe) {
- wfe();
- } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
- cpu_relax();
- continue;
- } else {
- udelay(delay);
- delay *= 2;
- spin_cnt = 0;
- }
+ if (qp->wfe) {
+ wfe();
+ } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
+ cpu_relax();
+ } else {
+ udelay(qp->delay);
+ qp->delay *= 2;
+ qp->spin_cnt = 0;
}
return 0;
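A worked example of the wrap-bit arithmetic used by queue_has_space() and the other helpers above, for an 8-entry queue (max_n_shift = 3, so bit 3 of prod/cons is the wrap bit):

	/*
	 * prod = 0x9 (wrap 1, index 1), cons = 0x6 (wrap 0, index 6):
	 *	wrap bits differ  -> space = cons - prod = 6 - 1 = 5 free slots
	 *
	 * prod = 0x3 (index 3), cons = 0x1 (index 1), same wrap bit:
	 *	space = (1 << 3) - (prod - cons) = 8 - 2 = 6 free slots
	 */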
@@ -777,16 +813,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
*dst++ = cpu_to_le64(*src++);
}
-static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
-{
- if (queue_full(q))
- return -ENOSPC;
-
- queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
- queue_inc_prod(q);
- return 0;
-}
-
static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
int i;
@@ -797,11 +823,12 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
- if (queue_empty(q))
+ if (queue_empty(&q->llq))
return -EAGAIN;
- queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
- queue_inc_cons(q);
+ queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
+ queue_inc_cons(&q->llq);
+ queue_sync_cons_out(q);
return 0;
}
@@ -868,20 +895,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
break;
case CMDQ_OP_CMD_SYNC:
- if (ent->sync.msiaddr)
+ if (ent->sync.msiaddr) {
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
- else
+ cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
+ } else {
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+ }
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
- /*
- * Commands are written little-endian, but we want the SMMU to
- * receive MSIData, and thus write it back to memory, in CPU
- * byte order, so big-endian needs an extra byteswap here.
- */
- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
- cpu_to_le32(ent->sync.msidata));
- cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
break;
default:
return -ENOENT;
@@ -890,6 +911,27 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
return 0;
}
+static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
+ u32 prod)
+{
+ struct arm_smmu_queue *q = &smmu->cmdq.q;
+ struct arm_smmu_cmdq_ent ent = {
+ .opcode = CMDQ_OP_CMD_SYNC,
+ };
+
+ /*
+ * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
+ * payload, so the write will zero the entire command on that platform.
+ */
+ if (smmu->features & ARM_SMMU_FEAT_MSI &&
+ smmu->features & ARM_SMMU_FEAT_COHERENCY) {
+ ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
+ q->ent_dwords * 8;
+ }
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+}
+
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
static const char *cerror_str[] = {
@@ -948,109 +990,456 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
-static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+/*
+ * Command queue locking.
+ * This is a form of bastardised rwlock with the following major changes:
+ *
+ * - The only LOCK routines are exclusive_trylock() and shared_lock().
+ * Neither have barrier semantics, and instead provide only a control
+ * dependency.
+ *
+ * - The UNLOCK routines are supplemented with shared_tryunlock(), which
+ * fails if the caller appears to be the last lock holder (yes, this is
+ * racy). All successful UNLOCK routines have RELEASE semantics.
+ */
+static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
{
- struct arm_smmu_queue *q = &smmu->cmdq.q;
- bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ int val;
+
+ /*
+ * We can try to avoid the cmpxchg() loop by simply incrementing the
+ * lock counter. When held in exclusive state, the lock counter is set
+ * to INT_MIN so these increments won't hurt as the value will remain
+ * negative.
+ */
+ if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
+ return;
- smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+ do {
+ val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
+ } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
+}
+
+static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
+{
+ (void)atomic_dec_return_release(&cmdq->lock);
+}
+
+static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
+{
+ if (atomic_read(&cmdq->lock) == 1)
+ return false;
+
+ arm_smmu_cmdq_shared_unlock(cmdq);
+ return true;
+}
- while (queue_insert_raw(q, cmd) == -ENOSPC) {
- if (queue_poll_cons(q, false, wfe))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \
+({ \
+ bool __ret; \
+ local_irq_save(flags); \
+ __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
+ if (!__ret) \
+ local_irq_restore(flags); \
+ __ret; \
+})
+
+#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
+({ \
+ atomic_set_release(&cmdq->lock, 0); \
+ local_irq_restore(flags); \
+})
+
+
+/*
+ * Command queue insertion.
+ * This is made fiddly by our attempts to achieve some sort of scalability
+ * since there is one queue shared amongst all of the CPUs in the system. If
+ * you like mixed-size concurrency, dependency ordering and relaxed atomics,
+ * then you'll *love* this monstrosity.
+ *
+ * The basic idea is to split the queue up into ranges of commands that are
+ * owned by a given CPU; the owner may not have written all of the commands
+ * itself, but is responsible for advancing the hardware prod pointer when
+ * the time comes. The algorithm is roughly:
+ *
+ * 1. Allocate some space in the queue. At this point we also discover
+ * whether the head of the queue is currently owned by another CPU,
+ * or whether we are the owner.
+ *
+ * 2. Write our commands into our allocated slots in the queue.
+ *
+ * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
+ *
+ * 4. If we are an owner:
+ * a. Wait for the previous owner to finish.
+ * b. Mark the queue head as unowned, which tells us the range
+ * that we are responsible for publishing.
+ * c. Wait for all commands in our owned range to become valid.
+ * d. Advance the hardware prod pointer.
+ * e. Tell the next owner we've finished.
+ *
+ * 5. If we are inserting a CMD_SYNC (we may or may not have been an
+ * owner), then we need to stick around until it has completed:
+ * a. If we have MSIs, the SMMU can write back into the CMD_SYNC
+ * to clear the first 4 bytes.
+ * b. Otherwise, we spin waiting for the hardware cons pointer to
+ * advance past our command.
+ *
+ * The devil is in the details, particularly the use of locking for handling
+ * SYNC completion and freeing up space in the queue before we think that it is
+ * full.
+ */
+static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod, bool set)
+{
+ u32 swidx, sbidx, ewidx, ebidx;
+ struct arm_smmu_ll_queue llq = {
+ .max_n_shift = cmdq->q.llq.max_n_shift,
+ .prod = sprod,
+ };
+
+ ewidx = BIT_WORD(Q_IDX(&llq, eprod));
+ ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
+
+ while (llq.prod != eprod) {
+ unsigned long mask;
+ atomic_long_t *ptr;
+ u32 limit = BITS_PER_LONG;
+
+ swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
+ sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
+
+ ptr = &cmdq->valid_map[swidx];
+
+ if ((swidx == ewidx) && (sbidx < ebidx))
+ limit = ebidx;
+
+ mask = GENMASK(limit - 1, sbidx);
+
+ /*
+ * The valid bit is the inverse of the wrap bit. This means
+ * that a zero-initialised queue is invalid and, after marking
+ * all entries as valid, they become invalid again when we
+ * wrap.
+ */
+ if (set) {
+ atomic_long_xor(mask, ptr);
+ } else { /* Poll */
+ unsigned long valid;
+
+ valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
+ atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
+ }
+
+ llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
}
}
-static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq_ent *ent)
+/* Mark all entries in the range [sprod, eprod) as valid */
+static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod)
+{
+ __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
+}
+
+/* Wait for all entries in the range [sprod, eprod) to become valid */
+static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
+ u32 sprod, u32 eprod)
+{
+ __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
+}
+
+/* Wait for the command queue to become non-full */
+static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
{
- u64 cmd[CMDQ_ENT_DWORDS];
unsigned long flags;
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ int ret = 0;
- if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
- dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
- ent->opcode);
- return;
+ /*
+ * Try to update our copy of cons by grabbing exclusive cmdq access. If
+ * that fails, spin until somebody else updates it for us.
+ */
+ if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
+ WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+ arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
+ llq->val = READ_ONCE(cmdq->q.llq.val);
+ return 0;
}
- spin_lock_irqsave(&smmu->cmdq.lock, flags);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
- spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+ queue_poll_init(smmu, &qp);
+ do {
+ llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+ if (!queue_full(llq))
+ break;
+
+ ret = queue_poll(&qp);
+ } while (!ret);
+
+ return ret;
}
/*
- * The difference between val and sync_idx is bounded by the maximum size of
- * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
+ * Wait until the SMMU signals a CMD_SYNC completion MSI.
+ * Must be called with the cmdq lock held in some capacity.
*/
-static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
{
- ktime_t timeout;
- u32 val;
+ int ret = 0;
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
- timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
- val = smp_cond_load_acquire(&smmu->sync_count,
- (int)(VAL - sync_idx) >= 0 ||
- !ktime_before(ktime_get(), timeout));
+ queue_poll_init(smmu, &qp);
- return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
+ /*
+ * The MSI won't generate an event, since it's being written back
+ * into the command queue.
+ */
+ qp.wfe = false;
+ smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
+ llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
+ return ret;
}
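
As a rough standalone model (not the driver code), the MSI-based completion amounts to polling the CMD_SYNC slot itself until the "device" writes zero into it; in the sketch below a second thread stands in for the SMMU and the non-zero cookie value is arbitrary.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic unsigned int sync_slot = 0xdeadbeef;	/* stand-in queue entry */

static void *smmu_device(void *arg)
{
	(void)arg;
	usleep(1000);	/* pretend the command queue is being consumed */
	atomic_store_explicit(&sync_slot, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t dev;

	pthread_create(&dev, NULL, smmu_device, NULL);

	/* Poll the slot itself instead of waiting for an interrupt */
	while (atomic_load_explicit(&sync_slot, memory_order_acquire))
		;	/* cpu_relax() in the kernel */

	pthread_join(dev, NULL);
	printf("CMD_SYNC completed\n");
	return 0;
}
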
-static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+/*
+ * Wait until the SMMU cons index passes llq->prod.
+ * Must be called with the cmdq lock held in some capacity.
+ */
+static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
{
- u64 cmd[CMDQ_ENT_DWORDS];
- unsigned long flags;
- struct arm_smmu_cmdq_ent ent = {
- .opcode = CMDQ_OP_CMD_SYNC,
- .sync = {
- .msiaddr = virt_to_phys(&smmu->sync_count),
- },
- };
+ struct arm_smmu_queue_poll qp;
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ u32 prod = llq->prod;
+ int ret = 0;
- spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ queue_poll_init(smmu, &qp);
+ llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
+ do {
+ if (queue_consumed(llq, prod))
+ break;
- /* Piggy-back on the previous command if it's a SYNC */
- if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
- ent.sync.msidata = smmu->sync_nr;
- } else {
- ent.sync.msidata = ++smmu->sync_nr;
- arm_smmu_cmdq_build_cmd(cmd, &ent);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
- }
+ ret = queue_poll(&qp);
- spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+ /*
+ * This needs to be a readl() so that our subsequent call
+ * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
+ *
+ * Specifically, we need to ensure that we observe all
+ * shared_lock()s by other CMD_SYNCs that share our owner,
+ * so that a failing call to tryunlock() means that we're
+ * the last one out and therefore we can safely advance
+ * cmdq->q.llq.cons. Roughly speaking:
+ *
+ * CPU 0 CPU1 CPU2 (us)
+ *
+ * if (sync)
+ * shared_lock();
+ *
+ * dma_wmb();
+ * set_valid_map();
+ *
+ * if (owner) {
+ * poll_valid_map();
+ * <control dependency>
+ * writel(prod_reg);
+ *
+ * readl(cons_reg);
+ * tryunlock();
+ *
+ * Requires us to see CPU 0's shared_lock() acquisition.
+ */
+ llq->cons = readl(cmdq->q.cons_reg);
+ } while (!ret);
- return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+ return ret;
}
-static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+ struct arm_smmu_ll_queue *llq)
{
- u64 cmd[CMDQ_ENT_DWORDS];
+ if (smmu->features & ARM_SMMU_FEAT_MSI &&
+ smmu->features & ARM_SMMU_FEAT_COHERENCY)
+ return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+
+ return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+}
+
+static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
+ u32 prod, int n)
+{
+ int i;
+ struct arm_smmu_ll_queue llq = {
+ .max_n_shift = cmdq->q.llq.max_n_shift,
+ .prod = prod,
+ };
+
+ for (i = 0; i < n; ++i) {
+ u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];
+
+ prod = queue_inc_prod_n(&llq, i);
+ queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
+ }
+}
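
queue_inc_prod_n() and the Q_IDX()/Q_WRP() accessors are defined elsewhere in the file; the toy reimplementation below (8-entry queue, overflow flag omitted, local macros rather than the kernel ones) shows how adding n to the combined {wrap,index} value flips the wrap flag exactly when the index rolls over, which is what the valid-map scheme above relies on.

#include <stdio.h>
#include <stdint.h>

#define MAX_N_SHIFT	3			/* toy queue: 8 entries */
#define Q_IDX(p)	((p) & ((1u << MAX_N_SHIFT) - 1))
#define Q_WRP(p)	((p) & (1u << MAX_N_SHIFT))

static uint32_t queue_inc_prod_n(uint32_t prod, int n)
{
	uint32_t next = (Q_WRP(prod) | Q_IDX(prod)) + n;

	return Q_WRP(next) | Q_IDX(next);
}

int main(void)
{
	uint32_t prod = 6;			/* index 6, wrap clear */

	/* Adding 4 crosses the end of the queue and sets the wrap flag */
	prod = queue_inc_prod_n(prod, 4);
	printf("idx=%u wrap=%d\n", Q_IDX(prod), !!Q_WRP(prod));	/* idx=2 wrap=1 */

	/* Going all the way around again clears it */
	prod = queue_inc_prod_n(prod, 8);
	printf("idx=%u wrap=%d\n", Q_IDX(prod), !!Q_WRP(prod));	/* idx=2 wrap=0 */
	return 0;
}
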
+
+/*
+ * This is the actual insertion function, and provides the following
+ * ordering guarantees to callers:
+ *
+ * - There is a dma_wmb() before publishing any commands to the queue.
+ * This can be relied upon to order prior writes to data structures
+ * in memory (such as a CD or an STE) before the command.
+ *
+ * - On completion of a CMD_SYNC, there is a control dependency.
+ * This can be relied upon to order subsequent writes to memory (e.g.
+ * freeing an IOVA) after completion of the CMD_SYNC.
+ *
+ * - Command insertion is totally ordered, so if two CPUs each race to
+ * insert their own list of commands then all of the commands from one
+ * CPU will appear before any of the commands from the other CPU.
+ */
+static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+ u64 *cmds, int n, bool sync)
+{
+ u64 cmd_sync[CMDQ_ENT_DWORDS];
+ u32 prod;
unsigned long flags;
- bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
- struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
- int ret;
+ bool owner;
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ struct arm_smmu_ll_queue llq = {
+ .max_n_shift = cmdq->q.llq.max_n_shift,
+ }, head = llq;
+ int ret = 0;
- arm_smmu_cmdq_build_cmd(cmd, &ent);
+ /* 1. Allocate some space in the queue */
+ local_irq_save(flags);
+ llq.val = READ_ONCE(cmdq->q.llq.val);
+ do {
+ u64 old;
+
+ while (!queue_has_space(&llq, n + sync)) {
+ local_irq_restore(flags);
+ if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+ dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ local_irq_save(flags);
+ }
+
+ head.cons = llq.cons;
+ head.prod = queue_inc_prod_n(&llq, n + sync) |
+ CMDQ_PROD_OWNED_FLAG;
+
+ old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
+ if (old == llq.val)
+ break;
+
+ llq.val = old;
+ } while (1);
+ owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
+ head.prod &= ~CMDQ_PROD_OWNED_FLAG;
+ llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+ /*
+ * 2. Write our commands into the queue
+ * Dependency ordering from the cmpxchg() loop above.
+ */
+ arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
+ if (sync) {
+ prod = queue_inc_prod_n(&llq, n);
+ arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
+ queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
+
+ /*
+ * In order to determine completion of our CMD_SYNC, we must
+ * ensure that the queue can't wrap twice without us noticing.
+ * We achieve that by taking the cmdq lock as shared before
+ * marking our slot as valid.
+ */
+ arm_smmu_cmdq_shared_lock(cmdq);
+ }
+
+ /* 3. Mark our slots as valid, ensuring commands are visible first */
+ dma_wmb();
+ arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
- spin_lock_irqsave(&smmu->cmdq.lock, flags);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
- ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
- spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+ /* 4. If we are the owner, take control of the SMMU hardware */
+ if (owner) {
+ /* a. Wait for previous owner to finish */
+ atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
+ /* b. Stop gathering work by clearing the owned flag */
+ prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
+ &cmdq->q.llq.atomic.prod);
+ prod &= ~CMDQ_PROD_OWNED_FLAG;
+
+ /*
+ * c. Wait for any gathered work to be written to the queue.
+ * Note that we read our own entries so that we have the control
+ * dependency required by (d).
+ */
+ arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
+
+ /*
+ * d. Advance the hardware prod pointer
+ * Control dependency ordering from the entries becoming valid.
+ */
+ writel_relaxed(prod, cmdq->q.prod_reg);
+
+ /*
+ * e. Tell the next owner we're done
+ * Make sure we've updated the hardware first, so that we don't
+ * race to update prod and potentially move it backwards.
+ */
+ atomic_set_release(&cmdq->owner_prod, prod);
+ }
+
+ /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
+ if (sync) {
+ llq.prod = queue_inc_prod_n(&llq, n);
+ ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+ if (ret) {
+ dev_err_ratelimited(smmu->dev,
+ "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
+ llq.prod,
+ readl_relaxed(cmdq->q.prod_reg),
+ readl_relaxed(cmdq->q.cons_reg));
+ }
+
+ /*
+ * Try to unlock the cmdq lock. This will fail if we're the last
+ * reader, in which case we can safely update cmdq->q.llq.cons.
+ */
+ if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
+ WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
+ arm_smmu_cmdq_shared_unlock(cmdq);
+ }
+ }
+
+ local_irq_restore(flags);
return ret;
}
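
Step 1 of the function above is what makes insertion lock-free; the userspace sketch below models just that reservation step with C11 atomics, packing prod/cons into one 64-bit word and using the top bit of prod as the OWNED flag. Space checking, the wrap flag and the hardware hand-off are deliberately left out, and the names are illustrative rather than the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define OWNED_FLAG	(1u << 31)

static _Atomic uint64_t llq_val;	/* high 32 bits: prod, low 32: cons */

static uint32_t cmdq_reserve(int n, bool *owner)
{
	uint64_t old = atomic_load_explicit(&llq_val, memory_order_relaxed);
	uint64_t newv;
	uint32_t prod;

	do {
		prod = old >> 32;
		/* Claim n slots and assert ownership of the whole batch */
		newv = ((uint64_t)((prod + n) | OWNED_FLAG) << 32) |
		       (old & 0xffffffff);
	} while (!atomic_compare_exchange_weak_explicit(&llq_val, &old, newv,
							memory_order_relaxed,
							memory_order_relaxed));

	/* Whoever saw the flag clear before setting it becomes the owner */
	*owner = !(prod & OWNED_FLAG);
	return prod & ~OWNED_FLAG;
}

int main(void)
{
	bool owner;
	uint32_t prod = cmdq_reserve(4, &owner);

	printf("reserved at prod=%u, owner=%d\n", (unsigned)prod, (int)owner);
	prod = cmdq_reserve(2, &owner);
	printf("reserved at prod=%u, owner=%d\n", (unsigned)prod, (int)owner);
	return 0;
}
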
-static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_ent *ent)
{
- int ret;
- bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
- (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+ u64 cmd[CMDQ_ENT_DWORDS];
- ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
- : __arm_smmu_cmdq_issue_sync(smmu);
- if (ret)
- dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
- return ret;
+ if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
+ dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
+ ent->opcode);
+ return -EINVAL;
+ }
+
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
+}
+
+static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+ return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
}
/* Context descriptor manipulation functions */
@@ -1186,8 +1575,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
ste_live = true;
break;
case STRTAB_STE_0_CFG_ABORT:
- if (disable_bypass)
- break;
+ BUG_ON(!disable_bypass);
+ break;
default:
BUG(); /* STE corruption */
}
@@ -1305,6 +1694,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
int i;
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->evtq.q;
+ struct arm_smmu_ll_queue *llq = &q->llq;
u64 evt[EVTQ_ENT_DWORDS];
do {
@@ -1322,12 +1712,13 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
* Not much we can do on overflow, so scream and pretend we're
* trying harder.
*/
- if (queue_sync_prod(q) == -EOVERFLOW)
+ if (queue_sync_prod_in(q) == -EOVERFLOW)
dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
- } while (!queue_empty(q));
+ } while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+ Q_IDX(llq, llq->cons);
return IRQ_HANDLED;
}
@@ -1373,19 +1764,21 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
struct arm_smmu_device *smmu = dev;
struct arm_smmu_queue *q = &smmu->priq.q;
+ struct arm_smmu_ll_queue *llq = &q->llq;
u64 evt[PRIQ_ENT_DWORDS];
do {
while (!queue_remove_raw(q, evt))
arm_smmu_handle_ppr(smmu, evt);
- if (queue_sync_prod(q) == -EOVERFLOW)
+ if (queue_sync_prod_in(q) == -EOVERFLOW)
dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
- } while (!queue_empty(q));
+ } while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
- writel(q->cons, q->cons_reg);
+ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+ Q_IDX(llq, llq->cons);
+ queue_sync_cons_out(q);
return IRQ_HANDLED;
}
@@ -1534,6 +1927,23 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
return 0;
+ /*
+ * Ensure that we've completed prior invalidation of the main TLBs
+ * before we read 'nr_ats_masters' in case of a concurrent call to
+ * arm_smmu_enable_ats():
+ *
+ * // unmap() // arm_smmu_enable_ats()
+ * TLBI+SYNC atomic_inc(&nr_ats_masters);
+ * smp_mb(); [...]
+ * atomic_read(&nr_ats_masters); pci_enable_ats() // writel()
+ *
+ * Ensures that we always see the incremented 'nr_ats_masters' count if
+ * ATS was enabled at the PCI device before completion of the TLBI.
+ */
+ smp_mb();
+ if (!atomic_read(&smmu_domain->nr_ats_masters))
+ return 0;
+
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -1545,13 +1955,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
}
/* IO_PGTABLE API */
-static void arm_smmu_tlb_sync(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -1570,25 +1973,32 @@ static void arm_smmu_tlb_inv_context(void *cookie)
/*
* NOTE: when io-pgtable is in non-strict mode, we may get here with
* PTEs previously cleared by unmaps on the current CPU not yet visible
- * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
- * to guarantee those are observed before the TLBI. Do be careful, 007.
+ * to the SMMU. We are relying on the dma_wmb() implicit during cmd
+ * insertion to guarantee those are observed before the TLBI. Do be
+ * careful, 007.
*/
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = cookie;
+ u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long start = iova, end = iova + size;
+ int i = 0;
struct arm_smmu_cmdq_ent cmd = {
.tlbi = {
.leaf = leaf,
- .addr = iova,
},
};
+ if (!size)
+ return;
+
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cmd.opcode = CMDQ_OP_TLBI_NH_VA;
cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
@@ -1597,16 +2007,54 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
- do {
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.tlbi.addr += granule;
- } while (size -= granule);
+ while (iova < end) {
+ if (i == CMDQ_BATCH_ENTRIES) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
+ i = 0;
+ }
+
+ cmd.tlbi.addr = iova;
+ arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
+ iova += granule;
+ i++;
+ }
+
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+
+ /*
+ * Unfortunately, this can't be leaf-only since we may have
+ * zapped an entire table.
+ */
+ arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+}
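
Stripped of the SMMU specifics, the batching loop above has the following shape: full batches go out without a sync and only the final, possibly partial batch waits for completion. In this sketch submit() is merely a stand-in for arm_smmu_cmdq_issue_cmdlist() and the "command" is just the address being invalidated.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_ENTRIES	64

static void submit(unsigned long *cmds, int n, bool sync)
{
	(void)cmds;
	printf("submit %d cmd(s), sync=%d\n", n, sync);
}

static void inv_range(unsigned long iova, unsigned long size,
		      unsigned long granule)
{
	unsigned long cmds[BATCH_ENTRIES];
	unsigned long end = iova + size;
	int i = 0;

	if (!size)
		return;

	while (iova < end) {
		if (i == BATCH_ENTRIES) {
			submit(cmds, i, false);	/* flush a full batch */
			i = 0;
		}
		cmds[i++] = iova;		/* "build" one TLBI command */
		iova += granule;
	}
	submit(cmds, i, true);			/* last batch waits for the sync */
}

int main(void)
{
	inv_range(0x10000, 70 * 0x1000, 0x1000);	/* 70 pages: 64 + 6 */
	return 0;
}
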
+
+static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct iommu_domain *domain = &smmu_domain->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
+static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_inv_page_nosync,
};
/* IOMMU API */
@@ -1796,7 +2244,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
- .tlb = &arm_smmu_gather_ops,
+ .tlb = &arm_smmu_flush_ops,
.iommu_dev = smmu->dev,
};
@@ -1863,44 +2311,65 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
}
}
-static int arm_smmu_enable_ats(struct arm_smmu_master *master)
+#ifdef CONFIG_PCI_ATS
+static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
{
- int ret;
- size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
- return -ENXIO;
+ return false;
pdev = to_pci_dev(master->dev);
- if (pdev->untrusted)
- return -EPERM;
+ return !pdev->untrusted && pdev->ats_cap;
+}
+#else
+static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+#endif
+
+static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+{
+ size_t stu;
+ struct pci_dev *pdev;
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_domain *smmu_domain = master->domain;
+
+ /* Don't enable ATS at the endpoint if it's not enabled in the STE */
+ if (!master->ats_enabled)
+ return;
/* Smallest Translation Unit: log2 of the smallest supported granule */
stu = __ffs(smmu->pgsize_bitmap);
+ pdev = to_pci_dev(master->dev);
- ret = pci_enable_ats(pdev, stu);
- if (ret)
- return ret;
-
- master->ats_enabled = true;
- return 0;
+ atomic_inc(&smmu_domain->nr_ats_masters);
+ arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+ if (pci_enable_ats(pdev, stu))
+ dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
struct arm_smmu_cmdq_ent cmd;
+ struct arm_smmu_domain *smmu_domain = master->domain;
- if (!master->ats_enabled || !dev_is_pci(master->dev))
+ if (!master->ats_enabled)
return;
+ pci_disable_ats(to_pci_dev(master->dev));
+ /*
+ * Ensure ATS is disabled at the endpoint before we issue the
+ * ATC invalidation via the SMMU.
+ */
+ wmb();
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
arm_smmu_atc_inv_master(master, &cmd);
- pci_disable_ats(to_pci_dev(master->dev));
- master->ats_enabled = false;
+ atomic_dec(&smmu_domain->nr_ats_masters);
}
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
@@ -1911,14 +2380,15 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
if (!smmu_domain)
return;
+ arm_smmu_disable_ats(master);
+
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_del(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
master->domain = NULL;
+ master->ats_enabled = false;
arm_smmu_install_ste_for_dev(master);
-
- arm_smmu_disable_ats(master);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1958,17 +2428,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
master->domain = smmu_domain;
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_add(&master->domain_head, &smmu_domain->devices);
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-
if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
- arm_smmu_enable_ats(master);
+ master->ats_enabled = arm_smmu_ats_supported(master);
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
arm_smmu_write_ctx_desc(smmu, &smmu_domain->s1_cfg);
arm_smmu_install_ste_for_dev(master);
+
+ spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ list_add(&master->domain_head, &smmu_domain->devices);
+ spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_enable_ats(master);
+
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1985,21 +2458,16 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return ops->map(ops, iova, paddr, size, prot);
}
-static size_t
-arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
- int ret;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
if (!ops)
return 0;
- ret = ops->unmap(ops, iova, size);
- if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size))
- return 0;
-
- return ret;
+ return ops->unmap(ops, iova, size, gather);
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
@@ -2010,12 +2478,13 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
arm_smmu_tlb_inv_context(smmu_domain);
}
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
- struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (smmu)
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
+ gather->pgsize, true, smmu_domain);
}
static phys_addr_t
@@ -2034,16 +2503,11 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
static struct platform_driver arm_smmu_driver;
-static int arm_smmu_match_node(struct device *dev, const void *data)
-{
- return dev->fwnode == data;
-}
-
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- fwnode, arm_smmu_match_node);
+ struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
+ fwnode);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -2286,13 +2750,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
size_t qsz;
do {
- qsz = ((1 << q->max_n_shift) * dwords) << 3;
+ qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
GFP_KERNEL);
if (q->base || qsz < PAGE_SIZE)
break;
- q->max_n_shift--;
+ q->llq.max_n_shift--;
} while (1);
if (!q->base) {
@@ -2304,7 +2768,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
if (!WARN_ON(q->base_dma & (qsz - 1))) {
dev_info(smmu->dev, "allocated %u entries for %s\n",
- 1 << q->max_n_shift, name);
+ 1 << q->llq.max_n_shift, name);
}
q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
@@ -2313,24 +2777,55 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
q->q_base = Q_BASE_RWA;
q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
- q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift);
+ q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
- q->prod = q->cons = 0;
+ q->llq.prod = q->llq.cons = 0;
return 0;
}
+static void arm_smmu_cmdq_free_bitmap(void *data)
+{
+ unsigned long *bitmap = data;
+ bitmap_free(bitmap);
+}
+
+static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
+{
+ int ret = 0;
+ struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
+ unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
+ atomic_long_t *bitmap;
+
+ atomic_set(&cmdq->owner_prod, 0);
+ atomic_set(&cmdq->lock, 0);
+
+ bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
+ if (!bitmap) {
+ dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
+ ret = -ENOMEM;
+ } else {
+ cmdq->valid_map = bitmap;
+ devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
+ }
+
+ return ret;
+}
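
The valid map sized above needs exactly one bit per command-queue entry, rounded up to whole longs and zero-initialised (a fresh queue is entirely invalid because the wrap flag starts at zero). A plain calloc()-based equivalent, for illustration only:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long *valid_map_alloc(unsigned int max_n_shift)
{
	unsigned int nents = 1u << max_n_shift;	/* one bit per queue entry */
	size_t nlongs = (nents + BITS_PER_LONG - 1) / BITS_PER_LONG;

	return calloc(nlongs, sizeof(unsigned long));	/* all-zero == all invalid */
}

int main(void)
{
	unsigned int shift = 8;			/* e.g. a 256-entry command queue */
	size_t nlongs = ((1u << shift) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *map = valid_map_alloc(shift);

	if (!map)
		return 1;
	printf("%u entries -> %zu longs (%zu bytes)\n",
	       1u << shift, nlongs, nlongs * sizeof(unsigned long));
	free(map);
	return 0;
}
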
+
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
int ret;
/* cmdq */
- spin_lock_init(&smmu->cmdq.lock);
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
"cmdq");
if (ret)
return ret;
+ ret = arm_smmu_cmdq_init(smmu);
+ if (ret)
+ return ret;
+
/* evtq */
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
@@ -2708,8 +3203,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Command queue */
writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
- writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
- writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
+ writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
+ writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
enables = CR0_CMDQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -2736,9 +3231,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
- writel_relaxed(smmu->evtq.q.prod,
+ writel_relaxed(smmu->evtq.q.llq.prod,
arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
- writel_relaxed(smmu->evtq.q.cons,
+ writel_relaxed(smmu->evtq.q.llq.cons,
arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
enables |= CR0_EVTQEN;
@@ -2753,9 +3248,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
if (smmu->features & ARM_SMMU_FEAT_PRI) {
writeq_relaxed(smmu->priq.q.q_base,
smmu->base + ARM_SMMU_PRIQ_BASE);
- writel_relaxed(smmu->priq.q.prod,
+ writel_relaxed(smmu->priq.q.llq.prod,
arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
- writel_relaxed(smmu->priq.q.cons,
+ writel_relaxed(smmu->priq.q.llq.cons,
arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
enables |= CR0_PRIQEN;
@@ -2909,18 +3404,24 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
}
/* Queue sizes, capped to ensure natural alignment */
- smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
- FIELD_GET(IDR1_CMDQS, reg));
- if (!smmu->cmdq.q.max_n_shift) {
- /* Odd alignment restrictions on the base, so ignore for now */
- dev_err(smmu->dev, "unit-length command queue not supported\n");
+ smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_CMDQS, reg));
+ if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
+ /*
+ * We don't support splitting up batches, so one batch of
+ * commands plus an extra sync needs to fit inside the command
+ * queue. There's also no way we can handle the weird alignment
+ * restrictions on the base pointer for a unit-length queue.
+ */
+ dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
+ CMDQ_BATCH_ENTRIES);
return -ENXIO;
}
- smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
- FIELD_GET(IDR1_EVTQS, reg));
- smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
- FIELD_GET(IDR1_PRIQS, reg));
+ smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_EVTQS, reg));
+ smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_PRIQS, reg));
/* SID/SSID sizes */
smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 64977c131ee6..c3ef0cc8f764 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -19,16 +19,13 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
-#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/io-64-nonatomic-hi-lo.h>
-#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
@@ -40,12 +37,11 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
-#include "arm-smmu-regs.h"
+#include "arm-smmu.h"
/*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -56,46 +52,9 @@
*/
#define QCOM_DUMMY_VAL -1
-#define ARM_MMU500_ACTLR_CPRE (1 << 1)
-
-#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
-#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
-#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
-
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
#define TLB_SPIN_COUNT 10
-/* Maximum number of context banks per SMMU */
-#define ARM_SMMU_MAX_CBS 128
-
-/* SMMU global address space */
-#define ARM_SMMU_GR0(smmu) ((smmu)->base)
-#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
-
-/*
- * SMMU global address space with conditional offset to access secure
- * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
- * nsGFSYNR0: 0x450)
- */
-#define ARM_SMMU_GR0_NS(smmu) \
- ((smmu)->base + \
- ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
- ? 0x400 : 0))
-
-/*
- * Some 64-bit registers only make sense to write atomically, but in such
- * cases all the data relevant to AArch32 formats lies within the lower word,
- * therefore this actually makes more sense than it might first appear.
- */
-#ifdef CONFIG_64BIT
-#define smmu_write_atomic_lq writeq_relaxed
-#else
-#define smmu_write_atomic_lq writel_relaxed
-#endif
-
-/* Translation context bank */
-#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift))
-
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -113,19 +72,6 @@ module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-enum arm_smmu_arch_version {
- ARM_SMMU_V1,
- ARM_SMMU_V1_64K,
- ARM_SMMU_V2,
-};
-
-enum arm_smmu_implementation {
- GENERIC_SMMU,
- ARM_MMU500,
- CAVIUM_SMMUV2,
- QCOM_SMMUV2,
-};
-
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
@@ -163,117 +109,8 @@ struct arm_smmu_master_cfg {
#define for_each_cfg_sme(fw, i, idx) \
for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
-struct arm_smmu_device {
- struct device *dev;
-
- void __iomem *base;
- void __iomem *cb_base;
- unsigned long pgshift;
-
-#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
-#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
-#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
-#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
-#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
-#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
-#define ARM_SMMU_FEAT_VMID16 (1 << 6)
-#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
-#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
-#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
-#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
-#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
-#define ARM_SMMU_FEAT_EXIDS (1 << 12)
- u32 features;
-
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
- u32 options;
- enum arm_smmu_arch_version version;
- enum arm_smmu_implementation model;
-
- u32 num_context_banks;
- u32 num_s2_context_banks;
- DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
- struct arm_smmu_cb *cbs;
- atomic_t irptndx;
-
- u32 num_mapping_groups;
- u16 streamid_mask;
- u16 smr_mask_mask;
- struct arm_smmu_smr *smrs;
- struct arm_smmu_s2cr *s2crs;
- struct mutex stream_map_mutex;
-
- unsigned long va_size;
- unsigned long ipa_size;
- unsigned long pa_size;
- unsigned long pgsize_bitmap;
-
- u32 num_global_irqs;
- u32 num_context_irqs;
- unsigned int *irqs;
- struct clk_bulk_data *clks;
- int num_clks;
-
- u32 cavium_id_base; /* Specific to Cavium */
-
- spinlock_t global_sync_lock;
-
- /* IOMMU core code handle */
- struct iommu_device iommu;
-};
-
-enum arm_smmu_context_fmt {
- ARM_SMMU_CTX_FMT_NONE,
- ARM_SMMU_CTX_FMT_AARCH64,
- ARM_SMMU_CTX_FMT_AARCH32_L,
- ARM_SMMU_CTX_FMT_AARCH32_S,
-};
-
-struct arm_smmu_cfg {
- u8 cbndx;
- u8 irptndx;
- union {
- u16 asid;
- u16 vmid;
- };
- u32 cbar;
- enum arm_smmu_context_fmt fmt;
-};
-#define INVALID_IRPTNDX 0xff
-
-enum arm_smmu_domain_stage {
- ARM_SMMU_DOMAIN_S1 = 0,
- ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_NESTED,
- ARM_SMMU_DOMAIN_BYPASS,
-};
-
-struct arm_smmu_domain {
- struct arm_smmu_device *smmu;
- struct io_pgtable_ops *pgtbl_ops;
- const struct iommu_gather_ops *tlb_ops;
- struct arm_smmu_cfg cfg;
- enum arm_smmu_domain_stage stage;
- bool non_strict;
- struct mutex init_mutex; /* Protects smmu pointer */
- spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
- struct iommu_domain domain;
-};
-
-struct arm_smmu_option_prop {
- u32 opt;
- const char *prop;
-};
-
-static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
-
static bool using_legacy_binding, using_generic_binding;
-static struct arm_smmu_option_prop arm_smmu_options[] = {
- { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
- { 0, NULL},
-};
-
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
@@ -293,20 +130,6 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
return container_of(dom, struct arm_smmu_domain, domain);
}
-static void parse_driver_options(struct arm_smmu_device *smmu)
-{
- int i = 0;
-
- do {
- if (of_property_read_bool(smmu->dev->of_node,
- arm_smmu_options[i].prop)) {
- smmu->options |= arm_smmu_options[i].opt;
- dev_notice(smmu->dev, "option %s\n",
- arm_smmu_options[i].prop);
- }
- } while (arm_smmu_options[++i].opt);
-}
-
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -415,15 +238,17 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
- void __iomem *sync, void __iomem *status)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+ int sync, int status)
{
unsigned int spin_cnt, delay;
+ u32 reg;
- writel_relaxed(QCOM_DUMMY_VAL, sync);
+ arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
- if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+ reg = arm_smmu_readl(smmu, page, status);
+ if (!(reg & sTLBGSTATUS_GSACTIVE))
return;
cpu_relax();
}
@@ -435,12 +260,11 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
- void __iomem *base = ARM_SMMU_GR0(smmu);
unsigned long flags;
spin_lock_irqsave(&smmu->global_sync_lock, flags);
- __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
- base + ARM_SMMU_GR0_sTLBGSTATUS);
+ __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
+ ARM_SMMU_GR0_sTLBGSTATUS);
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
@@ -448,12 +272,11 @@ static void arm_smmu_tlb_sync_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
unsigned long flags;
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
- __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
- base + ARM_SMMU_CB_TLBSTATUS);
+ __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
+ ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
@@ -467,14 +290,13 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
-
/*
- * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
- * cleared by the current CPU are visible to the SMMU before the TLBI.
+ * The TLBI write may be relaxed, so ensure that PTEs cleared by the
+ * current CPU are visible beforehand.
*/
- writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ wmb();
+ arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
+ ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
arm_smmu_tlb_sync_context(cookie);
}
@@ -482,87 +304,143 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_GR0(smmu);
- /* NOTE: see above */
- writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ /* See above */
+ wmb();
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
arm_smmu_tlb_sync_global(smmu);
}
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+ int reg, idx = cfg->cbndx;
- if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- if (stage1) {
- reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
- if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
- iova &= ~12UL;
- iova |= cfg->asid;
- do {
- writel_relaxed(iova, reg);
- iova += granule;
- } while (size -= granule);
- } else {
- iova >>= 12;
- iova |= (u64)cfg->asid << 48;
- do {
- writeq_relaxed(iova, reg);
- iova += granule >> 12;
- } while (size -= granule);
- }
+ reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+ iova = (iova >> 12) << 12;
+ iova |= cfg->asid;
+ do {
+ arm_smmu_cb_write(smmu, idx, reg, iova);
+ iova += granule;
+ } while (size -= granule);
} else {
- reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
- ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
+ iova |= (u64)cfg->asid << 48;
do {
- smmu_write_atomic_lq(iova, reg);
+ arm_smmu_cb_writeq(smmu, idx, reg, iova);
iova += granule >> 12;
} while (size -= granule);
}
}
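
For reference, the two stage-1 TLBI address encodings used in the loop above reduce to the helpers below: a page-aligned VA with the ASID in the low bits for the AArch32 formats, and the page number with the ASID at bit 48 for AArch64. The helper names and test values are illustrative only, not taken from the SMMU architecture text.

#include <stdio.h>
#include <stdint.h>

static uint32_t tlbi_va_aarch32(uint32_t iova, uint16_t asid)
{
	return (iova & ~0xfffu) | asid;		/* page-aligned VA | ASID */
}

static uint64_t tlbi_va_aarch64(uint64_t iova, uint16_t asid)
{
	return (iova >> 12) | ((uint64_t)asid << 48);	/* page number | ASID<<48 */
}

int main(void)
{
	printf("aarch32: %#x\n", tlbi_va_aarch32(0x12345000u, 0x2a));
	printf("aarch64: %#llx\n",
	       (unsigned long long)tlbi_va_aarch64(0x12345000ull, 0x2a));
	return 0;
}
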
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ int reg, idx = smmu_domain->cfg.cbndx;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
+ reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
+ iova >>= 12;
+ do {
+ if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ arm_smmu_cb_writeq(smmu, idx, reg, iova);
+ else
+ arm_smmu_cb_write(smmu, idx, reg, iova);
+ iova += granule >> 12;
+ } while (size -= granule);
+}
+
/*
* On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
* almost negligible, but the benefit of getting the first one in as far ahead
* of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think.
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
*/
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+}
+
+static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+ ops->tlb_inv_range(iova, size, granule, false, cookie);
+ ops->tlb_sync(cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+ ops->tlb_inv_range(iova, size, granule, true, cookie);
+ ops->tlb_sync(cookie);
}
-static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+ ops->tlb_inv_range(iova, granule, granule, true, cookie);
+}
+
+static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_range_s1,
+ .tlb_sync = arm_smmu_tlb_sync_context,
};
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_range_s2,
+ .tlb_sync = arm_smmu_tlb_sync_context,
};
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
+ .tlb_add_page = arm_smmu_tlb_add_page,
+ },
+ .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_vmid,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -571,26 +449,22 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
- void __iomem *cb_base;
-
- cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
- fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ int idx = smmu_domain->cfg.cbndx;
+ fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (!(fsr & FSR_FAULT))
return IRQ_NONE;
- fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
- cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+ fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
+ iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
+ cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
dev_err_ratelimited(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
- fsr, iova, fsynr, cbfrsynra, cfg->cbndx);
+ fsr, iova, fsynr, cbfrsynra, idx);
- writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
return IRQ_HANDLED;
}
@@ -598,12 +472,11 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
- void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
- gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
- gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
- gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
- gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+ gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+ gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
+ gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
+ gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
if (!gfsr)
return IRQ_NONE;
@@ -614,7 +487,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
gfsr, gfsynr0, gfsynr1, gfsynr2);
- writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
return IRQ_HANDLED;
}
@@ -627,16 +500,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->cfg = cfg;
- /* TTBCR */
+ /* TCR */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
} else {
cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- cb->tcr[1] |= TTBCR2_SEP_UPSTREAM;
+ cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
- cb->tcr[1] |= TTBCR2_AS;
+ cb->tcr[1] |= TCR2_AS;
}
} else {
cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
@@ -649,9 +522,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- cb->ttbr[0] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- cb->ttbr[1] |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
}
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -675,74 +548,71 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
bool stage1;
struct arm_smmu_cb *cb = &smmu->cbs[idx];
struct arm_smmu_cfg *cfg = cb->cfg;
- void __iomem *cb_base, *gr1_base;
-
- cb_base = ARM_SMMU_CB(smmu, idx);
/* Unassigned context banks only need disabling */
if (!cfg) {
- writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
return;
}
- gr1_base = ARM_SMMU_GR1(smmu);
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
/* CBA2R */
if (smmu->version > ARM_SMMU_V1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
- reg = CBA2R_RW64_64BIT;
+ reg = CBA2R_VA64;
else
- reg = CBA2R_RW64_32BIT;
+ reg = 0;
/* 16-bit VMIDs live in CBA2R */
if (smmu->features & ARM_SMMU_FEAT_VMID16)
- reg |= cfg->vmid << CBA2R_VMID_SHIFT;
+ reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);
- writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(idx));
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
}
/* CBAR */
- reg = cfg->cbar;
+ reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
if (smmu->version < ARM_SMMU_V2)
- reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+ reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);
/*
* Use the weakest shareability/memory types, so they are
* overridden by the ttbcr/pte.
*/
if (stage1) {
- reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
- (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
+ FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
/* 8-bit VMIDs live in CBAR */
- reg |= cfg->vmid << CBAR_VMID_SHIFT;
+ reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
}
- writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(idx));
+ arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
/*
- * TTBCR
+ * TCR
* We must write this before the TTBRs, since it determines the
* access behaviour of some fields (in particular, ASID[15:8]).
*/
if (stage1 && smmu->version > ARM_SMMU_V1)
- writel_relaxed(cb->tcr[1], cb_base + ARM_SMMU_CB_TTBCR2);
- writel_relaxed(cb->tcr[0], cb_base + ARM_SMMU_CB_TTBCR);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
/* TTBRs */
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
- writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
- writel_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
- writel_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
} else {
- writeq_relaxed(cb->ttbr[0], cb_base + ARM_SMMU_CB_TTBR0);
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
if (stage1)
- writeq_relaxed(cb->ttbr[1], cb_base + ARM_SMMU_CB_TTBR1);
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
+ cb->ttbr[1]);
}
/* MAIRs (stage-1 only) */
if (stage1) {
- writel_relaxed(cb->mair[0], cb_base + ARM_SMMU_CB_S1_MAIR0);
- writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
}
/* SCTLR */
@@ -752,7 +622,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= SCTLR_E;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -842,7 +712,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
- smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+ smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -862,9 +732,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
oas = min(oas, 40UL);
}
if (smmu->version == ARM_SMMU_V2)
- smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
else
- smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+ smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
@@ -884,23 +754,29 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
}
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
- cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
+ cfg->vmid = cfg->cbndx + 1;
else
- cfg->asid = cfg->cbndx + smmu->cavium_id_base;
+ cfg->asid = cfg->cbndx;
+
+ smmu_domain->smmu = smmu;
+ if (smmu->impl && smmu->impl->init_context) {
+ ret = smmu->impl->init_context(smmu_domain);
+ if (ret)
+ goto out_unlock;
+ }
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = smmu_domain->tlb_ops,
+ .tlb = &smmu_domain->flush_ops->tlb,
.iommu_dev = smmu->dev,
};
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
- smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
ret = -ENOMEM;
@@ -1019,24 +895,24 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_smr *smr = smmu->smrs + idx;
- u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
+ u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);
if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
reg |= SMR_VALID;
- writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
- u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
- (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
- (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+ u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
+ FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
+ FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);
if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
smmu->smrs[idx].valid)
reg |= S2CR_EXIDVALID;
- writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
@@ -1052,7 +928,6 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
*/
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 smr;
if (!smmu->smrs)
@@ -1063,15 +938,15 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
- smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+ smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
+ smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
- smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+ smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
+ smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}
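
FIELD_PREP()/FIELD_GET(), used throughout this conversion, are shorthand for shift-and-mask against a contiguous field mask; the userspace equivalents below show the mechanics that replace the old open-coded SMR_ID_SHIFT/SMR_MASK_SHIFT arithmetic. The SMR field masks here are stand-ins chosen for the example rather than values copied from the header.

#include <stdio.h>
#include <stdint.h>

#define SMR_ID		0x0000ffffu	/* example: stream ID in bits [15:0] */
#define SMR_MASK	0x7fff0000u	/* example: mask field in bits [30:16] */

static unsigned int field_shift(uint32_t mask)
{
	return __builtin_ctz(mask);	/* position of lowest set bit */
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << field_shift(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> field_shift(mask);
}

int main(void)
{
	uint32_t smr = field_prep(SMR_ID, 0x123) | field_prep(SMR_MASK, 0x45);

	printf("smr=%#x id=%#x mask=%#x\n", smr,
	       field_get(SMR_ID, smr), field_get(SMR_MASK, smr));
	return 0;
}
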
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
@@ -1140,8 +1015,8 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
mutex_lock(&smmu->stream_map_mutex);
/* Figure out a viable stream map entry allocation */
for_each_cfg_sme(fwspec, i, idx) {
- u16 sid = fwspec->ids[i];
- u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+ u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
if (idx != INVALID_SMENDX) {
ret = -EEXIST;
@@ -1301,7 +1176,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1311,7 +1186,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return 0;
arm_smmu_rpm_get(smmu);
- ret = ops->unmap(ops, iova, size);
+ ret = ops->unmap(ops, iova, size, gather);
arm_smmu_rpm_put(smmu);
return ret;
@@ -1322,21 +1197,22 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops) {
+ if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->tlb_ops) {
+ if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+ smmu_domain->flush_ops->tlb_sync(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
@@ -1349,28 +1225,25 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
struct device *dev = smmu->dev;
- void __iomem *cb_base;
+ void __iomem *reg;
u32 tmp;
u64 phys;
unsigned long va, flags;
- int ret;
+ int ret, idx = cfg->cbndx;
ret = arm_smmu_rpm_get(smmu);
if (ret < 0)
return 0;
- cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
-
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
- /* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL;
- if (smmu->version == ARM_SMMU_V2)
- smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
- else /* Register is only 32-bit in v1 */
- writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
+ else
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
- if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
- !(tmp & ATSR_ACTIVE), 5, 50)) {
+ reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
+ if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
dev_err(dev,
"iova to phys timed out on %pad. Falling back to software table walk.\n",
@@ -1378,7 +1251,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova);
}
- phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+ phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
if (phys & CB_PAR_F) {
dev_err(dev, "translation fault!\n");
@@ -1426,16 +1299,11 @@ static bool arm_smmu_capable(enum iommu_cap cap)
}
}
-static int arm_smmu_match_node(struct device *dev, const void *data)
-{
- return dev->fwnode == data;
-}
-
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
- fwnode, arm_smmu_match_node);
+ struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
+ fwnode);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
@@ -1466,8 +1334,8 @@ static int arm_smmu_add_device(struct device *dev)
ret = -EINVAL;
for (i = 0; i < fwspec->num_ids; i++) {
- u16 sid = fwspec->ids[i];
- u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+ u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
+ u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);
if (sid & ~smmu->streamid_mask) {
dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
@@ -1648,12 +1516,12 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
u32 mask, fwid = 0;
if (args->args_count > 0)
- fwid |= (u16)args->args[0];
+ fwid |= FIELD_PREP(SMR_ID, args->args[0]);
if (args->args_count > 1)
- fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+ fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
- fwid |= (u16)mask << SMR_MASK_SHIFT;
+ fwid |= FIELD_PREP(SMR_MASK, mask);
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
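The hunk above packs the stream ID into SMR_ID (bits 15:0) and the mask into SMR_MASK (bits 31:16) of fwid with FIELD_PREP(), where the old code open-coded the shifts. Below is a minimal userspace sketch of the same packing and unpacking; the sid/mask values are made-up examples and plain shifts stand in for FIELD_PREP()/FIELD_GET():

/*
 * Illustrative only: mirrors FIELD_PREP()/FIELD_GET() over SMR_ID [15:0]
 * and SMR_MASK [31:16] with plain shifts; the sid/mask values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sid = 0x0042, mask = 0x7f80;
	uint32_t fwid = (sid & 0xffff) | ((mask & 0xffff) << 16);

	printf("fwid=0x%08x id=0x%04x mask=0x%04x\n",
	       fwid, fwid & 0xffff, (fwid >> 16) & 0xffff);
	return 0;
}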
@@ -1706,13 +1574,12 @@ static struct iommu_ops arm_smmu_ops = {
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
int i;
- u32 reg, major;
+ u32 reg;
/* clear global FSR */
- reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
- writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
/*
* Reset stream mapping groups: Initial values mark all SMRn as
@@ -1721,47 +1588,17 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
- if (smmu->model == ARM_MMU500) {
- /*
- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
- * bit is only present in MMU-500r2 onwards.
- */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
- if (major >= 2)
- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
- /*
- * Allow unmatched Stream IDs to allocate bypass
- * TLB entries for reduced latency.
- */
- reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
- }
-
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
- void __iomem *cb_base = ARM_SMMU_CB(smmu, i);
-
arm_smmu_write_context_bank(smmu, i);
- writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
- /*
- * Disable MMU-500's not-particularly-beneficial next-page
- * prefetcher for the sake of errata #841119 and #826419.
- */
- if (smmu->model == ARM_MMU500) {
- reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
- reg &= ~ARM_MMU500_ACTLR_CPRE;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
- }
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, FSR_FAULT);
}
/* Invalidate the TLB, just in case */
- writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
- writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
- reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
/* Enable fault reporting */
reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1780,7 +1617,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg &= ~sCR0_FB;
/* Don't upgrade barriers */
- reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+ reg &= ~(sCR0_BSU);
if (smmu->features & ARM_SMMU_FEAT_VMID16)
reg |= sCR0_VMID16EN;
@@ -1788,9 +1625,12 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_EXIDS)
reg |= sCR0_EXIDENABLE;
+ if (smmu->impl && smmu->impl->reset)
+ smmu->impl->reset(smmu);
+
/* Push the button */
arm_smmu_tlb_sync_global(smmu);
- writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}
static int arm_smmu_id_size_to_bits(int size)
@@ -1814,8 +1654,7 @@ static int arm_smmu_id_size_to_bits(int size)
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
- unsigned long size;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ unsigned int size;
u32 id;
bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
@@ -1825,7 +1664,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->version == ARM_SMMU_V2 ? 2 : 1);
/* ID0 */
- id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
/* Restrict available stages based on module parameter */
if (force_stage == 1)
@@ -1879,12 +1718,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_EXIDS;
size = 1 << 16;
} else {
- size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ size = 1 << FIELD_GET(ID0_NUMSIDB, id);
}
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
- size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
+ size = FIELD_GET(ID0_NUMSMRG, id);
if (size == 0) {
dev_err(smmu->dev,
"stream-matching supported, but no SMRs present!\n");
@@ -1898,7 +1737,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENOMEM;
dev_notice(smmu->dev,
- "\tstream matching with %lu register groups", size);
+ "\tstream matching with %u register groups", size);
}
/* s2cr->type == 0 means translation, so initialise explicitly */
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -1919,49 +1758,38 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
}
/* ID1 */
- id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
- size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
- size <<= smmu->pgshift;
- if (smmu->cb_base != gr0_base + size)
+ size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
+ if (smmu->numpage != 2 * size << smmu->pgshift)
dev_warn(smmu->dev,
- "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
- size * 2, (smmu->cb_base - gr0_base) * 2);
+ "SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
+ 2 * size << smmu->pgshift, smmu->numpage);
+ /* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
+ smmu->numpage = size;
- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
- smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+ smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
+ smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
if (smmu->num_s2_context_banks > smmu->num_context_banks) {
dev_err(smmu->dev, "impossible number of S2 context banks!\n");
return -ENODEV;
}
dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
smmu->num_context_banks, smmu->num_s2_context_banks);
- /*
- * Cavium CN88xx erratum #27704.
- * Ensure ASID and VMID allocation is unique across all SMMUs in
- * the system.
- */
- if (smmu->model == CAVIUM_SMMUV2) {
- smmu->cavium_id_base =
- atomic_add_return(smmu->num_context_banks,
- &cavium_smmu_context_count);
- smmu->cavium_id_base -= smmu->num_context_banks;
- dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
- }
smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
sizeof(*smmu->cbs), GFP_KERNEL);
if (!smmu->cbs)
return -ENOMEM;
/* ID2 */
- id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
- size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+ id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
smmu->ipa_size = size;
/* The output mask is also applied for bypass */
- size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+ size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
smmu->pa_size = size;
if (id & ID2_VMID16)
@@ -1981,7 +1809,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
if (smmu->version == ARM_SMMU_V1_64K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
} else {
- size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+ size = FIELD_GET(ID2_UBS, id);
smmu->va_size = arm_smmu_id_size_to_bits(size);
if (id & ID2_PTFS_4K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
@@ -2018,6 +1846,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
smmu->ipa_size, smmu->pa_size);
+ if (smmu->impl && smmu->impl->cfg_probe)
+ return smmu->impl->cfg_probe(smmu);
+
return 0;
}
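In the ID1 hunk above, the register space is expected to span 2 * (1 << (NUMPAGENDXB + 1)) pages of (1 << pgshift) bytes, and smmu->numpage afterwards keeps only the page count of the global space so that ARM_SMMU_CB(smmu, n) can index context-bank pages as numpage + n. A small arithmetic sketch with made-up field values (NUMPAGENDXB = 3, 64K pages):

/*
 * Arithmetic sketch of the ID1 sizing above; NUMPAGENDXB = 3 and a 64K
 * page (pgshift = 16) are made-up example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int numpagendxb = 3, pgshift = 16;
	unsigned int numpage = 1 << (numpagendxb + 1);	/* global space, in pages */
	unsigned int total = (2 * numpage) << pgshift;	/* expected region size, bytes */

	/* context bank n lives at page (numpage + n), cf. ARM_SMMU_CB() */
	printf("numpage=%u region=0x%x cb0 at page %u\n", numpage, total, numpage);
	return 0;
}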
@@ -2130,8 +1961,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
smmu->version = data->version;
smmu->model = data->model;
- parse_driver_options(smmu);
-
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding)
@@ -2194,12 +2023,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;
+ smmu = arm_smmu_impl_init(smmu);
+ if (IS_ERR(smmu))
+ return PTR_ERR(smmu);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- smmu->cb_base = smmu->base + resource_size(res) / 2;
+ /*
+ * The resource size should effectively match the value of SMMU_TOP;
+ * stash that temporarily until we know PAGESIZE to validate it with.
+ */
+ smmu->numpage = resource_size(res);
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
@@ -2339,7 +2176,7 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
arm_smmu_rpm_get(smmu);
/* Turn the thing off */
- writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
arm_smmu_rpm_put(smmu);
if (pm_runtime_enabled(smmu->dev))
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
new file mode 100644
index 000000000000..b19b6cae9b5e
--- /dev/null
+++ b/drivers/iommu/arm-smmu.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _ARM_SMMU_H
+#define _ARM_SMMU_H
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0 0x0
+#define sCR0_VMID16EN BIT(31)
+#define sCR0_BSU GENMASK(15, 14)
+#define sCR0_FB BIT(13)
+#define sCR0_PTM BIT(12)
+#define sCR0_VMIDPNE BIT(11)
+#define sCR0_USFCFG BIT(10)
+#define sCR0_GCFGFIE BIT(5)
+#define sCR0_GCFGFRE BIT(4)
+#define sCR0_EXIDENABLE BIT(3)
+#define sCR0_GFIE BIT(2)
+#define sCR0_GFRE BIT(1)
+#define sCR0_CLIENTPD BIT(0)
+
+/* Auxiliary Configuration register */
+#define ARM_SMMU_GR0_sACR 0x10
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0 0x20
+#define ID0_S1TS BIT(30)
+#define ID0_S2TS BIT(29)
+#define ID0_NTS BIT(28)
+#define ID0_SMS BIT(27)
+#define ID0_ATOSNS BIT(26)
+#define ID0_PTFS_NO_AARCH32 BIT(25)
+#define ID0_PTFS_NO_AARCH32S BIT(24)
+#define ID0_NUMIRPT GENMASK(23, 16)
+#define ID0_CTTW BIT(14)
+#define ID0_NUMSIDB GENMASK(12, 9)
+#define ID0_EXIDS BIT(8)
+#define ID0_NUMSMRG GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID1 0x24
+#define ID1_PAGESIZE BIT(31)
+#define ID1_NUMPAGENDXB GENMASK(30, 28)
+#define ID1_NUMS2CB GENMASK(23, 16)
+#define ID1_NUMCB GENMASK(7, 0)
+
+#define ARM_SMMU_GR0_ID2 0x28
+#define ID2_VMID16 BIT(15)
+#define ID2_PTFS_64K BIT(14)
+#define ID2_PTFS_16K BIT(13)
+#define ID2_PTFS_4K BIT(12)
+#define ID2_UBS GENMASK(11, 8)
+#define ID2_OAS GENMASK(7, 4)
+#define ID2_IAS GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_ID3 0x2c
+#define ARM_SMMU_GR0_ID4 0x30
+#define ARM_SMMU_GR0_ID5 0x34
+#define ARM_SMMU_GR0_ID6 0x38
+
+#define ARM_SMMU_GR0_ID7 0x3c
+#define ID7_MAJOR GENMASK(7, 4)
+#define ID7_MINOR GENMASK(3, 0)
+
+#define ARM_SMMU_GR0_sGFSR 0x48
+#define ARM_SMMU_GR0_sGFSYNR0 0x50
+#define ARM_SMMU_GR0_sGFSYNR1 0x54
+#define ARM_SMMU_GR0_sGFSYNR2 0x58
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_TLBIVMID 0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
+#define ARM_SMMU_GR0_TLBIALLH 0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC 0x70
+
+#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
+#define sTLBGSTATUS_GSACTIVE BIT(0)
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
+#define SMR_VALID BIT(31)
+#define SMR_MASK GENMASK(31, 16)
+#define SMR_ID GENMASK(15, 0)
+
+#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
+#define S2CR_PRIVCFG GENMASK(25, 24)
+enum arm_smmu_s2cr_privcfg {
+ S2CR_PRIVCFG_DEFAULT,
+ S2CR_PRIVCFG_DIPAN,
+ S2CR_PRIVCFG_UNPRIV,
+ S2CR_PRIVCFG_PRIV,
+};
+#define S2CR_TYPE GENMASK(17, 16)
+enum arm_smmu_s2cr_type {
+ S2CR_TYPE_TRANS,
+ S2CR_TYPE_BYPASS,
+ S2CR_TYPE_FAULT,
+};
+#define S2CR_EXIDVALID BIT(10)
+#define S2CR_CBNDX GENMASK(7, 0)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
+#define CBAR_IRPTNDX GENMASK(31, 24)
+#define CBAR_TYPE GENMASK(17, 16)
+enum arm_smmu_cbar_type {
+ CBAR_TYPE_S2_TRANS,
+ CBAR_TYPE_S1_TRANS_S2_BYPASS,
+ CBAR_TYPE_S1_TRANS_S2_FAULT,
+ CBAR_TYPE_S1_TRANS_S2_TRANS,
+};
+#define CBAR_S1_MEMATTR GENMASK(15, 12)
+#define CBAR_S1_MEMATTR_WB 0xf
+#define CBAR_S1_BPSHCFG GENMASK(9, 8)
+#define CBAR_S1_BPSHCFG_NSH 3
+#define CBAR_VMID GENMASK(7, 0)
+
+#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
+
+#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
+#define CBA2R_VMID16 GENMASK(31, 16)
+#define CBA2R_VA64 BIT(0)
+
+#define ARM_SMMU_CB_SCTLR 0x0
+#define SCTLR_S1_ASIDPNE BIT(12)
+#define SCTLR_CFCFG BIT(7)
+#define SCTLR_CFIE BIT(6)
+#define SCTLR_CFRE BIT(5)
+#define SCTLR_E BIT(4)
+#define SCTLR_AFE BIT(2)
+#define SCTLR_TRE BIT(1)
+#define SCTLR_M BIT(0)
+
+#define ARM_SMMU_CB_ACTLR 0x4
+
+#define ARM_SMMU_CB_RESUME 0x8
+#define RESUME_TERMINATE BIT(0)
+
+#define ARM_SMMU_CB_TCR2 0x10
+#define TCR2_SEP GENMASK(17, 15)
+#define TCR2_SEP_UPSTREAM 0x7
+#define TCR2_AS BIT(4)
+
+#define ARM_SMMU_CB_TTBR0 0x20
+#define ARM_SMMU_CB_TTBR1 0x28
+#define TTBRn_ASID GENMASK_ULL(63, 48)
+
+#define ARM_SMMU_CB_TCR 0x30
+#define ARM_SMMU_CB_CONTEXTIDR 0x34
+#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+
+#define ARM_SMMU_CB_PAR 0x50
+#define CB_PAR_F BIT(0)
+
+#define ARM_SMMU_CB_FSR 0x58
+#define FSR_MULTI BIT(31)
+#define FSR_SS BIT(30)
+#define FSR_UUT BIT(8)
+#define FSR_ASF BIT(7)
+#define FSR_TLBLKF BIT(6)
+#define FSR_TLBMCF BIT(5)
+#define FSR_EF BIT(4)
+#define FSR_PF BIT(3)
+#define FSR_AFF BIT(2)
+#define FSR_TF BIT(1)
+
+#define FSR_IGN (FSR_AFF | FSR_ASF | \
+ FSR_TLBMCF | FSR_TLBLKF)
+#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
+ FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
+
+#define ARM_SMMU_CB_FAR 0x60
+
+#define ARM_SMMU_CB_FSYNR0 0x68
+#define FSYNR0_WNR BIT(4)
+
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
+#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_TLBSYNC 0x7f0
+#define ARM_SMMU_CB_TLBSTATUS 0x7f4
+#define ARM_SMMU_CB_ATS1PR 0x800
+
+#define ARM_SMMU_CB_ATSR 0x8f0
+#define ATSR_ACTIVE BIT(0)
+
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS 128
+
+
+/* Shared driver definitions */
+enum arm_smmu_arch_version {
+ ARM_SMMU_V1,
+ ARM_SMMU_V1_64K,
+ ARM_SMMU_V2,
+};
+
+enum arm_smmu_implementation {
+ GENERIC_SMMU,
+ ARM_MMU500,
+ CAVIUM_SMMUV2,
+ QCOM_SMMUV2,
+};
+
+struct arm_smmu_device {
+ struct device *dev;
+
+ void __iomem *base;
+ unsigned int numpage;
+ unsigned int pgshift;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
+#define ARM_SMMU_FEAT_VMID16 (1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS (1 << 12)
+ u32 features;
+
+ enum arm_smmu_arch_version version;
+ enum arm_smmu_implementation model;
+ const struct arm_smmu_impl *impl;
+
+ u32 num_context_banks;
+ u32 num_s2_context_banks;
+ DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+ struct arm_smmu_cb *cbs;
+ atomic_t irptndx;
+
+ u32 num_mapping_groups;
+ u16 streamid_mask;
+ u16 smr_mask_mask;
+ struct arm_smmu_smr *smrs;
+ struct arm_smmu_s2cr *s2crs;
+ struct mutex stream_map_mutex;
+
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
+ unsigned long pgsize_bitmap;
+
+ u32 num_global_irqs;
+ u32 num_context_irqs;
+ unsigned int *irqs;
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ spinlock_t global_sync_lock;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+};
+
+enum arm_smmu_context_fmt {
+ ARM_SMMU_CTX_FMT_NONE,
+ ARM_SMMU_CTX_FMT_AARCH64,
+ ARM_SMMU_CTX_FMT_AARCH32_L,
+ ARM_SMMU_CTX_FMT_AARCH32_S,
+};
+
+struct arm_smmu_cfg {
+ u8 cbndx;
+ u8 irptndx;
+ union {
+ u16 asid;
+ u16 vmid;
+ };
+ enum arm_smmu_cbar_type cbar;
+ enum arm_smmu_context_fmt fmt;
+};
+#define INVALID_IRPTNDX 0xff
+
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_flush_ops {
+ struct iommu_flush_ops tlb;
+ void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
+ bool leaf, void *cookie);
+ void (*tlb_sync)(void *cookie);
+};
+
+struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ const struct arm_smmu_flush_ops *flush_ops;
+ struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
+ bool non_strict;
+ struct mutex init_mutex; /* Protects smmu pointer */
+ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
+ struct iommu_domain domain;
+};
+
+
+/* Implementation details, yay! */
+struct arm_smmu_impl {
+ u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
+ void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
+ u32 val);
+ u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
+ void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
+ u64 val);
+ int (*cfg_probe)(struct arm_smmu_device *smmu);
+ int (*reset)(struct arm_smmu_device *smmu);
+ int (*init_context)(struct arm_smmu_domain *smmu_domain);
+};
+
+static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
+{
+ return smmu->base + (n << smmu->pgshift);
+}
+
+static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
+{
+ if (smmu->impl && unlikely(smmu->impl->read_reg))
+ return smmu->impl->read_reg(smmu, page, offset);
+ return readl_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
+ int offset, u32 val)
+{
+ if (smmu->impl && unlikely(smmu->impl->write_reg))
+ smmu->impl->write_reg(smmu, page, offset, val);
+ else
+ writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
+{
+ if (smmu->impl && unlikely(smmu->impl->read_reg64))
+ return smmu->impl->read_reg64(smmu, page, offset);
+ return readq_relaxed(arm_smmu_page(smmu, page) + offset);
+}
+
+static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
+ int offset, u64 val)
+{
+ if (smmu->impl && unlikely(smmu->impl->write_reg64))
+ smmu->impl->write_reg64(smmu, page, offset, val);
+ else
+ writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
+}
+
+#define ARM_SMMU_GR0 0
+#define ARM_SMMU_GR1 1
+#define ARM_SMMU_CB(s, n) ((s)->numpage + (n))
+
+#define arm_smmu_gr0_read(s, o) \
+ arm_smmu_readl((s), ARM_SMMU_GR0, (o))
+#define arm_smmu_gr0_write(s, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))
+
+#define arm_smmu_gr1_read(s, o) \
+ arm_smmu_readl((s), ARM_SMMU_GR1, (o))
+#define arm_smmu_gr1_write(s, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_GR1, (o), (v))
+
+#define arm_smmu_cb_read(s, n, o) \
+ arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_write(s, n, o, v) \
+ arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v))
+#define arm_smmu_cb_readq(s, n, o) \
+ arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o))
+#define arm_smmu_cb_writeq(s, n, o, v) \
+ arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
+
+struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+
+#endif /* _ARM_SMMU_H */
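Taken together, the helpers and macros in this new header turn every MMIO access into a (page, offset) pair: unless an impl hook overrides it, arm_smmu_cb_write(smmu, idx, off, val) resolves to base + ((numpage + idx) << pgshift) + off. An illustrative address calculation, with base, numpage, pgshift and the context bank index as made-up values:

/*
 * Rough address calculation mirroring arm_smmu_page() + ARM_SMMU_CB();
 * base, numpage, pgshift and the context bank index are made-up values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x10000000;			/* hypothetical SMMU base */
	unsigned int numpage = 16, pgshift = 16, idx = 2;
	unsigned int off = 0x58;			/* ARM_SMMU_CB_FSR */
	uint64_t addr = base + ((uint64_t)(numpage + idx) << pgshift) + off;

	printf("CB%u FSR register at 0x%llx\n", idx, (unsigned long long)addr);
	return 0;
}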
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..8f412af84247 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn;
+ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+ iovad = &cookie->iovad;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
@@ -444,13 +446,18 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
+ iommu_iotlb_gather_init(&iotlb_gather);
+
+ unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
- WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
if (!cookie->fq_domain)
- iommu_tlb_sync(domain);
+ iommu_tlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
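The rewritten __iommu_dma_unmap() above batches the unmap through an iommu_iotlb_gather and only syncs the TLB once afterwards. A minimal sketch of that calling pattern, using only the functions visible in the hunk; the helper name and its arguments are hypothetical:

/*
 * Minimal sketch of the gather-then-sync unmap pattern; the helper name
 * and its arguments are hypothetical, the iommu_* calls are the ones
 * used in the hunk above.
 */
#include <linux/iommu.h>

static void example_unmap_batched(struct iommu_domain *domain,
				  dma_addr_t iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
	WARN_ON(unmapped != size);
	iommu_tlb_sync(domain, &gather);	/* one TLB flush for the whole batch */
}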
@@ -459,13 +466,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
@@ -574,7 +579,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -764,7 +769,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
- (cur_len + s_length <= max_len)) {
+ (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
@@ -967,15 +972,18 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
{
bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size);
+ int node = dev_to_node(dev);
struct page *page = NULL;
void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (!page)
+ page = alloc_pages_node(node, gfp, get_order(alloc_size));
+ if (!page)
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1043,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn, off = vma->vm_pgoff;
int ret;
- vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -1147,16 +1155,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
- if (iova == DMA_MAPPING_ERROR)
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
goto out_free_page;
+ if (iommu_map(domain, iova, msi_addr, size, prot))
+ goto out_free_iova;
+
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
+out_free_iova:
+ iommu_dma_free_iova(cookie, iova, size);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 5d0754ed5fa0..eecd6a421667 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1519,6 +1519,64 @@ static const char *dma_remap_fault_reasons[] =
"PCE for translation request specifies blocking",
};
+static const char * const dma_remap_sm_fault_reasons[] = {
+ "SM: Invalid Root Table Address",
+ "SM: TTM 0 for request with PASID",
+ "SM: TTM 0 for page group request",
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
+ "SM: Error attempting to access Root Entry",
+ "SM: Present bit in Root Entry is clear",
+ "SM: Non-zero reserved field set in Root Entry",
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
+ "SM: Error attempting to access Context Entry",
+ "SM: Present bit in Context Entry is clear",
+ "SM: Non-zero reserved field set in the Context Entry",
+ "SM: Invalid Context Entry",
+ "SM: DTE field in Context Entry is clear",
+ "SM: PASID Enable field in Context Entry is clear",
+ "SM: PASID is larger than the max in Context Entry",
+ "SM: PRE field in Context-Entry is clear",
+ "SM: RID_PASID field error in Context-Entry",
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
+ "SM: Error attempting to access the PASID Directory Entry",
+ "SM: Present bit in Directory Entry is clear",
+ "SM: Non-zero reserved field set in PASID Directory Entry",
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
+ "SM: Error attempting to access PASID Table Entry",
+ "SM: Present bit in PASID Table Entry is clear",
+ "SM: Non-zero reserved field set in PASID Table Entry",
+ "SM: Invalid Scalable-Mode PASID Table Entry",
+ "SM: ERE field is clear in PASID Table Entry",
+ "SM: SRE field is clear in PASID Table Entry",
+ "Unknown", "Unknown",/* 0x5E-0x5F */
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
+ "SM: Error attempting to access first-level paging entry",
+ "SM: Present bit in first-level paging entry is clear",
+ "SM: Non-zero reserved field set in first-level paging entry",
+ "SM: Error attempting to access FL-PML4 entry",
+ "SM: First-level entry address beyond MGAW in Nested translation",
+ "SM: Read permission error in FL-PML4 entry in Nested translation",
+ "SM: Read permission error in first-level paging entry in Nested translation",
+ "SM: Write permission error in first-level paging entry in Nested translation",
+ "SM: Error attempting to access second-level paging entry",
+ "SM: Read/Write permission error in second-level paging entry",
+ "SM: Non-zero reserved field set in second-level paging entry",
+ "SM: Invalid second-level page table pointer",
+ "SM: A/D bit update needed in second-level entry when set up in no snoop",
+ "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
+ "SM: Address in first-level translation is not canonical",
+ "SM: U/S set 0 for first-level translation with user privilege",
+ "SM: No execute permission for request with PASID and ER=1",
+ "SM: Address beyond the DMA hardware max",
+ "SM: Second-level entry address beyond the max",
+ "SM: No write permission for Write/AtomicOp request",
+ "SM: No read permission for Read/AtomicOp request",
+ "SM: Invalid address-interrupt address",
+ "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
+ "SM: A/D bit update needed in first-level entry when set up in no snoop",
+};
+
static const char *irq_remap_fault_reasons[] =
{
"Detected reserved fields in the decoded interrupt-remapped request",
@@ -1536,6 +1594,10 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
ARRAY_SIZE(irq_remap_fault_reasons))) {
*fault_type = INTR_REMAP;
return irq_remap_fault_reasons[fault_reason - 0x20];
+ } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
+ ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
+ *fault_type = DMA_REMAP;
+ return dma_remap_sm_fault_reasons[fault_reason - 0x30];
} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
*fault_type = DMA_REMAP;
return dma_remap_fault_reasons[fault_reason];
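dmar_get_fault_reason() now picks one of three tables by range: reasons at 0x20 and up index irq_remap_fault_reasons, reasons at 0x30 and up index the new dma_remap_sm_fault_reasons, and anything below ARRAY_SIZE(dma_remap_fault_reasons) indexes the legacy table. A standalone illustration of the same three-way lookup; the tables here are tiny stand-ins, not the kernel strings:

/*
 * Standalone illustration of the three-way reason lookup; the tables are
 * tiny stand-ins, not the kernel strings.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const legacy[] = { "ok", "present bit clear" };
static const char * const irq_remap[] = { "reserved fields set" };
static const char * const sm[] = { "SM: invalid root table" };

static const char *reason_str(unsigned int r)
{
	if (r >= 0x20 && r - 0x20 < ARRAY_SIZE(irq_remap))
		return irq_remap[r - 0x20];
	if (r >= 0x30 && r - 0x30 < ARRAY_SIZE(sm))
		return sm[r - 0x30];
	if (r < ARRAY_SIZE(legacy))
		return legacy[r];
	return "unknown";
}

int main(void)
{
	printf("0x30 -> %s\n", reason_str(0x30));
	printf("0x01 -> %s\n", reason_str(0x01));
	return 0;
}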
@@ -1611,7 +1673,8 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, u16 source_id, unsigned long long addr)
+ u8 fault_reason, int pasid, u16 source_id,
+ unsigned long long addr)
{
const char *reason;
int fault_type;
@@ -1624,10 +1687,11 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
- pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
+ pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+ PCI_FUNC(source_id & 0xFF), pasid, addr,
+ fault_reason, reason);
return 0;
}
@@ -1659,8 +1723,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u8 fault_reason;
u16 source_id;
u64 guest_addr;
- int type;
+ int type, pasid;
u32 data;
+ bool pasid_present;
/* highest 32 bits */
data = readl(iommu->reg + reg +
@@ -1672,10 +1737,12 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
fault_reason = dma_frcd_fault_reason(data);
type = dma_frcd_type(data);
+ pasid = dma_frcd_pasid_value(data);
data = readl(iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 8);
source_id = dma_frcd_source_id(data);
+ pasid_present = dma_frcd_pasid_present(data);
guest_addr = dmar_readq(iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN);
guest_addr = dma_frcd_page_addr(guest_addr);
@@ -1688,7 +1755,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
if (!ratelimited)
+ /* Using pasid -1 if pasid is not present */
dmar_fault_do_one(iommu, type, fault_reason,
+ pasid_present ? pasid : -1,
source_id, guest_addr);
fault_index++;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b0c1e5f9daae..9c94e16fb127 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -566,7 +566,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
static const struct iommu_ops exynos_iommu_ops;
-static int __init exynos_sysmmu_probe(struct platform_device *pdev)
+static int exynos_sysmmu_probe(struct platform_device *pdev)
{
int irq, ret;
struct device *dev = &pdev->dev;
@@ -583,10 +583,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
return PTR_ERR(data->sfrbase);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "Unable to find IRQ resource\n");
+ if (irq <= 0)
return irq;
- }
ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
dev_name(dev), data);
@@ -1130,7 +1128,8 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
- unsigned long l_iova, size_t size)
+ unsigned long l_iova, size_t size,
+ struct iommu_iotlb_gather *gather)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 73a552914455..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m)
(u64)0, (u64)0, (u64)0);
else
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
- tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+ tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
tbl_wlk->pasid_tbl_entry->val[1],
- tbl_wlk->pasid_tbl_entry->val[2]);
+ tbl_wlk->pasid_tbl_entry->val[0]);
}
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
tbl_wlk.ctx_entry = context;
m->private = &tbl_wlk;
- if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
pasid_dir_size = get_pasid_dir_size(context);
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..87de0b975672 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,9 +41,11 @@
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
+#include <linux/swiotlb.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>
#include "irq_remapping.h"
#include "intel-pasid.h"
@@ -346,6 +348,8 @@ static int domain_detach_iommu(struct dmar_domain *domain,
static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -362,6 +366,7 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
+static int intel_no_bounce;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
@@ -375,6 +380,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
+#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
+ to_pci_dev(d)->untrusted)
+
/*
* Iterate over elements in device_domain_list and call the specified
* callback @fn against each element.
@@ -457,6 +465,9 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
+ } else if (!strncmp(str, "nobounce", 8)) {
+ pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
+ intel_no_bounce = 1;
}
str += strcspn(str, ",");
@@ -1833,9 +1844,65 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+ int guest_width)
+{
+ int adjust_width, agaw;
+ unsigned long sagaw;
+ int err;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+
+ err = init_iova_flush_queue(&domain->iovad,
+ iommu_flush_iova, iova_entry_free);
+ if (err)
+ return err;
+
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ if (guest_width > cap_mgaw(iommu->cap))
+ guest_width = cap_mgaw(iommu->cap);
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ agaw = width_to_agaw(adjust_width);
+ sagaw = cap_sagaw(iommu->cap);
+ if (!test_bit(agaw, &sagaw)) {
+ /* hardware doesn't support it, choose a bigger one */
+ pr_debug("Hardware doesn't support agaw %d\n", agaw);
+ agaw = find_next_bit(&sagaw, 5, agaw);
+ if (agaw >= 5)
+ return -ENODEV;
+ }
+ domain->agaw = agaw;
+
+ if (ecap_coherent(iommu->ecap))
+ domain->iommu_coherency = 1;
+ else
+ domain->iommu_coherency = 0;
+
+ if (ecap_sc_support(iommu->ecap))
+ domain->iommu_snooping = 1;
+ else
+ domain->iommu_snooping = 0;
+
+ if (intel_iommu_superpage)
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+ else
+ domain->iommu_superpage = 0;
+
+ domain->nid = iommu->node;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static void domain_exit(struct dmar_domain *domain)
{
- struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
domain_remove_dev_info(domain);
@@ -1843,9 +1910,12 @@ static void domain_exit(struct dmar_domain *domain)
/* destroy iovas */
put_iova_domain(&domain->iovad);
- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ if (domain->pgd) {
+ struct page *freelist;
- dma_free_pagelist(freelist);
+ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ dma_free_pagelist(freelist);
+ }
free_domain_mem(domain);
}
@@ -2513,31 +2583,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
return 0;
}
-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info;
@@ -2575,19 +2620,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
-
- if (domain_init(domain, gaw)) {
+ if (domain_init(domain, iommu, gaw)) {
domain_exit(domain);
return NULL;
}
- if (init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova,
- iova_entry_free)) {
- pr_warn("iova flush queue initialization failed\n");
- intel_iommu_strict = 1;
- }
-
out:
return domain;
}
@@ -2692,6 +2729,8 @@ static int domain_prepare_identity_map(struct device *dev,
return iommu_domain_identity_map(domain, start, end);
}
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
static int __init si_domain_init(int hw)
{
struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2741,7 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
- if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
@@ -3268,7 +3307,7 @@ static int __init init_dmars(void)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
- if (iommu_pass_through)
+ if (iommu_default_passthrough())
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3450,6 +3489,7 @@ static bool iommu_need_mapping(struct device *dev)
dmar_domain = to_dmar_domain(domain);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
}
+ dmar_remove_one_dev_info(dev);
get_private_domain_for_dev(dev);
}
@@ -3505,6 +3545,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
+
+ trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
+
return start_paddr;
error:
@@ -3560,11 +3603,9 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
if (dev_is_pci(dev))
pdev = to_pci_dev(dev);
- dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-
freelist = domain_unmap(domain, start_pfn, last_pfn);
-
- if (intel_iommu_strict || (pdev && pdev->untrusted)) {
+ if (intel_iommu_strict || (pdev && pdev->untrusted) ||
+ !has_iova_flush_queue(&domain->iovad)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
@@ -3578,6 +3619,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
* cpu used up by the iotlb flush operation...
*/
}
+
+ trace_unmap_single(dev, dev_addr, size);
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@ -3668,6 +3711,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
}
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
+
+ trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3724,6 +3769,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
+ trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
+ sg_phys(sglist), size << VTD_PAGE_SHIFT);
+
return nelems;
}
@@ -3739,6 +3787,252 @@ static const struct dma_map_ops intel_dma_ops = {
.dma_supported = dma_direct_supported,
};
+static void
+bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, enum dma_sync_target target)
+{
+ struct dmar_domain *domain;
+ phys_addr_t tlb_addr;
+
+ domain = find_domain(dev);
+ if (WARN_ON(!domain))
+ return;
+
+ tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
+}
+
+static dma_addr_t
+bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs,
+ u64 dma_mask)
+{
+ size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+ struct dmar_domain *domain;
+ struct intel_iommu *iommu;
+ unsigned long iova_pfn;
+ unsigned long nrpages;
+ phys_addr_t tlb_addr;
+ int prot = 0;
+ int ret;
+
+ domain = find_domain(dev);
+ if (WARN_ON(dir == DMA_NONE || !domain))
+ return DMA_MAPPING_ERROR;
+
+ iommu = domain_get_iommu(domain);
+ if (WARN_ON(!iommu))
+ return DMA_MAPPING_ERROR;
+
+ nrpages = aligned_nrpages(0, size);
+ iova_pfn = intel_alloc_iova(dev, domain,
+ dma_to_mm_pfn(nrpages), dma_mask);
+ if (!iova_pfn)
+ return DMA_MAPPING_ERROR;
+
+ /*
+ * Check if DMAR supports zero-length reads on write only
+ * mappings..
+ */
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+ !cap_zlr(iommu->cap))
+ prot |= DMA_PTE_READ;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= DMA_PTE_WRITE;
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
+ tlb_addr = swiotlb_tbl_map_single(dev,
+ __phys_to_dma(dev, io_tlb_start),
+ paddr, size, aligned_size, dir, attrs);
+ if (tlb_addr == DMA_MAPPING_ERROR) {
+ goto swiotlb_error;
+ } else {
+ /* Cleanup the padding area. */
+ void *padding_start = phys_to_virt(tlb_addr);
+ size_t padding_size = aligned_size;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE ||
+ dir == DMA_BIDIRECTIONAL)) {
+ padding_start += size;
+ padding_size -= size;
+ }
+
+ memset(padding_start, 0, padding_size);
+ }
+ } else {
+ tlb_addr = paddr;
+ }
+
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
+ tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
+ if (ret)
+ goto mapping_error;
+
+ trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
+
+ return (phys_addr_t)iova_pfn << PAGE_SHIFT;
+
+mapping_error:
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+ aligned_size, dir, attrs);
+swiotlb_error:
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
+ dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
+ size, (unsigned long long)paddr, dir);
+
+ return DMA_MAPPING_ERROR;
+}
+
+static void
+bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
+ struct dmar_domain *domain;
+ phys_addr_t tlb_addr;
+
+ domain = find_domain(dev);
+ if (WARN_ON(!domain))
+ return;
+
+ tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
+ if (WARN_ON(!tlb_addr))
+ return;
+
+ intel_unmap(dev, dev_addr, size);
+ if (is_swiotlb_buffer(tlb_addr))
+ swiotlb_tbl_unmap_single(dev, tlb_addr, size,
+ aligned_size, dir, attrs);
+
+ trace_bounce_unmap_single(dev, dev_addr, size);
+}
+
+static dma_addr_t
+bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return bounce_map_single(dev, page_to_phys(page) + offset,
+ size, dir, attrs, *dev->dma_mask);
+}
+
+static dma_addr_t
+bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return bounce_map_single(dev, phys_addr, size,
+ dir, attrs, *dev->dma_mask);
+}
+
+static void
+bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ bounce_unmap_single(dev, dev_addr, size, dir, attrs);
+}
+
+static void
+bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i)
+ bounce_unmap_page(dev, sg->dma_address,
+ sg_dma_len(sg), dir, attrs);
+}
+
+static int
+bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ sg->dma_address = bounce_map_page(dev, sg_page(sg),
+ sg->offset, sg->length,
+ dir, attrs);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ goto out_unmap;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nelems;
+
+out_unmap:
+ bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return 0;
+}
+
+static void
+bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i)
+ bounce_sync_single(dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i)
+ bounce_sync_single(dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
+static const struct dma_map_ops bounce_dma_ops = {
+ .alloc = intel_alloc_coherent,
+ .free = intel_free_coherent,
+ .map_sg = bounce_map_sg,
+ .unmap_sg = bounce_unmap_sg,
+ .map_page = bounce_map_page,
+ .unmap_page = bounce_unmap_page,
+ .sync_single_for_cpu = bounce_sync_single_for_cpu,
+ .sync_single_for_device = bounce_sync_single_for_device,
+ .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
+ .sync_sg_for_device = bounce_sync_sg_for_device,
+ .map_resource = bounce_map_resource,
+ .unmap_resource = bounce_unmap_resource,
+ .dma_supported = dma_direct_supported,
+};
+
static inline int iommu_domain_cache_init(void)
{
int ret = 0;
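bounce_map_single() above only routes a buffer through swiotlb when its physical start or size is not VTD_PAGE_SIZE aligned (the IS_ALIGNED(paddr | size, VTD_PAGE_SIZE) test); aligned buffers are mapped in place. A small sketch of that alignment decision, with the page size constant and addresses as example values:

/*
 * Illustration of the alignment test that decides whether bounce_map_single()
 * goes through swiotlb; the page size constant and addresses are examples.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_VTD_PAGE_SIZE 4096u

static bool needs_bounce(uint64_t paddr, uint64_t size)
{
	/* bounce when the start address or the length is not page aligned */
	return ((paddr | size) & (EX_VTD_PAGE_SIZE - 1)) != 0;
}

int main(void)
{
	printf("%d\n", needs_bounce(0x1000, 0x2000));	/* aligned -> 0 */
	printf("%d\n", needs_bounce(0x1080, 0x1f80));	/* unaligned -> 1 */
	return 0;
}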
@@ -4539,22 +4833,20 @@ const struct attribute_group *intel_iommu_groups[] = {
NULL,
};
-static int __init platform_optin_force_iommu(void)
+static inline bool has_untrusted_dev(void)
{
struct pci_dev *pdev = NULL;
- bool has_untrusted_dev = false;
- if (!dmar_platform_optin() || no_platform_optin)
- return 0;
+ for_each_pci_dev(pdev)
+ if (pdev->untrusted)
+ return true;
- for_each_pci_dev(pdev) {
- if (pdev->untrusted) {
- has_untrusted_dev = true;
- break;
- }
- }
+ return false;
+}
- if (!has_untrusted_dev)
+static int __init platform_optin_force_iommu(void)
+{
+ if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
return 0;
if (no_iommu || dmar_disabled)
@@ -4568,9 +4860,6 @@ static int __init platform_optin_force_iommu(void)
iommu_identity_mapping |= IDENTMAP_ALL;
dmar_disabled = 0;
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
- swiotlb = 0;
-#endif
no_iommu = 0;
return 1;
@@ -4710,7 +4999,14 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
- swiotlb = 0;
+ /*
+ * If the system has no untrusted device or the user has decided
+ * to disable the bounce page mechanisms, we don't need swiotlb.
+ * Mark this and the pre-allocated bounce pages will be released
+ * later.
+ */
+ if (!has_untrusted_dev() || intel_no_bounce)
+ swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
@@ -4812,7 +5108,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
/* free the private domain */
if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
domain_exit(info->domain);
free_devinfo_mem(info);
@@ -4825,10 +5122,36 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
- __dmar_remove_one_dev_info(info);
+ if (info)
+ __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
+{
+ int adjust_width;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ domain->agaw = width_to_agaw(adjust_width);
+
+ domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
+ domain->max_addr = 0;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
@@ -4843,7 +5166,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
pr_err("Can't allocate dmar_domain\n");
return NULL;
}
- if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
pr_err("Domain initialization failed\n");
domain_exit(dmar_domain);
return NULL;
@@ -5147,7 +5470,8 @@ static int intel_iommu_map(struct iommu_domain *domain,
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *gather)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
@@ -5278,6 +5602,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
domain_add_dev_info(si_domain, dev);
dev_info(dev,
@@ -5288,6 +5613,7 @@ static int intel_iommu_add_device(struct device *dev)
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
ret = iommu_request_dma_domain_for_dev(dev);
if (ret) {
+ dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
@@ -5301,6 +5627,11 @@ static int intel_iommu_add_device(struct device *dev)
}
}
+ if (device_needs_bounce(dev)) {
+ dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
+ set_dma_ops(dev, &bounce_dma_ops);
+ }
+
return 0;
}
@@ -5313,9 +5644,14 @@ static void intel_iommu_remove_device(struct device *dev)
if (!iommu)
return;
+ dmar_remove_one_dev_info(dev);
+
iommu_group_remove_device(dev);
iommu_device_unlink(&iommu->iommu, dev);
+
+ if (device_needs_bounce(dev))
+ set_dma_ops(dev, NULL);
}
static void intel_iommu_get_resv_regions(struct device *device,
@@ -5629,20 +5965,46 @@ const struct iommu_ops intel_iommu_ops = {
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
-static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+static void quirk_iommu_igfx(struct pci_dev *dev)
{
- /* G4x/GM45 integrated gfx dmar support is totally busted. */
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+/* G4x/GM45 integrated gfx dmar support is totally busted. */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+
+/* Broadwell igfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 780de0caafe8..9b159132405d 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
}
static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
- unsigned long address, unsigned long pages, int ih, int gl)
+ unsigned long address, unsigned long pages, int ih)
{
struct qi_desc desc;
- if (pages == -1) {
- /* For global kernel pages we have to flush them in *all* PASIDs
- * because that's the only option the hardware gives us. Despite
- * the fact that they are actually only accessible through one. */
- if (gl)
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
- QI_EIOTLB_TYPE;
- else
- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
- QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
- QI_EIOTLB_TYPE;
+ /*
+ * Do PASID-granular IOTLB invalidation if the page-selective capability
+ * is not available.
+ */
+ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
desc.qw1 = 0;
} else {
int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = QI_EIOTLB_ADDR(address) |
- QI_EIOTLB_GL(gl) |
QI_EIOTLB_IH(ih) |
QI_EIOTLB_AM(mask);
}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
- unsigned long pages, int ih, int gl)
+ unsigned long pages, int ih)
{
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+ intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
rcu_read_unlock();
}
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
intel_flush_svm_range(svm, start,
- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
}
rcu_read_unlock();
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
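The intel-svm change above removes the global-pages special case: the flush helper now falls back to a full-PASID invalidation whenever the caller passes pages == -1 or the IOMMU lacks page-selective-within-PASID invalidation. A minimal standalone sketch of that decision, using simplified stand-in names rather than the driver's real qi_desc handling:

/* Sketch only: simplified stand-ins, not the real descriptor layout. */
#include <stdbool.h>

enum eiotlb_gran { GRAN_NONGLOBAL_PASID, GRAN_PAGE_SELECTIVE_PASID };

static enum eiotlb_gran pick_eiotlb_granularity(long pages, bool has_pgsel_inv)
{
        /*
         * Flush the whole PASID when the caller asks for everything
         * (pages == -1) or when the hardware cannot do page-selective
         * invalidation at all.
         */
        if (pages == -1 || !has_pgsel_inv)
                return GRAN_NONGLOBAL_PASID;

        return GRAN_PAGE_SELECTIVE_PASID;
}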
diff --git a/drivers/iommu/intel-trace.c b/drivers/iommu/intel-trace.c
new file mode 100644
index 000000000000..bfb6a6e37a88
--- /dev/null
+++ b/drivers/iommu/intel-trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel IOMMU trace support
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_iommu.h>
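The new intel-trace.c is the single translation unit that defines CREATE_TRACE_POINTS before including the trace header, which is what instantiates the tracepoint bodies once. For readers unfamiliar with the pattern, a generic skeleton of such a header is sketched below; the subsystem name, event name and fields are illustrative only and do not reflect the actual trace/events/intel_iommu.h:

/* Illustrative trace header skeleton (hypothetical "foo" subsystem/event). */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_map,
        TP_PROTO(unsigned long iova, size_t size),
        TP_ARGS(iova, size),

        TP_STRUCT__entry(
                __field(unsigned long, iova)
                __field(size_t, size)
        ),

        TP_fast_assign(
                __entry->iova = iova;
                __entry->size = size;
        ),

        TP_printk("iova=0x%lx size=%zu", __entry->iova, __entry->size)
);

#endif /* _TRACE_FOO_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>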
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 4786ca061e31..81e43c1df7ec 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct set_msi_sid_data *data = opaque;
+ if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
+ data->busmatch_count++;
+
data->pdev = pdev;
data->alias = alias;
data->count++;
- if (PCI_BUS_NUM(alias) == pdev->bus->number)
- data->busmatch_count++;
-
return 0;
}
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 0fc8dfab2abf..4cb394937700 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -112,7 +112,9 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
-#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */
+/* MediaTek extend the two bits for PA 32bit/33bit */
+#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
+#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
@@ -169,18 +171,62 @@ struct arm_v7s_io_pgtable {
spinlock_t split_lock;
};
+static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
+
static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
}
-static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
+static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
+{
+ return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
+ (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
+}
+
+static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
+
+ if (!arm_v7s_is_mtk_enabled(cfg))
+ return pte;
+
+ if (paddr & BIT_ULL(32))
+ pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
+ if (paddr & BIT_ULL(33))
+ pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
+ return pte;
+}
+
+static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
+ struct io_pgtable_cfg *cfg)
{
+ arm_v7s_iopte mask;
+ phys_addr_t paddr;
+
if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
- pte &= ARM_V7S_TABLE_MASK;
+ mask = ARM_V7S_TABLE_MASK;
+ else if (arm_v7s_pte_is_cont(pte, lvl))
+ mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
else
- pte &= ARM_V7S_LVL_MASK(lvl);
- return phys_to_virt(pte);
+ mask = ARM_V7S_LVL_MASK(lvl);
+
+ paddr = pte & mask;
+ if (!arm_v7s_is_mtk_enabled(cfg))
+ return paddr;
+
+ if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
+ paddr |= BIT_ULL(32);
+ if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
+ paddr |= BIT_ULL(33);
+ return paddr;
+}
+
+static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
+ struct arm_v7s_io_pgtable *data)
+{
+ return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
}
static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
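The paddr_to_iopte()/iopte_to_paddr() pair above is where the MediaTek extension stores physical-address bits 32 and 33 in otherwise-unused PTE bits. A small user-space model of just that encoding, assuming the same bit positions (BIT(9) and BIT(4)):

/* User-space model of the PA[33:32] <-> PTE bit packing; not kernel code. */
#include <assert.h>
#include <stdint.h>

#define MTK_PTE_PA_BIT32        (1u << 9)       /* ARM_V7S_ATTR_MTK_PA_BIT32 */
#define MTK_PTE_PA_BIT33        (1u << 4)       /* ARM_V7S_ATTR_MTK_PA_BIT33 */

static uint32_t encode_high_pa(uint64_t paddr, uint32_t pte)
{
        if (paddr & (1ULL << 32))
                pte |= MTK_PTE_PA_BIT32;
        if (paddr & (1ULL << 33))
                pte |= MTK_PTE_PA_BIT33;
        return pte;
}

static uint64_t decode_high_pa(uint32_t pte, uint64_t paddr)
{
        if (pte & MTK_PTE_PA_BIT32)
                paddr |= 1ULL << 32;
        if (pte & MTK_PTE_PA_BIT33)
                paddr |= 1ULL << 33;
        return paddr;
}

int main(void)
{
        uint64_t pa = 0x340000000ULL;           /* bits 32 and 33 both set */
        uint32_t pte = encode_high_pa(pa, 0);

        assert(decode_high_pa(pte, 0) == (pa & ~0xffffffffULL));
        return 0;
}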
@@ -295,9 +341,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
pte |= ARM_V7S_ATTR_NS_SECTION;
- if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
- pte |= ARM_V7S_ATTR_MTK_4GB;
-
return pte;
}
@@ -362,7 +405,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
return false;
}
-static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
+ struct iommu_iotlb_gather *, unsigned long,
size_t, int, arm_v7s_iopte *);
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
@@ -383,7 +427,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
- if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+ if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
sz, lvl, tblp) != sz))
return -EINVAL;
} else if (ptep[i]) {
@@ -396,7 +440,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
if (num_entries > 1)
pte = arm_v7s_pte_to_cont(pte, lvl);
- pte |= paddr & ARM_V7S_LVL_MASK(lvl);
+ pte |= paddr_to_iopte(paddr, lvl, cfg);
__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
return 0;
@@ -462,7 +506,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
}
if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
- cptep = iopte_deref(pte, lvl);
+ cptep = iopte_deref(pte, lvl, data);
} else if (pte) {
/* We require an unmap first */
WARN_ON(!selftest_running);
@@ -484,7 +528,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+ paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
@@ -493,9 +538,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
* a chance for anything to kick off a table walk for the new iova.
*/
if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
- io_pgtable_tlb_add_flush(iop, iova, size,
- ARM_V7S_BLOCK_SIZE(2), false);
- io_pgtable_tlb_sync(iop);
+ io_pgtable_tlb_flush_walk(iop, iova, size,
+ ARM_V7S_BLOCK_SIZE(2));
} else {
wmb();
}
@@ -512,7 +556,8 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
arm_v7s_iopte pte = data->pgd[i];
if (ARM_V7S_PTE_IS_TABLE(pte, 1))
- __arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
+ __arm_v7s_free_table(iopte_deref(pte, 1, data),
+ 2, data);
}
__arm_v7s_free_table(data->pgd, 1, data);
kmem_cache_destroy(data->l2_tables);
@@ -541,12 +586,12 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
size *= ARM_V7S_CONT_PAGES;
- io_pgtable_tlb_add_flush(iop, iova, size, size, true);
- io_pgtable_tlb_sync(iop);
+ io_pgtable_tlb_flush_leaf(iop, iova, size, size);
return pte;
}
static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+ struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size,
arm_v7s_iopte blk_pte,
arm_v7s_iopte *ptep)
@@ -582,16 +627,16 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
return 0;
- tablep = iopte_deref(pte, 1);
- return __arm_v7s_unmap(data, iova, size, 2, tablep);
+ tablep = iopte_deref(pte, 1, data);
+ return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
}
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
- io_pgtable_tlb_sync(&data->iop);
+ io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
return size;
}
static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+ struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size, int lvl,
arm_v7s_iopte *ptep)
{
@@ -638,10 +683,9 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
for (i = 0; i < num_entries; i++) {
if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
/* Also flush any partial walks */
- io_pgtable_tlb_add_flush(iop, iova, blk_size,
- ARM_V7S_BLOCK_SIZE(lvl + 1), false);
- io_pgtable_tlb_sync(iop);
- ptep = iopte_deref(pte[i], lvl);
+ io_pgtable_tlb_flush_walk(iop, iova, blk_size,
+ ARM_V7S_BLOCK_SIZE(lvl + 1));
+ ptep = iopte_deref(pte[i], lvl, data);
__arm_v7s_free_table(ptep, lvl + 1, data);
} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
/*
@@ -651,8 +695,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
*/
smp_wmb();
} else {
- io_pgtable_tlb_add_flush(iop, iova, blk_size,
- blk_size, true);
+ io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
}
iova += blk_size;
}
@@ -662,23 +705,24 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
*/
- return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
+ return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
+ ptep);
}
/* Keep on walkin' */
- ptep = iopte_deref(pte[0], lvl);
- return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+ ptep = iopte_deref(pte[0], lvl, data);
+ return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
}
static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
if (WARN_ON(upper_32_bits(iova)))
return 0;
- return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+ return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -692,7 +736,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
do {
ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
pte = READ_ONCE(*ptep);
- ptep = iopte_deref(pte, lvl);
+ ptep = iopte_deref(pte, lvl, data);
} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
if (!ARM_V7S_PTE_IS_VALID(pte))
@@ -701,7 +745,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
mask = ARM_V7S_LVL_MASK(lvl);
if (arm_v7s_pte_is_cont(pte, lvl))
mask *= ARM_V7S_CONT_PAGES;
- return (pte & mask) | (iova & ~mask);
+ return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
}
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
@@ -709,18 +753,21 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
{
struct arm_v7s_io_pgtable *data;
- if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+ if (cfg->ias > ARM_V7S_ADDR_BITS)
+ return NULL;
+
+ if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
return NULL;
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
- IO_PGTABLE_QUIRK_ARM_MTK_4GB |
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
- if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
@@ -806,22 +853,24 @@ static void dummy_tlb_flush_all(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_sync(void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie)
{
- WARN_ON(cookie != cfg_cookie);
+ dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_gather_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops = {
.tlb_flush_all = dummy_tlb_flush_all,
- .tlb_add_flush = dummy_tlb_add_flush,
- .tlb_sync = dummy_tlb_sync,
+ .tlb_flush_walk = dummy_tlb_flush,
+ .tlb_flush_leaf = dummy_tlb_flush,
+ .tlb_add_page = dummy_tlb_add_page,
};
#define __FAIL(ops) ({ \
@@ -896,7 +945,7 @@ static int __init arm_v7s_do_selftests(void)
size = 1UL << __ffs(cfg.pgsize_bitmap);
while (i < loopnr) {
iova_start = i * SZ_16M;
- if (ops->unmap(ops, iova_start + size, size) != size)
+ if (ops->unmap(ops, iova_start + size, size, NULL) != size)
return __FAIL(ops);
/* Remap of partial unmap */
@@ -914,7 +963,7 @@ static int __init arm_v7s_do_selftests(void)
for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << i;
- if (ops->unmap(ops, iova, size) != size)
+ if (ops->unmap(ops, iova, size, NULL) != size)
return __FAIL(ops);
if (ops->iova_to_phys(ops, iova + 42))
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 161a7d56264d..4c91359057c5 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -12,7 +12,6 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -290,6 +289,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size, int lvl,
arm_lpae_iopte *ptep);
@@ -335,8 +335,10 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
- if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+ if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
+ WARN_ON(1);
return -EINVAL;
+ }
}
__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
@@ -537,6 +539,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
}
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size,
arm_lpae_iopte blk_pte, int lvl,
arm_lpae_iopte *ptep)
@@ -582,15 +585,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
tablep = iopte_deref(pte, data);
} else if (unmap_idx >= 0) {
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
- io_pgtable_tlb_sync(&data->iop);
+ io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
return size;
}
- return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+ return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size, int lvl,
arm_lpae_iopte *ptep)
{
@@ -612,9 +615,8 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
if (!iopte_leaf(pte, lvl, iop->fmt)) {
/* Also flush any partial walks */
- io_pgtable_tlb_add_flush(iop, iova, size,
- ARM_LPAE_GRANULE(data), false);
- io_pgtable_tlb_sync(iop);
+ io_pgtable_tlb_flush_walk(iop, iova, size,
+ ARM_LPAE_GRANULE(data));
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
@@ -625,7 +627,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
*/
smp_wmb();
} else {
- io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+ io_pgtable_tlb_add_page(iop, gather, iova, size);
}
return size;
@@ -634,17 +636,17 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
*/
- return arm_lpae_split_blk_unmap(data, iova, size, pte,
+ return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
lvl + 1, ptep);
}
/* Keep on walkin' */
ptep = iopte_deref(pte, data);
- return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}
static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
@@ -653,7 +655,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
- return __arm_lpae_unmap(data, iova, size, lvl, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -1070,22 +1072,24 @@ static void dummy_tlb_flush_all(void *cookie)
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
+ void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_sync(void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie)
{
- WARN_ON(cookie != cfg_cookie);
+ dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
- .tlb_add_flush = dummy_tlb_add_flush,
- .tlb_sync = dummy_tlb_sync,
+ .tlb_flush_walk = dummy_tlb_flush,
+ .tlb_flush_leaf = dummy_tlb_flush,
+ .tlb_add_page = dummy_tlb_add_page,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
@@ -1168,7 +1172,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
/* Partial unmap */
size = 1UL << __ffs(cfg->pgsize_bitmap);
- if (ops->unmap(ops, SZ_1G + size, size) != size)
+ if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
return __FAIL(ops, i);
/* Remap of partial unmap */
@@ -1183,7 +1187,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
size = 1UL << j;
- if (ops->unmap(ops, iova, size) != size)
+ if (ops->unmap(ops, iova, size, NULL) != size)
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42))
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0c674d80c37f..d658c7c6a2ab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -26,12 +26,10 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
-#else
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
-#endif
+
+static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
+static u32 iommu_cmd_line __read_mostly;
struct iommu_group {
struct kobject kobj;
@@ -68,6 +66,18 @@ static const char * const iommu_group_resv_type_string[] = {
[IOMMU_RESV_SW_MSI] = "msi",
};
+#define IOMMU_CMD_LINE_DMA_API BIT(0)
+
+static void iommu_set_cmd_line_dma_api(void)
+{
+ iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
+}
+
+static bool iommu_cmd_line_dma_api(void)
+{
+ return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
+}
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -80,12 +90,55 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
+/*
+ * Use a function instead of an array here because the domain-type is a
+ * bit-field, so an array would waste memory.
+ */
+static const char *iommu_domain_type_str(unsigned int t)
+{
+ switch (t) {
+ case IOMMU_DOMAIN_BLOCKED:
+ return "Blocked";
+ case IOMMU_DOMAIN_IDENTITY:
+ return "Passthrough";
+ case IOMMU_DOMAIN_UNMANAGED:
+ return "Unmanaged";
+ case IOMMU_DOMAIN_DMA:
+ return "Translated";
+ default:
+ return "Unknown";
+ }
+}
+
+static int __init iommu_subsys_init(void)
+{
+ bool cmd_line = iommu_cmd_line_dma_api();
+
+ if (!cmd_line) {
+ if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
+ iommu_set_default_passthrough(false);
+ else
+ iommu_set_default_translated(false);
+
+ if (iommu_default_passthrough() && mem_encrypt_active()) {
+ pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+ iommu_set_default_translated(false);
+ }
+ }
+
+ pr_info("Default domain type: %s %s\n",
+ iommu_domain_type_str(iommu_def_domain_type),
+ cmd_line ? "(set via kernel command line)" : "");
+
+ return 0;
+}
+subsys_initcall(iommu_subsys_init);
+
int iommu_device_register(struct iommu_device *iommu)
{
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
-
return 0;
}
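iommu_subsys_init() above resolves the default domain type in a fixed order: an explicit iommu.passthrough= on the command line wins, otherwise the Kconfig default applies, and active memory encryption then forces translation. A compact sketch of that precedence (hypothetical helper, not the kernel function):

/* Sketch of the precedence used when picking the default domain type. */
#include <stdbool.h>

enum domain_type { DOMAIN_DMA, DOMAIN_IDENTITY };

static enum domain_type resolve_default_domain(bool cmdline_set,
                                               enum domain_type cmdline_val,
                                               bool kconfig_passthrough,
                                               bool mem_encrypt_on)
{
        /* 1. An explicit iommu.passthrough= setting always wins. */
        if (cmdline_set)
                return cmdline_val;

        /* 2. Otherwise start from the Kconfig default... */
        enum domain_type type = kconfig_passthrough ? DOMAIN_IDENTITY
                                                    : DOMAIN_DMA;

        /* 3. ...but active memory encryption forces a translated domain. */
        if (type == DOMAIN_IDENTITY && mem_encrypt_on)
                type = DOMAIN_DMA;

        return type;
}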
@@ -165,7 +218,11 @@ static int __init iommu_set_def_domain_type(char *str)
if (ret)
return ret;
- iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+ if (pt)
+ iommu_set_default_passthrough(true);
+ else
+ iommu_set_default_translated(true);
+
return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
@@ -229,60 +286,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
* @new: new region to insert
* @regions: list of regions
*
- * The new element is sorted by address with respect to the other
- * regions of the same type. In case it overlaps with another
- * region of the same type, regions are merged. In case it
- * overlaps with another region of different type, regions are
- * not merged.
+ * Elements are sorted by start address and overlapping segments
+ * of the same type are merged.
*/
-static int iommu_insert_resv_region(struct iommu_resv_region *new,
- struct list_head *regions)
+int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
{
- struct iommu_resv_region *region;
- phys_addr_t start = new->start;
- phys_addr_t end = new->start + new->length - 1;
- struct list_head *pos = regions->next;
-
- while (pos != regions) {
- struct iommu_resv_region *entry =
- list_entry(pos, struct iommu_resv_region, list);
- phys_addr_t a = entry->start;
- phys_addr_t b = entry->start + entry->length - 1;
- int type = entry->type;
-
- if (end < a) {
- goto insert;
- } else if (start > b) {
- pos = pos->next;
- } else if ((start >= a) && (end <= b)) {
- if (new->type == type)
- return 0;
- else
- pos = pos->next;
+ struct iommu_resv_region *iter, *tmp, *nr, *top;
+ LIST_HEAD(stack);
+
+ nr = iommu_alloc_resv_region(new->start, new->length,
+ new->prot, new->type);
+ if (!nr)
+ return -ENOMEM;
+
+ /* First add the new element based on start address sorting */
+ list_for_each_entry(iter, regions, list) {
+ if (nr->start < iter->start ||
+ (nr->start == iter->start && nr->type <= iter->type))
+ break;
+ }
+ list_add_tail(&nr->list, &iter->list);
+
+ /* Merge overlapping segments of type nr->type in @regions, if any */
+ list_for_each_entry_safe(iter, tmp, regions, list) {
+ phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+ /* no merge needed on elements of different types than @nr */
+ if (iter->type != nr->type) {
+ list_move_tail(&iter->list, &stack);
+ continue;
+ }
+
+ /* look for the last stack element of same type as @iter */
+ list_for_each_entry_reverse(top, &stack, list)
+ if (top->type == iter->type)
+ goto check_overlap;
+
+ list_move_tail(&iter->list, &stack);
+ continue;
+
+check_overlap:
+ top_end = top->start + top->length - 1;
+
+ if (iter->start > top_end + 1) {
+ list_move_tail(&iter->list, &stack);
} else {
- if (new->type == type) {
- phys_addr_t new_start = min(a, start);
- phys_addr_t new_end = max(b, end);
- int ret;
-
- list_del(&entry->list);
- entry->start = new_start;
- entry->length = new_end - new_start + 1;
- ret = iommu_insert_resv_region(entry, regions);
- kfree(entry);
- return ret;
- } else {
- pos = pos->next;
- }
+ top->length = max(top_end, iter_end) - top->start + 1;
+ list_del(&iter->list);
+ kfree(iter);
}
}
-insert:
- region = iommu_alloc_resv_region(new->start, new->length,
- new->prot, new->type);
- if (!region)
- return -ENOMEM;
-
- list_add_tail(&region->list, pos);
+ list_splice(&stack, regions);
return 0;
}
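The rewritten iommu_insert_resv_region() above first inserts the new element sorted by start address and then merges overlapping reserved regions of the same type. A simplified standalone model of that idea is sketched below; unlike the kernel version, it only merges same-type regions that end up adjacent in the list, and it uses a plain singly linked list rather than list_head:

/* Simplified model of "insert sorted, then merge same-type overlaps". */
#include <stdlib.h>

struct region {
        unsigned long start, length;
        int type;
        struct region *next;
};

static void insert_sorted(struct region **head, struct region *nr)
{
        struct region **pos = head;

        /* Keep the list ordered by start address (type breaks ties). */
        while (*pos && ((*pos)->start < nr->start ||
                        ((*pos)->start == nr->start && (*pos)->type < nr->type)))
                pos = &(*pos)->next;

        nr->next = *pos;
        *pos = nr;
}

static void merge_neighbours(struct region *head)
{
        struct region *r = head;

        while (r && r->next) {
                struct region *n = r->next;
                unsigned long r_end = r->start + r->length - 1;
                unsigned long n_end = n->start + n->length - 1;

                /* Merge only same-type regions that touch or overlap. */
                if (r->type == n->type && n->start <= r_end + 1) {
                        r->length = (n_end > r_end ? n_end : r_end) - r->start + 1;
                        r->next = n->next;
                        free(n);
                } else {
                        r = n;
                }
        }
}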
@@ -1862,7 +1917,7 @@ EXPORT_SYMBOL_GPL(iommu_map);
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
- bool sync)
+ struct iommu_iotlb_gather *iotlb_gather)
{
const struct iommu_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
@@ -1899,13 +1954,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
while (unmapped < size) {
size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
- unmapped_page = ops->unmap(domain, iova, pgsize);
+ unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
if (!unmapped_page)
break;
- if (sync && ops->iotlb_range_add)
- ops->iotlb_range_add(domain, iova, pgsize);
-
pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
iova, unmapped_page);
@@ -1913,9 +1965,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unmapped += unmapped_page;
}
- if (sync && ops->iotlb_sync)
- ops->iotlb_sync(domain);
-
trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
@@ -1923,14 +1972,22 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
- return __iommu_unmap(domain, iova, size, true);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t ret;
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
+ iommu_tlb_sync(domain, &iotlb_gather);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
size_t iommu_unmap_fast(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- return __iommu_unmap(domain, iova, size, false);
+ return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
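The iommu_unmap() rework above splits unmapping from TLB invalidation: page-table updates accumulate ranges into an iommu_iotlb_gather, and a single sync flushes everything at the end, while iommu_unmap_fast() leaves the sync to the caller. A toy model of that accumulate-then-flush pattern, not the actual kernel API:

/* Toy accumulate-then-flush model of the gather/sync split. */
#include <stddef.h>
#include <stdio.h>

struct gather {
        unsigned long start;
        unsigned long end;
};

static void gather_init(struct gather *g)
{
        g->start = ~0UL;
        g->end = 0;
}

/* Called per unmapped page; no hardware invalidation happens here. */
static void gather_add_page(struct gather *g, unsigned long iova, size_t size)
{
        if (iova < g->start)
                g->start = iova;
        if (iova + size > g->end)
                g->end = iova + size;
}

/* One invalidation for the whole accumulated range, then reset. */
static void gather_sync(struct gather *g)
{
        if (g->end > g->start)
                printf("invalidate [0x%lx, 0x%lx)\n", g->start, g->end);
        gather_init(g);
}

int main(void)
{
        struct gather g;

        gather_init(&g);
        gather_add_page(&g, 0x1000, 0x1000);
        gather_add_page(&g, 0x3000, 0x1000);
        gather_sync(&g);                /* prints invalidate [0x1000, 0x4000) */
        return 0;
}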
@@ -2143,7 +2200,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
mutex_lock(&group->mutex);
- /* Check if the default domain is already direct mapped */
ret = 0;
if (group->default_domain && group->default_domain->type == type)
goto out;
@@ -2153,7 +2209,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
if (iommu_group_device_count(group) != 1)
goto out;
- /* Allocate a direct mapped domain */
ret = -ENOMEM;
domain = __iommu_domain_alloc(dev->bus, type);
if (!domain)
@@ -2168,7 +2223,7 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
iommu_group_create_direct_mappings(group, dev);
- /* Make the direct mapped domain the default for this group */
+ /* Make the domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
group->default_domain = domain;
@@ -2196,6 +2251,28 @@ int iommu_request_dma_domain_for_dev(struct device *dev)
return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}
+void iommu_set_default_passthrough(bool cmd_line)
+{
+ if (cmd_line)
+ iommu_set_cmd_line_dma_api();
+
+ iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+}
+
+void iommu_set_default_translated(bool cmd_line)
+{
+ if (cmd_line)
+ iommu_set_cmd_line_dma_api();
+
+ iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+}
+
+bool iommu_default_passthrough(void)
+{
+ return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
+}
+EXPORT_SYMBOL_GPL(iommu_default_passthrough);
+
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d499b2621239..41c605b0058f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return !!iovad->fq;
+}
+
static void free_iova_flush_queue(struct iova_domain *iovad)
{
- if (!iovad->fq)
+ if (!has_iova_flush_queue(iovad))
return;
if (timer_pending(&iovad->fq_timer))
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
+ struct iova_fq __percpu *queue;
int cpu;
atomic64_set(&iovad->fq_flush_start_cnt, 0);
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
- iovad->fq = alloc_percpu(struct iova_fq);
- if (!iovad->fq)
+ queue = alloc_percpu(struct iova_fq);
+ if (!queue)
return -ENOMEM;
iovad->flush_cb = flush_cb;
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
for_each_possible_cpu(cpu) {
struct iova_fq *fq;
- fq = per_cpu_ptr(iovad->fq, cpu);
+ fq = per_cpu_ptr(queue, cpu);
fq->head = 0;
fq->tail = 0;
spin_lock_init(&fq->lock);
}
+ smp_wmb();
+
+ iovad->fq = queue;
+
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
struct iova *cached_iova;
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
- if (free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo) {
+ if (free == cached_iova ||
+ (free->pfn_hi < iovad->dma_32bit_pfn &&
+ free->pfn_lo >= cached_iova->pfn_lo)) {
iovad->cached32_node = rb_next(&free->node);
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
@@ -566,7 +577,9 @@ void queue_iova(struct iova_domain *iovad,
spin_unlock_irqrestore(&fq->lock, flags);
- if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+ /* Avoid false sharing as much as possible. */
+ if (!atomic_read(&iovad->fq_timer_on) &&
+ !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
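The queue_iova() change above avoids a failed atomic cmpxchg on every call once the flush-queue timer is already armed, by checking the flag with a plain read first. The same pattern in portable C11 atomics (a hedged sketch, not the kernel's atomic_t API):

/* C11 sketch of "read before cmpxchg" to avoid needless cache-line bouncing. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int timer_on;

static bool try_arm_timer(void)
{
        int expected = 0;

        /* Cheap read: if the timer is already armed, skip the atomic RMW. */
        if (atomic_load(&timer_on))
                return false;

        /* Only one caller wins the 0 -> 1 transition and arms the timer. */
        return atomic_compare_exchange_strong(&timer_on, &expected, 1);
}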
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ad0098c0c87c..9da8309f7170 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -49,6 +49,7 @@ struct ipmmu_features {
bool setup_imbuscr;
bool twobit_imttbcr_sl0;
bool reserved_context;
+ bool cache_snoop;
};
struct ipmmu_vmsa_device {
@@ -115,45 +116,44 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IMTTBCR 0x0008
#define IMTTBCR_EAE (1 << 31)
#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
-#define IMTTBCR_SH1_MASK (3 << 28)
-#define IMTTBCR_ORGN1_NC (0 << 26)
-#define IMTTBCR_ORGN1_WB_WA (1 << 26)
-#define IMTTBCR_ORGN1_WT (2 << 26)
-#define IMTTBCR_ORGN1_WB (3 << 26)
-#define IMTTBCR_ORGN1_MASK (3 << 26)
-#define IMTTBCR_IRGN1_NC (0 << 24)
-#define IMTTBCR_IRGN1_WB_WA (1 << 24)
-#define IMTTBCR_IRGN1_WT (2 << 24)
-#define IMTTBCR_IRGN1_WB (3 << 24)
-#define IMTTBCR_IRGN1_MASK (3 << 24)
+#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
+#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
+#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
+#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
#define IMTTBCR_TSZ1_MASK (7 << 16)
#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
-#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
-#define IMTTBCR_SH0_MASK (3 << 12)
-#define IMTTBCR_ORGN0_NC (0 << 10)
-#define IMTTBCR_ORGN0_WB_WA (1 << 10)
-#define IMTTBCR_ORGN0_WT (2 << 10)
-#define IMTTBCR_ORGN0_WB (3 << 10)
-#define IMTTBCR_ORGN0_MASK (3 << 10)
-#define IMTTBCR_IRGN0_NC (0 << 8)
-#define IMTTBCR_IRGN0_WB_WA (1 << 8)
-#define IMTTBCR_IRGN0_WT (2 << 8)
-#define IMTTBCR_IRGN0_WB (3 << 8)
-#define IMTTBCR_IRGN0_MASK (3 << 8)
+#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
+#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
+#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
+#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
+#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_2 (0 << 4)
#define IMTTBCR_SL0_LVL_1 (1 << 4)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT O
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
-#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
-
#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
@@ -361,16 +361,16 @@ static void ipmmu_tlb_flush_all(void *cookie)
ipmmu_tlb_invalidate(domain);
}
-static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void ipmmu_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- /* The hardware doesn't support selective TLB flush. */
+ ipmmu_tlb_flush_all(cookie);
}
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
- .tlb_add_flush = ipmmu_tlb_add_flush,
- .tlb_sync = ipmmu_tlb_flush_all,
+ .tlb_flush_walk = ipmmu_tlb_flush,
+ .tlb_flush_leaf = ipmmu_tlb_flush,
};
/* -----------------------------------------------------------------------------
@@ -422,17 +422,19 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
/*
* TTBCR
- * We use long descriptors with inner-shareable WBWA tables and allocate
- * the whole 32-bit VA space to TTBR0.
+ * We use long descriptors and allocate the whole 32-bit VA space to
+ * TTBR0.
*/
if (domain->mmu->features->twobit_imttbcr_sl0)
tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
else
tmp = IMTTBCR_SL0_LVL_1;
- ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
- IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
- IMTTBCR_IRGN0_WB_WA | tmp);
+ if (domain->mmu->features->cache_snoop)
+ tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+ IMTTBCR_IRGN0_WB_WA;
+
+ ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
@@ -480,7 +482,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
domain->cfg.ias = 32;
domain->cfg.oas = 40;
- domain->cfg.tlb = &ipmmu_gather_ops;
+ domain->cfg.tlb = &ipmmu_flush_ops;
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
domain->io_domain.geometry.force_aperture = true;
/*
@@ -733,14 +735,14 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- return domain->iop->unmap(domain->iop, iova, size);
+ return domain->iop->unmap(domain->iop, iova, size, gather);
}
-static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -748,6 +750,12 @@ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
ipmmu_tlb_flush_all(domain);
}
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+ struct iommu_iotlb_gather *gather)
+{
+ ipmmu_flush_iotlb_all(io_domain);
+}
+
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
@@ -957,7 +965,7 @@ static const struct iommu_ops ipmmu_ops = {
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
- .flush_iotlb_all = ipmmu_iotlb_sync,
+ .flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
@@ -988,6 +996,7 @@ static const struct ipmmu_features ipmmu_features_default = {
.setup_imbuscr = true,
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
+ .cache_snoop = true,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -998,6 +1007,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.setup_imbuscr = false,
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
+ .cache_snoop = false,
};
static const struct of_device_id ipmmu_of_ids[] = {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b25e2eb9e038..be99d408cf35 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -168,20 +168,29 @@ fail:
return;
}
-static void __flush_iotlb_sync(void *cookie)
+static void __flush_iotlb_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- /*
- * Nothing is needed here, the barrier to guarantee
- * completion of the tlb sync operation is implicitly
- * taken care when the iommu client does a writel before
- * kick starting the other master.
- */
+ __flush_iotlb_range(iova, size, granule, false, cookie);
+}
+
+static void __flush_iotlb_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ __flush_iotlb_range(iova, size, granule, true, cookie);
}
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie)
+{
+ __flush_iotlb_range(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
.tlb_flush_all = __flush_iotlb,
- .tlb_add_flush = __flush_iotlb_range,
- .tlb_sync = __flush_iotlb_sync,
+ .tlb_flush_walk = __flush_iotlb_walk,
+ .tlb_flush_leaf = __flush_iotlb_leaf,
+ .tlb_add_page = __flush_iotlb_page,
};
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
@@ -345,7 +354,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
.ias = 32,
.oas = 32,
- .tlb = &msm_iommu_gather_ops,
+ .tlb = &msm_iommu_flush_ops,
.iommu_dev = priv->dev,
};
@@ -509,13 +518,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t len)
+ size_t len, struct iommu_iotlb_gather *gather)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
spin_lock_irqsave(&priv->pgtlock, flags);
- len = priv->iop->unmap(priv->iop, iova, len);
+ len = priv->iop->unmap(priv->iop, iova, len, gather);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return len;
@@ -691,6 +700,13 @@ static struct iommu_ops msm_iommu_ops = {
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
+ /*
+ * Nothing is needed here, the barrier to guarantee
+ * completion of the TLB sync operation is implicitly
+ * taken care of when the IOMMU client does a writel before
+ * kick-starting the other master.
+ */
+ .iotlb_sync = NULL,
.iova_to_phys = msm_iommu_iova_to_phys,
.add_device = msm_iommu_add_device,
.remove_device = msm_iommu_remove_device,
@@ -750,7 +766,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
- dev_err(iommu->dev, "could not get iommu irq\n");
ret = -ENODEV;
goto fail;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 82e4be4dfdaf..67a483c1a935 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -28,6 +28,7 @@
#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR 0x000
+#define MMU_PT_ADDR_MASK GENMASK(31, 7)
#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
@@ -44,12 +45,9 @@
#define REG_MMU_DCM_DIS 0x050
#define REG_MMU_CTRL_REG 0x110
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
-#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
- ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
-/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
-#define F_MMU_TF_PROTECT_SEL(prot, data) \
- (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
+#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5)
#define REG_MMU_IVRP_PADDR 0x114
@@ -66,26 +64,32 @@
#define F_INT_CLR_BIT BIT(12)
#define REG_MMU_INT_MAIN_CONTROL 0x124
-#define F_INT_TRANSLATION_FAULT BIT(0)
-#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
-#define F_INT_INVALID_PA_FAULT BIT(2)
-#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
-#define F_INT_TLB_MISS_FAULT BIT(4)
-#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5)
-#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6)
+ /* mmu0 | mmu1 */
+#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7))
+#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8))
+#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9))
+#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10))
+#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11))
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12))
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13))
#define REG_MMU_CPE_DONE 0x12C
#define REG_MMU_FAULT_ST1 0x134
+#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0)
+#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)
-#define REG_MMU_FAULT_VA 0x13c
+#define REG_MMU0_FAULT_VA 0x13c
#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
-#define REG_MMU_INVLD_PA 0x140
-#define REG_MMU_INT_ID 0x150
-#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
-#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
+#define REG_MMU0_INVLD_PA 0x140
+#define REG_MMU1_FAULT_VA 0x144
+#define REG_MMU1_INVLD_PA 0x148
+#define REG_MMU0_INT_ID 0x150
+#define REG_MMU1_INT_ID 0x154
+#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
+#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
#define MTK_PROTECT_PA_ALIGN 128
@@ -107,6 +111,30 @@ struct mtk_iommu_domain {
static const struct iommu_ops mtk_iommu_ops;
+/*
+ * In M4U 4GB mode, the physical address is remapped as below:
+ *
+ * CPU Physical address:
+ * ====================
+ *
+ * 0 1G 2G 3G 4G 5G
+ * |---A---|---B---|---C---|---D---|---E---|
+ * +--I/O--+------------Memory-------------+
+ *
+ * IOMMU output physical address:
+ * =============================
+ *
+ * 4G 5G 6G 7G 8G
+ * |---E---|---B---|---C---|---D---|
+ * +------------Memory-------------+
+ *
+ * Region 'A' (I/O) can NOT be mapped by the M4U; for Regions 'B'/'C'/'D',
+ * bit32 of the CPU physical address always needs to be set, and for Region
+ * 'E' the CPU physical address is kept as is.
+ * Additionally, the IOMMU consumers always use the CPU physical address.
+ */
+#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
+
static LIST_HEAD(m4ulist); /* List all the M4U HWs */
#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
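The diagram above describes how, in 4GB mode, the M4U only addresses DRAM through the high remap window. The map and iova_to_phys changes later in this file follow directly from it: map sets bit 32 on the CPU physical address, and iova_to_phys clears it again for outputs at or above the remap base. A small sketch of those two conversions under that assumption:

/* Sketch of the 4GB-mode address conversions described above. */
#include <stdint.h>

#define REMAP_BASE      0x140000000ULL  /* MTK_IOMMU_4GB_MODE_REMAP_BASE */

/* CPU physical address -> address to program into the page table. */
static uint64_t to_iommu_paddr(uint64_t cpu_pa, int enable_4gb)
{
        /* In 4GB mode the M4U can only use the high remap, so force bit 32. */
        return enable_4gb ? (cpu_pa | (1ULL << 32)) : cpu_pa;
}

/* Address recovered from the page table -> CPU physical address. */
static uint64_t to_cpu_paddr(uint64_t iommu_pa, int enable_4gb)
{
        /* Regions B/C/D appear below the remap base from the CPU's view. */
        if (enable_4gb && iommu_pa >= REMAP_BASE)
                iommu_pa &= ~(1ULL << 32);
        return iommu_pa;
}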
@@ -188,10 +216,32 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
}
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
+ mtk_iommu_tlb_sync(cookie);
+}
+
+static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
+ mtk_iommu_tlb_sync(cookie);
+}
+
+static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
- .tlb_sync = mtk_iommu_tlb_sync,
+ .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
+ .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+ .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
@@ -204,13 +254,21 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
/* Read error info from registers */
int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
- fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+ if (int_state & F_REG_MMU0_FAULT_MASK) {
+ regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
+ fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
+ fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
+ } else {
+ regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
+ fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
+ fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
+ }
layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
- fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
- regval = readl_relaxed(data->base + REG_MMU_INT_ID);
- fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
- fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+ fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+ fault_port = F_MMU_INT_ID_PORT_ID(regval);
+
+ fault_larb = data->plat_data->larbid_remap[fault_larb];
if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -242,7 +300,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
- larb_mmu = &data->smi_imu.larb_imu[larbid];
+ larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
enable ? "enable" : "disable", portid);
@@ -263,17 +321,15 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+ IO_PGTABLE_QUIRK_ARM_MTK_EXT,
.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
.ias = 32,
- .oas = 32,
- .tlb = &mtk_iommu_gather_ops,
+ .oas = 34,
+ .tlb = &mtk_iommu_flush_ops,
.iommu_dev = data->dev,
};
- if (data->enable_4GB)
- dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
-
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
if (!dom->iop) {
dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -336,7 +392,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Update the pgtable base address register of the M4U HW */
if (!data->m4u_dom) {
data->m4u_dom = dom;
- writel(dom->cfg.arm_v7s_cfg.ttbr[0],
+ writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
data->base + REG_MMU_PT_BASE_ADDR);
}
@@ -359,32 +415,43 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
unsigned long flags;
int ret;
+ /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
+ if (data->enable_4GB)
+ paddr |= BIT_ULL(32);
+
spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
- size, prot);
+ ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return ret;
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned long flags;
size_t unmapsz;
spin_lock_irqsave(&dom->pgtlock, flags);
- unmapsz = dom->iop->unmap(dom->iop, iova, size);
+ unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return unmapsz;
}
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}
@@ -401,8 +468,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
pa = dom->iop->iova_to_phys(dom->iop, iova);
spin_unlock_irqrestore(&dom->pgtlock, flags);
- if (data->enable_4GB)
- pa |= BIT_ULL(32);
+ if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+ pa &= ~BIT_ULL(32);
return pa;
}
@@ -490,7 +557,7 @@ static const struct iommu_ops mtk_iommu_ops = {
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
- .flush_iotlb_all = mtk_iommu_iotlb_sync,
+ .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
@@ -511,9 +578,11 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return ret;
}
- regval = F_MMU_TF_PROTECT_SEL(2, data);
- if (data->m4u_plat == M4U_MT8173)
- regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
+ if (data->plat_data->m4u_plat == M4U_MT8173)
+ regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+ F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
+ else
+ regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
regval = F_L2_MULIT_HIT_EN |
@@ -533,14 +602,14 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
- if (data->m4u_plat == M4U_MT8173)
+ if (data->plat_data->m4u_plat == M4U_MT8173)
regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
else
regval = lower_32_bits(data->protect_base) |
upper_32_bits(data->protect_base);
writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
- if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
+ if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
/*
* If 4GB mode is enabled, the validate PA range is from
* 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
@@ -550,8 +619,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
}
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
- /* It's MISC control register whose default value is ok except mt8173.*/
- if (data->m4u_plat == M4U_MT8173)
+ if (data->plat_data->reset_axi)
writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
@@ -584,7 +652,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
data->dev = dev;
- data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);
+ data->plat_data = of_device_get_match_data(dev);
/* Protect memory. HW will access here while translation fault.*/
protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
@@ -594,6 +662,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
/* Whether the current dram is over 4GB */
data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
+ if (!data->plat_data->has_4gb_mode)
+ data->enable_4GB = false;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
@@ -605,15 +675,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (data->irq < 0)
return data->irq;
- data->bclk = devm_clk_get(dev, "bclk");
- if (IS_ERR(data->bclk))
- return PTR_ERR(data->bclk);
+ if (data->plat_data->has_bclk) {
+ data->bclk = devm_clk_get(dev, "bclk");
+ if (IS_ERR(data->bclk))
+ return PTR_ERR(data->bclk);
+ }
larb_nr = of_count_phandle_with_args(dev->of_node,
"mediatek,larbs", NULL);
if (larb_nr < 0)
return larb_nr;
- data->smi_imu.larb_nr = larb_nr;
for (i = 0; i < larb_nr; i++) {
struct device_node *larbnode;
@@ -638,7 +709,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
of_node_put(larbnode);
return -EPROBE_DEFER;
}
- data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
+ data->larb_imu[id].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
compare_of, larbnode);
@@ -699,6 +770,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+ reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
clk_disable_unprepare(data->bclk);
return 0;
}
@@ -707,6 +779,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
struct mtk_iommu_suspend_reg *reg = &data->reg;
+ struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
void __iomem *base = data->base;
int ret;
@@ -722,8 +795,9 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
- if (data->m4u_dom)
- writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
+ if (m4u_dom)
+ writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
base + REG_MMU_PT_BASE_ADDR);
return 0;
}
@@ -732,9 +806,32 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};
+static const struct mtk_iommu_plat_data mt2712_data = {
+ .m4u_plat = M4U_MT2712,
+ .has_4gb_mode = true,
+ .has_bclk = true,
+ .has_vld_pa_rng = true,
+ .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+};
+
+static const struct mtk_iommu_plat_data mt8173_data = {
+ .m4u_plat = M4U_MT8173,
+ .has_4gb_mode = true,
+ .has_bclk = true,
+ .reset_axi = true,
+ .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+};
+
+static const struct mtk_iommu_plat_data mt8183_data = {
+ .m4u_plat = M4U_MT8183,
+ .reset_axi = true,
+ .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+};
+
static const struct of_device_id mtk_iommu_of_ids[] = {
- { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
- { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
+ { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+ { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
+ { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}
};
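With the match data now carrying a full mtk_iommu_plat_data instead of a bare enum, supporting another SoC only needs a new data entry plus a compatible line. A minimal sketch for a hypothetical SoC (names and flag values below are illustrative, not part of this patch):

	static const struct mtk_iommu_plat_data mt67xx_data = {	/* hypothetical SoC */
		.m4u_plat	= M4U_MT8183,	/* placeholder; a real SoC would add its own enum value */
		.has_4gb_mode	= false,
		.has_bclk	= true,
		.larbid_remap	= {0, 1, 2, 3, 4, 5, 6, 7},
	};

	/* ...and a matching entry in mtk_iommu_of_ids[]: */
	{ .compatible = "mediatek,mt67xx-m4u", .data = &mt67xx_data },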
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 59337323db58..fc0f16eabacd 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -24,12 +24,25 @@ struct mtk_iommu_suspend_reg {
u32 int_control0;
u32 int_main_control;
u32 ivrp_paddr;
+ u32 vld_pa_rng;
};
enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
M4U_MT8173,
+ M4U_MT8183,
+};
+
+struct mtk_iommu_plat_data {
+ enum mtk_iommu_plat m4u_plat;
+ bool has_4gb_mode;
+
+ /* HW will use the EMI clock if there is no "bclk". */
+ bool has_bclk;
+ bool has_vld_pa_rng;
+ bool reset_axi;
+ unsigned char larbid_remap[MTK_LARB_NR_MAX];
};
struct mtk_iommu_domain;
@@ -43,14 +56,14 @@ struct mtk_iommu_data {
struct mtk_iommu_suspend_reg reg;
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
- struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB;
bool tlb_flush_active;
struct iommu_device iommu;
- enum mtk_iommu_plat m4u_plat;
+ const struct mtk_iommu_plat_data *plat_data;
struct list_head list;
+ struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
static inline int compare_of(struct device *dev, void *data)
@@ -67,14 +80,14 @@ static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
- return component_bind_all(dev, &data->smi_imu);
+ return component_bind_all(dev, &data->larb_imu);
}
static inline void mtk_iommu_unbind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
- component_unbind_all(dev, &data->smi_imu);
+ component_unbind_all(dev, &data->larb_imu);
}
#endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index abeeac488372..210b1c7c0bda 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -206,7 +206,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
for (i = 0; i < fwspec->num_ids; ++i) {
larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
portid = mt2701_m4u_to_port(fwspec->ids[i]);
- larb_mmu = &data->smi_imu.larb_imu[larbid];
+ larb_mmu = &data->larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
enable ? "enable" : "disable", portid);
@@ -324,7 +324,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned long flags;
@@ -610,14 +611,12 @@ static int mtk_iommu_probe(struct platform_device *pdev)
}
}
- data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
+ data->larb_imu[larb_nr].dev = &plarbdev->dev;
component_match_add_release(dev, &match, release_of,
compare_of, larb_spec.np);
larb_nr++;
}
- data->smi_imu.larb_nr = larb_nr;
-
platform_set_drvdata(pdev, data);
ret = mtk_iommu_hw_init(data);
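The extra iommu_iotlb_gather argument on ->unmap() (here and in the other drivers below) comes from the core batching unmap-side TLB invalidations. A rough sketch of the core-side call pattern that motivates it, using helpers from the common IOMMU API; this is only an illustration, not code from this patch:

	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* each unmap may record the range to invalidate in 'gather' */
	unmapped = domain->ops->unmap(domain, iova, pgsize, &gather);
	/* ...further unmaps can accumulate more ranges... */
	domain->ops->iotlb_sync(domain, &gather);	/* one flush at the end */

Drivers that flush synchronously, like mtk_iommu_v1 here, can simply ignore the new parameter.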
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index dfb961d8c21b..09c6e1c680db 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -35,6 +35,15 @@
static const struct iommu_ops omap_iommu_ops;
+struct orphan_dev {
+ struct device *dev;
+ struct list_head node;
+};
+
+static LIST_HEAD(orphan_dev_list);
+
+static DEFINE_SPINLOCK(orphan_lock);
+
#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
@@ -53,6 +62,8 @@ static const struct iommu_ops omap_iommu_ops;
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
+static int _omap_iommu_add_device(struct device *dev);
+
/**
* to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
* @dom: generic iommu domain handle
@@ -65,6 +76,9 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
+ *
+ * This should be treated as a deprecated API. It is preserved only
+ * to maintain existing functionality for the OMAP3 ISP driver.
**/
void omap_iommu_save_ctx(struct device *dev)
{
@@ -92,6 +106,9 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
* @dev: client device
+ *
+ * This should be treated as a deprecated API. It is preserved only
+ * to maintain existing functionality for the OMAP3 ISP driver.
**/
void omap_iommu_restore_ctx(struct device *dev)
{
@@ -186,36 +203,18 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
static int iommu_enable(struct omap_iommu *obj)
{
- int err;
- struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
- if (pdata && pdata->deassert_reset) {
- err = pdata->deassert_reset(pdev, pdata->reset_name);
- if (err) {
- dev_err(obj->dev, "deassert_reset failed: %d\n", err);
- return err;
- }
- }
-
- pm_runtime_get_sync(obj->dev);
+ int ret;
- err = omap2_iommu_enable(obj);
+ ret = pm_runtime_get_sync(obj->dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(obj->dev);
- return err;
+ return ret < 0 ? ret : 0;
}
static void iommu_disable(struct omap_iommu *obj)
{
- struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
- omap2_iommu_disable(obj);
-
pm_runtime_put_sync(obj->dev);
-
- if (pdata && pdata->assert_reset)
- pdata->assert_reset(pdev, pdata->reset_name);
}
/*
@@ -901,15 +900,219 @@ static void omap_iommu_detach(struct omap_iommu *obj)
dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
- iommu_disable(obj);
obj->pd_dma = 0;
obj->iopgd = NULL;
+ iommu_disable(obj);
spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
+static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
+{
+ struct iotlb_lock lock;
+ struct cr_regs cr;
+ struct cr_regs *tmp;
+ int i;
+
+ /* check if there are any locked tlbs to save */
+ iotlb_lock_get(obj, &lock);
+ obj->num_cr_ctx = lock.base;
+ if (!obj->num_cr_ctx)
+ return;
+
+ tmp = obj->cr_ctx;
+ for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
+ *tmp++ = cr;
+}
+
+static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
+{
+ struct iotlb_lock l;
+ struct cr_regs *tmp;
+ int i;
+
+ /* no locked tlbs to restore */
+ if (!obj->num_cr_ctx)
+ return;
+
+ l.base = 0;
+ tmp = obj->cr_ctx;
+ for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
+ l.vict = i;
+ iotlb_lock_set(obj, &l);
+ iotlb_load_cr(obj, tmp);
+ }
+ l.base = obj->num_cr_ctx;
+ l.vict = i;
+ iotlb_lock_set(obj, &l);
+}
+
+/**
+ * omap_iommu_domain_deactivate - deactivate attached iommu devices
+ * @domain: iommu domain attached to the target iommu device
+ *
+ * This API allows the client devices of IOMMU devices to suspend
+ * the IOMMUs they control at runtime, after they have idled and
+ * suspended all activity. System suspend will then use the PM
+ * driver's late callbacks.
+ **/
+int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+{
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
+ int i;
+
+ if (!omap_domain->dev)
+ return 0;
+
+ iommu = omap_domain->iommus;
+ iommu += (omap_domain->num_iommus - 1);
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
+ oiommu = iommu->iommu_dev;
+ pm_runtime_put_sync(oiommu->dev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
+
+/**
+ * omap_iommu_domain_activate - activate attached iommu devices
+ * @domain: iommu domain attached to the target iommu device
+ *
+ * This API allows the client devices of IOMMU devices to resume the
+ * IOMMUs they control at runtime, before they can resume operations.
+ * System resume will then use the PM driver's late callbacks.
+ **/
+int omap_iommu_domain_activate(struct iommu_domain *domain)
+{
+ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
+ int i;
+
+ if (!omap_domain->dev)
+ return 0;
+
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ oiommu = iommu->iommu_dev;
+ pm_runtime_get_sync(oiommu->dev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
+
+/**
+ * omap_iommu_runtime_suspend - disable an iommu device
+ * @dev: iommu device
+ *
+ * This function performs all that is necessary to disable an
+ * IOMMU device, either during final detachment from a client
+ * device, or during system/runtime suspend of the device. This
+ * includes programming all the appropriate IOMMU registers, and
+ * managing the associated omap_hwmod's state and the device's
+ * reset line. This function also saves the context of any
+ * locked TLBs if suspending.
+ **/
+static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ struct omap_iommu *obj = to_iommu(dev);
+ int ret;
+
+ /* save the TLBs only during suspend, and not for power down */
+ if (obj->domain && obj->iopgd)
+ omap_iommu_save_tlb_entries(obj);
+
+ omap2_iommu_disable(obj);
+
+ if (pdata && pdata->device_idle)
+ pdata->device_idle(pdev);
+
+ if (pdata && pdata->assert_reset)
+ pdata->assert_reset(pdev, pdata->reset_name);
+
+ if (pdata && pdata->set_pwrdm_constraint) {
+ ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
+ if (ret) {
+ dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
+ ret);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * omap_iommu_runtime_resume - enable an iommu device
+ * @dev: iommu device
+ *
+ * This function performs all that is necessary to enable an
+ * IOMMU device, either during initial attachment to a client
+ * device, or during system/runtime resume of the device. This
+ * includes programming all the appropriate IOMMU registers, and
+ * managing the associated omap_hwmod's state and the device's
+ * reset line. The function also restores any locked TLBs if
+ * resuming after a suspend.
+ **/
+static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ struct omap_iommu *obj = to_iommu(dev);
+ int ret = 0;
+
+ if (pdata && pdata->set_pwrdm_constraint) {
+ ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
+ if (ret) {
+ dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
+ ret);
+ }
+ }
+
+ if (pdata && pdata->deassert_reset) {
+ ret = pdata->deassert_reset(pdev, pdata->reset_name);
+ if (ret) {
+ dev_err(dev, "deassert_reset failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pdata && pdata->device_enable)
+ pdata->device_enable(pdev);
+
+ /* restore the TLBs only during resume, and not for power up */
+ if (obj->domain)
+ omap_iommu_restore_tlb_entries(obj);
+
+ ret = omap2_iommu_enable(obj);
+
+ return ret;
+}
+
+/**
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
+ * @dev: iommu device
+ *
+ * This function performs the necessary checks to determine if the IOMMU
+ * device needs suspending or not. The function checks if the runtime_pm
+ * status of the device is suspended, and returns 1 in that case. This
+ * causes the PM core to skip invoking any of the sleep PM callbacks
+ * (suspend, suspend_late, resume, resume_early, etc.).
+ */
+static int omap_iommu_prepare(struct device *dev)
+{
+ if (pm_runtime_status_suspended(dev))
+ return 1;
+ return 0;
+}
+
static bool omap_iommu_can_register(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -974,6 +1177,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
struct omap_iommu *obj;
struct resource *res;
struct device_node *of = pdev->dev.of_node;
+ struct orphan_dev *orphan_dev, *tmp;
if (!of) {
pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -984,6 +1188,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
if (!obj)
return -ENOMEM;
+ /*
+ * self-manage the ordering dependencies between omap_device_enable/idle
+ * and omap_device_assert/deassert_hardreset API
+ */
+ if (pdev->dev.pm_domain) {
+ dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
+ pdev->dev.pm_domain = NULL;
+ }
+
obj->name = dev_name(&pdev->dev);
obj->nr_tlb_entries = 32;
err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
@@ -996,6 +1209,11 @@ static int omap_iommu_probe(struct platform_device *pdev)
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
+ obj->cr_ctx = devm_kzalloc(&pdev->dev,
+ sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
+ GFP_KERNEL);
+ if (!obj->cr_ctx)
+ return -ENOMEM;
spin_lock_init(&obj->iommu_lock);
spin_lock_init(&obj->page_table_lock);
@@ -1036,13 +1254,20 @@ static int omap_iommu_probe(struct platform_device *pdev)
goto out_sysfs;
}
- pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name);
+ list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
+ err = _omap_iommu_add_device(orphan_dev->dev);
+ if (!err) {
+ list_del(&orphan_dev->node);
+ kfree(orphan_dev);
+ }
+ }
+
return 0;
out_sysfs:
@@ -1072,6 +1297,14 @@ static int omap_iommu_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops omap_iommu_pm_ops = {
+ .prepare = omap_iommu_prepare,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
+ omap_iommu_runtime_resume, NULL)
+};
+
static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,omap2-iommu" },
{ .compatible = "ti,omap4-iommu" },
@@ -1085,6 +1318,7 @@ static struct platform_driver omap_iommu_driver = {
.remove = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
+ .pm = &omap_iommu_pm_ops,
.of_match_table = of_match_ptr(omap_iommu_of_match),
},
};
@@ -1149,7 +1383,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
@@ -1423,7 +1657,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static int omap_iommu_add_device(struct device *dev)
+static int _omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data, *tmp;
struct omap_iommu *oiommu;
@@ -1432,6 +1666,8 @@ static int omap_iommu_add_device(struct device *dev)
struct platform_device *pdev;
int num_iommus, i;
int ret;
+ struct orphan_dev *orphan_dev;
+ unsigned long flags;
/*
* Allocate the archdata iommu structure for DT-based devices.
@@ -1463,10 +1699,26 @@ static int omap_iommu_add_device(struct device *dev)
}
pdev = of_find_device_by_node(np);
- if (WARN_ON(!pdev)) {
+ if (!pdev) {
of_node_put(np);
kfree(arch_data);
- return -EINVAL;
+ spin_lock_irqsave(&orphan_lock, flags);
+ list_for_each_entry(orphan_dev, &orphan_dev_list,
+ node) {
+ if (orphan_dev->dev == dev)
+ break;
+ }
+ spin_unlock_irqrestore(&orphan_lock, flags);
+
+ if (orphan_dev && orphan_dev->dev == dev)
+ return -EPROBE_DEFER;
+
+ orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
+ if (!orphan_dev)
+ return -ENOMEM;
+ orphan_dev->dev = dev;
+ spin_lock_irqsave(&orphan_lock, flags);
+ list_add(&orphan_dev->node, &orphan_dev_list);
+ spin_unlock_irqrestore(&orphan_lock, flags);
+ return -EPROBE_DEFER;
}
oiommu = platform_get_drvdata(pdev);
@@ -1477,6 +1729,7 @@ static int omap_iommu_add_device(struct device *dev)
}
tmp->iommu_dev = oiommu;
+ tmp->dev = &pdev->dev;
of_node_put(np);
}
@@ -1511,6 +1764,17 @@ static int omap_iommu_add_device(struct device *dev)
return 0;
}
+static int omap_iommu_add_device(struct device *dev)
+{
+ int ret;
+
+ ret = _omap_iommu_add_device(dev);
+ if (ret == -EPROBE_DEFER)
+ return 0;
+
+ return ret;
+}
+
static void omap_iommu_remove_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1554,7 +1818,7 @@ static const struct iommu_ops omap_iommu_ops = {
static int __init omap_iommu_init(void)
{
struct kmem_cache *p;
- const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignment */
struct device_node *np;
int ret;
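A minimal sketch of how a client driver might pair the newly exported omap_iommu_domain_activate()/omap_iommu_domain_deactivate() with its own runtime PM callbacks; the client structure and callback names below are hypothetical, not taken from this patch:

	static int client_runtime_suspend(struct device *dev)
	{
		struct client_priv *priv = dev_get_drvdata(dev);	/* hypothetical */

		/* the client has idled itself; now let its IOMMUs power down */
		return omap_iommu_domain_deactivate(priv->domain);
	}

	static int client_runtime_resume(struct device *dev)
	{
		struct client_priv *priv = dev_get_drvdata(dev);	/* hypothetical */

		/* bring the IOMMUs back before the client issues any DMA */
		return omap_iommu_domain_activate(priv->domain);
	}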
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 09968a02d291..18ee713ede78 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -73,16 +73,22 @@ struct omap_iommu {
void *ctx; /* iommu context: registers saved area */
+ struct cr_regs *cr_ctx;
+ u32 num_cr_ctx;
+
int has_bus_err_back;
u32 id;
struct iommu_device iommu;
struct iommu_group *group;
+
+ u8 pwrst;
};
/**
* struct omap_iommu_arch_data - omap iommu private data
- * @iommu_dev: handle of the iommu device
+ * @iommu_dev: handle of the OMAP iommu device
+ * @dev: handle of the iommu device
*
* This is an omap iommu private data object, which binds an iommu user
* to its iommu device. This object should be placed at the iommu user's
@@ -91,6 +97,7 @@ struct omap_iommu {
*/
struct omap_iommu_arch_data {
struct omap_iommu *iommu_dev;
+ struct device *dev;
};
struct cr_regs {
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 34d0b9783b3e..c31e7bc4ccbe 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -7,6 +7,7 @@
*/
#include <linux/atomic.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
@@ -32,7 +33,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "arm-smmu-regs.h"
+#include "arm-smmu.h"
#define SMMU_INTR_SEL_NS 0x2000
@@ -155,7 +156,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
size_t s = size;
- iova &= ~12UL;
+ iova = (iova >> 12) << 12;
iova |= ctx->asid;
do {
iommu_writel(ctx, reg, iova);
@@ -164,10 +165,32 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
}
}
-static const struct iommu_gather_ops qcom_gather_ops = {
+static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
+ qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
+ qcom_iommu_tlb_sync(cookie);
+}
+
+static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+}
+
+static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_all = qcom_iommu_tlb_inv_context,
- .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
- .tlb_sync = qcom_iommu_tlb_sync,
+ .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
+ .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
+ .tlb_add_page = qcom_iommu_tlb_add_page,
};
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
@@ -215,7 +238,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
.ias = 32,
.oas = 40,
- .tlb = &qcom_gather_ops,
+ .tlb = &qcom_flush_ops,
.iommu_dev = qcom_iommu->dev,
};
@@ -247,16 +270,16 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
- ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+ FIELD_PREP(TTBRn_ASID, ctx->asid));
iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
- ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+ FIELD_PREP(TTBRn_ASID, ctx->asid));
- /* TTBCR */
- iommu_writel(ctx, ARM_SMMU_CB_TTBCR2,
+ /* TCR */
+ iommu_writel(ctx, ARM_SMMU_CB_TCR2,
(pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
- TTBCR2_SEP_UPSTREAM);
- iommu_writel(ctx, ARM_SMMU_CB_TTBCR,
+ FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+ iommu_writel(ctx, ARM_SMMU_CB_TCR,
pgtbl_cfg.arm_lpae_s1_cfg.tcr);
/* MAIRs (stage-1 only) */
@@ -417,7 +440,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
size_t ret;
unsigned long flags;
@@ -434,14 +457,14 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->unmap(ops, iova, size);
+ ret = ops->unmap(ops, iova, size, gather);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
pm_runtime_put_sync(qcom_domain->iommu->dev);
return ret;
}
-static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
@@ -454,6 +477,12 @@ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
pm_runtime_put_sync(qcom_domain->iommu->dev);
}
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ qcom_iommu_flush_iotlb_all(domain);
+}
+
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -581,7 +610,7 @@ static const struct iommu_ops qcom_iommu_ops = {
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
- .flush_iotlb_all = qcom_iommu_iotlb_sync,
+ .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.add_device = qcom_iommu_add_device,
@@ -696,10 +725,8 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
return PTR_ERR(ctx->base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return -ENODEV;
- }
/* clear IRQs before registering fault handler, just in case the
* boot-loader left us a surprise:
@@ -775,7 +802,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
struct qcom_iommu_dev *qcom_iommu;
struct device *dev = &pdev->dev;
struct resource *res;
- int ret, sz, max_asid = 0;
+ int ret, max_asid = 0;
/* find the max asid (which is 1:1 to ctx bank idx), so we know how
* many child ctx devices we have:
@@ -783,9 +810,8 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
for_each_child_of_node(dev->of_node, child)
max_asid = max(max_asid, get_asid(child));
- sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0]));
-
- qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
+ qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+ GFP_KERNEL);
if (!qcom_iommu)
return -ENOMEM;
qcom_iommu->num_ctxs = max_asid;
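The TTBR writes above switch from an open-coded shift to FIELD_PREP(), which derives the shift from the field mask. A small illustration, assuming TTBRn_ASID is defined as a GENMASK_ULL() in arm-smmu.h (the bit positions shown are illustrative only):

	#include <linux/bitfield.h>

	#define TTBRn_ASID	GENMASK_ULL(63, 48)	/* illustrative definition */

	u64 ttbr = pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];

	/* before: ttbr |= (u64)ctx->asid << TTBRn_ASID_SHIFT; */
	ttbr |= FIELD_PREP(TTBRn_ASID, ctx->asid);	/* shift and mask come from the macro */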
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index dc26d74d79c2..26290f310f90 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -794,7 +794,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
}
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 22d4db302c1c..3b0b18e23187 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -314,7 +314,8 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
}
static size_t s390_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *gather)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_INVALID;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 6d40bc1b38bf..3924f7c05544 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -207,7 +207,7 @@ static inline int __gart_iommu_unmap(struct gart_device *gart,
}
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t bytes)
+ size_t bytes, struct iommu_iotlb_gather *gather)
{
struct gart_device *gart = gart_handle;
int err;
@@ -273,11 +273,17 @@ static int gart_iommu_of_xlate(struct device *dev,
return 0;
}
-static void gart_iommu_sync(struct iommu_domain *domain)
+static void gart_iommu_sync_map(struct iommu_domain *domain)
{
FLUSH_GART_REGS(gart_handle);
}
+static void gart_iommu_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ gart_iommu_sync_map(domain);
+}
+
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc,
@@ -292,7 +298,7 @@ static const struct iommu_ops gart_iommu_ops = {
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
- .iotlb_sync_map = gart_iommu_sync,
+ .iotlb_sync_map = gart_iommu_sync_map,
.iotlb_sync = gart_iommu_sync,
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c4a652b227f8..7293fc3f796d 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -680,7 +680,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 433f4d2ee956..3ea9d7682999 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
/*
* Virtio driver for the paravirtualized IOMMU
*
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
/* Device configuration */
struct iommu_domain_geometry geometry;
u64 pgsize_bitmap;
- u8 domain_bits;
+ u32 first_domain;
+ u32 last_domain;
+ /* Supported MAP flags */
+ u32 map_flags;
u32 probe_size;
};
@@ -62,6 +65,7 @@ struct viommu_domain {
struct viommu_dev *viommu;
struct mutex mutex; /* protects viommu pointer */
unsigned int id;
+ u32 map_flags;
spinlock_t mappings_lock;
struct rb_root_cached mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
return -ENOENT;
case VIRTIO_IOMMU_S_FAULT:
return -EFAULT;
+ case VIRTIO_IOMMU_S_NOMEM:
+ return -ENOMEM;
case VIRTIO_IOMMU_S_IOERR:
case VIRTIO_IOMMU_S_DEVERR:
default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
{
int ret;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
- (1U << viommu->domain_bits) - 1;
vdomain->viommu = viommu;
+ vdomain->map_flags = viommu->map_flags;
domain->pgsize_bitmap = viommu->pgsize_bitmap;
domain->geometry = viommu->geometry;
- ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
if (ret >= 0)
vdomain->id = (unsigned int)ret;
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
int ret;
- int flags;
+ u32 flags;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
+ if (flags & ~vdomain->map_flags)
+ return -EINVAL;
+
ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
if (ret)
return ret;
@@ -742,7 +751,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
}
static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+ size_t size, struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
@@ -788,7 +797,8 @@ static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
return paddr;
}
-static void viommu_iotlb_sync(struct iommu_domain *domain)
+static void viommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -1027,7 +1037,8 @@ static int viommu_probe(struct virtio_device *vdev)
goto err_free_vqs;
}
- viommu->domain_bits = 32;
+ viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ viommu->last_domain = ~0U;
/* Optional features */
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1049,13 @@ static int viommu_probe(struct virtio_device *vdev)
struct virtio_iommu_config, input_range.end,
&input_end);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
- struct virtio_iommu_config, domain_bits,
- &viommu->domain_bits);
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.start,
+ &viommu->first_domain);
+
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.end,
+ &viommu->last_domain);
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
struct virtio_iommu_config, probe_size,
@@ -1052,6 +1067,9 @@ static int viommu_probe(struct virtio_device *vdev)
.force_aperture = true,
};
+ if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+ viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
@@ -1130,9 +1148,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
static unsigned int features[] = {
VIRTIO_IOMMU_F_MAP_UNMAP,
- VIRTIO_IOMMU_F_DOMAIN_BITS,
VIRTIO_IOMMU_F_INPUT_RANGE,
+ VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
+ VIRTIO_IOMMU_F_MMIO,
};
static struct virtio_device_id id_table[] = {
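For context on the domain_bits to domain_range change, the two encodings relate as in this sketch (the concrete numbers are only an example, not from this patch):

	/* Old device config: domain_bits = 16
	 *   -> usable domain IDs 0 .. (1 << 16) - 1 = 0 .. 65535
	 * New device config: domain_range.start = 0, domain_range.end = 65535
	 *   -> ida_alloc_range(&viommu->domain_ids, 0, 65535, GFP_KERNEL)
	 *
	 * A range also lets a device exclude low IDs, e.g. start = 1 if it
	 * wants to keep domain 0 for its own use.
	 */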
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index b0a8215a13fc..82520006195d 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -41,6 +41,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data)
{
for (; quirks->desc; quirks++) {
+ if (quirks->compatible)
+ continue;
if (quirks->iidr != (quirks->mask & iidr))
continue;
if (quirks->init(data))
@@ -63,7 +65,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
* for "irq", depending on "type".
*/
raw_spin_lock_irqsave(&irq_controller_lock, flags);
- val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ val = oldval = readl_relaxed(base + confoff);
if (type & IRQ_TYPE_LEVEL_MASK)
val &= ~confmask;
else if (type & IRQ_TYPE_EDGE_BOTH)
@@ -83,14 +85,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
* does not allow us to set the configuration or we are in a
* non-secure mode, and hence it may not be catastrophic.
*/
- writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
- if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
- if (WARN_ON(irq >= 32))
- ret = -EINVAL;
- else
- pr_warn("GIC: PPI%d is secure or misconfigured\n",
- irq - 16);
- }
+ writel_relaxed(val, base + confoff);
+ if (readl_relaxed(base + confoff) != val)
+ ret = -EINVAL;
+
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
if (sync_access)
@@ -132,26 +130,31 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
sync_access();
}
-void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
{
int i;
/*
* Deal with the banked PPI and SGI interrupts - disable all
- * PPI interrupts, ensure all SGI interrupts are enabled.
- * Make sure everything is deactivated.
+ * private interrupts. Make sure everything is deactivated.
*/
- writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
- writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
- writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
+ for (i = 0; i < nr; i += 32) {
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+ writel_relaxed(GICD_INT_EN_CLR_X32,
+ base + GIC_DIST_ENABLE_CLEAR + i / 8);
+ }
/*
* Set priority on PPI and SGI interrupts
*/
- for (i = 0; i < 32; i += 4)
+ for (i = 0; i < nr; i += 4)
writel_relaxed(GICD_INT_DEF_PRI_X4,
base + GIC_DIST_PRI + i * 4 / 4);
+ /* Ensure all SGI interrupts are now enabled */
+ writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
+
if (sync_access)
sync_access();
}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 5a46b6b57750..ccba8b0fe0f5 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -22,7 +22,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
void __iomem *base, void (*sync_access)(void));
void gic_dist_config(void __iomem *base, int gic_irqs,
void (*sync_access)(void));
-void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void));
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data);
void gic_enable_of_quirks(const struct device_node *np,
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 7338f90b2f9e..e88e75c22b6a 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -525,7 +525,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
spi_start, nr_spis);
}
- fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
+ fwnode = irq_domain_alloc_fwnode(&res.start);
if (!fwnode) {
pr_err("Unable to allocate GICv2m domain token\n");
return -EINVAL;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 730fbe0e2a9d..62e54f1a248b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2464,6 +2464,7 @@ static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number
{
int idx;
+ /* Find a free region of LPIs in lpi_map and allocate them. */
idx = bitmap_find_free_region(dev->event_map.lpi_map,
dev->event_map.nr_lpis,
get_count_order(nvecs));
@@ -2471,7 +2472,6 @@ static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number
return -ENOSPC;
*hwirq = dev->event_map.lpi_base + idx;
- set_bit(idx, dev->event_map.lpi_map);
return 0;
}
@@ -2641,14 +2641,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct its_node *its = its_dev->its;
int i;
+ bitmap_release_region(its_dev->event_map.lpi_map,
+ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+ get_count_order(nr_irqs));
+
for (i = 0; i < nr_irqs; i++) {
struct irq_data *data = irq_domain_get_irq_data(domain,
virq + i);
- u32 event = its_get_event_id(data);
-
- /* Mark interrupt index as unused */
- clear_bit(event, its_dev->event_map.lpi_map);
-
/* Nuke the entry in the domain */
irq_domain_reset_irq_data(data);
}
@@ -3010,7 +3009,7 @@ static int its_vpe_init(struct its_vpe *vpe)
if (!its_alloc_vpe_table(vpe_id)) {
its_vpe_id_free(vpe_id);
- its_free_pending_table(vpe->vpt_page);
+ its_free_pending_table(vpt_page);
return -ENOMEM;
}
@@ -3921,7 +3920,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
res.flags = IORESOURCE_MEM;
- dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
+ dom_handle = irq_domain_alloc_fwnode(&res.start);
if (!dom_handle) {
pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
&res.start);
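The dropped set_bit()/clear_bit() pair works because bitmap_find_free_region() both finds and reserves the region, and bitmap_release_region() is its inverse. A tiny self-contained illustration (not taken from the driver):

	DECLARE_BITMAP(map, 32);
	int idx;

	bitmap_zero(map, 32);
	idx = bitmap_find_free_region(map, 32, get_count_order(4));
	/* idx >= 0 here, and the 4 bits starting at idx are already set,
	 * so no extra set_bit() is needed */
	bitmap_release_region(map, idx, get_count_order(4));
	/* the same 4 bits are clear again */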
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9bca4896fa6f..422664ac5f53 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -51,13 +51,17 @@ struct gic_chip_data {
u32 nr_redist_regions;
u64 flags;
bool has_rss;
- unsigned int irq_nr;
- struct partition_desc *ppi_descs[16];
+ unsigned int ppi_nr;
+ struct partition_desc **ppi_descs;
};
static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
/*
* The behaviours of RPR and PMR registers differ depending on the value of
* SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
@@ -84,7 +88,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
-static refcount_t ppi_nmi_refs[16];
+static refcount_t *ppi_nmi_refs;
static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);
@@ -97,6 +101,38 @@ static DEFINE_PER_CPU(bool, has_rss);
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE 0xf0
+enum gic_intid_range {
+ PPI_RANGE,
+ SPI_RANGE,
+ EPPI_RANGE,
+ ESPI_RANGE,
+ LPI_RANGE,
+ __INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+ switch (hwirq) {
+ case 16 ... 31:
+ return PPI_RANGE;
+ case 32 ... 1019:
+ return SPI_RANGE;
+ case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+ return EPPI_RANGE;
+ case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+ return ESPI_RANGE;
+ case 8192 ... GENMASK(23, 0):
+ return LPI_RANGE;
+ default:
+ return __INVALID_RANGE__;
+ }
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+ return __get_intid_range(d->hwirq);
+}
+
static inline unsigned int gic_irq(struct irq_data *d)
{
return d->hwirq;
@@ -104,18 +140,26 @@ static inline unsigned int gic_irq(struct irq_data *d)
static inline int gic_irq_in_rdist(struct irq_data *d)
{
- return gic_irq(d) < 32;
+ enum gic_intid_range range = get_intid_range(d);
+ return range == PPI_RANGE || range == EPPI_RANGE;
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
- if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
+ switch (get_intid_range(d)) {
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ /* SGI+PPI -> SGI_base for this CPU */
return gic_data_rdist_sgi_base();
- if (d->hwirq <= 1023) /* SPI -> dist_base */
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ /* SPI -> dist_base */
return gic_data.dist_base;
- return NULL;
+ default:
+ return NULL;
+ }
}
static void gic_do_wait_for_rwp(void __iomem *base)
@@ -196,24 +240,79 @@ static void gic_enable_redist(bool enable)
/*
* Routines to disable, enable, EOI and route interrupts
*/
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
+{
+ switch (get_intid_range(d)) {
+ case PPI_RANGE:
+ case SPI_RANGE:
+ *index = d->hwirq;
+ return offset;
+ case EPPI_RANGE:
+ /*
+ * Contrary to the ESPI range, the EPPI range is contiguous
+ * to the PPI range in the registers, so let's adjust the
+ * displacement accordingly. Consistency is overrated.
+ */
+ *index = d->hwirq - EPPI_BASE_INTID + 32;
+ return offset;
+ case ESPI_RANGE:
+ *index = d->hwirq - ESPI_BASE_INTID;
+ switch (offset) {
+ case GICD_ISENABLER:
+ return GICD_ISENABLERnE;
+ case GICD_ICENABLER:
+ return GICD_ICENABLERnE;
+ case GICD_ISPENDR:
+ return GICD_ISPENDRnE;
+ case GICD_ICPENDR:
+ return GICD_ICPENDRnE;
+ case GICD_ISACTIVER:
+ return GICD_ISACTIVERnE;
+ case GICD_ICACTIVER:
+ return GICD_ICACTIVERnE;
+ case GICD_IPRIORITYR:
+ return GICD_IPRIORITYRnE;
+ case GICD_ICFGR:
+ return GICD_ICFGRnE;
+ case GICD_IROUTER:
+ return GICD_IROUTERnE;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(1);
+ *index = d->hwirq;
+ return offset;
+}
+
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
void __iomem *base;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
if (gic_irq_in_rdist(d))
base = gic_data_rdist_sgi_base();
else
base = gic_data.dist_base;
- return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+ return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
void (*rwp_wait)(void);
void __iomem *base;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
if (gic_irq_in_rdist(d)) {
base = gic_data_rdist_sgi_base();
@@ -223,7 +322,7 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
rwp_wait = gic_dist_wait_for_rwp;
}
- writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+ writel_relaxed(mask, base + offset + (index / 32) * 4);
rwp_wait();
}
@@ -263,7 +362,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
{
u32 reg;
- if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
+ if (d->hwirq >= 8192) /* PPI/SPI only */
return -EINVAL;
switch (which) {
@@ -290,7 +389,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
static int gic_irq_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool *val)
{
- if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
+ if (d->hwirq >= 8192) /* PPI/SPI only */
return -EINVAL;
switch (which) {
@@ -316,8 +415,23 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
void __iomem *base = gic_dist_base(d);
+ u32 offset, index;
+
+ offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
- writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d));
+ writeb_relaxed(prio, base + offset + index);
+}
+
+static u32 gic_get_ppi_index(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case PPI_RANGE:
+ return d->hwirq - 16;
+ case EPPI_RANGE:
+ return d->hwirq - EPPI_BASE_INTID + 16;
+ default:
+ unreachable();
+ }
}
static int gic_irq_nmi_setup(struct irq_data *d)
@@ -340,10 +454,12 @@ static int gic_irq_nmi_setup(struct irq_data *d)
return -EINVAL;
/* desc lock should already be held */
- if (gic_irq(d) < 32) {
+ if (gic_irq_in_rdist(d)) {
+ u32 idx = gic_get_ppi_index(d);
+
/* Setting up PPI as NMI, only switch handler for first NMI */
- if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) {
- refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1);
+ if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
+ refcount_set(&ppi_nmi_refs[idx], 1);
desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
}
} else {
@@ -375,9 +491,11 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
return;
/* desc lock should already be held */
- if (gic_irq(d) < 32) {
+ if (gic_irq_in_rdist(d)) {
+ u32 idx = gic_get_ppi_index(d);
+
/* Tearing down NMI, only switch handler for last NMI */
- if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16]))
+ if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
desc->handle_irq = handle_percpu_devid_irq;
} else {
desc->handle_irq = handle_fasteoi_irq;
@@ -404,17 +522,22 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ enum gic_intid_range range;
unsigned int irq = gic_irq(d);
void (*rwp_wait)(void);
void __iomem *base;
+ u32 offset, index;
+ int ret;
/* Interrupt configuration for SGIs can't be changed */
if (irq < 16)
return -EINVAL;
+ range = get_intid_range(d);
+
/* SPIs have restrictions on the supported types */
- if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
- type != IRQ_TYPE_EDGE_RISING)
+ if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+ type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
if (gic_irq_in_rdist(d)) {
@@ -425,7 +548,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
rwp_wait = gic_dist_wait_for_rwp;
}
- return gic_configure_irq(irq, type, base, rwp_wait);
+ offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+ ret = gic_configure_irq(index, type, base + offset, rwp_wait);
+ if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ ret = 0;
+ }
+
+ return ret;
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
@@ -500,7 +632,12 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
gic_arch_enable_irqs();
}
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ /* Check for special IDs first */
+ if ((irqnr >= 1020 && irqnr <= 1023))
+ return;
+
+ /* Treat anything but SGIs in a uniform way */
+ if (likely(irqnr > 15)) {
int err;
if (static_branch_likely(&supports_deactivate_key))
@@ -588,10 +725,26 @@ static void __init gic_dist_init(void)
* do the right thing if the kernel is running in secure mode,
* but that's not the intended use case anyway.
*/
- for (i = 32; i < gic_data.irq_nr; i += 32)
+ for (i = 32; i < GIC_LINE_NR; i += 32)
writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
- gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+ /* Extended SPI range, not handled by the GICv2/GICv3 common code */
+ for (i = 0; i < GIC_ESPI_NR; i += 32) {
+ writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+ writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+ }
+
+ for (i = 0; i < GIC_ESPI_NR; i += 32)
+ writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 16)
+ writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 4)
+ writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+
+ /* Now do the common stuff, and wait for the distributor to drain */
+ gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
/* Enable distributor with ARE, Group1 */
writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
@@ -602,8 +755,11 @@ static void __init gic_dist_init(void)
* enabled.
*/
affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
- for (i = 32; i < gic_data.irq_nr; i++)
+ for (i = 32; i < GIC_LINE_NR; i++)
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i++)
+ gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
@@ -689,19 +845,24 @@ static int gic_populate_rdist(void)
return -ENODEV;
}
-static int __gic_update_vlpi_properties(struct redist_region *region,
- void __iomem *ptr)
+static int __gic_update_rdist_properties(struct redist_region *region,
+ void __iomem *ptr)
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+ gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
return 1;
}
-static void gic_update_vlpi_properties(void)
+static void gic_update_rdist_properties(void)
{
- gic_iterate_rdists(__gic_update_vlpi_properties);
+ gic_data.ppi_nr = UINT_MAX;
+ gic_iterate_rdists(__gic_update_rdist_properties);
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+ gic_data.ppi_nr = 0;
+ pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
pr_info("%sVLPI support, %sdirect LPI support\n",
!gic_data.rdists.has_vlpis ? "no " : "",
!gic_data.rdists.has_direct_lpi ? "no " : "");
@@ -771,8 +932,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
@@ -786,8 +949,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
@@ -841,6 +1006,7 @@ static int gic_dist_supports_lpis(void)
static void gic_cpu_init(void)
{
void __iomem *rbase;
+ int i;
/* Register ourselves with the rest of the world */
if (gic_populate_rdist())
@@ -848,12 +1014,18 @@ static void gic_cpu_init(void)
gic_enable_redist(true);
+ WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
+ !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
+ "Distributor has extended ranges, but CPU%d doesn't\n",
+ smp_processor_id());
+
rbase = gic_data_rdist_sgi_base();
/* Configure SGIs/PPIs as non-secure Group-1 */
- writel_relaxed(~0, rbase + GICR_IGROUPR0);
+ for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
+ writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
- gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+ gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
/* initialise system registers */
gic_cpu_sys_reg_init();
@@ -957,6 +1129,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
unsigned int cpu;
+ u32 offset, index;
void __iomem *reg;
int enabled;
u64 val;
@@ -977,7 +1150,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (enabled)
gic_mask_irq(d);
- reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+ offset = convert_offset_index(d, GICD_IROUTER, &index);
+ reg = gic_dist_base(d) + offset + (index * 8);
val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
gic_write_irouter(val, reg);
@@ -1061,8 +1235,6 @@ static struct irq_chip gic_eoimode1_chip = {
IRQCHIP_MASK_ON_SUSPEND,
};
-#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
@@ -1071,36 +1243,32 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
if (static_branch_likely(&supports_deactivate_key))
chip = &gic_eoimode1_chip;
- /* SGIs are private to the core kernel */
- if (hw < 16)
- return -EPERM;
- /* Nothing here */
- if (hw >= gic_data.irq_nr && hw < 8192)
- return -EPERM;
- /* Off limits */
- if (hw >= GIC_ID_NR)
- return -EPERM;
-
- /* PPIs */
- if (hw < 32) {
+ switch (__get_intid_range(hw)) {
+ case PPI_RANGE:
+ case EPPI_RANGE:
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
- }
- /* SPIs */
- if (hw >= 32 && hw < gic_data.irq_nr) {
+ break;
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
- }
- /* LPIs */
- if (hw >= 8192 && hw < GIC_ID_NR) {
+ break;
+
+ case LPI_RANGE:
if (!gic_dist_supports_lpis())
return -EPERM;
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
+ break;
+
+ default:
+ return -EPERM;
}
return 0;
@@ -1122,12 +1290,24 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*hwirq = fwspec->param[1] + 32;
break;
case 1: /* PPI */
- case GIC_IRQ_TYPE_PARTITION:
*hwirq = fwspec->param[1] + 16;
break;
+ case 2: /* ESPI */
+ *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
+ break;
+ case 3: /* EPPI */
+ *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
+ break;
case GIC_IRQ_TYPE_LPI: /* LPI */
*hwirq = fwspec->param[1];
break;
+ case GIC_IRQ_TYPE_PARTITION:
+ *hwirq = fwspec->param[1];
+ if (fwspec->param[1] >= 16)
+ *hwirq += EPPI_BASE_INTID - 16;
+ else
+ *hwirq += 16;
+ break;
default:
return -EINVAL;
}
@@ -1207,7 +1387,8 @@ static int gic_irq_domain_select(struct irq_domain *d,
* then we need to match the partition domain.
*/
if (fwspec->param_count >= 4 &&
- fwspec->param[0] == 1 && fwspec->param[3] != 0)
+ fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
+ gic_data.ppi_descs)
return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
return d == gic_data.domain;
@@ -1228,6 +1409,9 @@ static int partition_domain_translate(struct irq_domain *d,
struct device_node *np;
int ret;
+ if (!gic_data.ppi_descs)
+ return -ENOMEM;
+
np = of_find_node_by_phandle(fwspec->param[3]);
if (WARN_ON(!np))
return -EINVAL;
@@ -1257,11 +1441,65 @@ static bool gic_enable_quirk_msm8996(void *data)
return true;
}
+static bool gic_enable_quirk_hip06_07(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ /*
+ * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
+ * not being an actual ARM implementation). The saving grace is
+ * that GIC-600 doesn't have ESPI, so nothing to do in that case.
+ * HIP07 doesn't even have a proper IIDR, and still pretends to
+ * have ESPI. In both cases, put them right.
+ */
+ if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
+ /* Zero both ESPI and the RES0 field next to it... */
+ d->rdists.gicd_typer &= ~GENMASK(9, 8);
+ return true;
+ }
+
+ return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "GICv3: Qualcomm MSM8996 broken firmware",
+ .compatible = "qcom,msm8996-gic-v3",
+ .init = gic_enable_quirk_msm8996,
+ },
+ {
+ .desc = "GICv3: HIP06 erratum 161010803",
+ .iidr = 0x0204043b,
+ .mask = 0xffffffff,
+ .init = gic_enable_quirk_hip06_07,
+ },
+ {
+ .desc = "GICv3: HIP07 erratum 161010803",
+ .iidr = 0x00000000,
+ .mask = 0xffffffff,
+ .init = gic_enable_quirk_hip06_07,
+ },
+ {
+ }
+};
+
static void gic_enable_nmi_support(void)
{
int i;
- for (i = 0; i < 16; i++)
+ if (!gic_prio_masking_enabled())
+ return;
+
+ if (gic_has_group0() && !gic_dist_security_disabled()) {
+ pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
+ return;
+ }
+
+ ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
+ if (!ppi_nmi_refs)
+ return;
+
+ for (i = 0; i < gic_data.ppi_nr; i++)
refcount_set(&ppi_nmi_refs[i], 0);
static_branch_enable(&supports_pseudo_nmis);
@@ -1279,7 +1517,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
struct fwnode_handle *handle)
{
u32 typer;
- int gic_irqs;
int err;
if (!is_hyp_mode_available())
@@ -1296,15 +1533,15 @@ static int __init gic_init_bases(void __iomem *dist_base,
/*
* Find out how many interrupts are supported.
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
*/
typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
gic_data.rdists.gicd_typer = typer;
- gic_irqs = GICD_TYPER_IRQS(typer);
- if (gic_irqs > 1020)
- gic_irqs = 1020;
- gic_data.irq_nr = gic_irqs;
+ gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
+ gic_quirks, &gic_data);
+
+ pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
+ pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
@@ -1329,7 +1566,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
set_handle_irq(gic_handle_irq);
- gic_update_vlpi_properties();
+ gic_update_rdist_properties();
gic_smp_init();
gic_dist_init();
@@ -1344,12 +1581,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
gicv2m_init(handle, gic_data.domain);
}
- if (gic_prio_masking_enabled()) {
- if (!gic_has_group0() || gic_dist_security_disabled())
- gic_enable_nmi_support();
- else
- pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
- }
+ gic_enable_nmi_support();
return 0;
@@ -1382,6 +1614,10 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (!parts_node)
return;
+ gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
+ if (!gic_data.ppi_descs)
+ return;
+
nr_parts = of_get_child_count(parts_node);
if (!nr_parts)
@@ -1433,7 +1669,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
part_idx++;
}
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < gic_data.ppi_nr; i++) {
unsigned int irq;
struct partition_desc *desc;
struct irq_fwspec ppi_fwspec = {
@@ -1486,16 +1722,6 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_set_kvm_info(&gic_v3_kvm_info);
}
-static const struct gic_quirk gic_quirks[] = {
- {
- .desc = "GICv3: Qualcomm MSM8996 broken firmware",
- .compatible = "qcom,msm8996-gic-v3",
- .init = gic_enable_quirk_msm8996,
- },
- {
- }
-};
-
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
@@ -1841,7 +2067,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
if (err)
goto out_redist_unmap;
- domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
+ domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
if (!domain_handle) {
err = -ENOMEM;
goto out_redist_unmap;
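As a worked example of the new extended-range handling (assuming the usual GICv3.1 numbering, ESPI_BASE_INTID = 4096 and EPPI_BASE_INTID = 1056):

	/* hwirq 4100: __get_intid_range() -> ESPI_RANGE
	 *   convert_offset_index(d, GICD_ISENABLER, &index)
	 *     -> returns GICD_ISENABLERnE, index = 4100 - 4096 = 4
	 *   so the enable bit is bit 4 of GICD_ISENABLERnE + (4 / 32) * 4
	 *
	 * hwirq 1060: __get_intid_range() -> EPPI_RANGE
	 *   convert_offset_index(d, GICD_ISENABLER, &index)
	 *     -> returns GICD_ISENABLER unchanged, index = 1060 - 1056 + 32 = 36,
	 *   i.e. the EPPIs simply follow the PPIs in the redistributor registers
	 */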
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index e45f45e68720..30ab623343d3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -291,6 +291,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
+ int ret;
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
@@ -301,7 +302,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- return gic_configure_irq(gicirq, type, base, NULL);
+ ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
+ if (ret && gicirq < 32) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
+ ret = 0;
+ }
+
+ return ret;
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
@@ -535,7 +543,7 @@ static int gic_cpu_init(struct gic_chip_data *gic)
gic_cpu_map[i] &= ~cpu_mask;
}
- gic_cpu_config(dist_base, NULL);
+ gic_cpu_config(dist_base, 32, NULL);
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
@@ -1627,7 +1635,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
/*
* Initialize GIC instance zero (no multi-GIC support).
*/
- domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
+ domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
gic_teardown(gic);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index cf705827599c..130caa1c9d93 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -130,7 +130,12 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&irq_controller_lock);
- ret = gic_configure_irq(irq, type, base, NULL);
+ ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
+ if (ret && irq < 32) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
+ ret = 0;
+ }
raw_spin_unlock(&irq_controller_lock);
@@ -268,7 +273,7 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
if (i != cpu)
hip04_cpu_map[i] &= ~cpu_mask;
- gic_cpu_config(dist_base, NULL);
+ gic_cpu_config(dist_base, 32, NULL);
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
writel_relaxed(1, base + GIC_CPU_CTRL);
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index d00489a4b54f..698d07f48fed 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -362,10 +362,8 @@ static int pdc_intc_probe(struct platform_device *pdev)
}
for (i = 0; i < priv->nr_perips; ++i) {
irq = platform_get_irq(pdev, 1 + i);
- if (irq < 0) {
- dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
+ if (irq < 0)
return irq;
- }
priv->perip_irqs[i] = irq;
}
/* check if too many were provided */
@@ -376,10 +374,8 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Get syswake IRQ number */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "cannot find syswake IRQ\n");
+ if (irq < 0)
return irq;
- }
priv->syswake_irq = irq;
/* Set up an IRQ domain */
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bf2237ac5d09..4f74c15c4755 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_unmask = imx_gpcv2_irq_unmask,
.irq_set_wake = imx_gpcv2_irq_set_wake,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 6751c35b7e1d..37e0749215c7 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -319,7 +319,7 @@ void __init ixp4xx_irq_init(resource_size_t irqbase,
pr_crit("IXP4XX: could not ioremap interrupt controller\n");
return;
}
- fwnode = irq_domain_alloc_fwnode(base);
+ fwnode = irq_domain_alloc_fwnode(&irqbase);
if (!fwnode) {
pr_crit("IXP4XX: no domain handle\n");
return;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index efbcf8435185..8118ebe80b09 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -164,10 +164,8 @@ static int keystone_irq_probe(struct platform_device *pdev)
}
kirq->irq = platform_get_irq(pdev, 0);
- if (kirq->irq < 0) {
- dev_err(dev, "no irq resource %d\n", kirq->irq);
+ if (kirq->irq < 0)
return kirq->irq;
- }
kirq->dev = dev;
kirq->mask = ~0x0;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3dd28382d5f5..3f09f658e8e2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent);
- if (!child)
+ if (!child) {
+ of_node_put(np);
return -ENOMEM;
+ }
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
+ of_node_put(np);
return -EINVAL;
}
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
mbigen_write_msg,
&mbigen_domain_ops,
mgn_chip);
- if (!domain)
+ if (!domain) {
+ of_node_put(np);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index dcdc23b9dce6..829084b568fa 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -24,14 +24,25 @@
#define REG_PIN_47_SEL 0x08
#define REG_FILTER_SEL 0x0c
-#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
+/*
+ * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
+ * bits 24 to 31. Tests on the actual HW show that these bits are
+ * stuck at 0. Bits 8 to 15 are responsive and have the expected
+ * effect.
+ */
#define REG_EDGE_POL_EDGE(x) BIT(x)
#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
+#define REG_BOTH_EDGE(x) BIT(8 + (x))
+#define REG_EDGE_POL_MASK(x) ( \
+ REG_EDGE_POL_EDGE(x) | \
+ REG_EDGE_POL_LOW(x) | \
+ REG_BOTH_EDGE(x))
#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
struct meson_gpio_irq_params {
unsigned int nr_hwirq;
+ bool support_edge_both;
};
static const struct meson_gpio_irq_params meson8_params = {
@@ -54,6 +65,11 @@ static const struct meson_gpio_irq_params axg_params = {
.nr_hwirq = 100,
};
+static const struct meson_gpio_irq_params sm1_params = {
+ .nr_hwirq = 100,
+ .support_edge_both = true,
+};
+
static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -61,11 +77,12 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
{ .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
+ { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ }
};
struct meson_gpio_irq_controller {
- unsigned int nr_hwirq;
+ const struct meson_gpio_irq_params *params;
void __iomem *base;
u32 channel_irqs[NUM_CHANNEL];
DECLARE_BITMAP(channel_map, NUM_CHANNEL);
@@ -168,14 +185,22 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
*/
type &= IRQ_TYPE_SENSE_MASK;
- if (type == IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
+ /*
+ * Newer controllers support the EDGE_BOTH trigger. This setting takes
+ * precedence over the other edge/polarity settings.
+ */
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ if (!ctl->params->support_edge_both)
+ return -EINVAL;
- if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_EDGE(idx);
+ val |= REG_BOTH_EDGE(idx);
+ } else {
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_EDGE(idx);
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
- val |= REG_EDGE_POL_LOW(idx);
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_LOW(idx);
+ }
spin_lock(&ctl->lock);
@@ -199,7 +224,7 @@ static unsigned int meson_gpio_irq_type_output(unsigned int type)
*/
if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
type |= IRQ_TYPE_LEVEL_HIGH;
- else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ else
type |= IRQ_TYPE_EDGE_RISING;
return type;
@@ -328,15 +353,13 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
struct meson_gpio_irq_controller *ctl)
{
const struct of_device_id *match;
- const struct meson_gpio_irq_params *params;
int ret;
match = of_match_node(meson_irq_gpio_matches, node);
if (!match)
return -ENODEV;
- params = match->data;
- ctl->nr_hwirq = params->nr_hwirq;
+ ctl->params = match->data;
ret = of_property_read_variable_u32_array(node,
"amlogic,channel-interrupts",
@@ -385,7 +408,8 @@ static int __init meson_gpio_irq_of_init(struct device_node *node,
if (ret)
goto free_channel_irqs;
- domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
+ domain = irq_domain_create_hierarchy(parent_domain, 0,
+ ctl->params->nr_hwirq,
of_node_to_fwnode(node),
&meson_gpio_irq_domain_ops,
ctl);
@@ -396,7 +420,7 @@ static int __init meson_gpio_irq_of_init(struct device_node *node,
}
pr_info("%d to %d gpio interrupt mux initialized\n",
- ctl->nr_hwirq, NUM_CHANNEL);
+ ctl->params->nr_hwirq, NUM_CHANNEL);
return 0;
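
Editorial note on the irq-meson-gpio.c hunk above (not part of the patch): the standalone sketch below reproduces, in plain C with a stub BIT() macro, the bit layout the new EDGE_BOTH path programs, so the mask/value pairs can be checked in isolation. Channel index 3 is an arbitrary example; the layout itself is taken from the macros added in the hunk.

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define REG_EDGE_POL_EDGE(x)	BIT(x)
#define REG_EDGE_POL_LOW(x)	BIT(16 + (x))
#define REG_BOTH_EDGE(x)	BIT(8 + (x))	/* bits 8-15 are the responsive ones per the comment in the patch */
#define REG_EDGE_POL_MASK(x)	(REG_EDGE_POL_EDGE(x) | \
				 REG_EDGE_POL_LOW(x)  | \
				 REG_BOTH_EDGE(x))

int main(void)
{
	int idx = 3;	/* arbitrary example channel */

	/* bits cleared before programming the new trigger type */
	printf("mask      = 0x%08x\n", REG_EDGE_POL_MASK(idx));			/* 0x00080808 */
	/* value written for IRQ_TYPE_EDGE_BOTH (SM1 and later only) */
	printf("edge-both = 0x%08x\n", REG_BOTH_EDGE(idx));			/* 0x00000800 */
	/* value written for IRQ_TYPE_EDGE_FALLING */
	printf("falling   = 0x%08x\n",
	       REG_EDGE_POL_EDGE(idx) | REG_EDGE_POL_LOW(idx));		/* 0x00080008 */
	return 0;
}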
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 14618dc0bd39..4a74ac7b7c42 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -43,6 +44,7 @@ struct icu_chip_data {
unsigned int conf_enable;
unsigned int conf_disable;
unsigned int conf_mask;
+ unsigned int conf2_mask;
unsigned int clr_mfp_irq_base;
unsigned int clr_mfp_hwirq;
struct irq_domain *domain;
@@ -52,9 +54,11 @@ struct mmp_intc_conf {
unsigned int conf_enable;
unsigned int conf_disable;
unsigned int conf_mask;
+ unsigned int conf2_mask;
};
static void __iomem *mmp_icu_base;
+static void __iomem *mmp_icu2_base;
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;
@@ -97,6 +101,16 @@ static void icu_mask_irq(struct irq_data *d)
r &= ~data->conf_mask;
r |= data->conf_disable;
writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+
+ if (data->conf2_mask) {
+ /*
+ * ICU1 (above) only controls PJ4 MP1; if using SMP,
+ * we need to also mask the MP2 and MM cores via ICU2.
+ */
+ r = readl_relaxed(mmp_icu2_base + (hwirq << 2));
+ r &= ~data->conf2_mask;
+ writel_relaxed(r, mmp_icu2_base + (hwirq << 2));
+ }
} else {
r = readl_relaxed(data->reg_mask) | (1 << hwirq);
writel_relaxed(r, data->reg_mask);
@@ -132,11 +146,14 @@ struct irq_chip icu_irq_chip = {
static void icu_mux_irq_demux(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *domain;
struct icu_chip_data *data;
int i;
unsigned long mask, status, n;
+ chained_irq_enter(chip, desc);
+
for (i = 1; i < max_icu_nr; i++) {
if (irq == icu_data[i].cascade_irq) {
domain = icu_data[i].domain;
@@ -146,7 +163,7 @@ static void icu_mux_irq_demux(struct irq_desc *desc)
}
if (i >= max_icu_nr) {
pr_err("Spurious irq %d in MMP INTC\n", irq);
- return;
+ goto out;
}
mask = readl_relaxed(data->reg_mask);
@@ -158,6 +175,9 @@ static void icu_mux_irq_demux(struct irq_desc *desc)
generic_handle_irq(icu_data[i].virq_base + n);
}
}
+
+out:
+ chained_irq_exit(chip, desc);
}
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -194,6 +214,14 @@ static const struct mmp_intc_conf mmp2_conf = {
MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
+static struct mmp_intc_conf mmp3_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
+ .conf2_mask = 0xf0,
+};
+
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
{
int hwirq;
@@ -395,7 +423,6 @@ static int __init mmp_of_init(struct device_node *node,
icu_data[0].conf_enable = mmp_conf.conf_enable;
icu_data[0].conf_disable = mmp_conf.conf_disable;
icu_data[0].conf_mask = mmp_conf.conf_mask;
- irq_set_default_host(icu_data[0].domain);
set_handle_irq(mmp_handle_irq);
max_icu_nr = 1;
return 0;
@@ -414,19 +441,50 @@ static int __init mmp2_of_init(struct device_node *node,
icu_data[0].conf_enable = mmp2_conf.conf_enable;
icu_data[0].conf_disable = mmp2_conf.conf_disable;
icu_data[0].conf_mask = mmp2_conf.conf_mask;
- irq_set_default_host(icu_data[0].domain);
set_handle_irq(mmp2_handle_irq);
max_icu_nr = 1;
return 0;
}
IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+static int __init mmp3_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ mmp_icu2_base = of_iomap(node, 1);
+ if (!mmp_icu2_base) {
+ pr_err("Failed to get interrupt controller register #2\n");
+ return -ENODEV;
+ }
+
+ ret = mmp_init_bases(node);
+ if (ret < 0) {
+ iounmap(mmp_icu2_base);
+ return ret;
+ }
+
+ icu_data[0].conf_enable = mmp3_conf.conf_enable;
+ icu_data[0].conf_disable = mmp3_conf.conf_disable;
+ icu_data[0].conf_mask = mmp3_conf.conf_mask;
+ icu_data[0].conf2_mask = mmp3_conf.conf2_mask;
+
+ if (!parent) {
+ /* This is the main interrupt controller. */
+ set_handle_irq(mmp2_handle_irq);
+ }
+
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);
+
static int __init mmp2_mux_of_init(struct device_node *node,
struct device_node *parent)
{
- struct resource res;
int i, ret, irq, j = 0;
u32 nr_irqs, mfp_irq;
+ u32 reg[4];
if (!parent)
return -ENODEV;
@@ -438,18 +496,22 @@ static int __init mmp2_mux_of_init(struct device_node *node,
pr_err("Not found mrvl,intc-nr-irqs property\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &res);
- if (ret < 0) {
- pr_err("Not found reg property\n");
- return -EINVAL;
- }
- icu_data[i].reg_status = mmp_icu_base + res.start;
- ret = of_address_to_resource(node, 1, &res);
+
+ /*
+ * For historical reasons, the "regs" property of the
+ * mrvl,mmp2-mux-intc is not a regular "regs" property containing
+ * addresses on the parent bus, but offsets from the intc's base.
+ * That is why we can't use of_address_to_resource() here.
+ */
+ ret = of_property_read_variable_u32_array(node, "reg", reg,
+ ARRAY_SIZE(reg),
+ ARRAY_SIZE(reg));
if (ret < 0) {
pr_err("Not found reg property\n");
return -EINVAL;
}
- icu_data[i].reg_mask = mmp_icu_base + res.start;
+ icu_data[i].reg_status = mmp_icu_base + reg[0];
+ icu_data[i].reg_mask = mmp_icu_base + reg[2];
icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
if (!icu_data[i].cascade_irq)
return -EINVAL;
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index cf755964f2f8..c72c036aea76 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -244,6 +244,7 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
+ u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -266,10 +267,16 @@ static int __init plic_init(struct device_node *node,
continue;
}
+ /*
+ * When running in M-mode we need to ignore the S-mode handler.
+ * Here we assume it always comes later, but that might be a
+ * little fragile.
+ */
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
- continue;
+ threshold = 0xffffffff;
+ goto done;
}
handler->present = true;
@@ -279,8 +286,9 @@ static int __init plic_init(struct device_node *node,
handler->enable_base =
plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+done:
/* priority must be > threshold to trigger an interrupt */
- writel(0, handler->hart_base + CONTEXT_THRESHOLD);
+ writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
index ed7b4f47ff3f..89121b39be26 100644
--- a/drivers/irqchip/irq-uniphier-aidet.c
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -166,7 +166,6 @@ static int uniphier_aidet_probe(struct platform_device *pdev)
struct device_node *parent_np;
struct irq_domain *parent_domain;
struct uniphier_aidet_priv *priv;
- struct resource *res;
parent_np = of_irq_find_parent(dev->of_node);
if (!parent_np)
@@ -181,8 +180,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(dev, res);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index d88e993aa66d..abfe59284ff2 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -248,10 +248,8 @@ static int __init combiner_probe(struct platform_device *pdev)
return err;
combiner->parent_irq = platform_get_irq(pdev, 0);
- if (combiner->parent_irq <= 0) {
- dev_err(&pdev->dev, "Error getting IRQ resource\n");
+ if (combiner->parent_irq <= 0)
return -EPROBE_DEFER;
- }
combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
&domain_ops, combiner);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 3c3ad42f22bf..c92b405b7646 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
if (!cdev->ap.applid)
return -ENODEV;
+ if (count < CAPIMSG_BASELEN)
+ return -EINVAL;
+
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+ if (count < CAPI_DATA_B3_REQ_LEN ||
+ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 0e224232f746..008a74a1ed44 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
printk(KERN_DEBUG
"%s: %s: alloc urb for fifo %i failed",
hw->name, __func__, fifo->fifonum);
+ continue;
}
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
fifo->iso[i].indx = i;
@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
static int
setup_hfcsusb(struct hfcsusb *hw)
{
+ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
u_char b;
+ int ret;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+ memcpy(&b, dmabuf, sizeof(u_char));
+ kfree(dmabuf);
+
/* check the chip id */
- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+ if (ret != 1) {
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
hw->name, __func__);
return 1;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b0fdeef10bd9..1988de1d64c0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -74,9 +74,12 @@ config LEDS_APU
depends on LEDS_CLASS
depends on X86 && DMI
help
- This driver makes the PC Engines APU/APU2/APU3 front panel LEDs
+ This driver makes the PC Engines APU1 front panel LEDs
accessible from userspace programs through the LED subsystem.
+ If you're looking for APU2/3, use the pcengines-apu2 driver.
+ (symbol CONFIG_PCENGINES_APU2)
+
To compile this driver as a module, choose M here: the
module will be called leds-apu.
@@ -587,6 +590,7 @@ config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
depends on LEDS_CLASS
depends on MACH_KIRKWOOD
+ depends on OF_GPIO
default y
help
This option enables support for LEDs found on the LaCie 2Big
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 94980c654d89..60c3de5c6b9f 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -282,8 +282,9 @@ static void led_flash_init_sysfs_groups(struct led_classdev_flash *fled_cdev)
led_cdev->groups = flash_groups;
}
-int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev)
+int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
{
struct led_classdev *led_cdev;
const struct led_flash_ops *ops;
@@ -309,13 +310,13 @@ int led_classdev_flash_register(struct device *parent,
}
/* Register led class device */
- ret = led_classdev_register(parent, led_cdev);
+ ret = led_classdev_register_ext(parent, led_cdev, init_data);
if (ret < 0)
return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(led_classdev_flash_register);
+EXPORT_SYMBOL_GPL(led_classdev_flash_register_ext);
void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
{
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 4793e77808e2..647b1263c579 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -14,6 +14,7 @@
#include <linux/leds.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
@@ -213,13 +214,6 @@ static int led_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
-static int match_name(struct device *dev, const void *data)
-{
- if (!dev_name(dev))
- return 0;
- return !strcmp(dev_name(dev), (char *)data);
-}
-
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
@@ -230,7 +224,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
strlcpy(name, init_name, len);
while ((ret < len) &&
- (dev = class_find_device(leds_class, NULL, name, match_name))) {
+ (dev = class_find_device_by_name(leds_class, name))) {
put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
}
@@ -242,31 +236,48 @@ static int led_classdev_next_name(const char *init_name, char *name,
}
/**
- * of_led_classdev_register - register a new object of led_classdev class.
+ * led_classdev_register_ext - register a new object of led_classdev class
+ * with init data.
*
* @parent: parent of LED device
* @led_cdev: the led_classdev structure for this device.
- * @np: DT node describing this LED
+ * @init_data: LED class device initialization data
*/
-int of_led_classdev_register(struct device *parent, struct device_node *np,
- struct led_classdev *led_cdev)
+int led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data)
{
- char name[LED_MAX_NAME_SIZE];
+ char composed_name[LED_MAX_NAME_SIZE];
+ char final_name[LED_MAX_NAME_SIZE];
+ const char *proposed_name = composed_name;
int ret;
- ret = led_classdev_next_name(led_cdev->name, name, sizeof(name));
+ if (init_data) {
+ if (init_data->devname_mandatory && !init_data->devicename) {
+ dev_err(parent, "Mandatory device name is missing");
+ return -EINVAL;
+ }
+ ret = led_compose_name(parent, init_data, composed_name);
+ if (ret < 0)
+ return ret;
+ } else {
+ proposed_name = led_cdev->name;
+ }
+
+ ret = led_classdev_next_name(proposed_name, final_name, sizeof(final_name));
if (ret < 0)
return ret;
mutex_init(&led_cdev->led_access);
mutex_lock(&led_cdev->led_access);
led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
- led_cdev, led_cdev->groups, "%s", name);
+ led_cdev, led_cdev->groups, "%s", final_name);
if (IS_ERR(led_cdev->dev)) {
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
}
- led_cdev->dev->of_node = np;
+ if (init_data && init_data->fwnode)
+ led_cdev->dev->fwnode = init_data->fwnode;
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
@@ -276,6 +287,7 @@ int of_led_classdev_register(struct device *parent, struct device_node *np,
ret = led_add_brightness_hw_changed(led_cdev);
if (ret) {
device_unregister(led_cdev->dev);
+ led_cdev->dev = NULL;
mutex_unlock(&led_cdev->led_access);
return ret;
}
@@ -311,7 +323,7 @@ int of_led_classdev_register(struct device *parent, struct device_node *np,
return 0;
}
-EXPORT_SYMBOL_GPL(of_led_classdev_register);
+EXPORT_SYMBOL_GPL(led_classdev_register_ext);
/**
* led_classdev_unregister - unregisters a object of led_properties class.
@@ -321,6 +333,9 @@ EXPORT_SYMBOL_GPL(of_led_classdev_register);
*/
void led_classdev_unregister(struct led_classdev *led_cdev)
{
+ if (IS_ERR_OR_NULL(led_cdev->dev))
+ return;
+
#ifdef CONFIG_LEDS_TRIGGERS
down_write(&led_cdev->trigger_lock);
if (led_cdev->trigger)
@@ -356,14 +371,15 @@ static void devm_led_classdev_release(struct device *dev, void *res)
}
/**
- * devm_of_led_classdev_register - resource managed led_classdev_register()
+ * devm_led_classdev_register_ext - resource managed led_classdev_register_ext()
*
* @parent: parent of LED device
* @led_cdev: the led_classdev structure for this device.
+ * @init_data: LED class device initialization data
*/
-int devm_of_led_classdev_register(struct device *parent,
- struct device_node *np,
- struct led_classdev *led_cdev)
+int devm_led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data)
{
struct led_classdev **dr;
int rc;
@@ -372,7 +388,7 @@ int devm_of_led_classdev_register(struct device *parent,
if (!dr)
return -ENOMEM;
- rc = of_led_classdev_register(parent, np, led_cdev);
+ rc = led_classdev_register_ext(parent, led_cdev, init_data);
if (rc) {
devres_free(dr);
return rc;
@@ -383,7 +399,7 @@ int devm_of_led_classdev_register(struct device *parent,
return 0;
}
-EXPORT_SYMBOL_GPL(devm_of_led_classdev_register);
+EXPORT_SYMBOL_GPL(devm_led_classdev_register_ext);
static int devm_led_classdev_match(struct device *dev, void *res, void *data)
{
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 7107cd7e87cf..f1f718dbe0f8 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -13,8 +13,10 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
#include "leds.h"
DECLARE_RWSEM(leds_list_lock);
@@ -23,6 +25,18 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
+const char * const led_colors[LED_COLOR_ID_MAX] = {
+ [LED_COLOR_ID_WHITE] = "white",
+ [LED_COLOR_ID_RED] = "red",
+ [LED_COLOR_ID_GREEN] = "green",
+ [LED_COLOR_ID_BLUE] = "blue",
+ [LED_COLOR_ID_AMBER] = "amber",
+ [LED_COLOR_ID_VIOLET] = "violet",
+ [LED_COLOR_ID_YELLOW] = "yellow",
+ [LED_COLOR_ID_IR] = "ir",
+};
+EXPORT_SYMBOL_GPL(led_colors);
+
static int __led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -310,14 +324,11 @@ EXPORT_SYMBOL_GPL(led_update_brightness);
u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size)
{
- struct device_node *np = dev_of_node(led_cdev->dev);
+ struct fwnode_handle *fwnode = led_cdev->dev->fwnode;
u32 *pattern;
int count;
- if (!np)
- return NULL;
-
- count = of_property_count_u32_elems(np, "led-pattern");
+ count = fwnode_property_count_u32(fwnode, "led-pattern");
if (count < 0)
return NULL;
@@ -325,7 +336,7 @@ u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size)
if (!pattern)
return NULL;
- if (of_property_read_u32_array(np, "led-pattern", pattern, count)) {
+ if (fwnode_property_read_u32_array(fwnode, "led-pattern", pattern, count)) {
kfree(pattern);
return NULL;
}
@@ -353,3 +364,116 @@ void led_sysfs_enable(struct led_classdev *led_cdev)
led_cdev->flags &= ~LED_SYSFS_DISABLE;
}
EXPORT_SYMBOL_GPL(led_sysfs_enable);
+
+static void led_parse_fwnode_props(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct led_properties *props)
+{
+ int ret;
+
+ if (!fwnode)
+ return;
+
+ if (fwnode_property_present(fwnode, "label")) {
+ ret = fwnode_property_read_string(fwnode, "label", &props->label);
+ if (ret)
+ dev_err(dev, "Error parsing 'label' property (%d)\n", ret);
+ return;
+ }
+
+ if (fwnode_property_present(fwnode, "color")) {
+ ret = fwnode_property_read_u32(fwnode, "color", &props->color);
+ if (ret)
+ dev_err(dev, "Error parsing 'color' property (%d)\n", ret);
+ else if (props->color >= LED_COLOR_ID_MAX)
+ dev_err(dev, "LED color identifier out of range\n");
+ else
+ props->color_present = true;
+ }
+
+ if (!fwnode_property_present(fwnode, "function"))
+ return;
+
+ ret = fwnode_property_read_string(fwnode, "function", &props->function);
+ if (ret) {
+ dev_err(dev,
+ "Error parsing 'function' property (%d)\n",
+ ret);
+ }
+
+ if (!fwnode_property_present(fwnode, "function-enumerator"))
+ return;
+
+ ret = fwnode_property_read_u32(fwnode, "function-enumerator",
+ &props->func_enum);
+ if (ret) {
+ dev_err(dev,
+ "Error parsing 'function-enumerator' property (%d)\n",
+ ret);
+ } else {
+ props->func_enum_present = true;
+ }
+}
+
+int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ char *led_classdev_name)
+{
+ struct led_properties props = {};
+ struct fwnode_handle *fwnode = init_data->fwnode;
+ const char *devicename = init_data->devicename;
+
+ if (!led_classdev_name)
+ return -EINVAL;
+
+ led_parse_fwnode_props(dev, fwnode, &props);
+
+ if (props.label) {
+ /*
+ * If init_data.devicename is NULL, then it indicates that
+ * DT label should be used as-is for LED class device name.
+ * Otherwise the label is prepended with devicename to compose
+ * the final LED class device name.
+ */
+ if (!devicename) {
+ strscpy(led_classdev_name, props.label,
+ LED_MAX_NAME_SIZE);
+ } else {
+ snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, props.label);
+ }
+ } else if (props.function || props.color_present) {
+ char tmp_buf[LED_MAX_NAME_SIZE];
+
+ if (props.func_enum_present) {
+ snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "", props.func_enum);
+ } else {
+ snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "");
+ }
+ if (init_data->devname_mandatory) {
+ snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, tmp_buf);
+ } else {
+ strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE);
+
+ }
+ } else if (init_data->default_label) {
+ if (!devicename) {
+ dev_err(dev, "Legacy LED naming requires devicename segment");
+ return -EINVAL;
+ }
+ snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, init_data->default_label);
+ } else if (is_of_node(fwnode)) {
+ strscpy(led_classdev_name, to_of_node(fwnode)->name,
+ LED_MAX_NAME_SIZE);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_compose_name);
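
Editorial note on the led-core.c hunk above (not part of the patch): a minimal usage sketch of led_compose_name() from a hypothetical probe path. The device name "mychip" and the child fwnode contents (color = LED_COLOR_ID_WHITE, function = "flash", no "label") are assumptions for illustration; with those inputs the helper takes the color/function branch added above and, since devname_mandatory is not set, the devicename segment is not prepended.

	struct led_init_data init_data = {
		.fwnode = child,		/* hypothetical LED child node */
		.devicename = "mychip",		/* only used when devname_mandatory is set */
	};
	char name[LED_MAX_NAME_SIZE];
	int err;

	err = led_compose_name(dev, &init_data, name);
	if (!err)
		dev_dbg(dev, "LED will be named %s\n", name);	/* -> "white:flash" */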
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 8d11a5e23227..23963e5cb5d6 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -167,12 +167,13 @@ err_add_groups:
trig->deactivate(led_cdev);
err_activate:
- led_cdev->trigger = NULL;
- led_cdev->trigger_data = NULL;
write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
list_del(&led_cdev->trig_list);
write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ led_cdev->trigger = NULL;
+ led_cdev->trigger_data = NULL;
led_set_brightness(led_cdev, LED_OFF);
+ kfree(event);
return ret;
}
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index bf26f5bed1f0..5a0fe7b7b8bc 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -42,6 +42,8 @@
#define AAT1290_FLASH_TM_NUM_LEVELS 16
#define AAT1290_MM_CURRENT_SCALE_SIZE 15
+#define AAT1290_NAME "aat1290"
+
struct aat1290_led_config_data {
/* maximum LED current in movie mode */
@@ -75,7 +77,6 @@ struct aat1290_led {
int *mm_current_scale;
/* device mode */
bool movie_mode;
-
/* brightness cache */
unsigned int torch_brightness;
};
@@ -215,7 +216,6 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
struct aat1290_led_config_data *cfg,
struct device_node **sub_node)
{
- struct led_classdev *led_cdev = &led->fled_cdev.led_cdev;
struct device *dev = &led->pdev->dev;
struct device_node *child_node;
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
@@ -254,9 +254,6 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
return -EINVAL;
}
- led_cdev->name = of_get_property(child_node, "label", NULL) ? :
- child_node->name;
-
ret = of_property_read_u32(child_node, "led-max-microamp",
&cfg->max_mm_current);
/*
@@ -428,7 +425,7 @@ static void aat1290_init_v4l2_flash_config(struct aat1290_led *led,
struct led_classdev *led_cdev = &led->fled_cdev.led_cdev;
struct led_flash_setting *s;
- strlcpy(v4l2_sd_cfg->dev_name, led_cdev->name,
+ strlcpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
sizeof(v4l2_sd_cfg->dev_name));
s = &v4l2_sd_cfg->intensity;
@@ -466,6 +463,7 @@ static int aat1290_led_probe(struct platform_device *pdev)
struct aat1290_led *led;
struct led_classdev *led_cdev;
struct led_classdev_flash *fled_cdev;
+ struct led_init_data init_data = {};
struct aat1290_led_config_data led_cfg = {};
struct v4l2_flash_config v4l2_sd_cfg = {};
int ret;
@@ -494,8 +492,12 @@ static int aat1290_led_probe(struct platform_device *pdev)
aat1290_init_flash_timeout(led, &led_cfg);
+ init_data.fwnode = of_fwnode_handle(sub_node);
+ init_data.devicename = AAT1290_NAME;
+
/* Register LED Flash class device */
- ret = led_classdev_flash_register(&pdev->dev, fled_cdev);
+ ret = led_classdev_flash_register_ext(&pdev->dev, fled_cdev,
+ &init_data);
if (ret < 0)
goto err_flash_register;
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 1c1f0c8c56f4..250dc9d6f635 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/regmap.h>
-#include <uapi/linux/uleds.h>
#define AN30259A_MAX_LEDS 3
@@ -54,6 +53,8 @@
#define AN30259A_BLINK_MAX_TIME 7500 /* ms */
#define AN30259A_SLOPE_RESOLUTION 500 /* ms */
+#define AN30259A_NAME "an30259a"
+
#define STATE_OFF 0
#define STATE_KEEP 1
#define STATE_ON 2
@@ -62,11 +63,11 @@ struct an30259a;
struct an30259a_led {
struct an30259a *chip;
+ struct fwnode_handle *fwnode;
struct led_classdev cdev;
u32 num;
u32 default_state;
bool sloping;
- char label[LED_MAX_NAME_SIZE];
};
struct an30259a {
@@ -226,14 +227,7 @@ static int an30259a_dt_init(struct i2c_client *client,
led->num = source;
led->chip = chip;
-
- if (of_property_read_string(child, "label", &str))
- snprintf(led->label, sizeof(led->label), "an30259a::");
- else
- snprintf(led->label, sizeof(led->label), "an30259a:%s",
- str);
-
- led->cdev.name = led->label;
+ led->fwnode = of_fwnode_handle(child);
if (!of_property_read_string(child, "default-state", &str)) {
if (!strcmp(str, "on"))
@@ -312,13 +306,20 @@ static int an30259a_probe(struct i2c_client *client)
chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
for (i = 0; i < chip->num_leds; i++) {
+ struct led_init_data init_data = {};
+
an30259a_init_default_state(&chip->leds[i]);
chip->leds[i].cdev.brightness_set_blocking =
an30259a_brightness_set;
chip->leds[i].cdev.blink_set = an30259a_blink_set;
- err = devm_led_classdev_register(&client->dev,
- &chip->leds[i].cdev);
+ init_data.fwnode = chip->leds[i].fwnode;
+ init_data.devicename = AN30259A_NAME;
+ init_data.default_label = ":";
+
+ err = devm_led_classdev_register_ext(&client->dev,
+ &chip->leds[i].cdev,
+ &init_data);
if (err < 0)
goto exit;
}
@@ -353,7 +354,7 @@ MODULE_DEVICE_TABLE(i2c, an30259a_id);
static struct i2c_driver an30259a_driver = {
.driver = {
- .name = "leds-an32059a",
+ .name = "leds-an30259a",
.of_match_table = of_match_ptr(an30259a_match_table),
},
.probe_new = an30259a_probe,
@@ -364,5 +365,5 @@ static struct i2c_driver an30259a_driver = {
module_i2c_driver(an30259a_driver);
MODULE_AUTHOR("Simon Shields <simon@lineageos.org>");
-MODULE_DESCRIPTION("AN32059A LED driver");
+MODULE_DESCRIPTION("AN30259A LED driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
index 8d42e46e2de3..7fd557aceff6 100644
--- a/drivers/leds/leds-apu.c
+++ b/drivers/leds/leds-apu.c
@@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -47,12 +49,6 @@
#define APU1_NUM_GPIO 3
#define APU1_IOSIZE sizeof(u8)
-#define APU2_FCH_ACPI_MMIO_BASE 0xFED80000
-#define APU2_FCH_GPIO_BASE (APU2_FCH_ACPI_MMIO_BASE + 0x1500)
-#define APU2_GPIO_BIT_WRITE 22
-#define APU2_APU2_NUM_GPIO 4
-#define APU2_IOSIZE sizeof(u32)
-
/* LED access parameters */
struct apu_param {
void __iomem *addr; /* for ioread/iowrite */
@@ -72,19 +68,9 @@ struct apu_led_profile {
unsigned long offset; /* for devm_ioremap */
};
-/* Supported platform types */
-enum apu_led_platform_types {
- APU1_LED_PLATFORM,
- APU2_LED_PLATFORM,
-};
-
struct apu_led_pdata {
struct platform_device *pdev;
struct apu_led_priv *pled;
- const struct apu_led_profile *profile;
- enum apu_led_platform_types platform;
- int num_led_instances;
- int iosize; /* for devm_ioremap() */
spinlock_t lock;
};
@@ -96,19 +82,6 @@ static const struct apu_led_profile apu1_led_profile[] = {
{ "apu:green:3", LED_OFF, APU1_FCH_GPIO_BASE + 2 * APU1_IOSIZE },
};
-static const struct apu_led_profile apu2_led_profile[] = {
- { "apu2:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
- { "apu2:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
- { "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
-};
-
-/* Same as apu2_led_profile, but with "3" in the LED names. */
-static const struct apu_led_profile apu3_led_profile[] = {
- { "apu3:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
- { "apu3:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
- { "apu3:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
-};
-
static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
{
.ident = "apu",
@@ -117,54 +90,6 @@ static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "APU")
}
},
- /* PC Engines APU2 with "Legacy" bios < 4.0.8 */
- {
- .ident = "apu2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "APU2")
- }
- },
- /* PC Engines APU2 with "Legacy" bios >= 4.0.8 */
- {
- .ident = "apu2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "apu2")
- }
- },
- /* PC Engines APU2 with "Mainline" bios */
- {
- .ident = "apu2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2")
- }
- },
- /* PC Engines APU3 with "Legacy" bios < 4.0.8 */
- {
- .ident = "apu3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "APU3")
- }
- },
- /* PC Engines APU3 with "Legacy" bios >= 4.0.8 */
- {
- .ident = "apu3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "apu3")
- }
- },
- /* PC Engines APU2 with "Mainline" bios */
- {
- .ident = "apu3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
- DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3")
- }
- },
{}
};
MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
@@ -178,52 +103,30 @@ static void apu1_led_brightness_set(struct led_classdev *led, enum led_brightnes
spin_unlock(&apu_led->lock);
}
-static void apu2_led_brightness_set(struct led_classdev *led, enum led_brightness value)
-{
- struct apu_led_priv *pled = cdev_to_priv(led);
- u32 value_new;
-
- spin_lock(&apu_led->lock);
-
- value_new = ioread32(pled->param.addr);
-
- if (value)
- value_new &= ~BIT(APU2_GPIO_BIT_WRITE);
- else
- value_new |= BIT(APU2_GPIO_BIT_WRITE);
-
- iowrite32(value_new, pled->param.addr);
-
- spin_unlock(&apu_led->lock);
-}
-
static int apu_led_config(struct device *dev, struct apu_led_pdata *apuld)
{
int i;
int err;
apu_led->pled = devm_kcalloc(dev,
- apu_led->num_led_instances, sizeof(struct apu_led_priv),
+ ARRAY_SIZE(apu1_led_profile), sizeof(struct apu_led_priv),
GFP_KERNEL);
if (!apu_led->pled)
return -ENOMEM;
- for (i = 0; i < apu_led->num_led_instances; i++) {
+ for (i = 0; i < ARRAY_SIZE(apu1_led_profile); i++) {
struct apu_led_priv *pled = &apu_led->pled[i];
struct led_classdev *led_cdev = &pled->cdev;
- led_cdev->name = apu_led->profile[i].name;
- led_cdev->brightness = apu_led->profile[i].brightness;
+ led_cdev->name = apu1_led_profile[i].name;
+ led_cdev->brightness = apu1_led_profile[i].brightness;
led_cdev->max_brightness = 1;
led_cdev->flags = LED_CORE_SUSPENDRESUME;
- if (apu_led->platform == APU1_LED_PLATFORM)
- led_cdev->brightness_set = apu1_led_brightness_set;
- else if (apu_led->platform == APU2_LED_PLATFORM)
- led_cdev->brightness_set = apu2_led_brightness_set;
+ led_cdev->brightness_set = apu1_led_brightness_set;
pled->param.addr = devm_ioremap(dev,
- apu_led->profile[i].offset, apu_led->iosize);
+ apu1_led_profile[i].offset, APU1_IOSIZE);
if (!pled->param.addr) {
err = -ENOMEM;
goto error;
@@ -233,7 +136,7 @@ static int apu_led_config(struct device *dev, struct apu_led_pdata *apuld)
if (err)
goto error;
- led_cdev->brightness_set(led_cdev, apu_led->profile[i].brightness);
+ apu1_led_brightness_set(led_cdev, apu1_led_profile[i].brightness);
}
return 0;
@@ -254,28 +157,6 @@ static int __init apu_led_probe(struct platform_device *pdev)
apu_led->pdev = pdev;
- if (dmi_match(DMI_PRODUCT_NAME, "APU")) {
- apu_led->profile = apu1_led_profile;
- apu_led->platform = APU1_LED_PLATFORM;
- apu_led->num_led_instances = ARRAY_SIZE(apu1_led_profile);
- apu_led->iosize = APU1_IOSIZE;
- } else if (dmi_match(DMI_BOARD_NAME, "APU2") ||
- dmi_match(DMI_BOARD_NAME, "apu2") ||
- dmi_match(DMI_BOARD_NAME, "PC Engines apu2")) {
- apu_led->profile = apu2_led_profile;
- apu_led->platform = APU2_LED_PLATFORM;
- apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
- apu_led->iosize = APU2_IOSIZE;
- } else if (dmi_match(DMI_BOARD_NAME, "APU3") ||
- dmi_match(DMI_BOARD_NAME, "apu3") ||
- dmi_match(DMI_BOARD_NAME, "PC Engines apu3")) {
- apu_led->profile = apu3_led_profile;
- /* Otherwise identical to APU2. */
- apu_led->platform = APU2_LED_PLATFORM;
- apu_led->num_led_instances = ARRAY_SIZE(apu3_led_profile);
- apu_led->iosize = APU2_IOSIZE;
- }
-
spin_lock_init(&apu_led->lock);
return apu_led_config(&pdev->dev, apu_led);
}
@@ -291,19 +172,9 @@ static int __init apu_led_init(void)
struct platform_device *pdev;
int err;
- if (!dmi_match(DMI_SYS_VENDOR, "PC Engines")) {
- pr_err("No PC Engines board detected\n");
- return -ENODEV;
- }
- if (!(dmi_match(DMI_PRODUCT_NAME, "APU") ||
- dmi_match(DMI_PRODUCT_NAME, "APU2") ||
- dmi_match(DMI_PRODUCT_NAME, "apu2") ||
- dmi_match(DMI_PRODUCT_NAME, "PC Engines apu2") ||
- dmi_match(DMI_PRODUCT_NAME, "APU3") ||
- dmi_match(DMI_PRODUCT_NAME, "apu3") ||
- dmi_match(DMI_PRODUCT_NAME, "PC Engines apu3"))) {
- pr_err("Unknown PC Engines board: %s\n",
- dmi_get_system_info(DMI_PRODUCT_NAME));
+ if (!(dmi_match(DMI_SYS_VENDOR, "PC Engines") &&
+ dmi_match(DMI_PRODUCT_NAME, "APU"))) {
+ pr_err("No PC Engines APUv1 board detected. For APUv2,3 support, enable CONFIG_PCENGINES_APU2\n");
return -ENODEV;
}
@@ -326,7 +197,7 @@ static void __exit apu_led_exit(void)
{
int i;
- for (i = 0; i < apu_led->num_led_instances; i++)
+ for (i = 0; i < ARRAY_SIZE(apu1_led_profile); i++)
led_classdev_unregister(&apu_led->pled[i].cdev);
platform_device_unregister(apu_led->pdev);
@@ -337,6 +208,6 @@ module_init(apu_led_init);
module_exit(apu_led_exit);
MODULE_AUTHOR("Alan Mizrahi");
-MODULE_DESCRIPTION("PC Engines APU family LED driver");
+MODULE_DESCRIPTION("PC Engines APU1 front LED driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds_apu");
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 14ab6b0e4de9..b7e0ae1af8fa 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -124,11 +124,6 @@ struct as3645a_config {
u32 peak;
};
-struct as3645a_names {
- char flash[32];
- char indicator[32];
-};
-
struct as3645a {
struct i2c_client *client;
@@ -484,12 +479,10 @@ static int as3645a_detect(struct as3645a *flash)
}
static int as3645a_parse_node(struct as3645a *flash,
- struct as3645a_names *names,
struct fwnode_handle *fwnode)
{
struct as3645a_config *cfg = &flash->cfg;
struct fwnode_handle *child;
- const char *name;
int rval;
fwnode_for_each_child_node(fwnode, child) {
@@ -517,17 +510,6 @@ static int as3645a_parse_node(struct as3645a *flash,
return -ENODEV;
}
- rval = fwnode_property_read_string(flash->flash_node, "label", &name);
- if (!rval) {
- strlcpy(names->flash, name, sizeof(names->flash));
- } else if (is_of_node(fwnode)) {
- snprintf(names->flash, sizeof(names->flash),
- "%pOFn:flash", to_of_node(fwnode));
- } else {
- dev_err(&flash->client->dev, "flash node has no label!\n");
- return -EINVAL;
- }
-
rval = fwnode_property_read_u32(flash->flash_node, "flash-timeout-us",
&cfg->flash_timeout_us);
if (rval < 0) {
@@ -565,17 +547,6 @@ static int as3645a_parse_node(struct as3645a *flash,
goto out_err;
}
- rval = fwnode_property_read_string(flash->indicator_node, "label",
- &name);
- if (!rval) {
- strlcpy(names->indicator, name, sizeof(names->indicator));
- } else if (is_of_node(fwnode)) {
- snprintf(names->indicator, sizeof(names->indicator),
- "%pOFn:indicator", to_of_node(fwnode));
- } else {
- dev_err(&flash->client->dev, "indicator node has no label!\n");
- return -EINVAL;
- }
rval = fwnode_property_read_u32(flash->indicator_node,
"led-max-microamp",
@@ -595,21 +566,25 @@ out_err:
return rval;
}
-static int as3645a_led_class_setup(struct as3645a *flash,
- struct as3645a_names *names)
+static int as3645a_led_class_setup(struct as3645a *flash)
{
struct led_classdev *fled_cdev = &flash->fled.led_cdev;
struct led_classdev *iled_cdev = &flash->iled_cdev;
+ struct led_init_data init_data = {};
struct led_flash_setting *cfg;
int rval;
- iled_cdev->name = names->indicator;
iled_cdev->brightness_set_blocking = as3645a_set_indicator_brightness;
iled_cdev->max_brightness =
flash->cfg.indicator_max_ua / AS_INDICATOR_INTENSITY_STEP;
iled_cdev->flags = LED_CORE_SUSPENDRESUME;
- rval = led_classdev_register(&flash->client->dev, iled_cdev);
+ init_data.fwnode = flash->indicator_node;
+ init_data.devicename = AS_NAME;
+ init_data.default_label = "indicator";
+
+ rval = led_classdev_register_ext(&flash->client->dev, iled_cdev,
+ &init_data);
if (rval < 0)
return rval;
@@ -627,7 +602,6 @@ static int as3645a_led_class_setup(struct as3645a *flash,
flash->fled.ops = &as3645a_led_flash_ops;
- fled_cdev->name = names->flash;
fled_cdev->brightness_set_blocking = as3645a_set_assist_brightness;
/* Value 0 is off in LED class. */
fled_cdev->max_brightness =
@@ -635,15 +609,23 @@ static int as3645a_led_class_setup(struct as3645a *flash,
flash->cfg.assist_max_ua) + 1;
fled_cdev->flags = LED_DEV_CAP_FLASH | LED_CORE_SUSPENDRESUME;
- rval = led_classdev_flash_register(&flash->client->dev, &flash->fled);
- if (rval) {
- led_classdev_unregister(iled_cdev);
- dev_err(&flash->client->dev,
- "led_classdev_flash_register() failed, error %d\n",
- rval);
- }
+ init_data.fwnode = flash->flash_node;
+ init_data.devicename = AS_NAME;
+ init_data.default_label = "flash";
+
+ rval = led_classdev_flash_register_ext(&flash->client->dev,
+ &flash->fled, &init_data);
+ if (rval)
+ goto out_err;
return rval;
+
+out_err:
+ led_classdev_unregister(iled_cdev);
+ dev_err(&flash->client->dev,
+ "led_classdev_flash_register() failed, error %d\n",
+ rval);
+ return rval;
}
static int as3645a_v4l2_setup(struct as3645a *flash)
@@ -667,8 +649,9 @@ static int as3645a_v4l2_setup(struct as3645a *flash)
},
};
- strlcpy(cfg.dev_name, led->name, sizeof(cfg.dev_name));
- strlcpy(cfgind.dev_name, flash->iled_cdev.name, sizeof(cfg.dev_name));
+ strlcpy(cfg.dev_name, led->dev->kobj.name, sizeof(cfg.dev_name));
+ strlcpy(cfgind.dev_name, flash->iled_cdev.dev->kobj.name,
+ sizeof(cfgind.dev_name));
flash->vf = v4l2_flash_init(
&flash->client->dev, flash->flash_node, &flash->fled, NULL,
@@ -689,7 +672,6 @@ static int as3645a_v4l2_setup(struct as3645a *flash)
static int as3645a_probe(struct i2c_client *client)
{
- struct as3645a_names names;
struct as3645a *flash;
int rval;
@@ -702,7 +684,7 @@ static int as3645a_probe(struct i2c_client *client)
flash->client = client;
- rval = as3645a_parse_node(flash, &names, dev_fwnode(&client->dev));
+ rval = as3645a_parse_node(flash, dev_fwnode(&client->dev));
if (rval < 0)
return rval;
@@ -717,7 +699,7 @@ static int as3645a_probe(struct i2c_client *client)
if (rval)
goto out_mutex_destroy;
- rval = as3645a_led_class_setup(flash, &names);
+ rval = as3645a_led_class_setup(flash);
if (rval)
goto out_mutex_destroy;
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index 0e4262462cb9..2da448ae718e 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -8,7 +8,6 @@
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
-#include <uapi/linux/uleds.h>
/*
* CR0014114 SPI protocol description:
@@ -40,8 +39,9 @@
#define CR_FW_DELAY_MSEC 10
#define CR_RECOUNT_DELAY (HZ * 3600)
+#define CR_DEV_NAME "cr0014114"
+
struct cr0014114_led {
- char name[LED_MAX_NAME_SIZE];
struct cr0014114 *priv;
struct led_classdev ldev;
u8 brightness;
@@ -167,8 +167,7 @@ static int cr0014114_set_sync(struct led_classdev *ldev,
struct cr0014114_led,
ldev);
- dev_dbg(led->priv->dev, "Set brightness of %s to %d\n",
- led->name, brightness);
+ dev_dbg(led->priv->dev, "Set brightness to %d\n", brightness);
mutex_lock(&led->priv->lock);
led->brightness = (u8)brightness;
@@ -183,42 +182,32 @@ static int cr0014114_probe_dt(struct cr0014114 *priv)
size_t i = 0;
struct cr0014114_led *led;
struct fwnode_handle *child;
- struct device_node *np;
+ struct led_init_data init_data = {};
int ret;
- const char *str;
device_for_each_child_node(priv->dev, child) {
- np = to_of_node(child);
led = &priv->leds[i];
- ret = fwnode_property_read_string(child, "label", &str);
- if (ret)
- snprintf(led->name, sizeof(led->name),
- "cr0014114::");
- else
- snprintf(led->name, sizeof(led->name),
- "cr0014114:%s", str);
-
fwnode_property_read_string(child, "linux,default-trigger",
&led->ldev.default_trigger);
led->priv = priv;
- led->ldev.name = led->name;
led->ldev.max_brightness = CR_MAX_BRIGHTNESS;
led->ldev.brightness_set_blocking = cr0014114_set_sync;
- ret = devm_of_led_classdev_register(priv->dev, np,
- &led->ldev);
+ init_data.fwnode = child;
+ init_data.devicename = CR_DEV_NAME;
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->ldev,
+ &init_data);
if (ret) {
dev_err(priv->dev,
- "failed to register LED device %s, err %d",
- led->name, ret);
+ "failed to register LED device, err %d", ret);
fwnode_handle_put(child);
return ret;
}
- led->ldev.dev->of_node = np;
-
i++;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index bdc98ddca1dc..a5c73f3d5f79 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -73,11 +73,11 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
static int create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
- struct device_node *np, gpio_blink_set_t blink_set)
+ struct fwnode_handle *fwnode, gpio_blink_set_t blink_set)
{
+ struct led_init_data init_data = {};
int ret, state;
- led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->can_sleep = gpiod_cansleep(led_dat->gpiod);
if (!led_dat->can_sleep)
@@ -108,7 +108,16 @@ static int create_gpio_led(const struct gpio_led *template,
if (ret < 0)
return ret;
- return devm_of_led_classdev_register(parent, np, &led_dat->cdev);
+ if (template->name) {
+ led_dat->cdev.name = template->name;
+ ret = devm_led_classdev_register(parent, &led_dat->cdev);
+ } else {
+ init_data.fwnode = fwnode;
+ ret = devm_led_classdev_register_ext(parent, &led_dat->cdev,
+ &init_data);
+ }
+
+ return ret;
}
struct gpio_leds_priv {
@@ -141,15 +150,6 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
struct gpio_led_data *led_dat = &priv->leds[priv->num_leds];
struct gpio_led led = {};
const char *state = NULL;
- struct device_node *np = to_of_node(child);
-
- ret = fwnode_property_read_string(child, "label", &led.name);
- if (ret && IS_ENABLED(CONFIG_OF) && np)
- led.name = np->name;
- if (!led.name) {
- fwnode_handle_put(child);
- return ERR_PTR(-EINVAL);
- }
led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
GPIOD_ASIS,
@@ -181,7 +181,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
if (fwnode_property_present(child, "panic-indicator"))
led.panic_indicator = 1;
- ret = create_gpio_led(&led, led_dat, dev, np, NULL);
+ ret = create_gpio_led(&led, led_dat, dev, child, NULL);
if (ret < 0) {
fwnode_handle_put(child);
return ERR_PTR(ret);
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 2d077b8edd0e..ca6634b8683c 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -333,12 +333,11 @@ static int is31fl319x_probe(struct i2c_client *client,
{
struct is31fl319x_chip *is31;
struct device *dev = &client->dev;
- struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
int err;
int i = 0;
u32 aggregated_led_microamp = IS31FL319X_CURRENT_MAX;
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
is31 = devm_kzalloc(&client->dev, sizeof(*is31), GFP_KERNEL);
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 6fbab70dfb04..6f29b8943913 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -324,12 +324,6 @@ static int is31fl32xx_init_regs(struct is31fl32xx_priv *priv)
return 0;
}
-static inline size_t sizeof_is31fl32xx_priv(int num_leds)
-{
- return sizeof(struct is31fl32xx_priv) +
- (sizeof(struct is31fl32xx_led_data) * num_leds);
-}
-
static int is31fl32xx_parse_child_dt(const struct device *dev,
const struct device_node *child,
struct is31fl32xx_led_data *led_data)
@@ -450,7 +444,7 @@ static int is31fl32xx_probe(struct i2c_client *client,
if (!count)
return -EINVAL;
- priv = devm_kzalloc(dev, sizeof_is31fl32xx_priv(count),
+ priv = devm_kzalloc(dev, struct_size(priv, leds, count),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index f63918206bfb..670efee9b131 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -19,7 +19,7 @@
/* Value related the movie mode */
#define KTD2692_MOVIE_MODE_CURRENT_LEVELS 16
#define KTD2692_MM_TO_FL_RATIO(x) ((x) / 3)
-#define KTD2962_MM_MIN_CURR_THRESHOLD_SCALE 8
+#define KTD2692_MM_MIN_CURR_THRESHOLD_SCALE 8
/* Value related the flash mode */
#define KTD2692_FLASH_MODE_TIMEOUT_LEVELS 8
@@ -250,7 +250,7 @@ static void ktd2692_setup(struct ktd2692_context *led)
ktd2692_expresswire_reset(led);
gpiod_direction_output(led->aux_gpio, KTD2692_LOW);
- ktd2692_expresswire_write(led, (KTD2962_MM_MIN_CURR_THRESHOLD_SCALE - 1)
+ ktd2692_expresswire_write(led, (KTD2692_MM_MIN_CURR_THRESHOLD_SCALE - 1)
| KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE);
ktd2692_expresswire_write(led, KTD2692_FLASH_MODE_CURR_PERCENT(45)
| KTD2692_REG_FLASH_CURRENT_BASE);
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 180895b83b88..0507c6575c08 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -23,11 +23,11 @@
#define LM3532_REG_PWM_B_CFG 0x14
#define LM3532_REG_PWM_C_CFG 0x15
#define LM3532_REG_ZONE_CFG_A 0x16
-#define LM3532_REG_CTRL_A_BRT 0x17
+#define LM3532_REG_CTRL_A_FS_CURR 0x17
#define LM3532_REG_ZONE_CFG_B 0x18
-#define LM3532_REG_CTRL_B_BRT 0x19
+#define LM3532_REG_CTRL_B_FS_CURR 0x19
#define LM3532_REG_ZONE_CFG_C 0x1a
-#define LM3532_REG_CTRL_C_BRT 0x1b
+#define LM3532_REG_CTRL_C_FS_CURR 0x1b
#define LM3532_REG_ENABLE 0x1d
#define LM3532_ALS_CONFIG 0x23
#define LM3532_REG_ZN_0_HI 0x60
@@ -38,9 +38,12 @@
#define LM3532_REG_ZN_2_LO 0x65
#define LM3532_REG_ZN_3_HI 0x66
#define LM3532_REG_ZN_3_LO 0x67
+#define LM3532_REG_ZONE_TRGT_A 0x70
+#define LM3532_REG_ZONE_TRGT_B 0x75
+#define LM3532_REG_ZONE_TRGT_C 0x7a
#define LM3532_REG_MAX 0x7e
-/* Contorl Enable */
+/* Control Enable */
#define LM3532_CTRL_A_ENABLE BIT(0)
#define LM3532_CTRL_B_ENABLE BIT(1)
#define LM3532_CTRL_C_ENABLE BIT(2)
@@ -86,6 +89,10 @@
#define LM3532_NUM_AVG_VALS 8
#define LM3532_NUM_IMP_VALS 32
+#define LM3532_FS_CURR_MIN 5000
+#define LM3532_FS_CURR_MAX 29800
+#define LM3532_FS_CURR_STEP 800
+
/*
* struct lm3532_als_data
* @config - value of ALS configuration register
@@ -116,8 +123,11 @@ struct lm3532_als_data {
* @priv - Pointer the device data structure
* @control_bank - Control bank the LED is associated to
* @mode - Mode of the LED string
+ * @ctrl_brt_pointer - Zone target register that controls the sink
* @num_leds - Number of LED strings are supported in this array
+ * @full_scale_current - The full-scale current setting for the current sink.
* @led_strings - The LED strings supported in this array
+ * @enabled - Enabled status
* @label - LED label
*/
struct lm3532_led {
@@ -126,7 +136,10 @@ struct lm3532_led {
int control_bank;
int mode;
+ int ctrl_brt_pointer;
int num_leds;
+ int full_scale_current;
+ int enabled:1;
u32 led_strings[LM3532_MAX_CONTROL_BANKS];
char label[LED_MAX_NAME_SIZE];
};
@@ -168,11 +181,11 @@ static const struct reg_default lm3532_reg_defs[] = {
{LM3532_REG_PWM_B_CFG, 0x82},
{LM3532_REG_PWM_C_CFG, 0x82},
{LM3532_REG_ZONE_CFG_A, 0xf1},
- {LM3532_REG_CTRL_A_BRT, 0xf3},
+ {LM3532_REG_CTRL_A_FS_CURR, 0xf3},
{LM3532_REG_ZONE_CFG_B, 0xf1},
- {LM3532_REG_CTRL_B_BRT, 0xf3},
+ {LM3532_REG_CTRL_B_FS_CURR, 0xf3},
{LM3532_REG_ZONE_CFG_C, 0xf1},
- {LM3532_REG_CTRL_C_BRT, 0xf3},
+ {LM3532_REG_CTRL_C_FS_CURR, 0xf3},
{LM3532_REG_ENABLE, 0xf8},
{LM3532_ALS_CONFIG, 0x44},
{LM3532_REG_ZN_0_HI, 0x35},
@@ -195,7 +208,7 @@ static const struct regmap_config lm3532_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
-const static int als_imp_table[LM3532_NUM_IMP_VALS] = {37000, 18500, 12330,
+static const int als_imp_table[LM3532_NUM_IMP_VALS] = {37000, 18500, 12330,
9250, 7400, 6170, 5290,
4630, 4110, 3700, 3360,
3080, 2850, 2640, 2440,
@@ -252,7 +265,7 @@ static int lm3532_get_index(const int table[], int size, int value)
return -EINVAL;
}
-const static int als_avrg_table[LM3532_NUM_AVG_VALS] = {17920, 35840, 71680,
+static const int als_avrg_table[LM3532_NUM_AVG_VALS] = {17920, 35840, 71680,
143360, 286720, 573440,
1146880, 2293760};
static int lm3532_get_als_avg_index(int avg_time)
@@ -267,7 +280,7 @@ static int lm3532_get_als_avg_index(int avg_time)
avg_time);
}
-const static int ramp_table[LM3532_NUM_RAMP_VALS] = { 8, 1024, 2048, 4096, 8192,
+static const int ramp_table[LM3532_NUM_RAMP_VALS] = { 8, 1024, 2048, 4096, 8192,
16384, 32768, 65536};
static int lm3532_get_ramp_index(int ramp_time)
{
@@ -281,11 +294,15 @@ static int lm3532_get_ramp_index(int ramp_time)
ramp_time);
}
+/* Caller must take care of locking */
static int lm3532_led_enable(struct lm3532_led *led_data)
{
int ctrl_en_val = BIT(led_data->control_bank);
int ret;
+ if (led_data->enabled)
+ return 0;
+
ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
ctrl_en_val, ctrl_en_val);
if (ret) {
@@ -293,22 +310,38 @@ static int lm3532_led_enable(struct lm3532_led *led_data)
return ret;
}
- return regulator_enable(led_data->priv->regulator);
+ ret = regulator_enable(led_data->priv->regulator);
+ if (ret < 0)
+ return ret;
+
+ led_data->enabled = 1;
+
+ return 0;
}
+/* Caller must take care of locking */
static int lm3532_led_disable(struct lm3532_led *led_data)
{
int ctrl_en_val = BIT(led_data->control_bank);
int ret;
+ if (!led_data->enabled)
+ return 0;
+
ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
- ctrl_en_val, ~ctrl_en_val);
+ ctrl_en_val, 0);
if (ret) {
dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
return ret;
}
- return regulator_disable(led_data->priv->regulator);
+ ret = regulator_disable(led_data->priv->regulator);
+ if (ret < 0)
+ return ret;
+
+ led_data->enabled = 0;
+
+ return 0;
}
static int lm3532_brightness_set(struct led_classdev *led_cdev,
@@ -321,7 +354,7 @@ static int lm3532_brightness_set(struct led_classdev *led_cdev,
mutex_lock(&led->priv->lock);
- if (led->mode == LM3532_BL_MODE_ALS) {
+ if (led->mode == LM3532_ALS_CTRL) {
if (brt_val > LED_OFF)
ret = lm3532_led_enable(led);
else
@@ -339,8 +372,8 @@ static int lm3532_brightness_set(struct led_classdev *led_cdev,
if (ret)
goto unlock;
- brightness_reg = LM3532_REG_CTRL_A_BRT + led->control_bank * 2;
- brt_val = brt_val / LM3532_BRT_VAL_ADJUST;
+ brightness_reg = LM3532_REG_ZONE_TRGT_A + led->control_bank * 5 +
+ (led->ctrl_brt_pointer >> 2);
ret = regmap_write(led->priv->regmap, brightness_reg, brt_val);
@@ -356,8 +389,43 @@ static int lm3532_init_registers(struct lm3532_led *led)
unsigned int output_cfg_val = 0;
unsigned int output_cfg_shift = 0;
unsigned int output_cfg_mask = 0;
+ unsigned int brightness_config_reg;
+ unsigned int brightness_config_val;
+ int fs_current_reg;
+ int fs_current_val;
int ret, i;
+ if (drvdata->enable_gpio)
+ gpiod_direction_output(drvdata->enable_gpio, 1);
+
+ brightness_config_reg = LM3532_REG_ZONE_CFG_A + led->control_bank * 2;
+ /*
+ * This could be hard coded to the default value but the control
+ * brightness register may have changed during boot.
+ */
+ ret = regmap_read(drvdata->regmap, brightness_config_reg,
+ &led->ctrl_brt_pointer);
+ if (ret)
+ return ret;
+
+ led->ctrl_brt_pointer &= LM3532_ZONE_MASK;
+ brightness_config_val = led->ctrl_brt_pointer | led->mode;
+ ret = regmap_write(drvdata->regmap, brightness_config_reg,
+ brightness_config_val);
+ if (ret)
+ return ret;
+
+ if (led->full_scale_current) {
+ fs_current_reg = LM3532_REG_CTRL_A_FS_CURR + led->control_bank * 2;
+ fs_current_val = (led->full_scale_current - LM3532_FS_CURR_MIN) /
+ LM3532_FS_CURR_STEP;
+
+ ret = regmap_write(drvdata->regmap, fs_current_reg,
+ fs_current_val);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < led->num_leds; i++) {
output_cfg_shift = led->led_strings[i] * 2;
output_cfg_val |= (led->control_bank << output_cfg_shift);
@@ -382,7 +450,6 @@ static int lm3532_als_configure(struct lm3532_data *priv,
struct lm3532_als_data *als = priv->als_data;
u32 als_vmin, als_vmax, als_vstep;
int zone_reg = LM3532_REG_ZN_0_HI;
- int brightnes_config_reg;
int ret;
int i;
@@ -411,14 +478,7 @@ static int lm3532_als_configure(struct lm3532_data *priv,
als->config = (als->als_avrg_time | (LM3532_ENABLE_ALS) |
(als->als_input_mode << LM3532_ALS_SEL_SHIFT));
- ret = regmap_write(priv->regmap, LM3532_ALS_CONFIG, als->config);
- if (ret)
- return ret;
-
- brightnes_config_reg = LM3532_REG_ZONE_CFG_A + led->control_bank * 2;
-
- return regmap_update_bits(priv->regmap, brightnes_config_reg,
- LM3532_I2C_CTRL, LM3532_ALS_CTRL);
+ return regmap_write(priv->regmap, LM3532_ALS_CONFIG, als->config);
}
static int lm3532_parse_als(struct lm3532_data *priv)
@@ -541,18 +601,27 @@ static int lm3532_parse_node(struct lm3532_data *priv)
goto child_out;
}
+ if (fwnode_property_present(child, "led-max-microamp") &&
+ fwnode_property_read_u32(child, "led-max-microamp",
+ &led->full_scale_current))
+ dev_err(&priv->client->dev,
+ "Failed getting led-max-microamp\n");
+ else
+ led->full_scale_current = min(led->full_scale_current,
+ LM3532_FS_CURR_MAX);
+
if (led->mode == LM3532_BL_MODE_ALS) {
+ led->mode = LM3532_ALS_CTRL;
ret = lm3532_parse_als(priv);
if (ret)
dev_err(&priv->client->dev, "Failed to parse als\n");
else
lm3532_als_configure(priv, led);
+ } else {
+ led->mode = LM3532_I2C_CTRL;
}
- led->num_leds = fwnode_property_read_u32_array(child,
- "led-sources",
- NULL, 0);
-
+ led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3532_MAX_LED_STRINGS) {
dev_err(&priv->client->dev, "To many LED string defined\n");
continue;
@@ -590,7 +659,13 @@ static int lm3532_parse_node(struct lm3532_data *priv)
goto child_out;
}
- lm3532_init_registers(led);
+ ret = lm3532_init_registers(led);
+ if (ret) {
+ dev_err(&priv->client->dev, "register init err: %d\n",
+ ret);
+ fwnode_handle_put(child);
+ goto child_out;
+ }
i++;
}
@@ -637,9 +712,6 @@ static int lm3532_probe(struct i2c_client *client,
return ret;
}
- if (drvdata->enable_gpio)
- gpiod_direction_output(drvdata->enable_gpio, 1);
-
return ret;
}
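
[Note: the new full-scale current support above converts an optional led-max-microamp value into the code written to LM3532_REG_CTRL_x_FS_CURR. The usable range is 5000-29800 uA in 800 uA steps, so the register value is (uA - 5000) / 800 after clamping. A hedged sketch of that conversion; the helper name is illustrative, not part of the driver:]

	static u8 lm3532_fs_current_code(u32 microamp)
	{
		/* Clamp to the register's supported range before scaling. */
		microamp = clamp_t(u32, microamp,
				   LM3532_FS_CURR_MIN, LM3532_FS_CURR_MAX);

		/* e.g. 20000 uA -> (20000 - 5000) / 800 = 18 */
		return (microamp - LM3532_FS_CURR_MIN) / LM3532_FS_CURR_STEP;
	}
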
diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c
index 081aa71e43a3..b02972f1a341 100644
--- a/drivers/leds/leds-lm3601x.c
+++ b/drivers/leds/leds-lm3601x.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <uapi/linux/uleds.h>
#define LM3601X_LED_IR 0x0
#define LM3601X_LED_TORCH 0x1
@@ -90,8 +89,6 @@ struct lm3601x_led {
struct regmap *regmap;
struct mutex lock;
- char led_name[LED_MAX_NAME_SIZE];
-
unsigned int flash_timeout;
unsigned int last_flag;
@@ -322,10 +319,12 @@ static const struct led_flash_ops flash_ops = {
.fault_get = lm3601x_flash_fault_get,
};
-static int lm3601x_register_leds(struct lm3601x_led *led)
+static int lm3601x_register_leds(struct lm3601x_led *led,
+ struct fwnode_handle *fwnode)
{
struct led_classdev *led_cdev;
struct led_flash_setting *setting;
+ struct led_init_data init_data = {};
led->fled_cdev.ops = &flash_ops;
@@ -342,20 +341,25 @@ static int lm3601x_register_leds(struct lm3601x_led *led)
setting->val = led->flash_current_max;
led_cdev = &led->fled_cdev.led_cdev;
- led_cdev->name = led->led_name;
led_cdev->brightness_set_blocking = lm3601x_brightness_set;
led_cdev->max_brightness = DIV_ROUND_UP(led->torch_current_max,
LM3601X_TORCH_REG_DIV);
led_cdev->flags |= LED_DEV_CAP_FLASH;
- return led_classdev_flash_register(&led->client->dev, &led->fled_cdev);
+ init_data.fwnode = fwnode;
+ init_data.devicename = led->client->name;
+ init_data.default_label = (led->led_mode == LM3601X_LED_TORCH) ?
+ "torch" : "infrared";
+
+ return led_classdev_flash_register_ext(&led->client->dev,
+ &led->fled_cdev, &init_data);
}
-static int lm3601x_parse_node(struct lm3601x_led *led)
+static int lm3601x_parse_node(struct lm3601x_led *led,
+ struct fwnode_handle **fwnode)
{
struct fwnode_handle *child = NULL;
int ret = -ENODEV;
- const char *name;
child = device_get_next_child_node(&led->client->dev, child);
if (!child) {
@@ -376,17 +380,6 @@ static int lm3601x_parse_node(struct lm3601x_led *led)
goto out_err;
}
- ret = fwnode_property_read_string(child, "label", &name);
- if (ret) {
- if (led->led_mode == LM3601X_LED_TORCH)
- name = "torch";
- else
- name = "infrared";
- }
-
- snprintf(led->led_name, sizeof(led->led_name),
- "%s:%s", led->client->name, name);
-
ret = fwnode_property_read_u32(child, "led-max-microamp",
&led->torch_current_max);
if (ret) {
@@ -411,6 +404,8 @@ static int lm3601x_parse_node(struct lm3601x_led *led)
goto out_err;
}
+ *fwnode = child;
+
out_err:
fwnode_handle_put(child);
return ret;
@@ -419,6 +414,7 @@ out_err:
static int lm3601x_probe(struct i2c_client *client)
{
struct lm3601x_led *led;
+ struct fwnode_handle *fwnode;
int ret;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
@@ -428,7 +424,7 @@ static int lm3601x_probe(struct i2c_client *client)
led->client = client;
i2c_set_clientdata(client, led);
- ret = lm3601x_parse_node(led);
+ ret = lm3601x_parse_node(led, &fwnode);
if (ret)
return -ENODEV;
@@ -442,7 +438,7 @@ static int lm3601x_probe(struct i2c_client *client)
mutex_init(&led->lock);
- return lm3601x_register_leds(led);
+ return lm3601x_register_leds(led, fwnode);
}
static int lm3601x_remove(struct i2c_client *client)
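
[Note: several drivers in this series (lm3601x, lm3692x, lp8860, lt3593, sc27xx) stop building "<device>:<label>" names with snprintf() and instead pass a struct led_init_data to the *_register_ext() helpers, letting the LED core derive the name from the fwnode or fall back to devicename plus default_label. A minimal sketch of the registration pattern with illustrative values:]

	struct led_init_data init_data = {};
	int ret;

	init_data.fwnode = child;		/* LED child node parsed earlier */
	init_data.devicename = "lm3601x";	/* illustrative prefix */
	init_data.default_label = "torch";	/* used when the node has no label */

	ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
	if (ret)
		dev_err(dev, "led register err: %d\n", ret);
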
diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
index ed9dc857ec8f..836b60c9a2b8 100644
--- a/drivers/leds/leds-lm36274.c
+++ b/drivers/leds/leds-lm36274.c
@@ -90,9 +90,7 @@ static int lm36274_parse_dt(struct lm36274 *lm36274_data)
snprintf(label, sizeof(label),
"%s:%s", lm36274_data->pdev->name, name);
- lm36274_data->num_leds = fwnode_property_read_u32_array(child,
- "led-sources",
- NULL, 0);
+ lm36274_data->num_leds = fwnode_property_count_u32(child, "led-sources");
if (lm36274_data->num_leds <= 0)
return -ENODEV;
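
[Note: fwnode_property_count_u32(), used here and in the lm3697/lm3532 hunks, is shorthand for calling fwnode_property_read_u32_array() with a NULL buffer, which makes the property core return the element count instead of copying data. A short sketch of the equivalence:]

	int count;

	/* Preferred helper: returns the number of u32 cells in "led-sources",
	 * or a negative errno if the property is absent or malformed.
	 */
	count = fwnode_property_count_u32(child, "led-sources");

	/* Open-coded equivalent that the helper wraps: */
	count = fwnode_property_read_u32_array(child, "led-sources", NULL, 0);
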
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 4f413a7c5f05..3d381f2f73d0 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <uapi/linux/uleds.h>
#define LM36922_MODEL 0
#define LM36923_MODEL 1
@@ -103,7 +102,6 @@
* @regmap - Devices register map
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
- * @label - LED label
* @led_enable - LED sync to be enabled
* @model_id - Current device model ID enumerated
*/
@@ -114,7 +112,6 @@ struct lm3692x_led {
struct regmap *regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- char label[LED_MAX_NAME_SIZE];
int led_enable;
int model_id;
};
@@ -325,7 +322,7 @@ out:
static int lm3692x_probe_dt(struct lm3692x_led *led)
{
struct fwnode_handle *child = NULL;
- const char *name;
+ struct led_init_data init_data = {};
int ret;
led->enable_gpio = devm_gpiod_get_optional(&led->client->dev,
@@ -350,30 +347,23 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
fwnode_property_read_string(child, "linux,default-trigger",
&led->led_dev.default_trigger);
- ret = fwnode_property_read_string(child, "label", &name);
- if (ret)
- snprintf(led->label, sizeof(led->label),
- "%s::", led->client->name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s:%s", led->client->name, name);
-
ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
if (ret) {
dev_err(&led->client->dev, "reg DT property missing\n");
return ret;
}
- led->led_dev.name = led->label;
+ init_data.fwnode = child;
+ init_data.devicename = led->client->name;
+ init_data.default_label = ":";
- ret = devm_led_classdev_register(&led->client->dev, &led->led_dev);
+ ret = devm_led_classdev_register_ext(&led->client->dev, &led->led_dev,
+ &init_data);
if (ret) {
dev_err(&led->client->dev, "led register err: %d\n", ret);
return ret;
}
- led->led_dev.dev->of_node = to_of_node(child);
-
return 0;
}
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 54e0e35df824..b71711aff8a3 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -244,10 +244,7 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led->lmu_data.lsb_brightness_reg = LM3697_CTRL_A_BRT_LSB +
led->control_bank * 2;
- led->num_leds = fwnode_property_read_u32_array(child,
- "led-sources",
- NULL, 0);
-
+ led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3697_MAX_LED_STRINGS) {
dev_err(&priv->client->dev, "To many LED strings defined\n");
continue;
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 37632fc63741..edb57c42e8b1 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -260,7 +260,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
{
const struct firmware *fw = chip->fw;
- if (fw->size > LP5562_PROGRAM_LENGTH) {
+ /*
+ * the firmware is encoded in ascii hex character, with 2 chars
+ * per byte
+ */
+ if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
fw->size);
return;
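
[Note: the relaxed size check reflects the firmware encoding: the image is an ASCII hex string, two characters per program byte, so a full LP5562_PROGRAM_LENGTH program legitimately occupies twice that many bytes on disk. A hedged sketch of the decode step the comment implies; this helper is illustrative, not the driver's actual parser:]

	/* Convert one ASCII hex pair into a program byte; -EINVAL on bad input. */
	static int hexpair_to_byte(const char *s, u8 *out)
	{
		int hi = hex_to_bin(s[0]);
		int lo = hex_to_bin(s[1]);

		if (hi < 0 || lo < 0)
			return -EINVAL;

		*out = (hi << 4) | lo;
		return 0;
	}
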
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index ed680d0c15b0..ac2f5d6272dc 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -18,7 +18,6 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
-#include <uapi/linux/uleds.h>
#define LP8860_DISP_CL1_BRT_MSB 0x00
#define LP8860_DISP_CL1_BRT_LSB 0x01
@@ -83,6 +82,8 @@
#define LP8860_CLEAR_FAULTS 0x01
+#define LP8860_NAME "lp8860"
+
/**
* struct lp8860_led -
* @lock - Lock for reading/writing the device
@@ -92,7 +93,6 @@
* @eeprom_regmap - EEPROM register map
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
- * @label - LED label
*/
struct lp8860_led {
struct mutex lock;
@@ -102,7 +102,6 @@ struct lp8860_led {
struct regmap *eeprom_regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- char label[LED_MAX_NAME_SIZE];
};
struct lp8860_eeprom_reg {
@@ -383,25 +382,19 @@ static int lp8860_probe(struct i2c_client *client,
struct lp8860_led *led;
struct device_node *np = client->dev.of_node;
struct device_node *child_node;
- const char *name;
+ struct led_init_data init_data = {};
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- for_each_available_child_of_node(np, child_node) {
- led->led_dev.default_trigger = of_get_property(child_node,
- "linux,default-trigger",
- NULL);
-
- ret = of_property_read_string(child_node, "label", &name);
- if (!ret)
- snprintf(led->label, sizeof(led->label), "%s:%s",
- id->name, name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s::display_cluster", id->name);
- }
+ child_node = of_get_next_available_child(np, NULL);
+ if (!child_node)
+ return -EINVAL;
+
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
"enable", GPIOD_OUT_LOW);
@@ -416,7 +409,6 @@ static int lp8860_probe(struct i2c_client *client,
led->regulator = NULL;
led->client = client;
- led->led_dev.name = led->label;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
mutex_init(&led->lock);
@@ -443,7 +435,12 @@ static int lp8860_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = devm_led_classdev_register(&client->dev, &led->led_dev);
+ init_data.fwnode = of_fwnode_handle(child_node);
+ init_data.devicename = LP8860_NAME;
+ init_data.default_label = ":display_cluster";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_dev,
+ &init_data);
if (ret) {
dev_err(&client->dev, "led register err: %d\n", ret);
return ret;
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 83e8e58d81cb..c94995f0daa2 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -10,10 +10,10 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <uapi/linux/uleds.h>
+
+#define LED_LT3593_NAME "lt3593"
struct lt3593_led_data {
- char name[LED_MAX_NAME_SIZE];
struct led_classdev cdev;
struct gpio_desc *gpiod;
};
@@ -66,6 +66,7 @@ static int lt3593_led_probe(struct platform_device *pdev)
struct lt3593_led_data *led_data;
struct fwnode_handle *child;
int ret, state = LEDS_GPIO_DEFSTATE_OFF;
+ struct led_init_data init_data = {};
const char *tmp;
if (!dev->of_node)
@@ -86,14 +87,6 @@ static int lt3593_led_probe(struct platform_device *pdev)
child = device_get_next_child_node(dev, NULL);
- ret = fwnode_property_read_string(child, "label", &tmp);
- if (ret < 0)
- snprintf(led_data->name, sizeof(led_data->name),
- "lt3593::");
- else
- snprintf(led_data->name, sizeof(led_data->name),
- "lt3593:%s", tmp);
-
fwnode_property_read_string(child, "linux,default-trigger",
&led_data->cdev.default_trigger);
@@ -102,11 +95,14 @@ static int lt3593_led_probe(struct platform_device *pdev)
state = LEDS_GPIO_DEFSTATE_ON;
}
- led_data->cdev.name = led_data->name;
led_data->cdev.brightness_set_blocking = lt3593_led_set;
led_data->cdev.brightness = state ? LED_FULL : LED_OFF;
- ret = devm_led_classdev_register(dev, &led_data->cdev);
+ init_data.fwnode = child;
+ init_data.devicename = LED_LT3593_NAME;
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
if (ret < 0) {
fwnode_handle_put(child);
return ret;
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index 8a8e5c65b157..4c2d0b3c6dad 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -62,7 +62,7 @@ static int max77650_led_brightness_set(struct led_classdev *cdev,
static int max77650_led_probe(struct platform_device *pdev)
{
- struct device_node *of_node, *child;
+ struct fwnode_handle *child;
struct max77650_led *leds, *led;
struct device *dev;
struct regmap *map;
@@ -71,10 +71,6 @@ static int max77650_led_probe(struct platform_device *pdev)
u32 reg;
dev = &pdev->dev;
- of_node = dev->of_node;
-
- if (!of_node)
- return -ENODEV;
leds = devm_kcalloc(dev, sizeof(*leds),
MAX77650_LED_NUM_LEDS, GFP_KERNEL);
@@ -85,14 +81,16 @@ static int max77650_led_probe(struct platform_device *pdev)
if (!map)
return -ENODEV;
- num_leds = of_get_child_count(of_node);
+ num_leds = device_get_child_node_count(dev);
if (!num_leds || num_leds > MAX77650_LED_NUM_LEDS)
return -ENODEV;
- for_each_child_of_node(of_node, child) {
- rv = of_property_read_u32(child, "reg", &reg);
- if (rv || reg >= MAX77650_LED_NUM_LEDS)
- return -EINVAL;
+ device_for_each_child_node(dev, child) {
+ rv = fwnode_property_read_u32(child, "reg", &reg);
+ if (rv || reg >= MAX77650_LED_NUM_LEDS) {
+ rv = -EINVAL;
+ goto err_node_put;
+ }
led = &leds[reg];
led->map = map;
@@ -101,35 +99,40 @@ static int max77650_led_probe(struct platform_device *pdev)
led->cdev.brightness_set_blocking = max77650_led_brightness_set;
led->cdev.max_brightness = MAX77650_LED_MAX_BRIGHTNESS;
- label = of_get_property(child, "label", NULL);
- if (!label) {
+ rv = fwnode_property_read_string(child, "label", &label);
+ if (rv) {
led->cdev.name = "max77650::";
} else {
led->cdev.name = devm_kasprintf(dev, GFP_KERNEL,
"max77650:%s", label);
- if (!led->cdev.name)
- return -ENOMEM;
+ if (!led->cdev.name) {
+ rv = -ENOMEM;
+ goto err_node_put;
+ }
}
- of_property_read_string(child, "linux,default-trigger",
- &led->cdev.default_trigger);
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
- rv = devm_of_led_classdev_register(dev, child, &led->cdev);
+ rv = devm_led_classdev_register(dev, &led->cdev);
if (rv)
- return rv;
+ goto err_node_put;
rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT);
if (rv)
- return rv;
+ goto err_node_put;
rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT);
if (rv)
- return rv;
+ goto err_node_put;
}
return regmap_write(map,
MAX77650_REG_CNFG_LED_TOP,
MAX77650_LED_TOP_DEFAULT);
+err_node_put:
+ fwnode_handle_put(child);
+ return rv;
}
static struct platform_driver max77650_led_driver = {
@@ -143,3 +146,4 @@ module_platform_driver(max77650_led_driver);
MODULE_DESCRIPTION("MAXIM 77650/77651 LED driver");
MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:max77650-led");
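
[Note: the max77650 and ns2 fixes follow the same rule: device_for_each_child_node() and for_each_child_of_node() hold a reference on the current child, so leaving the loop early must drop it with fwnode_handle_put() or of_node_put(). A minimal sketch of the fwnode variant; variable names are illustrative:]

	struct fwnode_handle *child;
	u32 reg;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			fwnode_handle_put(child); /* drop the loop's reference */
			return ret;
		}
		/* ... use the child node ... */
	}
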
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 10497a466775..14ef4ccdda3a 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -15,7 +15,48 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/leds.h>
-#include <linux/platform_data/leds-kirkwood-netxbig.h>
+
+struct netxbig_gpio_ext {
+ unsigned int *addr;
+ int num_addr;
+ unsigned int *data;
+ int num_data;
+ unsigned int enable;
+};
+
+enum netxbig_led_mode {
+ NETXBIG_LED_OFF,
+ NETXBIG_LED_ON,
+ NETXBIG_LED_SATA,
+ NETXBIG_LED_TIMER1,
+ NETXBIG_LED_TIMER2,
+ NETXBIG_LED_MODE_NUM,
+};
+
+#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM
+
+struct netxbig_led_timer {
+ unsigned long delay_on;
+ unsigned long delay_off;
+ enum netxbig_led_mode mode;
+};
+
+struct netxbig_led {
+ const char *name;
+ const char *default_trigger;
+ int mode_addr;
+ int *mode_val;
+ int bright_addr;
+ int bright_max;
+};
+
+struct netxbig_led_platform_data {
+ struct netxbig_gpio_ext *gpio_ext;
+ struct netxbig_led_timer *timer;
+ int num_timer;
+ struct netxbig_led *leds;
+ int num_leds;
+};
/*
* GPIO extension bus.
@@ -306,7 +347,6 @@ static int create_netxbig_led(struct platform_device *pdev,
return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
}
-#ifdef CONFIG_OF_GPIO
static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
struct netxbig_gpio_ext *gpio_ext)
{
@@ -388,12 +428,14 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
- if (!gpio_ext)
+ if (!gpio_ext) {
+ of_node_put(gpio_ext_np);
return -ENOMEM;
+ }
ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
+ of_node_put(gpio_ext_np);
if (ret)
return ret;
- of_node_put(gpio_ext_np);
pdata->gpio_ext = gpio_ext;
/* Timers (optional) */
@@ -522,30 +564,20 @@ static const struct of_device_id of_netxbig_leds_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_netxbig_leds_match);
-#else
-static inline int
-netxbig_leds_get_of_pdata(struct device *dev,
- struct netxbig_led_platform_data *pdata)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF_GPIO */
static int netxbig_led_probe(struct platform_device *pdev)
{
- struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct netxbig_led_platform_data *pdata;
struct netxbig_led_data *leds_data;
int i;
int ret;
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
- ret = netxbig_leds_get_of_pdata(&pdev->dev, pdata);
- if (ret)
- return ret;
- }
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ ret = netxbig_leds_get_of_pdata(&pdev->dev, pdata);
+ if (ret)
+ return ret;
leds_data = devm_kcalloc(&pdev->dev,
pdata->num_leds, sizeof(*leds_data),
@@ -571,7 +603,7 @@ static struct platform_driver netxbig_led_driver = {
.probe = netxbig_led_probe,
.driver = {
.name = "leds-netxbig",
- .of_match_table = of_match_ptr(of_netxbig_leds_match),
+ .of_match_table = of_netxbig_leds_match,
},
};
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index f92e2c07c1c6..7c500dfdcfa3 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -245,7 +245,7 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
struct device_node *np = dev->of_node;
struct device_node *child;
struct ns2_led *led, *leds;
- int num_leds = 0;
+ int ret, num_leds = 0;
num_leds = of_get_child_count(np);
if (!num_leds)
@@ -259,16 +259,16 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
led = leds;
for_each_child_of_node(np, child) {
const char *string;
- int ret, i, num_modes;
+ int i, num_modes;
struct ns2_led_modval *modval;
ret = of_get_named_gpio(child, "cmd-gpio", 0);
if (ret < 0)
- return ret;
+ goto err_node_put;
led->cmd = ret;
ret = of_get_named_gpio(child, "slow-gpio", 0);
if (ret < 0)
- return ret;
+ goto err_node_put;
led->slow = ret;
ret = of_property_read_string(child, "label", &string);
led->name = (ret == 0) ? string : child->name;
@@ -281,7 +281,8 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
if (ret < 0 || ret % 3) {
dev_err(dev,
"Missing or malformed modes-map property\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
num_modes = ret / 3;
@@ -289,8 +290,10 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
num_modes,
sizeof(struct ns2_led_modval),
GFP_KERNEL);
- if (!modval)
- return -ENOMEM;
+ if (!modval) {
+ ret = -ENOMEM;
+ goto err_node_put;
+ }
for (i = 0; i < num_modes; i++) {
of_property_read_u32_index(child,
@@ -314,6 +317,10 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
pdata->num_leds = num_leds;
return 0;
+
+err_node_put:
+ of_node_put(child);
+ return ret;
}
static const struct of_device_id of_ns2_leds_match[] = {
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 290871072d65..c7c7199e8ebd 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -16,7 +16,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/leds-pca9532.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 48d068f80f11..8b6965a563e9 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -65,12 +65,6 @@ static int led_pwm_set(struct led_classdev *led_cdev,
return 0;
}
-static inline size_t sizeof_pwm_leds_priv(int num_leds)
-{
- return sizeof(struct led_pwm_priv) +
- (sizeof(struct led_pwm_data) * num_leds);
-}
-
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
@@ -111,8 +105,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
if (!led_data->period && (led->pwm_period_ns > 0))
led_data->period = led->pwm_period_ns;
- ret = devm_of_led_classdev_register(dev, to_of_node(fwnode),
- &led_data->cdev);
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {
priv->num_leds++;
led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
@@ -175,7 +168,7 @@ static int led_pwm_probe(struct platform_device *pdev)
if (!count)
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count),
+ priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c
index fecf27fb1cdc..0ede87420bfc 100644
--- a/drivers/leds/leds-sc27xx-bltc.c
+++ b/drivers/leds/leds-sc27xx-bltc.c
@@ -6,7 +6,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <uapi/linux/uleds.h>
/* PMIC global control register definition */
#define SC27XX_MODULE_EN0 0xc08
@@ -46,7 +45,7 @@
#define SC27XX_DELTA_T_MAX (SC27XX_LEDS_STEP * 255)
struct sc27xx_led {
- char name[LED_MAX_NAME_SIZE];
+ struct fwnode_handle *fwnode;
struct led_classdev ldev;
struct sc27xx_led_priv *priv;
u8 line;
@@ -249,19 +248,24 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
for (i = 0; i < SC27XX_LEDS_MAX; i++) {
struct sc27xx_led *led = &priv->leds[i];
+ struct led_init_data init_data = {};
if (!led->active)
continue;
led->line = i;
led->priv = priv;
- led->ldev.name = led->name;
led->ldev.brightness_set_blocking = sc27xx_led_set;
led->ldev.pattern_set = sc27xx_led_pattern_set;
led->ldev.pattern_clear = sc27xx_led_pattern_clear;
led->ldev.default_trigger = "pattern";
- err = devm_led_classdev_register(dev, &led->ldev);
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "sc27xx";
+ init_data.default_label = ":";
+
+ err = devm_led_classdev_register_ext(dev, &led->ldev,
+ &init_data);
if (err)
return err;
}
@@ -274,7 +278,6 @@ static int sc27xx_led_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node, *child;
struct sc27xx_led_priv *priv;
- const char *str;
u32 base, count, reg;
int err;
@@ -316,15 +319,8 @@ static int sc27xx_led_probe(struct platform_device *pdev)
return -EINVAL;
}
+ priv->leds[reg].fwnode = of_fwnode_handle(child);
priv->leds[reg].active = true;
-
- err = of_property_read_string(child, "label", &str);
- if (err)
- snprintf(priv->leds[reg].name, LED_MAX_NAME_SIZE,
- "sc27xx::");
- else
- snprintf(priv->leds[reg].name, LED_MAX_NAME_SIZE,
- "sc27xx:%s", str);
}
err = sc27xx_led_register(dev, priv);
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index e35dff0050f0..b58f3cafe16f 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -115,7 +115,7 @@ static int syscon_led_probe(struct platform_device *pdev)
}
sled->cdev.brightness_set = syscon_led_set;
- ret = led_classdev_register(dev, &sled->cdev);
+ ret = devm_led_classdev_register(dev, &sled->cdev);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-ti-lmu-common.c b/drivers/leds/leds-ti-lmu-common.c
index adc7293004f1..d7f10ad721ba 100644
--- a/drivers/leds/leds-ti-lmu-common.c
+++ b/drivers/leds/leds-ti-lmu-common.c
@@ -11,10 +11,10 @@
#include <linux/leds-ti-lmu-common.h>
-const static int ramp_table[16] = {2048, 262000, 524000, 1049000, 2090000,
- 4194000, 8389000, 16780000, 33550000, 41940000,
- 50330000, 58720000, 67110000, 83880000,
- 100660000, 117440000};
+static const unsigned int ramp_table[16] = {2048, 262000, 524000, 1049000,
+ 2090000, 4194000, 8389000, 16780000, 33550000,
+ 41940000, 50330000, 58720000, 67110000,
+ 83880000, 100660000, 117440000};
static int ti_lmu_common_update_brightness(struct ti_lmu_bank *lmu_bank,
int brightness)
@@ -54,7 +54,7 @@ int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness)
}
EXPORT_SYMBOL(ti_lmu_common_set_brightness);
-static int ti_lmu_common_convert_ramp_to_index(unsigned int usec)
+static unsigned int ti_lmu_common_convert_ramp_to_index(unsigned int usec)
{
int size = ARRAY_SIZE(ramp_table);
int i;
@@ -78,7 +78,7 @@ static int ti_lmu_common_convert_ramp_to_index(unsigned int usec)
}
}
- return -EINVAL;
+ return 0;
}
int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank)
@@ -94,9 +94,6 @@ int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank)
ramp_down = ti_lmu_common_convert_ramp_to_index(lmu_bank->ramp_down_usec);
}
- if (ramp_up < 0 || ramp_down < 0)
- return -EINVAL;
-
ramp = (ramp_up << 4) | ramp_down;
return regmap_write(regmap, lmu_bank->runtime_ramp_reg, ramp);
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 47b229469069..0b577cece8f7 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -27,5 +27,6 @@ void led_set_brightness_nosleep(struct led_classdev *led_cdev,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
extern struct list_head trigger_list;
+extern const char * const led_colors[LED_COLOR_ID_MAX];
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 33cc99a1a16a..dc64679b1a92 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -131,10 +131,10 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
if (gpio_data->gpio == gpio)
return n;
- if (!gpio) {
- if (gpio_data->gpio != 0)
+ if (!gpio_is_valid(gpio)) {
+ if (gpio_is_valid(gpio_data->gpio))
free_irq(gpio_to_irq(gpio_data->gpio), led);
- gpio_data->gpio = 0;
+ gpio_data->gpio = gpio;
return n;
}
@@ -144,7 +144,7 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
if (ret) {
dev_err(dev, "request_irq failed with error %d\n", ret);
} else {
- if (gpio_data->gpio != 0)
+ if (gpio_is_valid(gpio_data->gpio))
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = gpio;
/* After changing the GPIO, we need to update the LED. */
@@ -172,6 +172,8 @@ static int gpio_trig_activate(struct led_classdev *led)
return -ENOMEM;
gpio_data->led = led;
+ gpio_data->gpio = -ENOENT;
+
led_set_trigger_data(led, gpio_data);
return 0;
@@ -181,7 +183,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led_get_trigger_data(led);
- if (gpio_data->gpio != 0)
+ if (gpio_is_valid(gpio_data->gpio))
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
}
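
[Note: the ledtrig-gpio change stops using 0 to mean "no GPIO configured", since GPIO number 0 is valid on many platforms: the stored number is initialised to -ENOENT and every check goes through gpio_is_valid(). A short sketch of the idiom; the helper is illustrative only:]

	static void gpio_trig_release_irq(struct gpio_trig_data *gpio_data,
					  struct led_classdev *led)
	{
		/* 0 is a real GPIO; only a negative number means "unset". */
		if (gpio_is_valid(gpio_data->gpio))
			free_irq(gpio_to_irq(gpio_data->gpio), led);

		gpio_data->gpio = -ENOENT;
	}
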
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index a600934fdd9c..7543e395a2c6 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -4,6 +4,8 @@
* Initial release: Matias Bjorling <m@bjorling.me>
*/
+#define pr_fmt(fmt) "nvm: " fmt
+
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
@@ -74,7 +76,7 @@ static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
for (i = lun_begin; i <= lun_end; i++) {
if (test_and_set_bit(i, dev->lun_map)) {
- pr_err("nvm: lun %d already allocated\n", i);
+ pr_err("lun %d already allocated\n", i);
goto err;
}
}
@@ -264,7 +266,7 @@ static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
int lun_end)
{
if (lun_begin > lun_end || lun_end >= geo->all_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+ pr_err("lun out of bound (%u:%u > %u)\n",
lun_begin, lun_end, geo->all_luns - 1);
return -EINVAL;
}
@@ -297,7 +299,7 @@ static int __nvm_config_extended(struct nvm_dev *dev,
if (e->op == 0xFFFF) {
e->op = NVM_TARGET_DEFAULT_OP;
} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
- pr_err("nvm: invalid over provisioning value\n");
+ pr_err("invalid over provisioning value\n");
return -EINVAL;
}
@@ -334,23 +336,23 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
e = create->conf.e;
break;
default:
- pr_err("nvm: config type not valid\n");
+ pr_err("config type not valid\n");
return -EINVAL;
}
tt = nvm_find_target_type(create->tgttype);
if (!tt) {
- pr_err("nvm: target type %s not found\n", create->tgttype);
+ pr_err("target type %s not found\n", create->tgttype);
return -EINVAL;
}
if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
- pr_err("nvm: device is incompatible with target L2P type.\n");
+ pr_err("device is incompatible with target L2P type.\n");
return -EINVAL;
}
if (nvm_target_exists(create->tgtname)) {
- pr_err("nvm: target name already exists (%s)\n",
+ pr_err("target name already exists (%s)\n",
create->tgtname);
return -EINVAL;
}
@@ -367,7 +369,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
- pr_err("nvm: could not create target device\n");
+ pr_err("could not create target device\n");
ret = -ENOMEM;
goto err_t;
}
@@ -493,8 +495,11 @@ static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
}
up_read(&nvm_lock);
- if (!t)
+ if (!t) {
+ pr_err("failed to remove target %s\n",
+ remove->tgtname);
return 1;
+ }
__nvm_remove_target(t, true);
kref_put(&dev->ref, nvm_free);
@@ -686,7 +691,7 @@ static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
rqd->nr_ppas = nr_ppas;
rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
- pr_err("nvm: failed to allocate dma memory\n");
+ pr_err("failed to allocate dma memory\n");
return -ENOMEM;
}
@@ -731,7 +736,7 @@ static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
return flags;
}
-int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
struct nvm_dev *dev = tgt_dev->parent;
int ret;
@@ -745,19 +750,45 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with right address format */
- ret = dev->ops->submit_io(dev, rqd);
+ ret = dev->ops->submit_io(dev, rqd, buf);
if (ret)
nvm_rq_dev_to_tgt(tgt_dev, rqd);
return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
-int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+static void nvm_sync_end_io(struct nvm_rq *rqd)
+{
+ struct completion *waiting = rqd->private;
+
+ complete(waiting);
+}
+
+static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
+ void *buf)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int ret = 0;
+
+ rqd->end_io = nvm_sync_end_io;
+ rqd->private = &wait;
+
+ ret = dev->ops->submit_io(dev, rqd, buf);
+ if (ret)
+ return ret;
+
+ wait_for_completion_io(&wait);
+
+ return 0;
+}
+
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ void *buf)
{
struct nvm_dev *dev = tgt_dev->parent;
int ret;
- if (!dev->ops->submit_io_sync)
+ if (!dev->ops->submit_io)
return -ENODEV;
nvm_rq_tgt_to_dev(tgt_dev, rqd);
@@ -765,9 +796,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
rqd->dev = tgt_dev;
rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
- /* In case of error, fail with right address format */
- ret = dev->ops->submit_io_sync(dev, rqd);
- nvm_rq_dev_to_tgt(tgt_dev, rqd);
+ ret = nvm_submit_io_wait(dev, rqd, buf);
return ret;
}
@@ -788,12 +817,13 @@ EXPORT_SYMBOL(nvm_end_io);
static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
- if (!dev->ops->submit_io_sync)
+ if (!dev->ops->submit_io)
return -ENODEV;
+ rqd->dev = NULL;
rqd->flags = nvm_set_flags(&dev->geo, rqd);
- return dev->ops->submit_io_sync(dev, rqd);
+ return nvm_submit_io_wait(dev, rqd, NULL);
}
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
@@ -1048,7 +1078,7 @@ int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
return 0;
if (nr_ppas > NVM_MAX_VLBA) {
- pr_err("nvm: unable to update all blocks atomically\n");
+ pr_err("unable to update all blocks atomically\n");
return -EINVAL;
}
@@ -1111,27 +1141,26 @@ static int nvm_init(struct nvm_dev *dev)
int ret = -EINVAL;
if (dev->ops->identity(dev)) {
- pr_err("nvm: device could not be identified\n");
+ pr_err("device could not be identified\n");
goto err;
}
- pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
- geo->major_ver_id, geo->minor_ver_id,
- geo->vmnt);
+ pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
+ geo->minor_ver_id, geo->vmnt);
ret = nvm_core_init(dev);
if (ret) {
- pr_err("nvm: could not initialize core structures.\n");
+ pr_err("could not initialize core structures.\n");
goto err;
}
- pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
+ pr_info("registered %s [%u/%u/%u/%u/%u]\n",
dev->name, dev->geo.ws_min, dev->geo.ws_opt,
dev->geo.num_chk, dev->geo.all_luns,
dev->geo.num_ch);
return 0;
err:
- pr_err("nvm: failed to initialize nvm\n");
+ pr_err("failed to initialize nvm\n");
return ret;
}
@@ -1169,7 +1198,7 @@ int nvm_register(struct nvm_dev *dev)
dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
exp_pool_size);
if (!dev->dma_pool) {
- pr_err("nvm: could not create dma pool\n");
+ pr_err("could not create dma pool\n");
kref_put(&dev->ref, nvm_free);
return -ENOMEM;
}
@@ -1214,7 +1243,7 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
up_write(&nvm_lock);
if (!dev) {
- pr_err("nvm: device not found\n");
+ pr_err("device not found\n");
return -EINVAL;
}
@@ -1288,7 +1317,7 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
i++;
if (i > 31) {
- pr_err("nvm: max 31 devices can be reported.\n");
+ pr_err("max 31 devices can be reported.\n");
break;
}
}
@@ -1315,7 +1344,7 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
create.conf.e.rsv != 0) {
- pr_err("nvm: reserved config field in use\n");
+ pr_err("reserved config field in use\n");
return -EINVAL;
}
@@ -1331,7 +1360,7 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
flags &= ~NVM_TARGET_FACTORY;
if (flags) {
- pr_err("nvm: flag not supported\n");
+ pr_err("flag not supported\n");
return -EINVAL;
}
}
@@ -1349,7 +1378,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
remove.tgtname[DISK_NAME_LEN - 1] = '\0';
if (remove.flags != 0) {
- pr_err("nvm: no flags supported\n");
+ pr_err("no flags supported\n");
return -EINVAL;
}
@@ -1365,7 +1394,7 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
return -EFAULT;
if (init.flags != 0) {
- pr_err("nvm: no flags supported\n");
+ pr_err("no flags supported\n");
return -EINVAL;
}
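
[Note: nvm_submit_io_sync() is reworked above to emulate synchronous submission on top of the asynchronous ->submit_io() hook: a stack completion becomes the request's end_io context and the caller sleeps until the end_io callback fires. A minimal sketch of that pattern; field names follow the diff, the wrapper name is illustrative:]

	static void nvm_sync_end_io(struct nvm_rq *rqd)
	{
		complete((struct completion *)rqd->private);
	}

	static int submit_and_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
				   void *buf)
	{
		DECLARE_COMPLETION_ONSTACK(wait);
		int ret;

		rqd->end_io = nvm_sync_end_io;
		rqd->private = &wait;

		ret = dev->ops->submit_io(dev, rqd, buf);
		if (ret)
			return ret;

		wait_for_completion_io(&wait);	/* woken from nvm_end_io() */
		return 0;
	}
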
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index f546e6f28b8a..b413bafe93fd 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -507,7 +507,7 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
pblk->sec_per_write = sec_per_write;
}
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
+int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -518,7 +518,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
#endif
- return nvm_submit_io(dev, rqd);
+ return nvm_submit_io(dev, rqd, buf);
}
void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
@@ -541,7 +541,7 @@ void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
}
}
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
struct nvm_tgt_dev *dev = pblk->dev;
int ret;
@@ -553,7 +553,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
#endif
- ret = nvm_submit_io_sync(dev, rqd);
+ ret = nvm_submit_io_sync(dev, rqd, buf);
if (trace_pblk_chunk_state_enabled() && !ret &&
rqd->opcode == NVM_OP_PWRITE)
@@ -562,65 +562,19 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
return ret;
}
-int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
+static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
+ void *buf)
{
struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int ret;
pblk_down_chunk(pblk, ppa_list[0]);
- ret = pblk_submit_io_sync(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd, buf);
pblk_up_chunk(pblk, ppa_list[0]);
return ret;
}
-static void pblk_bio_map_addr_endio(struct bio *bio)
-{
- bio_put(bio);
-}
-
-struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
- unsigned int nr_secs, unsigned int len,
- int alloc_type, gfp_t gfp_mask)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- void *kaddr = data;
- struct page *page;
- struct bio *bio;
- int i, ret;
-
- if (alloc_type == PBLK_KMALLOC_META)
- return bio_map_kern(dev->q, kaddr, len, gfp_mask);
-
- bio = bio_kmalloc(gfp_mask, nr_secs);
- if (!bio)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < nr_secs; i++) {
- page = vmalloc_to_page(kaddr);
- if (!page) {
- pblk_err(pblk, "could not map vmalloc bio\n");
- bio_put(bio);
- bio = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
- pblk_err(pblk, "could not add page to bio\n");
- bio_put(bio);
- bio = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- kaddr += PAGE_SIZE;
- }
-
- bio->bi_end_io = pblk_bio_map_addr_endio;
-out:
- return bio;
-}
-
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
unsigned long secs_to_flush, bool skip_meta)
{
@@ -722,9 +676,7 @@ u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
- struct bio *bio;
struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = pblk_line_smeta_start(pblk, line);
@@ -736,16 +688,6 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
if (ret)
return ret;
- bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- goto clear_rqd;
- }
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
- rqd.bio = bio;
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
@@ -754,10 +696,9 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
for (i = 0; i < lm->smeta_sec; i++, paddr++)
ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
- ret = pblk_submit_io_sync(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
if (ret) {
pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
- bio_put(bio);
goto clear_rqd;
}
@@ -776,9 +717,7 @@ clear_rqd:
static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
u64 paddr)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
- struct bio *bio;
struct ppa_addr *ppa_list;
struct nvm_rq rqd;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
@@ -791,16 +730,6 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
if (ret)
return ret;
- bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- goto clear_rqd;
- }
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
- rqd.bio = bio;
rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
@@ -814,10 +743,9 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
meta->lba = lba_list[paddr] = addr_empty;
}
- ret = pblk_submit_io_sync_sem(pblk, &rqd);
+ ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
if (ret) {
pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
- bio_put(bio);
goto clear_rqd;
}
@@ -838,10 +766,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
void *ppa_list_buf, *meta_list;
- struct bio *bio;
struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = line->emeta_ssec;
@@ -867,17 +793,6 @@ next_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
rq_len = rq_ppas * geo->csecs;
- bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- goto free_rqd_dma;
- }
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
- rqd.bio = bio;
rqd.meta_list = meta_list;
rqd.ppa_list = ppa_list_buf;
rqd.dma_meta_list = dma_meta_list;
@@ -896,7 +811,6 @@ next_rq:
while (test_bit(pos, line->blk_bitmap)) {
paddr += min;
if (pblk_boundary_paddr_checks(pblk, paddr)) {
- bio_put(bio);
ret = -EINTR;
goto free_rqd_dma;
}
@@ -906,7 +820,6 @@ next_rq:
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- bio_put(bio);
ret = -EINTR;
goto free_rqd_dma;
}
@@ -915,10 +828,9 @@ next_rq:
ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
}
- ret = pblk_submit_io_sync(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
if (ret) {
pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
- bio_put(bio);
goto free_rqd_dma;
}
@@ -963,7 +875,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
- ret = pblk_submit_io_sync(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd, NULL);
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -1792,7 +1704,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
- err = pblk_submit_io(pblk, rqd);
+ err = pblk_submit_io(pblk, rqd, NULL);
if (err) {
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -1923,13 +1835,11 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_meta *lm = &pblk->lm;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
unsigned int lba_list_size = lm->emeta_len[2];
struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
struct pblk_emeta *emeta = line->emeta;
- w_err_gc->lba_list = pblk_malloc(lba_list_size,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
lba_list_size);
}
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 63ee205b41c4..2581eebcfc41 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -132,14 +132,12 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
struct pblk_line *line)
{
struct line_emeta *emeta_buf;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
unsigned int lba_list_size = lm->emeta_len[2];
__le64 *lba_list;
int ret;
- emeta_buf = pblk_malloc(lm->emeta_len[0],
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
if (!emeta_buf)
return NULL;
@@ -147,7 +145,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
if (ret) {
pblk_err(pblk, "line %d read emeta failed (%d)\n",
line->id, ret);
- pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ kvfree(emeta_buf);
return NULL;
}
@@ -161,16 +159,16 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
if (ret) {
pblk_err(pblk, "inconsistent emeta (line %d)\n",
line->id);
- pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ kvfree(emeta_buf);
return NULL;
}
- lba_list = pblk_malloc(lba_list_size,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
+
if (lba_list)
memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);
- pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+ kvfree(emeta_buf);
return lba_list;
}
@@ -181,7 +179,6 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
ws);
struct pblk *pblk = line_ws->pblk;
struct pblk_line *line = line_ws->line;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -272,7 +269,7 @@ next_rq:
goto next_rq;
out:
- pblk_mfree(lba_list, l_mg->emeta_alloc_type);
+ kvfree(lba_list);
kfree(line_ws);
kfree(invalid_bitmap);
@@ -286,7 +283,7 @@ fail_free_gc_data:
fail_free_gc_rq:
kfree(gc_rq);
fail_free_lba_list:
- pblk_mfree(lba_list, l_mg->emeta_alloc_type);
+ kvfree(lba_list);
fail_free_invalid_bitmap:
kfree(invalid_bitmap);
fail_free_ws:
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index b351c7f002de..9a967a2e83dd 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -543,7 +543,7 @@ static void pblk_line_mg_free(struct pblk *pblk)
for (i = 0; i < PBLK_DATA_LINES; i++) {
kfree(l_mg->sline_meta[i]);
- pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
+ kvfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
@@ -560,7 +560,7 @@ static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
kfree(line->erase_bitmap);
kfree(line->chks);
- pblk_mfree(w_err_gc->lba_list, l_mg->emeta_alloc_type);
+ kvfree(w_err_gc->lba_list);
kfree(w_err_gc);
}
@@ -890,29 +890,14 @@ static int pblk_line_mg_init(struct pblk *pblk)
if (!emeta)
goto fail_free_emeta;
- if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
- l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
-
- emeta->buf = vmalloc(lm->emeta_len[0]);
- if (!emeta->buf) {
- kfree(emeta);
- goto fail_free_emeta;
- }
-
- emeta->nr_entries = lm->emeta_sec[0];
- l_mg->eline_meta[i] = emeta;
- } else {
- l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
-
- emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
- if (!emeta->buf) {
- kfree(emeta);
- goto fail_free_emeta;
- }
-
- emeta->nr_entries = lm->emeta_sec[0];
- l_mg->eline_meta[i] = emeta;
+ emeta->buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
+ if (!emeta->buf) {
+ kfree(emeta);
+ goto fail_free_emeta;
}
+
+ emeta->nr_entries = lm->emeta_sec[0];
+ l_mg->eline_meta[i] = emeta;
}
for (i = 0; i < l_mg->nr_lines; i++)
@@ -926,10 +911,7 @@ static int pblk_line_mg_init(struct pblk *pblk)
fail_free_emeta:
while (--i >= 0) {
- if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
- vfree(l_mg->eline_meta[i]->buf);
- else
- kfree(l_mg->eline_meta[i]->buf);
+ kvfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
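
[Note: the pblk series drops the PBLK_KMALLOC_META/PBLK_VMALLOC_META distinction in favour of kvmalloc()/kvfree(): kvmalloc() tries a kmalloc first and transparently falls back to vmalloc when the request is too large to satisfy contiguously, and kvfree() releases memory from either allocator. A minimal sketch of the resulting pattern; buffer and length names are illustrative:]

	void *emeta_buf;

	emeta_buf = kvmalloc(emeta_len, GFP_KERNEL);
	if (!emeta_buf)
		return -ENOMEM;

	/* ... fill and consume the buffer ... */

	kvfree(emeta_buf);	/* valid for kmalloc- and vmalloc-backed memory */
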
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index d98ea392fe33..8efd14e683dc 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -342,7 +342,7 @@ split_retry:
bio_put(int_bio);
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
goto split_retry;
- } else if (pblk_submit_io(pblk, rqd)) {
+ } else if (pblk_submit_io(pblk, rqd, NULL)) {
/* Submitting IO to drive failed, let's report an error */
rqd->error = -ENODEV;
pblk_end_io_read(rqd);
@@ -417,11 +417,7 @@ out:
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct bio *bio;
struct nvm_rq rqd;
- int data_len;
int ret = NVM_IO_OK;
memset(&rqd, 0, sizeof(struct nvm_rq));
@@ -446,26 +442,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (!(gc_rq->secs_to_gc))
goto out;
- data_len = (gc_rq->secs_to_gc) * geo->csecs;
- bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
- PBLK_VMALLOC_META, GFP_KERNEL);
- if (IS_ERR(bio)) {
- pblk_err(pblk, "could not allocate GC bio (%lu)\n",
- PTR_ERR(bio));
- ret = PTR_ERR(bio);
- goto err_free_dma;
- }
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = gc_rq->secs_to_gc;
- rqd.bio = bio;
- if (pblk_submit_io_sync(pblk, &rqd)) {
+ if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
ret = -EIO;
- goto err_free_bio;
+ goto err_free_dma;
}
pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
@@ -489,8 +471,6 @@ out:
pblk_free_rqd_meta(pblk, &rqd);
return ret;
-err_free_bio:
- bio_put(bio);
err_free_dma:
pblk_free_rqd_meta(pblk, &rqd);
return ret;
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index e6dda04de144..299ef47a17b2 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -178,12 +178,11 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
void *meta_list;
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
- struct bio *bio;
struct ppa_addr *ppa_list;
void *data;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
- int left_line_ppas, rq_ppas, rq_len;
+ int left_line_ppas, rq_ppas;
int i, j;
int ret = 0;
@@ -212,28 +211,15 @@ next_pad_rq:
goto fail_complete;
}
- rq_len = rq_ppas * geo->csecs;
-
- bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
- PBLK_VMALLOC_META, GFP_KERNEL);
- if (IS_ERR(bio)) {
- ret = PTR_ERR(bio);
- goto fail_complete;
- }
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
ret = pblk_alloc_rqd_meta(pblk, rqd);
if (ret) {
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
- bio_put(bio);
goto fail_complete;
}
- rqd->bio = bio;
+ rqd->bio = NULL;
rqd->opcode = NVM_OP_PWRITE;
rqd->is_seq = 1;
rqd->nr_ppas = rq_ppas;
@@ -275,13 +261,12 @@ next_pad_rq:
kref_get(&pad_rq->ref);
pblk_down_chunk(pblk, ppa_list[0]);
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io(pblk, rqd, data);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
pblk_up_chunk(pblk, ppa_list[0]);
kref_put(&pad_rq->ref, pblk_recov_complete);
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
- bio_put(bio);
goto fail_complete;
}
@@ -375,13 +360,12 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
struct ppa_addr *ppa_list;
void *meta_list;
struct nvm_rq *rqd;
- struct bio *bio;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
__le64 *lba_list;
u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
bool padded = false;
- int rq_ppas, rq_len;
+ int rq_ppas;
int i, j;
int ret;
u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;
@@ -404,18 +388,9 @@ next_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (!rq_ppas)
rq_ppas = pblk->min_write_pgs;
- rq_len = rq_ppas * geo->csecs;
retry_rq:
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio_get(bio);
-
- rqd->bio = bio;
+ rqd->bio = NULL;
rqd->opcode = NVM_OP_PREAD;
rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas;
@@ -445,10 +420,9 @@ retry_rq:
addr_to_gen_ppa(pblk, paddr + j, line->id);
}
- ret = pblk_submit_io_sync(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd, data);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- bio_put(bio);
return ret;
}
@@ -460,24 +434,20 @@ retry_rq:
if (padded) {
pblk_log_read_err(pblk, rqd);
- bio_put(bio);
return -EINTR;
}
pad_distance = pblk_pad_distance(pblk, line);
ret = pblk_recov_pad_line(pblk, line, pad_distance);
if (ret) {
- bio_put(bio);
return ret;
}
padded = true;
- bio_put(bio);
goto retry_rq;
}
pblk_get_packed_meta(pblk, rqd);
- bio_put(bio);
for (i = 0; i < rqd->nr_ppas; i++) {
struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 4e63f9b5954c..b9a2aeba95ab 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -373,7 +373,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
struct pblk_emeta *emeta = meta_line->emeta;
struct ppa_addr *ppa_list;
struct pblk_g_ctx *m_ctx;
- struct bio *bio;
struct nvm_rq *rqd;
void *data;
u64 paddr;
@@ -391,20 +390,9 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
rq_len = rq_ppas * geo->csecs;
data = ((void *)emeta->buf) + emeta->mem;
- bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
- if (IS_ERR(bio)) {
- pblk_err(pblk, "failed to map emeta io");
- ret = PTR_ERR(bio);
- goto fail_free_rqd;
- }
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- rqd->bio = bio;
-
ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
if (ret)
- goto fail_free_bio;
+ goto fail_free_rqd;
ppa_list = nvm_rq_to_ppa_list(rqd);
for (i = 0; i < rqd->nr_ppas; ) {
@@ -423,7 +411,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
pblk_down_chunk(pblk, ppa_list[0]);
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io(pblk, rqd, data);
if (ret) {
pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
goto fail_rollback;
@@ -437,8 +425,6 @@ fail_rollback:
pblk_dealloc_page(pblk, meta_line, rq_ppas);
list_add(&meta_line->list, &meta_line->list);
spin_unlock(&l_mg->close_lock);
-fail_free_bio:
- bio_put(bio);
fail_free_rqd:
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
return ret;
@@ -523,7 +509,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
meta_line = pblk_should_submit_meta_io(pblk, rqd);
/* Submit data write for current data line */
- err = pblk_submit_io(pblk, rqd);
+ err = pblk_submit_io(pblk, rqd, NULL);
if (err) {
pblk_err(pblk, "data I/O submission failed: %d\n", err);
return NVM_IO_ERR;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index a67855387f53..86ffa875bfe1 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -482,11 +482,6 @@ struct pblk_line {
#define PBLK_DATA_LINES 4
enum {
- PBLK_KMALLOC_META = 1,
- PBLK_VMALLOC_META = 2,
-};
-
-enum {
PBLK_EMETA_TYPE_HEADER = 1, /* struct line_emeta first sector */
PBLK_EMETA_TYPE_LLBA = 2, /* lba list - type: __le64 */
PBLK_EMETA_TYPE_VSC = 3, /* vsc list - type: __le32 */
@@ -521,9 +516,6 @@ struct pblk_line_mgmt {
__le32 *vsc_list; /* Valid sector counts for all lines */
- /* Metadata allocation type: VMALLOC | KMALLOC */
- int emeta_alloc_type;
-
/* Pre-allocated metadata for data lines */
struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
@@ -783,14 +775,10 @@ struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
struct ppa_addr ppa);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
-int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
-struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
- unsigned int nr_secs, unsigned int len,
- int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
@@ -938,21 +926,6 @@ void pblk_rl_werr_line_out(struct pblk_rl *rl);
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);
-static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
-{
- if (type == PBLK_KMALLOC_META)
- return kmalloc(size, flags);
- return vmalloc(size);
-}
-
-static inline void pblk_mfree(void *ptr, int type)
-{
- if (type == PBLK_KMALLOC_META)
- kfree(ptr);
- else
- vfree(ptr);
-}
-
static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
return c_ctx - sizeof(struct nvm_rq);
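The pblk hunks above collapse the PBLK_KMALLOC_META / PBLK_VMALLOC_META split into plain kvmalloc()/kvfree(): the allocator falls back to vmalloc() automatically when the request is too large for kmalloc(), so callers no longer need to record which allocator was used. A minimal sketch of the resulting pattern; the emeta_buf structure and helper names here are illustrative, not the real pblk types.

#include <linux/mm.h>	/* kvmalloc(), kvfree() */

/* Illustrative only: a buffer that may be a few KiB or many MiB. */
struct emeta_buf {
	void *buf;
	size_t len;
};

static int emeta_buf_alloc(struct emeta_buf *e, size_t len)
{
	/*
	 * kvmalloc() tries kmalloc() first and transparently falls back
	 * to vmalloc() for large sizes, so no *_META flag is needed to
	 * remember how the buffer was obtained.
	 */
	e->buf = kvmalloc(len, GFP_KERNEL);
	if (!e->buf)
		return -ENOMEM;
	e->len = len;
	return 0;
}

static void emeta_buf_free(struct emeta_buf *e)
{
	kvfree(e->buf);	/* correct for both kmalloc'd and vmalloc'd memory */
	e->buf = NULL;
}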
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 276065c888bc..23f1f41c8602 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
+ /* fall through */
case SMU_I2C_TRANSFER_STDSUB:
if (cmd->info.sublen > 3)
return -EINVAL;
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 52fd5fca89a0..705c6200814b 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -3,9 +3,11 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <asm/prom.h>
@@ -16,36 +18,24 @@
static int clamped;
static struct wf_control *clamp_control;
-
-static int clamp_notifier_call(struct notifier_block *self,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *p = data;
- unsigned long max_freq;
-
- if (event != CPUFREQ_ADJUST)
- return 0;
-
- max_freq = clamped ? (p->cpuinfo.min_freq) : (p->cpuinfo.max_freq);
- cpufreq_verify_within_limits(p, 0, max_freq);
-
- return 0;
-}
-
-static struct notifier_block clamp_notifier = {
- .notifier_call = clamp_notifier_call,
-};
+static struct dev_pm_qos_request qos_req;
+static unsigned int min_freq, max_freq;
static int clamp_set(struct wf_control *ct, s32 value)
{
- if (value)
+ unsigned int freq;
+
+ if (value) {
+ freq = min_freq;
printk(KERN_INFO "windfarm: Clamping CPU frequency to "
"minimum !\n");
- else
+ } else {
+ freq = max_freq;
printk(KERN_INFO "windfarm: CPU frequency unclamped !\n");
+ }
clamped = value;
- cpufreq_update_policy(0);
- return 0;
+
+ return dev_pm_qos_update_request(&qos_req, freq);
}
static int clamp_get(struct wf_control *ct, s32 *value)
@@ -74,27 +64,60 @@ static const struct wf_control_ops clamp_ops = {
static int __init wf_cpufreq_clamp_init(void)
{
+ struct cpufreq_policy *policy;
struct wf_control *clamp;
+ struct device *dev;
+ int ret;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_warn("%s: cpufreq policy not found cpu0\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ min_freq = policy->cpuinfo.min_freq;
+ max_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+
+ dev = get_cpu_device(0);
+ if (unlikely(!dev)) {
+ pr_warn("%s: No cpu device for cpu0\n", __func__);
+ return -ENODEV;
+ }
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
if (clamp == NULL)
return -ENOMEM;
- cpufreq_register_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);
+
+ ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
+ max_freq);
+ if (ret < 0) {
+ pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+ ret);
+ goto free;
+ }
+
clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp";
- if (wf_register_control(clamp))
+ ret = wf_register_control(clamp);
+ if (ret)
goto fail;
clamp_control = clamp;
return 0;
fail:
+ dev_pm_qos_remove_request(&qos_req);
+
+ free:
kfree(clamp);
- return -ENODEV;
+ return ret;
}
static void __exit wf_cpufreq_clamp_exit(void)
{
- if (clamp_control)
+ if (clamp_control) {
wf_unregister_control(clamp_control);
+ dev_pm_qos_remove_request(&qos_req);
+ }
}
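The windfarm change replaces the CPUFREQ_ADJUST notifier with a per-device frequency QoS request: the driver adds one DEV_PM_QOS_MAX_FREQUENCY request at probe time and simply updates its value to clamp or unclamp. A rough sketch of that pattern, assuming the dev_pm_qos API of this kernel series; the my_* names are illustrative.

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_qos_req;	/* illustrative name */
static unsigned int my_min_freq, my_max_freq;

static int my_clamp_init(void)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	struct device *dev = get_cpu_device(0);

	if (!policy || !dev) {
		if (policy)
			cpufreq_cpu_put(policy);
		return -ENODEV;
	}
	my_min_freq = policy->cpuinfo.min_freq;
	my_max_freq = policy->cpuinfo.max_freq;
	cpufreq_cpu_put(policy);

	/* Start unclamped: cap at the hardware maximum. */
	return dev_pm_qos_add_request(dev, &my_qos_req,
				      DEV_PM_QOS_MAX_FREQUENCY, my_max_freq);
}

static int my_clamp_set(bool clamp)
{
	/* Updating the request replaces the previously requested limit. */
	return dev_pm_qos_update_request(&my_qos_req,
					 clamp ? my_min_freq : my_max_freq);
}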
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 73f5319295bc..c12cd809ab19 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -105,8 +105,14 @@ struct closure_syncer {
static void closure_sync_fn(struct closure *cl)
{
- cl->s->done = 1;
- wake_up_process(cl->s->task);
+ struct closure_syncer *s = cl->s;
+ struct task_struct *p;
+
+ rcu_read_lock();
+ p = READ_ONCE(s->task);
+ s->done = 1;
+ wake_up_process(p);
+ rcu_read_unlock();
}
void __sched __closure_sync(struct closure *cl)
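The closure fix addresses a use-after-free window: once s->done is published, the waiter in __closure_sync() may return and its on-stack closure_syncer disappears, so the task pointer must be read before the store and the wakeup kept inside an RCU read-side section. A generic sketch of that ordering, with hypothetical names standing in for the bcache structures.

#include <linux/rcupdate.h>
#include <linux/sched.h>

struct my_syncer {		/* illustrative stand-in for closure_syncer */
	struct task_struct *task;
	int done;
};

static void my_complete(struct my_syncer *s)
{
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);	/* grab the waiter before publishing done */
	s->done = 1;		/* after this store, *s may already be gone */
	wake_up_process(p);	/* RCU section keeps the task valid here */
	rcu_read_unlock();
}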
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8b123be05254..336f43910383 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -178,10 +178,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
while (size) {
struct keybuf_key *w;
unsigned int bytes = min(i->bytes, size);
- int err = copy_to_user(buf, i->buf, bytes);
- if (err)
- return err;
+ if (copy_to_user(buf, i->buf, bytes))
+ return -EFAULT;
ret += bytes;
buf += bytes;
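The bch_dump_read() fix hinges on copy_to_user() returning the number of bytes it failed to copy rather than an errno, so returning its value directly would hand a positive byte count back to the read() caller. A minimal sketch of the corrected convention; copy_chunk() is an illustrative helper.

#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative helper: copy one chunk out to userspace. */
static ssize_t copy_chunk(char __user *ubuf, const void *kbuf, size_t bytes)
{
	if (copy_to_user(ubuf, kbuf, bytes))
		return -EFAULT;	/* any uncopied bytes -> report -EFAULT */
	return bytes;
}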
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 26e374fbf57c..20ed838e9413 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
if (dc->io_disable) {
pr_err("I/O disabled on cached dev %s",
dc->backing_dev_name);
+ kfree(env[1]);
+ kfree(env[2]);
+ kfree(buf);
return -EIO;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 9f0826712845..627dcea0f5b6 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -23,24 +23,28 @@ static const char * const bch_cache_modes[] = {
"writethrough",
"writeback",
"writearound",
- "none"
+ "none",
+ NULL
};
/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
- "always"
+ "always",
+ NULL
};
static const char * const cache_replacement_policies[] = {
"lru",
"fifo",
- "random"
+ "random",
+ NULL
};
static const char * const error_actions[] = {
"unregister",
- "panic"
+ "panic",
+ NULL
};
write_attribute(attach);
@@ -338,7 +342,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_cache_mode) {
- v = sysfs_match_string(bch_cache_modes, buf);
+ v = __sysfs_match_string(bch_cache_modes, -1, buf);
if (v < 0)
return v;
@@ -349,7 +353,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_stop_when_cache_set_failed) {
- v = sysfs_match_string(bch_stop_on_failure_modes, buf);
+ v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
if (v < 0)
return v;
@@ -816,7 +820,7 @@ STORE(__bch_cache_set)
0, UINT_MAX);
if (attr == &sysfs_errors) {
- v = sysfs_match_string(error_actions, buf);
+ v = __sysfs_match_string(error_actions, -1, buf);
if (v < 0)
return v;
@@ -960,6 +964,7 @@ KTYPE(bch_cache_set_internal);
static int __bch_cache_cmp(const void *l, const void *r)
{
+ cond_resched();
return *((uint16_t *)r) - *((uint16_t *)l);
}
@@ -1088,7 +1093,7 @@ STORE(__bch_cache)
}
if (attr == &sysfs_cache_replacement_policy) {
- v = sysfs_match_string(cache_replacement_policies, buf);
+ v = __sysfs_match_string(cache_replacement_policies, -1, buf);
if (v < 0)
return v;
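The sysfs changes above add a NULL sentinel to each mode table and switch from sysfs_match_string(), which sizes the array with ARRAY_SIZE(), to __sysfs_match_string(array, -1, buf), which walks entries until the NULL terminator. A short sketch of the lookup; the table contents here are illustrative.

#include <linux/errno.h>
#include <linux/string.h>

static const char * const my_modes[] = {	/* illustrative table */
	"writethrough",
	"writeback",
	"none",
	NULL					/* sentinel required for n == -1 */
};

static int my_mode_lookup(const char *buf)
{
	/* -1: scan until the NULL terminator instead of a fixed length. */
	int v = __sysfs_match_string(my_modes, -1, buf);

	if (v < 0)
		return v;	/* -EINVAL when nothing matched */
	return v;		/* index of the matching mode string */
}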
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long freed;
c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (!dm_bufio_trylock(c))
+ if (sc->gfp_mask & __GFP_FS)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
return SHRINK_STOP;
freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
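The dm-bufio shrinker now distinguishes reclaim contexts: when __GFP_FS is set in sc->gfp_mask the callback can sleep on the client lock, and only GFP_NOFS-style callers keep the opportunistic trylock that bails out with SHRINK_STOP. A sketch of that scan-callback shape under illustrative names.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

/* Illustrative client: a lock protecting a cache, plus its shrinker. */
struct my_client {
	struct mutex lock;
	struct shrinker shrinker;
};

static unsigned long my_scan_locked(struct my_client *c, unsigned long nr)
{
	return 0;	/* stand-in for the real eviction loop */
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct my_client *c = container_of(shrink, struct my_client, shrinker);
	unsigned long freed;

	if (sc->gfp_mask & __GFP_FS)
		mutex_lock(&c->lock);		/* FS reclaim: safe to block */
	else if (!mutex_trylock(&c->lock))
		return SHRINK_STOP;		/* NOFS reclaim: avoid deadlock */

	freed = my_scan_locked(c, sc->nr_to_scan);
	mutex_unlock(&c->lock);
	return freed;
}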
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
unsigned long long badblock_count;
spinlock_t dust_lock;
unsigned int blksz;
+ int sect_per_block_shift;
unsigned int sect_per_block;
sector_t start;
bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
unsigned long flags;
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ bblock = dust_rb_search(&dd->badblocklist, block);
if (bblock == NULL) {
if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
}
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock->bb = block * dd->sect_per_block;
+ bblock->bb = block;
if (!dust_rb_insert(&dd->badblocklist, bblock)) {
if (!dd->quiet_mode) {
DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
unsigned long flags;
spin_lock_irqsave(&dd->dust_lock, flags);
- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ bblock = dust_rb_search(&dd->badblocklist, block);
if (bblock != NULL)
DMINFO("%s: block %llu found in badblocklist", __func__, block);
else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
int ret = DM_MAPIO_REMAPPED;
if (fail_read_on_bb) {
+ thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
ret = __dust_map_read(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
unsigned long flags;
if (fail_read_on_bb) {
+ thisblock >>= dd->sect_per_block_shift;
spin_lock_irqsave(&dd->dust_lock, flags);
__dust_map_write(dd, thisblock);
spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dd->blksz = blksz;
dd->start = tmp;
+ dd->sect_per_block_shift = __ffs(sect_per_block);
+
/*
* Whether to fail a read on a "bad" block.
* Defaults to false; enabled later by message.
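The dm-dust change stores block numbers (not sectors) in the badblock tree and converts incoming bio sectors with a single right shift; sect_per_block_shift is computed once with __ffs(), which is valid because sectors-per-block is a power of two. A small sketch of the conversion with illustrative values.

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative: 4096-byte blocks over 512-byte sectors => 8 sectors/block. */
static unsigned int sect_per_block = 8;
static int sect_per_block_shift;

static void init_shift(void)
{
	sect_per_block_shift = __ffs(sect_per_block);	/* __ffs(8) == 3 */
}

static sector_t sector_to_block(sector_t sector)
{
	/* Compare this against the block numbers stored in the tree. */
	return sector >> sect_per_block_shift;
}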
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
queue_work(ic->wait_wq, &dio->work);
return;
}
+ if (journal_read_pos != NOT_FOUND)
+ dio->range.n_sectors = ic->sectors_per_block;
wait_and_add_new_range(ic, &dio->range);
+ /*
+ * wait_and_add_new_range drops the spinlock, so the journal
+ * may have been changed arbitrarily. We need to recheck.
+ * To simplify the code, we restrict I/O size to just one block.
+ */
+ if (journal_read_pos != NOT_FOUND) {
+ sector_t next_sector;
+ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ if (unlikely(new_pos != journal_read_pos)) {
+ remove_range_unlocked(ic, &dio->range);
+ goto retry;
+ }
+ }
}
spin_unlock_irq(&ic->endio_wait.lock);
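The dm-integrity hunk illustrates a general rule: wait_and_add_new_range() drops the endio_wait spinlock, so the journal position sampled before the wait may be stale afterwards; the fix limits such I/O to one block and re-runs find_journal_node(), retrying if the position moved. A generic sketch of that "revalidate after a lock-dropping wait" shape; wait_for_range() is a hypothetical helper, not a real kernel API.

#include <linux/spinlock.h>

/* Hypothetical helper: sleeps for a free range, dropping @lock meanwhile. */
void wait_for_range(spinlock_t *lock);

static int shared_pos;			/* state protected by the lock */

static void lookup_then_wait(spinlock_t *lock)
{
	int pos;

retry:
	spin_lock(lock);
	pos = shared_pos;		/* sample state under the lock */

	wait_for_range(lock);		/* drops and re-acquires the lock */

	if (shared_pos != pos) {	/* someone moved it while we slept */
		spin_unlock(lock);
		goto retry;
	}
	/* ... safe to act on pos here ... */
	spin_unlock(lock);
}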
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
* no point in continuing.
*/
if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
- job->master_job->write_err)
+ job->master_job->write_err) {
+ job->write_err = job->master_job->write_err;
return -EIO;
+ }
io_job_start(job->kc->throttle);
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
else
job->read_err = 1;
push(&kc->complete_jobs, job);
+ wake(kc);
break;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*/
r = rs_prepare_reshape(rs);
if (r)
- return r;
+ goto bad;
/* Reshaping ain't recovery, so disable recovery */
rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index c9e44ac1f9a6..3f8577e2c13b 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -408,6 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
+ blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
@@ -562,7 +563,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
if (err)
goto out_kfree_tag_set;
- q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto out_tag_set;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index caaee8032afe..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+ sector_t start, sector_t len, void *data)
{
int blocksize = *(int *) data;
return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
- start, len);
+ start, len);
}
/* Check devices support synchronous DAX */
-static int device_synchronous(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- return dax_synchronous(dev->dax_dev);
+ return dev->dax_dev && dax_synchronous(dev->dax_dev);
}
bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn, int *blocksize)
+ iterate_devices_callout_fn iterate_fn, int *blocksize)
{
struct dm_target *ti;
unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ !ti->type->iterate_devices(ti, iterate_fn, blocksize))
return false;
}
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_event);
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
unsigned int l, n = 0, k = 0;
sector_t *node;
+ if (unlikely(sector >= dm_table_get_size(t)))
+ return &t->targets[t->num_targets];
+
for (l = 0; l < t->depth; l++) {
n = get_child(n, k);
node = get_node(t, l, n);
@@ -1921,7 +1924,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_synchronous, NULL))
+ if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
set_dax_synchronous(t->md->dax_dev);
}
else
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
@@ -34,7 +35,7 @@
* (1) Super block (1 block)
* (2) Chunk mapping table (nr_map_blocks)
* (3) Bitmap blocks (nr_bitmap_blocks)
- * All metadata blocks are stored in conventional zones, starting from the
+ * All metadata blocks are stored in conventional zones, starting from
* the first conventional zone found on disk.
*/
struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
* Lock/unlock metadata access. This is a "read" lock on a semaphore
* that prevents metadata flush from running while metadata are being
* modified. The actual metadata write mutual exclusion is achieved with
- * the map lock and zone styate management (active and reclaim state are
+ * the map lock and zone state management (active and reclaim state are
* mutually exclusive).
*/
void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return ERR_PTR(-EIO);
+
/* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
if (!mblk) {
/* Cache miss: read the block from disk */
mblk = dmz_get_mblock_slow(zmd, mblk_no);
- if (!mblk)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(mblk))
+ return mblk;
}
/* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
/*
* Issue a metadata block write BIO.
*/
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
- unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+ unsigned int set)
{
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
set_bit(DMZ_META_ERROR, &mblk->state);
- return;
+ return -ENOMEM;
}
set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
+
+ return 0;
}
/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
struct bio *bio;
int ret;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio)
return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
{
struct dmz_mblock *mblk;
struct blk_plug plug;
- int ret = 0;
+ int ret = 0, nr_mblks_submitted = 0;
/* Issue writes */
blk_start_plug(&plug);
- list_for_each_entry(mblk, write_list, link)
- dmz_write_mblock(zmd, mblk, set);
+ list_for_each_entry(mblk, write_list, link) {
+ ret = dmz_write_mblock(zmd, mblk, set);
+ if (ret)
+ break;
+ nr_mblks_submitted++;
+ }
blk_finish_plug(&plug);
/* Wait for completion */
list_for_each_entry(mblk, write_list, link) {
+ if (!nr_mblks_submitted)
+ break;
wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
ret = -EIO;
}
+ nr_mblks_submitted--;
}
/* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
dmz_lock_flush(zmd);
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ ret = -EIO;
+ goto out;
+ }
+
/* Get dirty blocks */
spin_lock(&zmd->mblk_lock);
list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
struct dm_zone *zone;
if (list_empty(&zmd->map_rnd_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_rnd_list, link) {
if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
return dzone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
struct dm_zone *zone;
if (list_empty(&zmd->map_seq_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_seq_list, link) {
if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
return zone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
@@ -1628,9 +1652,13 @@ again:
if (op != REQ_OP_WRITE)
goto out;
- /* Alloate a random zone */
+ /* Allocate a random zone */
dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!dzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
@@ -1725,9 +1753,13 @@ again:
if (bzone)
goto out;
- /* Alloate a random zone */
+ /* Allocate a random zone */
bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!bzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ bzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
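A recurring theme in the dm-zoned metadata changes is switching helpers from "NULL on any failure" to ERR_PTR() returns, so callers can tell a dying backing device (-EIO) from allocation failure (-ENOMEM) or a missing reclaim candidate (-EBUSY). A minimal sketch of that convention; get_block()/use_block() are illustrative, not dm-zoned functions.

#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative: allocate a cached block, distinguishing failure causes. */
static void *get_block(bool device_dying, size_t size)
{
	void *blk;

	if (device_dying)
		return ERR_PTR(-EIO);	/* backing device is going away */

	blk = kzalloc(size, GFP_NOIO);
	if (!blk)
		return ERR_PTR(-ENOMEM);
	return blk;
}

static int use_block(bool device_dying)
{
	void *blk = get_block(device_dying, 512);

	if (IS_ERR(blk))
		return PTR_ERR(blk);	/* caller sees -EIO or -ENOMEM, not a guess */
	kfree(blk);
	return 0;
}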
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
@@ -37,7 +38,7 @@ enum {
/*
* Number of seconds of target BIO inactivity to consider the target idle.
*/
-#define DMZ_IDLE_PERIOD (10UL * HZ)
+#define DMZ_IDLE_PERIOD (10UL * HZ)
/*
* Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
/*
* Find a candidate zone for reclaim and process it.
*/
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
struct dmz_metadata *zmd = zrc->metadata;
struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
/* Get a data zone */
dzone = dmz_get_zone_for_reclaim(zmd);
- if (!dzone)
- return;
+ if (IS_ERR(dzone))
+ return PTR_ERR(dzone);
start = jiffies;
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
out:
if (ret) {
dmz_unlock_zone_reclaim(dzone);
- return;
+ return ret;
}
- (void) dmz_flush_metadata(zrc->metadata);
+ ret = dmz_flush_metadata(zrc->metadata);
+ if (ret) {
+ dmz_dev_debug(zrc->dev,
+ "Metadata flush for zone %u failed, err %d\n",
+ dmz_id(zmd, rzone), ret);
+ return ret;
+ }
dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ return 0;
}
/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
return false;
/*
- * If the percentage of unmappped random zones is low,
+ * If the percentage of unmapped random zones is low,
* reclaim even if the target is busy.
*/
return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
struct dmz_metadata *zmd = zrc->metadata;
unsigned int nr_rnd, nr_unmap_rnd;
unsigned int p_unmap_rnd;
+ int ret;
+
+ if (dmz_bdev_is_dying(zrc->dev))
+ return;
if (!dmz_should_reclaim(zrc)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
(dmz_target_idle(zrc) ? "Idle" : "Busy"),
p_unmap_rnd, nr_unmap_rnd, nr_rnd);
- dmz_reclaim(zrc);
+ ret = dmz_do_reclaim(zrc);
+ if (ret) {
+ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+ if (ret == -EIO)
+ /*
+ * LLD might be performing some error handling sequence
+ * at the underlying device. To not interfere, do not
+ * attempt to schedule the next reclaim run immediately.
+ */
+ return;
+ }
dmz_schedule_reclaim(zrc);
}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
refcount_inc(&bioctx->ref);
generic_make_request(clone);
+ if (clone->bi_status == BLK_STS_IOERR)
+ return -EIO;
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
/* Get the buffer zone. One will be allocated if needed */
bzone = dmz_get_chunk_buffer(zmd, zone);
- if (!bzone)
- return -ENOSPC;
+ if (IS_ERR(bzone))
+ return PTR_ERR(bzone);
if (dmz_is_readonly(bzone))
return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
dmz_lock_metadata(zmd);
+ if (dmz->dev->flags & DMZ_BDEV_DYING) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
+ if (ret)
+ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
/* Process queued flush requests */
while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
* Get a chunk work and start it to process a new BIO.
* If the BIO chunk has no work yet, create one.
*/
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
struct dm_chunk_work *cw;
+ int ret = 0;
mutex_lock(&dmz->chunk_lock);
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
if (!cw) {
- int ret;
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
- if (!cw)
+ if (unlikely(!cw)) {
+ ret = -ENOMEM;
goto out;
+ }
INIT_WORK(&cw->work, dmz_chunk_work);
refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
if (unlikely(ret)) {
kfree(cw);
- cw = NULL;
goto out;
}
}
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio_list_add(&cw->bio_list, bio);
dmz_get_chunk_work(cw);
+ dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
mutex_unlock(&dmz->chunk_lock);
+ return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+ disk = dmz_dev->bdev->bd_disk;
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ } else if (disk->fops->check_events) {
+ if (disk->fops->check_events(disk, 0) &
+ DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+ }
+ }
+
+ return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
+ int ret;
+
+ if (dmz_bdev_is_dying(dmz->dev))
+ return DM_MAPIO_KILL;
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
/* Now ready to handle this BIO */
- dmz_reclaim_bio_acc(dmz->reclaim);
- dmz_queue_chunk_work(dmz, bio);
+ ret = dmz_queue_chunk_work(dmz, bio);
+ if (ret) {
+ dmz_dev_debug(dmz->dev,
+ "BIO op %d, can't process chunk %llu, err %i\n",
+ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+ ret);
+ return DM_MAPIO_REQUEUE;
+ }
return DM_MAPIO_SUBMITTED;
}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
+ if (dmz_bdev_is_dying(dmz->dev))
+ return -ENODEV;
+
*bdev = dmz->dev->bdev;
return 0;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
@@ -56,6 +57,8 @@ struct dmz_dev {
unsigned int nr_zones;
+ unsigned int flags;
+
sector_t zone_nr_sectors;
unsigned int zone_nr_sectors_shift;
@@ -67,6 +70,9 @@ struct dmz_dev {
(dev)->zone_nr_sectors_shift)
#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+/* Device flags. */
+#define DMZ_BDEV_DYING (1 << 0)
+
/*
* Zone descriptor.
*/
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
#endif /* DM_ZONED_H */
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 7354466ddc90..c766c559d36d 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -258,6 +258,11 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio_sector < start_sector))
goto out_of_bounds;
+ if (unlikely(is_mddev_broken(tmp_dev->rdev, "linear"))) {
+ bio_io_error(bio);
+ return true;
+ }
+
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to split it */
struct bio *split = bio_split(bio, end_sector - bio_sector,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 24638ccedce4..1be7abeb24fd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -376,6 +376,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
struct mddev *mddev = q->queuedata;
unsigned int sectors;
+ if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
+ }
+
blk_queue_split(q, &bio);
if (mddev == NULL || mddev->pers == NULL) {
@@ -1232,6 +1237,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
+ if (mddev->level == 0)
+ mddev->layout = -1;
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
@@ -1647,6 +1654,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
}
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
+ sb->level != 0)
+ return -EINVAL;
+
if (!refdev) {
ret = 1;
} else {
@@ -1757,6 +1768,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
+ if (mddev->level == 0 &&
+ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
+ mddev->layout = -1;
+
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
set_bit(MD_HAS_JOURNAL, &mddev->flags);
@@ -1826,8 +1841,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
if (!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_BITMAP))
rdev->saved_raid_disk = -1;
- } else
- set_bit(In_sync, &rdev->flags);
+ } else {
+ /*
+ * If the array is FROZEN, then the device can't
+ * be in_sync with rest of array.
+ */
+ if (!test_bit(MD_RECOVERY_FROZEN,
+ &mddev->recovery))
+ set_bit(In_sync, &rdev->flags);
+ }
rdev->raid_disk = role;
break;
}
@@ -3664,11 +3686,7 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
return -EINVAL;
if (decimals < 0)
decimals = 0;
- while (decimals < scale) {
- result *= 10;
- decimals ++;
- }
- *res = result;
+ *res = result * int_pow(10, scale - decimals);
return 0;
}
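The strict_strtoul_scaled() hunk replaces the hand-rolled multiply-by-ten loop with a single int_pow() call; since decimals has already been clamped to at most scale, result * int_pow(10, scale - decimals) yields the same value the loop produced. Sketched in isolation with an illustrative helper name.

#include <linux/kernel.h>	/* int_pow() */

/* Illustrative: scale a parsed value by 10^(scale - decimals). */
static unsigned long scale_result(unsigned long result, int decimals, int scale)
{
	if (decimals < 0)
		decimals = 0;
	return result * int_pow(10, scale - decimals);
}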
@@ -4155,12 +4173,17 @@ __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
* active-idle
* like active, but no writes have been seen for a while (100msec).
*
+ * broken
+ * RAID0/LINEAR-only: same as clean, but array is missing a member.
+ * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
+ * when a member is gone, so this state will at least alert the
+ * user that something is wrong.
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
- write_pending, active_idle, bad_word};
+ write_pending, active_idle, broken, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
- "write-pending", "active-idle", NULL };
+ "write-pending", "active-idle", "broken", NULL };
static int match_word(const char *word, char **list)
{
@@ -4176,7 +4199,7 @@ array_state_show(struct mddev *mddev, char *page)
{
enum array_state st = inactive;
- if (mddev->pers)
+ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
switch(mddev->ro) {
case 1:
st = readonly;
@@ -4196,7 +4219,10 @@ array_state_show(struct mddev *mddev, char *page)
st = active;
spin_unlock(&mddev->lock);
}
- else {
+
+ if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
+ st = broken;
+ } else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
@@ -4310,6 +4336,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break;
case write_pending:
case active_idle:
+ case broken:
/* these cannot be set */
break;
}
@@ -5182,6 +5209,34 @@ static struct md_sysfs_entry md_consistency_policy =
__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
consistency_policy_store);
+static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%d\n", mddev->fail_last_dev);
+}
+
+/*
+ * Setting fail_last_dev to true allows the last device to be forcibly removed
+ * from RAID1/RAID10.
+ */
+static ssize_t
+fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int ret;
+ bool value;
+
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+
+ if (value != mddev->fail_last_dev)
+ mddev->fail_last_dev = value;
+
+ return len;
+}
+static struct md_sysfs_entry md_fail_last_dev =
+__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
+ fail_last_dev_store);
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
@@ -5198,6 +5253,7 @@ static struct attribute *md_default_attrs[] = {
&md_array_size.attr,
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
+ &md_fail_last_dev.attr,
NULL,
};
@@ -5744,9 +5800,6 @@ int md_run(struct mddev *mddev)
md_update_sb(mddev, 0);
md_new_event(mddev);
- sysfs_notify_dirent_safe(mddev->sysfs_state);
- sysfs_notify_dirent_safe(mddev->sysfs_action);
- sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
bitmap_abort:
@@ -5767,6 +5820,7 @@ static int do_md_run(struct mddev *mddev)
{
int err;
+ set_bit(MD_NOT_READY, &mddev->flags);
err = md_run(mddev);
if (err)
goto out;
@@ -5787,9 +5841,14 @@ static int do_md_run(struct mddev *mddev)
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ clear_bit(MD_NOT_READY, &mddev->flags);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
+ sysfs_notify(&mddev->kobj, NULL, "degraded");
out:
+ clear_bit(MD_NOT_READY, &mddev->flags);
return err;
}
@@ -6849,6 +6908,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->external = 0;
mddev->layout = info->layout;
+ if (mddev->level == 0)
+ /* Cannot trust RAID0 layout info here */
+ mddev->layout = -1;
mddev->chunk_sectors = info->chunk_size >> 9;
if (mddev->persistent) {
@@ -8900,6 +8962,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ bool try_set_sync = mddev->safemode != 0;
if (!mddev->external && mddev->safemode == 1)
mddev->safemode = 0;
@@ -8945,7 +9008,7 @@ void md_check_recovery(struct mddev *mddev)
}
}
- if (!mddev->external && !mddev->in_sync) {
+ if (try_set_sync && !mddev->external && !mddev->in_sync) {
spin_lock(&mddev->lock);
set_in_sync(mddev);
spin_unlock(&mddev->lock);
@@ -9043,7 +9106,8 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+ mddev->degraded != mddev->raid_disks) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10f98200e2f8..c5e3ff398b59 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -248,6 +248,12 @@ enum mddev_flags {
MD_UPDATING_SB, /* md_check_recovery is updating the metadata
* without explicitly holding reconfig_mutex.
*/
+ MD_NOT_READY, /* do_md_run() is active, so 'array_state'
+ * must not report that array is ready yet
+ */
+ MD_BROKEN, /* This is used in RAID-0/LINEAR only, to stop
+ * I/O in case an array member is gone/failed.
+ */
};
enum mddev_sb_flags {
@@ -487,6 +493,7 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
bool has_superblocks:1;
+ bool fail_last_dev:1;
};
enum recovery_flags {
@@ -735,6 +742,19 @@ extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
+static inline bool is_mddev_broken(struct md_rdev *rdev, const char *md_type)
+{
+ int flags = rdev->bdev->bd_disk->flags;
+
+ if (!(flags & GENHD_FL_UP)) {
+ if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags))
+ pr_warn("md: %s: %s array has a missing/failed member\n",
+ mdname(rdev->mddev), md_type);
+ return true;
+ }
+ return false;
+}
+
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
int faulty = test_bit(Faulty, &rdev->flags);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
new_parent = shadow_current(s);
+ pn = dm_block_data(new_parent);
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+
+ /* create & init the left block */
r = new_block(s->info, &left);
if (r < 0)
return r;
+ ln = dm_block_data(left);
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+ /* create & init the right block */
r = new_block(s->info, &right);
if (r < 0) {
unlock_block(s->info, left);
return r;
}
- pn = dm_block_data(new_parent);
- ln = dm_block_data(left);
rn = dm_block_data(right);
-
- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
- ln->header.flags = pn->header.flags;
- ln->header.nr_entries = cpu_to_le32(nr_left);
- ln->header.max_entries = pn->header.max_entries;
- ln->header.value_size = pn->header.value_size;
-
rn->header.flags = pn->header.flags;
rn->header.nr_entries = cpu_to_le32(nr_right);
rn->header.max_entries = pn->header.max_entries;
rn->header.value_size = pn->header.value_size;
-
- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
- sizeof(__le64) : s->info->value_type.size;
- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
nr_right * size);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
}
if (smm->recursion_count == 1)
- apply_bops(smm);
+ r = apply_bops(smm);
smm->recursion_count--;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index bf5cf184a260..f61693e59684 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -19,6 +19,9 @@
#include "raid0.h"
#include "raid5.h"
+static int default_layout = 0;
+module_param(default_layout, int, 0644);
+
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
(1L << MD_JOURNAL_CLEAN) | \
@@ -139,6 +142,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
}
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
+
+ if (conf->nr_strip_zones == 1) {
+ conf->layout = RAID0_ORIG_LAYOUT;
+ } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
+ mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = mddev->layout;
+ } else if (default_layout == RAID0_ORIG_LAYOUT ||
+ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
+ conf->layout = default_layout;
+ } else {
+ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
+ mdname(mddev));
+ pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+ err = -ENOTSUPP;
+ goto abort;
+ }
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
@@ -547,10 +566,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
+ struct r0conf *conf = mddev->private;
struct strip_zone *zone;
struct md_rdev *tmp_dev;
sector_t bio_sector;
sector_t sector;
+ sector_t orig_sector;
unsigned chunk_sects;
unsigned sectors;
@@ -584,8 +605,26 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
bio = split;
}
+ orig_sector = sector;
zone = find_zone(mddev->private, &sector);
- tmp_dev = map_sector(mddev, zone, sector, &sector);
+ switch (conf->layout) {
+ case RAID0_ORIG_LAYOUT:
+ tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
+ break;
+ case RAID0_ALT_MULTIZONE_LAYOUT:
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ break;
+ default:
+ WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
+ bio_io_error(bio);
+ return true;
+ }
+
+ if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
+ bio_io_error(bio);
+ return true;
+ }
+
bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 540e65d92642..3816e5477db1 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -8,11 +8,25 @@ struct strip_zone {
int nb_dev; /* # of devices attached to the zone */
};
+/* Linux 3.14 (20d0189b101) made an unintended change to
+ * the RAID0 layout for multi-zone arrays (where devices aren't all
+ * the same size).
+ * RAID0_ORIG_LAYOUT restores the original layout
+ * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
+ * The layouts are identical when there is only one zone (all
+ * devices the same size).
+ */
+
+enum r0layout {
+ RAID0_ORIG_LAYOUT = 1,
+ RAID0_ALT_MULTIZONE_LAYOUT = 2,
+};
struct r0conf {
struct strip_zone *strip_zone;
struct md_rdev **devlist; /* lists of rdevs, pointed to
* by strip_zone->dev */
int nr_strip_zones;
+ enum r0layout layout;
};
#endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 34e26834ad28..0466ee2453b4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -447,19 +447,21 @@ static void raid1_end_write_request(struct bio *bio)
/* We never try FailFast to WriteMostly devices */
!test_bit(WriteMostly, &rdev->flags)) {
md_error(r1_bio->mddev, rdev);
- if (!test_bit(Faulty, &rdev->flags))
- /* This is the only remaining device,
- * We need to retry the write without
- * FailFast
- */
- set_bit(R1BIO_WriteError, &r1_bio->state);
- else {
- /* Finished with this branch */
- r1_bio->bios[mirror] = NULL;
- to_put = bio;
- }
- } else
+ }
+
+ /*
+ * When the device is faulty, it is not necessary to
+ * handle write error.
+ * For failfast, this is the only remaining device,
+ * We need to retry the write without FailFast.
+ */
+ if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+ }
} else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
@@ -872,8 +874,11 @@ static void flush_pending_writes(struct r1conf *conf)
* backgroup IO calls must call raise_barrier. Once that returns
* there is no normal IO happeing. It must arrange to call
* lower_barrier when the particular background IO completes.
+ *
+ * If resync/recovery is interrupted, returns -EINTR;
+ * Otherwise, returns 0.
*/
-static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
+static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
int idx = sector_to_idx(sector_nr);
@@ -1612,12 +1617,12 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
/*
* If it is not operational, then we have already marked it as dead
- * else if it is the last working disks, ignore the error, let the
- * next level up know.
+ * else if it is the last working disks with "fail_last_dev == false",
+ * ignore the error, let the next level up know.
* else mark the drive as failed
*/
spin_lock_irqsave(&conf->device_lock, flags);
- if (test_bit(In_sync, &rdev->flags)
+ if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
&& (conf->raid_disks - mddev->degraded) == 1) {
/*
* Don't fail the drive, act as though we were just a
@@ -1901,6 +1906,22 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
} while (sectors_to_go > 0);
}
+static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
+{
+ if (atomic_dec_and_test(&r1_bio->remaining)) {
+ struct mddev *mddev = r1_bio->mddev;
+ int s = r1_bio->sectors;
+
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ put_buf(r1_bio);
+ md_done_sync(mddev, s, uptodate);
+ }
+ }
+}
+
static void end_sync_write(struct bio *bio)
{
int uptodate = !bio->bi_status;
@@ -1927,16 +1948,7 @@ static void end_sync_write(struct bio *bio)
)
set_bit(R1BIO_MadeGood, &r1_bio->state);
- if (atomic_dec_and_test(&r1_bio->remaining)) {
- int s = r1_bio->sectors;
- if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
- test_bit(R1BIO_WriteError, &r1_bio->state))
- reschedule_retry(r1_bio);
- else {
- put_buf(r1_bio);
- md_done_sync(mddev, s, uptodate);
- }
- }
+ put_sync_write_buf(r1_bio, uptodate);
}
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
@@ -2219,17 +2231,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
generic_make_request(wbio);
}
- if (atomic_dec_and_test(&r1_bio->remaining)) {
- /* if we're here, all write(s) have completed, so clean up */
- int s = r1_bio->sectors;
- if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
- test_bit(R1BIO_WriteError, &r1_bio->state))
- reschedule_retry(r1_bio);
- else {
- put_buf(r1_bio);
- md_done_sync(mddev, s, 1);
- }
- }
+ put_sync_write_buf(r1_bio, 1);
}
/*
@@ -3127,6 +3129,13 @@ static int raid1_run(struct mddev *mddev)
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
mddev->degraded++;
+ /*
+ * RAID1 needs at least one active disk
+ */
+ if (conf->raid_disks - mddev->degraded < 1) {
+ ret = -EINVAL;
+ goto abort;
+ }
if (conf->raid_disks - mddev->degraded == 1)
mddev->recovery_cp = MaxSector;
@@ -3160,8 +3169,12 @@ static int raid1_run(struct mddev *mddev)
ret = md_integrity_register(mddev);
if (ret) {
md_unregister_thread(&mddev->thread);
- raid1_free(mddev, conf);
+ goto abort;
}
+ return 0;
+
+abort:
+ raid1_free(mddev, conf);
return ret;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8a1354a08a1a..299c7b1c9718 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -465,19 +465,21 @@ static void raid10_end_write_request(struct bio *bio)
if (test_bit(FailFast, &rdev->flags) &&
(bio->bi_opf & MD_FAILFAST)) {
md_error(rdev->mddev, rdev);
- if (!test_bit(Faulty, &rdev->flags))
- /* This is the only remaining device,
- * We need to retry the write without
- * FailFast
- */
- set_bit(R10BIO_WriteError, &r10_bio->state);
- else {
- r10_bio->devs[slot].bio = NULL;
- to_put = bio;
- dec_rdev = 1;
- }
- } else
+ }
+
+ /*
+ * When the device is faulty, it is not necessary to
+ * handle write error.
+ * For failfast, this is the only remaining device,
+ * We need to retry the write without FailFast.
+ */
+ if (!test_bit(Faulty, &rdev->flags))
set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+ }
}
} else {
/*
@@ -1638,12 +1640,12 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
/*
* If it is not operational, then we have already marked it as dead
- * else if it is the last working disks, ignore the error, let the
- * next level up know.
+ * else if it is the last working disk with "fail_last_dev == false",
+ * ignore the error, let the next level up know.
* else mark the drive as failed
*/
spin_lock_irqsave(&conf->device_lock, flags);
- if (test_bit(In_sync, &rdev->flags)
+ if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
&& !enough(conf, rdev->raid_disk)) {
/*
* Don't fail the drive, just return an IO error.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3de4e13bde98..223e97ab27e6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2526,7 +2526,8 @@ static void raid5_end_read_request(struct bio * bi)
int set_bad = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
- atomic_inc(&rdev->read_errors);
+ if (!(bi->bi_status == BLK_STS_PROTECTION))
+ atomic_inc(&rdev->read_errors);
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
pr_warn_ratelimited(
"md/raid:%s: read error on replacement device (sector %llu on %s).\n",
@@ -2549,16 +2550,24 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)s,
bdn);
} else if (atomic_read(&rdev->read_errors)
- > conf->max_nr_stripes)
- pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
- mdname(conf->mddev), bdn);
- else
+ > conf->max_nr_stripes) {
+ if (!test_bit(Faulty, &rdev->flags)) {
+ pr_warn("md/raid:%s: %d read_errors > %d stripes\n",
+ mdname(conf->mddev),
+ atomic_read(&rdev->read_errors),
+ conf->max_nr_stripes);
+ pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
+ mdname(conf->mddev), bdn);
+ }
+ } else
retry = 1;
if (set_bad && test_bit(In_sync, &rdev->flags)
&& !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
retry = 1;
if (retry)
- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ if (sh->qd_idx >= 0 && sh->pd_idx == i)
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
} else
@@ -4612,7 +4621,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_FULL_WRITE) |
(1 << STRIPE_BIOFILL_RUN) |
(1 << STRIPE_COMPUTE_RUN) |
- (1 << STRIPE_OPS_REQ_PENDING) |
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
(1 << STRIPE_BATCH_ERR) |
@@ -5491,7 +5499,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
return;
logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
- last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
+ last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
@@ -5718,7 +5726,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
do_flush = false;
}
- set_bit(STRIPE_HANDLE, &sh->state);
+ if (!sh->batch_head)
+ set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
(bi->bi_opf & REQ_SYNC) &&
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index cf991f13403e..f90e0704bed9 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -357,7 +357,6 @@ enum {
STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
STRIPE_BIOFILL_RUN,
STRIPE_COMPUTE_RUN,
- STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
STRIPE_ON_RELEASE_LIST,
@@ -493,9 +492,7 @@ struct disk_info {
*/
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
- int sectors = bio_sectors(bio);
-
- if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+ if (bio_end_sector(bio) < sector + STRIPE_SECTORS)
return bio->bi_next;
else
return NULL;
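Both the make_discard_request() and r5_next_bio() changes lean on bio_end_sector(). Paraphrasing the helpers from include/linux/bio.h (a sketch, not copied verbatim; details may differ by kernel version):

/* Paraphrased sketch of the bio helpers used above. */
#define bio_sectors(bio)     ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)  ((bio)->bi_iter.bi_sector + bio_sectors(bio))

So bi_iter.bi_sector + (bi_iter.bi_size >> 9) and bio_end_sector(bi) name the same sector, and the replacements are behavior-preserving cleanups.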
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 21cd9c02960b..b36a41332867 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -190,7 +190,7 @@ config MEDIA_SUBDRV_AUTOSELECT
depends on HAS_IOMEM
select I2C
select I2C_MUX
- default y
+ default y if !EMBEDDED
help
By default, a media driver auto-selects all possible ancillary
devices such as tuners, sensors, video encoders/decoders and
@@ -207,6 +207,11 @@ config MEDIA_SUBDRV_AUTOSELECT
If unsure say Y.
+config MEDIA_HIDE_ANCILLARY_SUBDRV
+ bool
+ depends on MEDIA_SUBDRV_AUTOSELECT && !COMPILE_TEST && !EXPERT
+ default y
+
config MEDIA_ATTACH
bool
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 451c61bde4d4..5ef7daeb8cbd 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1614,6 +1614,9 @@ EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
void cec_s_conn_info(struct cec_adapter *adap,
const struct cec_connector_info *conn_info)
{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+
if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
return;
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 52a867bde15f..4d82a5522072 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -218,6 +218,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
mutex_lock(&n->lock);
n->callback = NULL;
+ n->cec_adap->notifier = NULL;
+ n->cec_adap = NULL;
mutex_unlock(&n->lock);
cec_notifier_put(n);
}
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index 4c399a42e874..d16122039b0c 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -20,62 +20,52 @@ MODULE_PARM_DESC(max_memory, "maximum memory usage for capture buffers (default:
/* format descriptions for capture and preview */
static struct saa7146_format formats[] = {
{
- .name = "RGB-8 (3-3-2)",
.pixelformat = V4L2_PIX_FMT_RGB332,
.trans = RGB08_COMPOSED,
.depth = 8,
.flags = 0,
}, {
- .name = "RGB-16 (5/B-6/G-5/R)",
.pixelformat = V4L2_PIX_FMT_RGB565,
.trans = RGB16_COMPOSED,
.depth = 16,
.flags = 0,
}, {
- .name = "RGB-24 (B-G-R)",
.pixelformat = V4L2_PIX_FMT_BGR24,
.trans = RGB24_COMPOSED,
.depth = 24,
.flags = 0,
}, {
- .name = "RGB-32 (B-G-R)",
.pixelformat = V4L2_PIX_FMT_BGR32,
.trans = RGB32_COMPOSED,
.depth = 32,
.flags = 0,
}, {
- .name = "RGB-32 (R-G-B)",
.pixelformat = V4L2_PIX_FMT_RGB32,
.trans = RGB32_COMPOSED,
.depth = 32,
.flags = 0,
.swap = 0x2,
}, {
- .name = "Greyscale-8",
.pixelformat = V4L2_PIX_FMT_GREY,
.trans = Y8,
.depth = 8,
.flags = 0,
}, {
- .name = "YUV 4:2:2 planar (Y-Cb-Cr)",
.pixelformat = V4L2_PIX_FMT_YUV422P,
.trans = YUV422_DECOMPOSED,
.depth = 16,
.flags = FORMAT_BYTE_SWAP|FORMAT_IS_PLANAR,
}, {
- .name = "YVU 4:2:0 planar (Y-Cb-Cr)",
.pixelformat = V4L2_PIX_FMT_YVU420,
.trans = YUV420_DECOMPOSED,
.depth = 12,
.flags = FORMAT_BYTE_SWAP|FORMAT_IS_PLANAR,
}, {
- .name = "YUV 4:2:0 planar (Y-Cb-Cr)",
.pixelformat = V4L2_PIX_FMT_YUV420,
.trans = YUV420_DECOMPOSED,
.depth = 12,
.flags = FORMAT_IS_PLANAR,
}, {
- .name = "YUV 4:2:2 (U-Y-V-Y)",
.pixelformat = V4L2_PIX_FMT_UYVY,
.trans = YUV422_COMPOSED,
.depth = 16,
@@ -147,10 +137,10 @@ int saa7146_start_preview(struct saa7146_fh *fh)
}
vv->ov.win = fmt.fmt.win;
- DEB_D("%dx%d+%d+%d %s field=%s\n",
+ DEB_D("%dx%d+%d+%d 0x%08x field=%s\n",
vv->ov.win.w.width, vv->ov.win.w.height,
vv->ov.win.w.left, vv->ov.win.w.top,
- vv->ov_fmt->name, v4l2_field_names[vv->ov.win.field]);
+ vv->ov_fmt->pixelformat, v4l2_field_names[vv->ov.win.field]);
if (0 != (ret = saa7146_enable_overlay(fh))) {
DEB_D("enabling overlay failed: %d\n", ret);
@@ -515,8 +505,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtd
{
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
- strscpy((char *)f->description, formats[f->index].name,
- sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
}
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 07e0629af8ed..50f1e0b28b25 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -217,9 +217,21 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_RGBX444:
+ case V4L2_PIX_FMT_RGBA444:
+ case V4L2_PIX_FMT_XBGR444:
+ case V4L2_PIX_FMT_ABGR444:
+ case V4L2_PIX_FMT_BGRX444:
+ case V4L2_PIX_FMT_BGRA444:
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGBX555:
+ case V4L2_PIX_FMT_RGBA555:
+ case V4L2_PIX_FMT_XBGR555:
+ case V4L2_PIX_FMT_ABGR555:
+ case V4L2_PIX_FMT_BGRX555:
+ case V4L2_PIX_FMT_BGRA555:
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
case V4L2_PIX_FMT_ARGB555X:
@@ -232,6 +244,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_BGRX32:
+ case V4L2_PIX_FMT_RGBA32:
+ case V4L2_PIX_FMT_BGRA32:
tpg->color_enc = TGP_COLOR_ENC_RGB;
break;
case V4L2_PIX_FMT_GREY:
@@ -343,9 +359,21 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_RGBX444:
+ case V4L2_PIX_FMT_RGBA444:
+ case V4L2_PIX_FMT_XBGR444:
+ case V4L2_PIX_FMT_ABGR444:
+ case V4L2_PIX_FMT_BGRX444:
+ case V4L2_PIX_FMT_BGRA444:
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGBX555:
+ case V4L2_PIX_FMT_RGBA555:
+ case V4L2_PIX_FMT_XBGR555:
+ case V4L2_PIX_FMT_ABGR555:
+ case V4L2_PIX_FMT_BGRX555:
+ case V4L2_PIX_FMT_BGRA555:
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
case V4L2_PIX_FMT_ARGB555X:
@@ -375,6 +403,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_BGRX32:
+ case V4L2_PIX_FMT_RGBA32:
+ case V4L2_PIX_FMT_BGRA32:
case V4L2_PIX_FMT_YUV32:
case V4L2_PIX_FMT_AYUV32:
case V4L2_PIX_FMT_XYUV32:
@@ -1007,6 +1039,12 @@ static void precalculate_color(struct tpg_data *tpg, int k)
case V4L2_PIX_FMT_RGB444:
case V4L2_PIX_FMT_XRGB444:
case V4L2_PIX_FMT_ARGB444:
+ case V4L2_PIX_FMT_RGBX444:
+ case V4L2_PIX_FMT_RGBA444:
+ case V4L2_PIX_FMT_XBGR444:
+ case V4L2_PIX_FMT_ABGR444:
+ case V4L2_PIX_FMT_BGRX444:
+ case V4L2_PIX_FMT_BGRA444:
r >>= 8;
g >>= 8;
b >>= 8;
@@ -1014,6 +1052,12 @@ static void precalculate_color(struct tpg_data *tpg, int k)
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGBX555:
+ case V4L2_PIX_FMT_RGBA555:
+ case V4L2_PIX_FMT_XBGR555:
+ case V4L2_PIX_FMT_ABGR555:
+ case V4L2_PIX_FMT_BGRX555:
+ case V4L2_PIX_FMT_BGRA555:
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
case V4L2_PIX_FMT_ARGB555X:
@@ -1237,6 +1281,27 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset] = (g_u_s << 4) | b_v;
buf[0][offset + 1] = (alpha & 0xf0) | r_y_h;
break;
+ case V4L2_PIX_FMT_RGBX444:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_RGBA444:
+ buf[0][offset] = (b_v << 4) | (alpha >> 4);
+ buf[0][offset + 1] = (r_y_h << 4) | g_u_s;
+ break;
+ case V4L2_PIX_FMT_XBGR444:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ABGR444:
+ buf[0][offset] = (g_u_s << 4) | r_y_h;
+ buf[0][offset + 1] = (alpha & 0xf0) | b_v;
+ break;
+ case V4L2_PIX_FMT_BGRX444:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_BGRA444:
+ buf[0][offset] = (r_y_h << 4) | (alpha >> 4);
+ buf[0][offset + 1] = (b_v << 4) | g_u_s;
+ break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_XRGB555:
alpha = 0;
@@ -1247,6 +1312,30 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 1] = (alpha & 0x80) | (r_y_h << 2)
| (g_u_s >> 3);
break;
+ case V4L2_PIX_FMT_RGBX555:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_RGBA555:
+ buf[0][offset] = (g_u_s << 6) | (b_v << 1) |
+ ((alpha & 0x80) >> 7);
+ buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 2);
+ break;
+ case V4L2_PIX_FMT_XBGR555:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ABGR555:
+ buf[0][offset] = (g_u_s << 5) | r_y_h;
+ buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2)
+ | (g_u_s >> 3);
+ break;
+ case V4L2_PIX_FMT_BGRX555:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_BGRA555:
+ buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) |
+ ((alpha & 0x80) >> 7);
+ buf[0][offset + 1] = (b_v << 3) | (g_u_s >> 2);
+ break;
case V4L2_PIX_FMT_RGB555X:
case V4L2_PIX_FMT_XRGB555X:
alpha = 0;
@@ -1286,6 +1375,15 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 2] = g_u_s;
buf[0][offset + 3] = b_v;
break;
+ case V4L2_PIX_FMT_RGBX32:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_RGBA32:
+ buf[0][offset] = r_y_h;
+ buf[0][offset + 1] = g_u_s;
+ buf[0][offset + 2] = b_v;
+ buf[0][offset + 3] = alpha;
+ break;
case V4L2_PIX_FMT_BGR32:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_VUYX32:
@@ -1298,6 +1396,15 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset + 2] = r_y_h;
buf[0][offset + 3] = alpha;
break;
+ case V4L2_PIX_FMT_BGRX32:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_BGRA32:
+ buf[0][offset] = alpha;
+ buf[0][offset + 1] = b_v;
+ buf[0][offset + 2] = g_u_s;
+ buf[0][offset + 3] = r_y_h;
+ break;
case V4L2_PIX_FMT_SBGGR8:
buf[0][offset] = odd ? g_u_s : b_v;
buf[1][offset] = odd ? r_y_h : g_u_s;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 7d77e4d30c8a..44cd0e530bbd 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -267,8 +267,14 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
/* release the scatterlist cache */
if (attach->dma_dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ /*
+ * Cache sync can be skipped here, as the vb2_dc memory is
+ * allocated from device coherent memory, which means the
+ * memory locations do not require any explicit cache
+ * maintenance prior to or after being used by the device.
+ */
+ dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
@@ -293,14 +299,17 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
attach->dma_dir = DMA_NONE;
}
- /* mapping to the client with new direction */
- sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir);
+ /*
+ * Map to the client with the new direction; no cache sync is
+ * required, see the comment in vb2_dc_dmabuf_ops_detach().
+ */
+ sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
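The vb2_dc hunks switch to the *_attrs mapping variants solely to pass DMA_ATTR_SKIP_CPU_SYNC. A hedged sketch of that pairing, with the device and scatterlist setup omitted (not the driver's literal code):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch: map/unmap a scatterlist without CPU cache maintenance.
 * Only valid when the backing memory is coherent for the device,
 * as is the case for vb2_dc buffers.
 */
static int map_coherent_sgt(struct device *dev, struct sg_table *sgt,
			    enum dma_data_direction dir)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
	return nents ? 0 : -EIO;
}

static void unmap_coherent_sgt(struct device *dev, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
}

Skipping the CPU sync is safe here only because the buffers come from device-coherent memory, as the added comment in the hunk explains.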
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 40d76eb4c2fe..5a9ba3846f0a 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -872,17 +872,19 @@ EXPORT_SYMBOL_GPL(vb2_queue_release);
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- __poll_t res = 0;
+ __poll_t res;
+
+ res = vb2_core_poll(q, file, wait);
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
- res = EPOLLPRI;
+ res |= EPOLLPRI;
}
- return res | vb2_core_poll(q, file, wait);
+ return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 209186c5cd9b..06ea30a689d7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -152,6 +152,9 @@ static void dvb_frontend_free(struct kref *ref)
static void dvb_frontend_put(struct dvb_frontend *fe)
{
+ /* call detach before dropping the reference count */
+ if (fe->ops.detach)
+ fe->ops.detach(fe);
/*
* Check if the frontend was registered, as otherwise
* kref was not initialized yet.
@@ -3040,7 +3043,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
dvb_frontend_invoke_release(fe, fe->ops.release_sec);
dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
- dvb_frontend_invoke_release(fe, fe->ops.detach);
dvb_frontend_put(fe);
}
EXPORT_SYMBOL(dvb_frontend_detach);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index a3393cd4e584..917fe034af37 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
if (npads) {
dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
GFP_KERNEL);
- if (!dvbdev->pads)
+ if (!dvbdev->pads) {
+ kfree(dvbdev->entity);
return -ENOMEM;
+ }
}
switch (type) {
@@ -476,7 +478,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
return -ENOMEM;
}
- dvbdevfops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
+ dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
if (!dvbdevfops){
kfree (dvbdev);
@@ -492,7 +494,6 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdev->fops = dvbdevfops;
init_waitqueue_head (&dvbdev->wait_queue);
- memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
dvbdevfops->owner = adap->module;
list_add_tail (&dvbdev->list_head, &adap->device_list);
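dvb_register_device() now duplicates the template fops with kmemdup() instead of kzalloc() followed by memcpy(). A minimal sketch of the idiom (the helper name is made up for illustration):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Sketch of the kzalloc+memcpy -> kmemdup conversion.  kmemdup() returns
 * NULL on allocation failure, just like kzalloc(), so the caller's error
 * handling is unchanged.
 */
static struct file_operations *dup_fops(const struct file_operations *template)
{
	return kmemdup(template, sizeof(*template), GFP_KERNEL);
}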
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index dc43749177df..a29e9ddf9c82 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -1,5 +1,8 @@
+comment "DVB Frontend drivers hidden by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+
menu "Customise DVB Frontends"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
comment "Multistandard (satellite) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 42697a5999f7..9fccc906d85a 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -619,8 +619,10 @@ static int cx24117_load_firmware(struct dvb_frontend *fe,
/* send fw */
ret = i2c_transfer(state->priv->i2c, &msg, 1);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(buf);
return ret;
+ }
kfree(buf);
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index ac519c3eff18..3d84ee17e54c 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -431,7 +431,7 @@ static u32 cx24123_int_log2(u32 a, u32 b)
u32 div = a / b;
if (a % b >= b / 2)
++div;
- if (div < (1 << 31)) {
+ if (div < (1UL << 31)) {
for (exp = 1; div > exp; nearest++)
exp += exp;
}
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index 5264e873850e..f88b5355493e 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -594,7 +594,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
return ecount;
}
-static struct dvb_ca_en50221 en_templ = {
+static const struct dvb_ca_en50221 en_templ = {
.read_attribute_mem = read_attribute_mem,
.write_attribute_mem = write_attribute_mem,
.read_cam_control = read_cam_control,
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 1f006f8e8cc2..d137199e13e6 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -632,12 +632,11 @@ static int cxd2820r_probe(struct i2c_client *client,
* one dummy I2C client in order to get its own I2C client for each
* register bank.
*/
- priv->client[1] = i2c_new_dummy(client->adapter, client->addr | (1 << 1));
- if (!priv->client[1]) {
- ret = -ENODEV;
+ priv->client[1] = i2c_new_dummy_device(client->adapter, client->addr | (1 << 1));
+ if (IS_ERR(priv->client[1])) {
+ ret = PTR_ERR(priv->client[1]);
dev_err(&client->dev, "I2C registration failed\n");
- if (ret)
- goto err_regmap_0_regmap_exit;
+ goto err_regmap_0_regmap_exit;
}
priv->regmap[1] = regmap_init_i2c(priv->client[1], &regmap_config1);
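This is the first of several identical conversions in this series (cxd2820r, mn88443x, mn88472, mn88473, ad9389b, adv7180, adv7511, adv7842, ir-kbd-i2c): i2c_new_dummy() returned NULL on failure, while i2c_new_dummy_device() returns an ERR_PTR carrying the real error code. A hedged sketch of the resulting pattern (function name is illustrative):

#include <linux/err.h>
#include <linux/i2c.h>

/* Sketch: register a dummy client for a secondary register bank. */
static int register_bank_client(struct i2c_client *client,
				struct i2c_client **bank)
{
	*bank = i2c_new_dummy_device(client->adapter,
				     client->addr | (1 << 1));
	if (IS_ERR(*bank))
		return PTR_ERR(*bank);	/* propagate the real error code */
	return 0;
}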
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 52f5e697c5dc..0d22c700016d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2036,7 +2036,8 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
dprintk("-D- DiB7000PC detected\n");
- return 1;
+ ret = 1;
+ goto out;
}
msg[0].addr = msg[1].addr = 0x40;
@@ -2044,11 +2045,13 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
if (i2c_transfer(i2c_adap, msg, 2) == 2)
if (rx[0] == 0x01 && rx[1] == 0xb3) {
dprintk("-D- DiB7000PC detected\n");
- return 1;
+ ret = 1;
+ goto out;
}
dprintk("-D- DiB7000PC not detected\n");
+out:
kfree(rx);
rx_memory_error:
kfree(tx);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index a6876fa48753..2f5af4813a74 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12287,7 +12287,8 @@ struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
if (state == NULL)
goto error;
- demod = kmalloc(sizeof(struct drx_demod_instance), GFP_KERNEL);
+ demod = kmemdup(&drxj_default_demod_g,
+ sizeof(struct drx_demod_instance), GFP_KERNEL);
if (demod == NULL)
goto error;
@@ -12311,8 +12312,6 @@ struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
state->demod = demod;
/* setup the demod data */
- memcpy(demod, &drxj_default_demod_g, sizeof(struct drx_demod_instance));
-
demod->my_i2c_dev_addr = demod_addr;
demod->my_common_attr = demod_comm_attr;
demod->my_i2c_dev_addr->user_data = state;
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index ba0c49107bd2..d45b4ddc8f91 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/idr.h>
#include <linux/dvb/frontend.h>
#include <asm/types.h>
@@ -34,8 +35,7 @@ struct dvb_pll_priv {
};
#define DVB_PLL_MAX 64
-
-static unsigned int dvb_pll_devcount;
+static DEFINE_IDA(pll_ida);
static int debug;
module_param(debug, int, 0644);
@@ -787,6 +787,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
+ int nr;
b1 = kmalloc(1, GFP_KERNEL);
if (!b1)
@@ -795,9 +796,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
b1[0] = 0;
msg.buf = b1;
- if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
- (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
- pll_desc_id = id[dvb_pll_devcount];
+ nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
+ if (nr < 0) {
+ kfree(b1);
+ return NULL;
+ }
+
+ if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
+ pll_desc_id = id[nr];
BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
@@ -808,24 +814,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer (i2c, &msg, 1);
- if (ret != 1) {
- kfree(b1);
- return NULL;
- }
+ if (ret != 1)
+ goto out;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
- if (!priv) {
- kfree(b1);
- return NULL;
- }
+ if (!priv)
+ goto out;
priv->pll_i2c_address = pll_addr;
priv->i2c = i2c;
priv->pll_desc = desc;
- priv->nr = dvb_pll_devcount++;
+ priv->nr = nr;
memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
sizeof(struct dvb_tuner_ops));
@@ -858,6 +860,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
kfree(b1);
return fe;
+out:
+ kfree(b1);
+ ida_simple_remove(&pll_ida, nr);
+
+ return NULL;
}
EXPORT_SYMBOL(dvb_pll_attach);
@@ -894,9 +901,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
static int dvb_pll_remove(struct i2c_client *client)
{
- struct dvb_frontend *fe;
+ struct dvb_frontend *fe = i2c_get_clientdata(client);
+ struct dvb_pll_priv *priv = fe->tuner_priv;
- fe = i2c_get_clientdata(client);
+ ida_simple_remove(&pll_ida, priv->nr);
dvb_pll_release(fe);
return 0;
}
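dvb-pll replaces the ever-growing dvb_pll_devcount counter with an IDA, so instance numbers stay below DVB_PLL_MAX and are returned on release. The allocate/free pairing, as a small sketch with illustrative names:

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(instance_ida);

struct instance {
	int nr;		/* per-instance number handed out by the IDA */
};

static struct instance *instance_create(void)
{
	struct instance *inst;
	int nr;

	nr = ida_simple_get(&instance_ida, 0, 64, GFP_KERNEL);
	if (nr < 0)
		return NULL;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst) {
		ida_simple_remove(&instance_ida, nr);
		return NULL;
	}
	inst->nr = nr;
	return inst;
}

static void instance_destroy(struct instance *inst)
{
	ida_simple_remove(&instance_ida, inst->nr);
	kfree(inst);
}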
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index 9ec1aeef03d5..e4528784f847 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -722,9 +722,9 @@ static int mn88443x_probe(struct i2c_client *client,
* Chip has two I2C addresses for each satellite/terrestrial system.
* ISDB-T uses address ISDB-S + 4, so we register a dummy client.
*/
- chip->client_t = i2c_new_dummy(client->adapter, client->addr + 4);
- if (!chip->client_t)
- return -ENODEV;
+ chip->client_t = i2c_new_dummy_device(client->adapter, client->addr + 4);
+ if (IS_ERR(chip->client_t))
+ return PTR_ERR(chip->client_t);
chip->regmap_t = devm_regmap_init_i2c(chip->client_t, &regmap_config);
if (IS_ERR(chip->regmap_t)) {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 731b44b9b74c..73922fc8f39c 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -612,12 +612,11 @@ static int mn88472_probe(struct i2c_client *client,
* Also, register bank 2 does not support sequential I/O. Only a single
* register write or read is allowed to that bank.
*/
- dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
- if (!dev->client[1]) {
- ret = -ENODEV;
+ dev->client[1] = i2c_new_dummy_device(client->adapter, 0x1a);
+ if (IS_ERR(dev->client[1])) {
+ ret = PTR_ERR(dev->client[1]);
dev_err(&client->dev, "I2C registration failed\n");
- if (ret)
- goto err_regmap_0_regmap_exit;
+ goto err_regmap_0_regmap_exit;
}
dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
if (IS_ERR(dev->regmap[1])) {
@@ -626,12 +625,11 @@ static int mn88472_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[1], dev);
- dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
- if (!dev->client[2]) {
- ret = -ENODEV;
+ dev->client[2] = i2c_new_dummy_device(client->adapter, 0x1c);
+ if (IS_ERR(dev->client[2])) {
+ ret = PTR_ERR(dev->client[2]);
dev_err(&client->dev, "2nd I2C registration failed\n");
- if (ret)
- goto err_regmap_1_regmap_exit;
+ goto err_regmap_1_regmap_exit;
}
dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
if (IS_ERR(dev->regmap[2])) {
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 08118b38533b..4838969ef735 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -657,12 +657,11 @@ static int mn88473_probe(struct i2c_client *client,
* Also, register bank 2 does not support sequential I/O. Only a single
* register write or read is allowed to that bank.
*/
- dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
- if (dev->client[1] == NULL) {
- ret = -ENODEV;
+ dev->client[1] = i2c_new_dummy_device(client->adapter, 0x1a);
+ if (IS_ERR(dev->client[1])) {
+ ret = PTR_ERR(dev->client[1]);
dev_err(&client->dev, "I2C registration failed\n");
- if (ret)
- goto err_regmap_0_regmap_exit;
+ goto err_regmap_0_regmap_exit;
}
dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
if (IS_ERR(dev->regmap[1])) {
@@ -671,12 +670,11 @@ static int mn88473_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[1], dev);
- dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
- if (dev->client[2] == NULL) {
- ret = -ENODEV;
+ dev->client[2] = i2c_new_dummy_device(client->adapter, 0x1c);
+ if (IS_ERR(dev->client[2])) {
+ ret = PTR_ERR(dev->client[2]);
dev_err(&client->dev, "2nd I2C registration failed\n");
- if (ret)
- goto err_regmap_1_regmap_exit;
+ goto err_regmap_1_regmap_exit;
}
dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
if (IS_ERR(dev->regmap[2])) {
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index e05c21d35dc8..60d1e59d2292 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -81,11 +81,9 @@ struct rtl2832_sdr_format {
static struct rtl2832_sdr_format formats[] = {
{
- .name = "Complex U8",
.pixelformat = V4L2_SDR_FMT_CU8,
.buffersize = BULK_BUFFER_SIZE,
}, {
- .name = "Complex U16LE (emulated)",
.pixelformat = V4L2_SDR_FMT_CU16LE,
.buffersize = BULK_BUFFER_SIZE * 2,
},
@@ -1116,7 +1114,6 @@ static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
if (f->index >= dev->num_formats)
return -EINVAL;
- strscpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 168c503e9154..14b93a7d3358 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -11,6 +11,13 @@
static const struct dvb_frontend_ops si2168_ops;
+static void cmd_init(struct si2168_cmd *cmd, const u8 *buf, int wlen, int rlen)
+{
+ memcpy(cmd->args, buf, wlen);
+ cmd->wlen = wlen;
+ cmd->rlen = rlen;
+}
+
/* execute firmware command */
static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
{
@@ -82,16 +89,23 @@ static int si2168_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
dev_dbg(&client->dev, "%s acquire: %d\n", __func__, acquire);
+ /* set manual value */
+ if (dev->ts_mode & SI2168_TS_CLK_MANUAL) {
+ cmd_init(&cmd, "\x14\x00\x0d\x10\xe8\x03", 6, 4);
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ return ret;
+ }
/* set TS_MODE property */
- memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
+ cmd_init(&cmd, "\x14\x00\x01\x10\x10\x00", 6, 4);
+ if (dev->ts_mode & SI2168_TS_CLK_MANUAL)
+ cmd.args[4] = SI2168_TS_CLK_MANUAL;
if (acquire)
cmd.args[4] |= dev->ts_mode;
else
cmd.args[4] |= SI2168_TS_TRISTATE;
if (dev->ts_clock_gapped)
cmd.args[4] |= 0x40;
- cmd.wlen = 6;
- cmd.rlen = 4;
ret = si2168_cmd_execute(client, &cmd);
return ret;
@@ -115,19 +129,13 @@ static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
switch (c->delivery_system) {
case SYS_DVBT:
- memcpy(cmd.args, "\xa0\x01", 2);
- cmd.wlen = 2;
- cmd.rlen = 13;
+ cmd_init(&cmd, "\xa0\x01", 2, 13);
break;
case SYS_DVBC_ANNEX_A:
- memcpy(cmd.args, "\x90\x01", 2);
- cmd.wlen = 2;
- cmd.rlen = 9;
+ cmd_init(&cmd, "\x90\x01", 2, 9);
break;
case SYS_DVBT2:
- memcpy(cmd.args, "\x50\x01", 2);
- cmd.wlen = 2;
- cmd.rlen = 14;
+ cmd_init(&cmd, "\x50\x01", 2, 14);
break;
default:
ret = -EINVAL;
@@ -164,9 +172,7 @@ static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
/* BER */
if (*status & FE_HAS_VITERBI) {
- memcpy(cmd.args, "\x82\x00", 2);
- cmd.wlen = 2;
- cmd.rlen = 3;
+ cmd_init(&cmd, "\x82\x00", 2, 3);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -197,9 +203,7 @@ static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
/* UCB */
if (*status & FE_HAS_SYNC) {
- memcpy(cmd.args, "\x84\x01", 2);
- cmd.wlen = 2;
- cmd.rlen = 3;
+ cmd_init(&cmd, "\x84\x01", 2, 3);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -285,22 +289,18 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
goto err;
}
- memcpy(cmd.args, "\x88\x02\x02\x02\x02", 5);
- cmd.wlen = 5;
- cmd.rlen = 5;
+ cmd_init(&cmd, "\x88\x02\x02\x02\x02", 5, 5);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
/* that has no big effect */
if (c->delivery_system == SYS_DVBT)
- memcpy(cmd.args, "\x89\x21\x06\x11\xff\x98", 6);
+ cmd_init(&cmd, "\x89\x21\x06\x11\xff\x98", 6, 3);
else if (c->delivery_system == SYS_DVBC_ANNEX_A)
- memcpy(cmd.args, "\x89\x21\x06\x11\x89\xf0", 6);
+ cmd_init(&cmd, "\x89\x21\x06\x11\x89\xf0", 6, 3);
else if (c->delivery_system == SYS_DVBT2)
- memcpy(cmd.args, "\x89\x21\x06\x11\x89\x20", 6);
- cmd.wlen = 6;
- cmd.rlen = 3;
+ cmd_init(&cmd, "\x89\x21\x06\x11\x89\x20", 6, 3);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -317,103 +317,77 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
goto err;
}
- memcpy(cmd.args, "\x51\x03", 2);
- cmd.wlen = 2;
- cmd.rlen = 12;
+ cmd_init(&cmd, "\x51\x03", 2, 12);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x12\x08\x04", 3);
- cmd.wlen = 3;
- cmd.rlen = 3;
+ cmd_init(&cmd, "\x12\x08\x04", 3, 3);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x0c\x10\x12\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x0c\x10\x12\x00", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x06\x10\x24\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x06\x10\x24\x00", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x07\x10\x00\x24", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x07\x10\x00\x24", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x0a\x10\x00\x00", 6);
+ cmd_init(&cmd, "\x14\x00\x0a\x10\x00\x00", 6, 4);
cmd.args[4] = delivery_system | bandwidth;
if (dev->spectral_inversion)
cmd.args[5] |= 1;
- cmd.wlen = 6;
- cmd.rlen = 4;
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
/* set DVB-C symbol rate */
if (c->delivery_system == SYS_DVBC_ANNEX_A) {
- memcpy(cmd.args, "\x14\x00\x02\x11", 4);
+ cmd_init(&cmd, "\x14\x00\x02\x11\x00\x00", 6, 4);
cmd.args[4] = ((c->symbol_rate / 1000) >> 0) & 0xff;
cmd.args[5] = ((c->symbol_rate / 1000) >> 8) & 0xff;
- cmd.wlen = 6;
- cmd.rlen = 4;
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
}
- memcpy(cmd.args, "\x14\x00\x0f\x10\x10\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x0f\x10\x10\x00", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x08", 6);
+ cmd_init(&cmd, "\x14\x00\x09\x10\xe3\x08", 6, 4);
cmd.args[5] |= dev->ts_clock_inv ? 0x00 : 0x10;
- cmd.wlen = 6;
- cmd.rlen = 4;
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x08\x10\xd7\x05", 6);
+ cmd_init(&cmd, "\x14\x00\x08\x10\xd7\x05", 6, 4);
cmd.args[5] |= dev->ts_clock_inv ? 0x00 : 0x10;
- cmd.wlen = 6;
- cmd.rlen = 4;
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x01\x12\x00\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x01\x12\x00\x00", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x01\x03\x0c\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
+ cmd_init(&cmd, "\x14\x00\x01\x03\x0c\x00", 6, 4);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
- memcpy(cmd.args, "\x85", 1);
- cmd.wlen = 1;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\x85", 1, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -443,26 +417,21 @@ static int si2168_init(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
/* initialize */
- memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
- cmd.wlen = 13;
- cmd.rlen = 0;
+ cmd_init(&cmd, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00",
+ 13, 0);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
if (dev->warm) {
/* resume */
- memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
- cmd.wlen = 8;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
udelay(100);
- memcpy(cmd.args, "\x85", 1);
- cmd.wlen = 1;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\x85", 1, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -471,9 +440,7 @@ static int si2168_init(struct dvb_frontend *fe)
}
/* power up */
- memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
- cmd.wlen = 8;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -511,9 +478,8 @@ static int si2168_init(struct dvb_frontend *fe)
ret = -EINVAL;
break;
}
- memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
- cmd.wlen = len;
- cmd.rlen = 1;
+ cmd_init(&cmd, &fw->data[(fw->size - remaining) + 1],
+ len, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
break;
@@ -521,10 +487,7 @@ static int si2168_init(struct dvb_frontend *fe)
} else if (fw->size % 8 == 0) {
/* firmware is in the old format */
for (remaining = fw->size; remaining > 0; remaining -= 8) {
- len = 8;
- memcpy(cmd.args, &fw->data[fw->size - remaining], len);
- cmd.wlen = len;
- cmd.rlen = 1;
+ cmd_init(&cmd, &fw->data[fw->size - remaining], 8, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
break;
@@ -541,17 +504,13 @@ static int si2168_init(struct dvb_frontend *fe)
release_firmware(fw);
- memcpy(cmd.args, "\x01\x01", 2);
- cmd.wlen = 2;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\x01\x01", 2, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
/* query firmware version */
- memcpy(cmd.args, "\x11", 1);
- cmd.wlen = 1;
- cmd.rlen = 10;
+ cmd_init(&cmd, "\x11", 1, 10);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -609,9 +568,7 @@ static int si2168_sleep(struct dvb_frontend *fe)
if (dev->version > ('B' << 24 | 4 << 16 | 0 << 8 | 11 << 0))
dev->warm = false;
- memcpy(cmd.args, "\x13", 1);
- cmd.wlen = 1;
- cmd.rlen = 0;
+ cmd_init(&cmd, "\x13", 1, 0);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -637,9 +594,7 @@ static int si2168_select(struct i2c_mux_core *muxc, u32 chan)
struct si2168_cmd cmd;
/* open I2C gate */
- memcpy(cmd.args, "\xc0\x0d\x01", 3);
- cmd.wlen = 3;
- cmd.rlen = 0;
+ cmd_init(&cmd, "\xc0\x0d\x01", 3, 0);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -657,9 +612,7 @@ static int si2168_deselect(struct i2c_mux_core *muxc, u32 chan)
struct si2168_cmd cmd;
/* close I2C gate */
- memcpy(cmd.args, "\xc0\x0d\x00", 3);
- cmd.wlen = 3;
- cmd.rlen = 0;
+ cmd_init(&cmd, "\xc0\x0d\x00", 3, 0);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -730,25 +683,20 @@ static int si2168_probe(struct i2c_client *client,
mutex_init(&dev->i2c_mutex);
/* Initialize */
- memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
- cmd.wlen = 13;
- cmd.rlen = 0;
+ cmd_init(&cmd, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00",
+ 13, 0);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err_kfree;
/* Power up */
- memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
- cmd.wlen = 8;
- cmd.rlen = 1;
+ cmd_init(&cmd, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8, 1);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err_kfree;
/* Query chip revision */
- memcpy(cmd.args, "\x02", 1);
- cmd.wlen = 1;
- cmd.rlen = 13;
+ cmd_init(&cmd, "\x02", 1, 13);
ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err_kfree;
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 3b04f84272d9..50dccb394efa 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -30,6 +30,7 @@ struct si2168_config {
#define SI2168_TS_PARALLEL 0x06
#define SI2168_TS_SERIAL 0x03
#define SI2168_TS_TRISTATE 0x00
+#define SI2168_TS_CLK_MANUAL 0x20
u8 ts_mode;
/* TS clock inverted */
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 0c50740e7bb8..7d93a1617e86 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -270,7 +270,7 @@ static enum fe_stv0900_error stv0900_initialize(struct stv0900_internal *intp)
static u32 stv0900_get_mclk_freq(struct stv0900_internal *intp, u32 ext_clk)
{
- u32 mclk = 90000000, div = 0, ad_div = 0;
+ u32 mclk, div, ad_div;
div = stv0900_get_bits(intp, F0900_M_DIV);
ad_div = ((stv0900_get_bits(intp, F0900_SELX1RATIO) == 1) ? 4 : 6);
diff --git a/drivers/media/dvb-frontends/zd1301_demod.c b/drivers/media/dvb-frontends/zd1301_demod.c
index 96adbba7a82b..bbabe6a2d4f4 100644
--- a/drivers/media/dvb-frontends/zd1301_demod.c
+++ b/drivers/media/dvb-frontends/zd1301_demod.c
@@ -421,8 +421,7 @@ static int zd1301_demod_i2c_master_xfer(struct i2c_adapter *adapter,
} else {
dev_dbg(&pdev->dev, "unknown msg[0].len=%u\n", msg[0].len);
ret = -EOPNOTSUPP;
- if (ret)
- goto err;
+ goto err;
}
return num;
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index a960a0ce9deb..9363d005e2b6 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -217,7 +217,7 @@ static const struct file_operations fdtv_ca_fops = {
.llseek = noop_llseek,
};
-static struct dvb_device fdtv_ca = {
+static const struct dvb_device fdtv_ca = {
.users = 1,
.readers = 1,
.writers = 1,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 79ce9ec6fc1b..7eee1812bba3 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -22,8 +22,11 @@ config VIDEO_IR_I2C
# Encoder / Decoder module configuration
#
+comment "I2C drivers hidden by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+
menu "I2C Encoders, decoders, sensors and other helper chips"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
comment "Audio decoders, processors and mixers"
@@ -723,6 +726,19 @@ config VIDEO_OV5670
To compile this driver as a module, choose M here: the
module will be called ov5670.
+config VIDEO_OV5675
+ tristate "OmniVision OV5675 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV5675 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5675.
+
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index fd4ea86dedd5..beb170b002dc 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
+obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index aa8b04cfed0f..8679a44e6413 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1148,10 +1148,10 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_dbg(1, debug, sd, "reg 0x41 0x%x, chip version (reg 0x00) 0x%x\n",
ad9389b_rd(sd, 0x41), state->chip_revision);
- state->edid_i2c_client = i2c_new_dummy(client->adapter, (0x7e>>1));
- if (state->edid_i2c_client == NULL) {
+ state->edid_i2c_client = i2c_new_dummy_device(client->adapter, (0x7e >> 1));
+ if (IS_ERR(state->edid_i2c_client)) {
v4l2_err(sd, "failed to register edid i2c client\n");
- err = -ENOMEM;
+ err = PTR_ERR(state->edid_i2c_client);
goto err_entity;
}
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 6f3dc8862622..e780969cc2f2 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1329,17 +1329,17 @@ static int adv7180_probe(struct i2c_client *client,
}
if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
- state->csi_client = i2c_new_dummy(client->adapter,
+ state->csi_client = i2c_new_dummy_device(client->adapter,
ADV7180_DEFAULT_CSI_I2C_ADDR);
- if (!state->csi_client)
- return -ENOMEM;
+ if (IS_ERR(state->csi_client))
+ return PTR_ERR(state->csi_client);
}
if (state->chip_info->flags & ADV7180_FLAG_I2P) {
- state->vpp_client = i2c_new_dummy(client->adapter,
+ state->vpp_client = i2c_new_dummy_device(client->adapter,
ADV7180_DEFAULT_VPP_I2C_ADDR);
- if (!state->vpp_client) {
- ret = -ENOMEM;
+ if (IS_ERR(state->vpp_client)) {
+ ret = PTR_ERR(state->vpp_client);
goto err_unregister_csi_client;
}
}
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 4a441ee99dd8..63e94dfcb5d3 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -428,8 +428,7 @@ done:
return pdata;
}
-static int adv7343_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adv7343_probe(struct i2c_client *client)
{
struct adv7343_state *state;
int err;
@@ -524,7 +523,7 @@ static struct i2c_driver adv7343_driver = {
.of_match_table = of_match_ptr(adv7343_of_match),
.name = "adv7343",
},
- .probe = adv7343_probe,
+ .probe_new = adv7343_probe,
.remove = adv7343_remove,
.id_table = adv7343_id,
};
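adv7343 is one of many sensor/encoder drivers in this series (adv748x, et8ek8, imx274, max2175, mt9m001, mt9m111, ov2640, ov2659) moved from .probe to .probe_new, which drops the unused i2c_device_id argument. A hedged skeleton of the converted shape (driver and device names are made up):

#include <linux/i2c.h>
#include <linux/module.h>

/* Sketch only: a minimal probe_new-style I2C driver skeleton. */
static int example_probe(struct i2c_client *client)
{
	/* device setup goes here; the id table entry is no longer passed in */
	return 0;
}

static int example_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.probe_new = example_probe,
	.remove = example_remove,
	.id_table = example_id,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");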
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index f57cd77a32fa..0a47d474e97a 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -668,8 +668,7 @@ static void adv748x_dt_cleanup(struct adv748x_state *state)
of_node_put(state->endpoints[i]);
}
-static int adv748x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adv748x_probe(struct i2c_client *client)
{
struct adv748x_state *state;
int ret;
@@ -797,13 +796,6 @@ static int adv748x_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id adv748x_id[] = {
- { "adv7481", 0 },
- { "adv7482", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, adv748x_id);
-
static const struct of_device_id adv748x_of_table[] = {
{ .compatible = "adi,adv7481", },
{ .compatible = "adi,adv7482", },
@@ -816,9 +808,8 @@ static struct i2c_driver adv748x_driver = {
.name = "adv748x",
.of_match_table = adv748x_of_table,
},
- .probe = adv748x_probe,
+ .probe_new = adv748x_probe,
.remove = adv748x_remove,
- .id_table = adv748x_id,
};
module_i2c_driver(adv748x_driver);
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 2ad6bdf1a9fc..62763ec4cd07 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1872,11 +1872,11 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_entity;
}
- state->i2c_edid = i2c_new_dummy(client->adapter,
+ state->i2c_edid = i2c_new_dummy_device(client->adapter,
state->i2c_edid_addr >> 1);
- if (state->i2c_edid == NULL) {
+ if (IS_ERR(state->i2c_edid)) {
v4l2_err(sd, "failed to register edid i2c client\n");
- err = -ENOMEM;
+ err = PTR_ERR(state->i2c_edid);
goto err_entity;
}
@@ -1889,11 +1889,11 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
}
if (state->pdata.cec_clk) {
- state->i2c_cec = i2c_new_dummy(client->adapter,
+ state->i2c_cec = i2c_new_dummy_device(client->adapter,
state->i2c_cec_addr >> 1);
- if (state->i2c_cec == NULL) {
+ if (IS_ERR(state->i2c_cec)) {
v4l2_err(sd, "failed to register cec i2c client\n");
- err = -ENOMEM;
+ err = PTR_ERR(state->i2c_cec);
goto err_unreg_edid;
}
adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
@@ -1901,10 +1901,10 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
}
- state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
- if (state->i2c_pktmem == NULL) {
+ state->i2c_pktmem = i2c_new_dummy_device(client->adapter, state->i2c_pktmem_addr >> 1);
+ if (IS_ERR(state->i2c_pktmem)) {
v4l2_err(sd, "failed to register pktmem i2c client\n");
- err = -ENOMEM;
+ err = PTR_ERR(state->i2c_pktmem);
goto err_unreg_cec;
}
@@ -1940,8 +1940,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
err_unreg_pktmem:
i2c_unregister_device(state->i2c_pktmem);
err_unreg_cec:
- if (state->i2c_cec)
- i2c_unregister_device(state->i2c_cec);
+ i2c_unregister_device(state->i2c_cec);
err_unreg_edid:
i2c_unregister_device(state->i2c_edid);
err_entity:
@@ -1967,8 +1966,7 @@ static int adv7511_remove(struct i2c_client *client)
adv7511_init_setup(sd);
cancel_delayed_work(&state->edid_handler);
i2c_unregister_device(state->i2c_edid);
- if (state->i2c_cec)
- i2c_unregister_device(state->i2c_cec);
+ i2c_unregister_device(state->i2c_cec);
i2c_unregister_device(state->i2c_pktmem);
destroy_workqueue(state->work_queue);
v4l2_device_unregister_subdev(sd);
@@ -1980,14 +1978,14 @@ static int adv7511_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7511_id[] = {
- { "adv7511", 0 },
+ { "adv7511-v4l2", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_id);
static struct i2c_driver adv7511_driver = {
.driver = {
- .name = "adv7511",
+ .name = "adv7511-v4l2",
},
.probe = adv7511_probe,
.remove = adv7511_remove,
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 11ab2df02dc7..885619841719 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3351,28 +3351,17 @@ static const struct v4l2_ctrl_config adv7842_ctrl_free_run_color = {
static void adv7842_unregister_clients(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
- if (state->i2c_avlink)
- i2c_unregister_device(state->i2c_avlink);
- if (state->i2c_cec)
- i2c_unregister_device(state->i2c_cec);
- if (state->i2c_infoframe)
- i2c_unregister_device(state->i2c_infoframe);
- if (state->i2c_sdp_io)
- i2c_unregister_device(state->i2c_sdp_io);
- if (state->i2c_sdp)
- i2c_unregister_device(state->i2c_sdp);
- if (state->i2c_afe)
- i2c_unregister_device(state->i2c_afe);
- if (state->i2c_repeater)
- i2c_unregister_device(state->i2c_repeater);
- if (state->i2c_edid)
- i2c_unregister_device(state->i2c_edid);
- if (state->i2c_hdmi)
- i2c_unregister_device(state->i2c_hdmi);
- if (state->i2c_cp)
- i2c_unregister_device(state->i2c_cp);
- if (state->i2c_vdp)
- i2c_unregister_device(state->i2c_vdp);
+ i2c_unregister_device(state->i2c_avlink);
+ i2c_unregister_device(state->i2c_cec);
+ i2c_unregister_device(state->i2c_infoframe);
+ i2c_unregister_device(state->i2c_sdp_io);
+ i2c_unregister_device(state->i2c_sdp);
+ i2c_unregister_device(state->i2c_afe);
+ i2c_unregister_device(state->i2c_repeater);
+ i2c_unregister_device(state->i2c_edid);
+ i2c_unregister_device(state->i2c_hdmi);
+ i2c_unregister_device(state->i2c_cp);
+ i2c_unregister_device(state->i2c_vdp);
state->i2c_avlink = NULL;
state->i2c_cec = NULL;
@@ -3400,9 +3389,12 @@ static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd, const cha
return NULL;
}
- cp = i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
- if (!cp)
- v4l2_err(sd, "register %s on i2c addr 0x%x failed\n", desc, addr);
+ cp = i2c_new_dummy_device(client->adapter, io_read(sd, io_reg) >> 1);
+ if (IS_ERR(cp)) {
+ v4l2_err(sd, "register %s on i2c addr 0x%x failed with %ld\n",
+ desc, addr, PTR_ERR(cp));
+ cp = NULL;
+ }
return cp;
}
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index e6c06cb75d33..256acf73d5ea 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1396,8 +1396,7 @@ static int __maybe_unused et8ek8_resume(struct device *dev)
return __et8ek8_set_power(sensor, true);
}
-static int et8ek8_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int et8ek8_probe(struct i2c_client *client)
{
struct et8ek8_sensor *sensor;
struct device *dev = &client->dev;
@@ -1504,7 +1503,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
.pm = &et8ek8_pm_ops,
.of_match_table = et8ek8_of_table,
},
- .probe = et8ek8_probe,
+ .probe_new = et8ek8_probe,
.remove = __exit_p(et8ek8_remove),
.id_table = et8ek8_id_table,
};
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index f3ff1af209f9..6011cec5e351 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1821,8 +1821,7 @@ static const struct i2c_device_id imx274_id[] = {
};
MODULE_DEVICE_TABLE(i2c, imx274_id);
-static int imx274_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int imx274_probe(struct i2c_client *client)
{
struct v4l2_subdev *sd;
struct stimx274 *imx274;
@@ -1984,7 +1983,7 @@ static struct i2c_driver imx274_i2c_driver = {
.name = DRIVER_NAME,
.of_match_table = imx274_of_id_table,
},
- .probe = imx274_probe,
+ .probe_new = imx274_probe,
.remove = imx274_remove,
.id_table = imx274_id,
};
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 876d7587a1da..e8119ad0bc71 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -885,9 +885,11 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
INIT_DELAYED_WORK(&ir->work, ir_work);
if (probe_tx) {
- ir->tx_c = i2c_new_dummy(client->adapter, 0x70);
- if (!ir->tx_c) {
+ ir->tx_c = i2c_new_dummy_device(client->adapter, 0x70);
+ if (IS_ERR(ir->tx_c)) {
dev_err(&client->dev, "failed to setup tx i2c address");
+ err = PTR_ERR(ir->tx_c);
+ goto err_out_free;
} else if (!zilog_init(ir)) {
ir->carrier = 38000;
ir->duty_cycle = 40;
@@ -904,7 +906,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
err_out_free:
- if (ir->tx_c)
+ if (!IS_ERR(ir->tx_c))
i2c_unregister_device(ir->tx_c);
/* Only frees rc if it was allocated internally */
@@ -916,16 +918,12 @@ static int ir_remove(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
- /* kill outstanding polls */
cancel_delayed_work_sync(&ir->work);
- if (ir->tx_c)
- i2c_unregister_device(ir->tx_c);
+ i2c_unregister_device(ir->tx_c);
- /* unregister device */
rc_unregister_device(ir->rc);
- /* free memory */
return 0;
}
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 7b226fadcdb8..19a3ceea3bc2 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1271,8 +1271,7 @@ static int max2175_refout_load_to_bits(struct i2c_client *client, u32 load,
return 0;
}
-static int max2175_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max2175_probe(struct i2c_client *client)
{
bool master = true, am_hiz = false;
u32 refout_load, refout_bits = 0; /* REFOUT disabled */
@@ -1433,7 +1432,7 @@ static struct i2c_driver max2175_driver = {
.name = DRIVER_NAME,
.of_match_table = max2175_of_ids,
},
- .probe = max2175_probe,
+ .probe_new = max2175_probe,
.remove = max2175_remove,
.id_table = max2175_id,
};
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 2df743cbe09d..5613072908ac 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -726,8 +726,7 @@ static const struct v4l2_subdev_ops mt9m001_subdev_ops = {
.pad = &mt9m001_subdev_pad_ops,
};
-static int mt9m001_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
+static int mt9m001_probe(struct i2c_client *client)
{
struct mt9m001 *mt9m001;
struct i2c_adapter *adapter = client->adapter;
@@ -872,7 +871,7 @@ static struct i2c_driver mt9m001_i2c_driver = {
.pm = &mt9m001_pm_ops,
.of_match_table = mt9m001_of_match,
},
- .probe = mt9m001_probe,
+ .probe_new = mt9m001_probe,
.remove = mt9m001_remove,
.id_table = mt9m001_id,
};
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 12cb012d91f7..17e8253f5748 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -533,7 +533,7 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
format->format = *mf;
return 0;
#else
- return -ENOTTY;
+ return -EINVAL;
#endif
}
@@ -1243,8 +1243,7 @@ out_put_fw:
return ret;
}
-static int mt9m111_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
+static int mt9m111_probe(struct i2c_client *client)
{
struct mt9m111 *mt9m111;
struct i2c_adapter *adapter = client->adapter;
@@ -1388,7 +1387,7 @@ static struct i2c_driver mt9m111_i2c_driver = {
.name = "mt9m111",
.of_match_table = of_match_ptr(mt9m111_of_match),
},
- .probe = mt9m111_probe,
+ .probe_new = mt9m111_probe,
.remove = mt9m111_remove,
.id_table = mt9m111_id,
};
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index ecd167d7c4d2..4a4bd5b665a1 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -929,7 +929,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
format->format = *mf;
return 0;
#else
- return -ENOTTY;
+ return -EINVAL;
#endif
}
@@ -1190,8 +1190,7 @@ static int ov2640_probe_dt(struct i2c_client *client,
/*
* i2c_driver functions
*/
-static int ov2640_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
+static int ov2640_probe(struct i2c_client *client)
{
struct ov2640_priv *priv;
struct i2c_adapter *adapter = client->adapter;
@@ -1302,7 +1301,7 @@ static struct i2c_driver ov2640_i2c_driver = {
.name = "ov2640",
.of_match_table = of_match_ptr(ov2640_of_match),
},
- .probe = ov2640_probe,
+ .probe_new = ov2640_probe,
.remove = ov2640_remove,
.id_table = ov2640_id,
};
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 5ed2413eac8a..f4ded0669ff9 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1055,7 +1055,7 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
mutex_unlock(&ov2659->lock);
return 0;
#else
- return -ENOTTY;
+ return -EINVAL;
#endif
}
@@ -1131,8 +1131,6 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
-#else
- ret = -ENOTTY;
#endif
} else {
s64 val;
@@ -1386,8 +1384,7 @@ done:
return pdata;
}
-static int ov2659_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2659_probe(struct i2c_client *client)
{
const struct ov2659_platform_data *pdata = ov2659_get_pdata(client);
struct v4l2_subdev *sd;
@@ -1515,7 +1512,7 @@ static struct i2c_driver ov2659_i2c_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ov2659_of_match),
},
- .probe = ov2659_probe,
+ .probe_new = ov2659_probe,
.remove = ov2659_remove,
.id_table = ov2659_id,
};
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index b10bcfabaeeb..59cdbc33658c 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -675,7 +675,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg, format->pad);
#else
- ret = -ENOTTY;
+ ret = -EINVAL;
#endif
} else {
fmt = &sensor->fmt;
@@ -723,10 +723,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
try_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
format->format = *try_fmt;
-#else
- ret = -ENOTTY;
#endif
-
goto unlock;
}
@@ -1023,7 +1020,7 @@ static int ov2680_check_id(struct ov2680_dev *sensor)
return 0;
}
-static int ov2860_parse_dt(struct ov2680_dev *sensor)
+static int ov2680_parse_dt(struct ov2680_dev *sensor)
{
struct device *dev = ov2680_to_dev(sensor);
int ret;
@@ -1064,7 +1061,7 @@ static int ov2680_probe(struct i2c_client *client)
sensor->i2c_client = client;
- ret = ov2860_parse_dt(sensor);
+ ret = ov2680_parse_dt(sensor);
if (ret < 0)
return -EINVAL;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 759d60c6d630..500d9bbff10b 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -158,8 +158,8 @@ static const int ov5640_framerates[] = {
/* regulator supplies */
static const char * const ov5640_supply_name[] = {
"DOVDD", /* Digital I/O (1.8V) supply */
- "DVDD", /* Digital Core (1.5V) supply */
"AVDD", /* Analog (2.8V) supply */
+ "DVDD", /* Digital Core (1.5V) supply */
};
#define OV5640_NUM_SUPPLIES ARRAY_SIZE(ov5640_supply_name)
@@ -2936,8 +2936,7 @@ power_off:
return ret;
}
-static int ov5640_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5640_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct fwnode_handle *endpoint;
@@ -3022,9 +3021,14 @@ static int ov5640_probe(struct i2c_client *client,
/* request optional power down pin */
sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->pwdn_gpio))
+ return PTR_ERR(sensor->pwdn_gpio);
+
/* request optional reset pin */
sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset_gpio))
+ return PTR_ERR(sensor->reset_gpio);
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
@@ -3050,7 +3054,7 @@ static int ov5640_probe(struct i2c_client *client,
if (ret)
goto entity_cleanup;
- ret = v4l2_async_register_subdev(&sensor->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&sensor->sd);
if (ret)
goto free_ctrls;
@@ -3095,7 +3099,7 @@ static struct i2c_driver ov5640_i2c_driver = {
.of_match_table = ov5640_dt_ids,
},
.id_table = ov5640_id,
- .probe = ov5640_probe,
+ .probe_new = ov5640_probe,
.remove = ov5640_remove,
};
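
Editor's note: the ov5640 hunk adds IS_ERR() checks after devm_gpiod_get_optional(). "Optional" only means a missing GPIO property yields NULL; real failures, including -EPROBE_DEFER, still come back as ERR_PTR() and must be propagated. A minimal sketch of the rule, with placeholder names:

#include <linux/gpio/consumer.h>

static int example_get_gpios(struct device *dev, struct gpio_desc **pwdn)
{
	*pwdn = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
	if (IS_ERR(*pwdn))
		return PTR_ERR(*pwdn);	/* may be -EPROBE_DEFER */

	/* NULL here simply means the property is absent; gpiod_set_value()
	 * and friends treat a NULL descriptor as a no-op. */
	return 0;
}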
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 124c8df04633..a6c17d15d754 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -34,10 +34,6 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define OV5645_VOLTAGE_ANALOG 2800000
-#define OV5645_VOLTAGE_DIGITAL_CORE 1500000
-#define OV5645_VOLTAGE_DIGITAL_IO 1800000
-
#define OV5645_SYSTEM_CTRL0 0x3008
#define OV5645_SYSTEM_CTRL0_START 0x02
#define OV5645_SYSTEM_CTRL0_STOP 0x42
@@ -45,6 +41,8 @@
#define OV5645_CHIP_ID_HIGH_BYTE 0x56
#define OV5645_CHIP_ID_LOW 0x300b
#define OV5645_CHIP_ID_LOW_BYTE 0x45
+#define OV5645_IO_MIPI_CTRL00 0x300e
+#define OV5645_PAD_OUTPUT00 0x3019
#define OV5645_AWB_MANUAL_CONTROL 0x3406
#define OV5645_AWB_MANUAL_ENABLE BIT(0)
#define OV5645_AEC_PK_MANUAL 0x3503
@@ -55,6 +53,7 @@
#define OV5645_ISP_VFLIP BIT(2)
#define OV5645_TIMING_TC_REG21 0x3821
#define OV5645_SENSOR_MIRROR BIT(1)
+#define OV5645_MIPI_CTRL00 0x4800
#define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
#define OV5645_TEST_PATTERN_MASK 0x3
#define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
@@ -62,6 +61,15 @@
#define OV5645_SDE_SAT_U 0x5583
#define OV5645_SDE_SAT_V 0x5584
+/* regulator supplies */
+static const char * const ov5645_supply_name[] = {
+ "vdddo", /* Digital I/O (1.8V) supply */
+ "vdda", /* Analog (2.8V) supply */
+ "vddd", /* Digital Core (1.5V) supply */
+};
+
+#define OV5645_NUM_SUPPLIES ARRAY_SIZE(ov5645_supply_name)
+
struct reg_value {
u16 reg;
u8 val;
@@ -86,9 +94,7 @@ struct ov5645 {
struct v4l2_rect crop;
struct clk *xclk;
- struct regulator *io_regulator;
- struct regulator *core_regulator;
- struct regulator *analog_regulator;
+ struct regulator_bulk_data supplies[OV5645_NUM_SUPPLIES];
const struct ov5645_mode_info *current_mode;
@@ -121,7 +127,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
{ 0x3503, 0x07 },
{ 0x3002, 0x1c },
{ 0x3006, 0xc3 },
- { 0x300e, 0x45 },
{ 0x3017, 0x00 },
{ 0x3018, 0x00 },
{ 0x302e, 0x0b },
@@ -350,7 +355,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
{ 0x3a1f, 0x14 },
{ 0x0601, 0x02 },
{ 0x3008, 0x42 },
- { 0x3008, 0x02 }
+ { 0x3008, 0x02 },
+ { OV5645_IO_MIPI_CTRL00, 0x40 },
+ { OV5645_MIPI_CTRL00, 0x24 },
+ { OV5645_PAD_OUTPUT00, 0x70 }
};
static const struct reg_value ov5645_setting_sxga[] = {
@@ -533,55 +541,6 @@ static const struct ov5645_mode_info ov5645_mode_info_data[] = {
},
};
-static int ov5645_regulators_enable(struct ov5645 *ov5645)
-{
- int ret;
-
- ret = regulator_enable(ov5645->io_regulator);
- if (ret < 0) {
- dev_err(ov5645->dev, "set io voltage failed\n");
- return ret;
- }
-
- ret = regulator_enable(ov5645->analog_regulator);
- if (ret) {
- dev_err(ov5645->dev, "set analog voltage failed\n");
- goto err_disable_io;
- }
-
- ret = regulator_enable(ov5645->core_regulator);
- if (ret) {
- dev_err(ov5645->dev, "set core voltage failed\n");
- goto err_disable_analog;
- }
-
- return 0;
-
-err_disable_analog:
- regulator_disable(ov5645->analog_regulator);
-err_disable_io:
- regulator_disable(ov5645->io_regulator);
-
- return ret;
-}
-
-static void ov5645_regulators_disable(struct ov5645 *ov5645)
-{
- int ret;
-
- ret = regulator_disable(ov5645->core_regulator);
- if (ret < 0)
- dev_err(ov5645->dev, "core regulator disable failed\n");
-
- ret = regulator_disable(ov5645->analog_regulator);
- if (ret < 0)
- dev_err(ov5645->dev, "analog regulator disable failed\n");
-
- ret = regulator_disable(ov5645->io_regulator);
- if (ret < 0)
- dev_err(ov5645->dev, "io regulator disable failed\n");
-}
-
static int ov5645_write_reg(struct ov5645 *ov5645, u16 reg, u8 val)
{
u8 regbuf[3];
@@ -680,15 +639,14 @@ static int ov5645_set_power_on(struct ov5645 *ov5645)
{
int ret;
- ret = ov5645_regulators_enable(ov5645);
- if (ret < 0) {
+ ret = regulator_bulk_enable(OV5645_NUM_SUPPLIES, ov5645->supplies);
+ if (ret < 0)
return ret;
- }
ret = clk_prepare_enable(ov5645->xclk);
if (ret < 0) {
dev_err(ov5645->dev, "clk prepare enable failed\n");
- ov5645_regulators_disable(ov5645);
+ regulator_bulk_disable(OV5645_NUM_SUPPLIES, ov5645->supplies);
return ret;
}
@@ -708,7 +666,7 @@ static void ov5645_set_power_off(struct ov5645 *ov5645)
gpiod_set_value_cansleep(ov5645->rst_gpio, 1);
gpiod_set_value_cansleep(ov5645->enable_gpio, 0);
clk_disable_unprepare(ov5645->xclk);
- ov5645_regulators_disable(ov5645);
+ regulator_bulk_disable(OV5645_NUM_SUPPLIES, ov5645->supplies);
}
static int ov5645_s_power(struct v4l2_subdev *sd, int on)
@@ -737,13 +695,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
goto exit;
}
- ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
- OV5645_SYSTEM_CTRL0_STOP);
- if (ret < 0) {
- ov5645_set_power_off(ov5645);
- goto exit;
- }
+ usleep_range(500, 1000);
} else {
+ ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
ov5645_set_power_off(ov5645);
}
}
@@ -1049,11 +1003,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
dev_err(ov5645->dev, "could not sync v4l2 controls\n");
return ret;
}
+
+ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
+ if (ret < 0)
+ return ret;
+
ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
OV5645_SYSTEM_CTRL0_START);
if (ret < 0)
return ret;
} else {
+ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
+ if (ret < 0)
+ return ret;
+
ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
OV5645_SYSTEM_CTRL0_STOP);
if (ret < 0)
@@ -1086,13 +1049,13 @@ static const struct v4l2_subdev_ops ov5645_subdev_ops = {
.pad = &ov5645_subdev_pad_ops,
};
-static int ov5645_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5645_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *endpoint;
struct ov5645 *ov5645;
u8 chip_id_high, chip_id_low;
+ unsigned int i;
u32 xclk_freq;
int ret;
@@ -1150,47 +1113,13 @@ static int ov5645_probe(struct i2c_client *client,
return ret;
}
- ov5645->io_regulator = devm_regulator_get(dev, "vdddo");
- if (IS_ERR(ov5645->io_regulator)) {
- dev_err(dev, "cannot get io regulator\n");
- return PTR_ERR(ov5645->io_regulator);
- }
-
- ret = regulator_set_voltage(ov5645->io_regulator,
- OV5645_VOLTAGE_DIGITAL_IO,
- OV5645_VOLTAGE_DIGITAL_IO);
- if (ret < 0) {
- dev_err(dev, "cannot set io voltage\n");
- return ret;
- }
-
- ov5645->core_regulator = devm_regulator_get(dev, "vddd");
- if (IS_ERR(ov5645->core_regulator)) {
- dev_err(dev, "cannot get core regulator\n");
- return PTR_ERR(ov5645->core_regulator);
- }
-
- ret = regulator_set_voltage(ov5645->core_regulator,
- OV5645_VOLTAGE_DIGITAL_CORE,
- OV5645_VOLTAGE_DIGITAL_CORE);
- if (ret < 0) {
- dev_err(dev, "cannot set core voltage\n");
- return ret;
- }
-
- ov5645->analog_regulator = devm_regulator_get(dev, "vdda");
- if (IS_ERR(ov5645->analog_regulator)) {
- dev_err(dev, "cannot get analog regulator\n");
- return PTR_ERR(ov5645->analog_regulator);
- }
+ for (i = 0; i < OV5645_NUM_SUPPLIES; i++)
+ ov5645->supplies[i].supply = ov5645_supply_name[i];
- ret = regulator_set_voltage(ov5645->analog_regulator,
- OV5645_VOLTAGE_ANALOG,
- OV5645_VOLTAGE_ANALOG);
- if (ret < 0) {
- dev_err(dev, "cannot set analog voltage\n");
+ ret = devm_regulator_bulk_get(dev, OV5645_NUM_SUPPLIES,
+ ov5645->supplies);
+ if (ret < 0)
return ret;
- }
ov5645->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
if (IS_ERR(ov5645->enable_gpio)) {
@@ -1355,7 +1284,7 @@ static struct i2c_driver ov5645_i2c_driver = {
.of_match_table = of_match_ptr(ov5645_of_match),
.name = "ov5645",
},
- .probe = ov5645_probe,
+ .probe_new = ov5645_probe,
.remove = ov5645_remove,
.id_table = ov5645_id,
};
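
Editor's note: the ov5645 rework replaces three individually managed regulators, and the fixed regulator_set_voltage() calls, with the bulk regulator API; supply voltages are then taken from the board constraints rather than hard-coded in the driver. A minimal sketch of that pattern in isolation, with placeholder names:

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const example_supply_names[] = {
	"vdddo", "vdda", "vddd",
};
#define EXAMPLE_NUM_SUPPLIES ARRAY_SIZE(example_supply_names)

struct example_sensor {
	struct device *dev;
	struct regulator_bulk_data supplies[EXAMPLE_NUM_SUPPLIES];
};

static int example_get_regulators(struct example_sensor *s)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_NUM_SUPPLIES; i++)
		s->supplies[i].supply = example_supply_names[i];

	/* Fetch all supplies in one call; devm handles the release. */
	return devm_regulator_bulk_get(s->dev, EXAMPLE_NUM_SUPPLIES,
				       s->supplies);
}

static int example_power_on(struct example_sensor *s)
{
	return regulator_bulk_enable(EXAMPLE_NUM_SUPPLIES, s->supplies);
}

static void example_power_off(struct example_sensor *s)
{
	regulator_bulk_disable(EXAMPLE_NUM_SUPPLIES, s->supplies);
}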
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 4589631798c9..e7d2e5b4ad4b 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -547,8 +547,7 @@ static int ov5647_parse_dt(struct device_node *np)
return ret;
}
-static int ov5647_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5647_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov5647 *sensor;
@@ -644,7 +643,7 @@ static struct i2c_driver ov5647_driver = {
.of_match_table = of_match_ptr(ov5647_of_match),
.name = SENSOR_NAME,
},
- .probe = ov5647_probe,
+ .probe_new = ov5647_probe,
.remove = ov5647_remove,
.id_table = ov5647_id,
};
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
new file mode 100644
index 000000000000..1ae252378799
--- /dev/null
+++ b/drivers/media/i2c/ov5675.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV5675_REG_VALUE_08BIT 1
+#define OV5675_REG_VALUE_16BIT 2
+#define OV5675_REG_VALUE_24BIT 3
+
+#define OV5675_LINK_FREQ_450MHZ 450000000ULL
+#define OV5675_SCLK 90000000LL
+#define OV5675_MCLK 19200000
+#define OV5675_DATA_LANES 2
+#define OV5675_RGB_DEPTH 10
+
+#define OV5675_REG_CHIP_ID 0x300a
+#define OV5675_CHIP_ID 0x5675
+
+#define OV5675_REG_MODE_SELECT 0x0100
+#define OV5675_MODE_STANDBY 0x00
+#define OV5675_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV5675_REG_VTS 0x380e
+#define OV5675_VTS_30FPS 0x07e4
+#define OV5675_VTS_30FPS_MIN 0x07e4
+#define OV5675_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OV5675_REG_HTS 0x380c
+
+/* Exposure controls from sensor */
+#define OV5675_REG_EXPOSURE 0x3500
+#define OV5675_EXPOSURE_MIN 4
+#define OV5675_EXPOSURE_MAX_MARGIN 4
+#define OV5675_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV5675_REG_ANALOG_GAIN 0x3508
+#define OV5675_ANAL_GAIN_MIN 128
+#define OV5675_ANAL_GAIN_MAX 2047
+#define OV5675_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV5675_REG_MWB_R_GAIN 0x5019
+#define OV5675_REG_MWB_G_GAIN 0x501b
+#define OV5675_REG_MWB_B_GAIN 0x501d
+#define OV5675_DGTL_GAIN_MIN 0
+#define OV5675_DGTL_GAIN_MAX 4095
+#define OV5675_DGTL_GAIN_STEP 1
+#define OV5675_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV5675_REG_TEST_PATTERN 0x4503
+#define OV5675_TEST_PATTERN_ENABLE BIT(7)
+#define OV5675_TEST_PATTERN_BAR_SHIFT 2
+
+#define to_ov5675(_sd) container_of(_sd, struct ov5675, sd)
+
+enum {
+ OV5675_LINK_FREQ_900MBPS,
+};
+
+struct ov5675_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov5675_reg_list {
+ u32 num_of_regs;
+ const struct ov5675_reg *regs;
+};
+
+struct ov5675_link_freq_config {
+ const struct ov5675_reg_list reg_list;
+};
+
+struct ov5675_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov5675_reg_list reg_list;
+};
+
+static const struct ov5675_reg mipi_data_rate_900mbps[] = {
+ {0x0103, 0x01},
+ {0x0100, 0x00},
+ {0x0300, 0x04},
+ {0x0302, 0x8d},
+ {0x0303, 0x00},
+ {0x030d, 0x26},
+};
+
+static const struct ov5675_reg mode_2592x1944_regs[] = {
+ {0x3002, 0x21},
+ {0x3107, 0x23},
+ {0x3501, 0x20},
+ {0x3503, 0x0c},
+ {0x3508, 0x03},
+ {0x3509, 0x00},
+ {0x3600, 0x66},
+ {0x3602, 0x30},
+ {0x3610, 0xa5},
+ {0x3612, 0x93},
+ {0x3620, 0x80},
+ {0x3642, 0x0e},
+ {0x3661, 0x00},
+ {0x3662, 0x10},
+ {0x3664, 0xf3},
+ {0x3665, 0x9e},
+ {0x3667, 0xa5},
+ {0x366e, 0x55},
+ {0x366f, 0x55},
+ {0x3670, 0x11},
+ {0x3671, 0x11},
+ {0x3672, 0x11},
+ {0x3673, 0x11},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x10},
+ {0x3734, 0x00},
+ {0x373d, 0x24},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x3766, 0x12},
+ {0x37a1, 0x14},
+ {0x37a8, 0x1c},
+ {0x37ab, 0x0f},
+ {0x37c2, 0x04},
+ {0x37cb, 0x00},
+ {0x37cc, 0x00},
+ {0x37cd, 0x00},
+ {0x37ce, 0x00},
+ {0x37d8, 0x02},
+ {0x37d9, 0x08},
+ {0x37dc, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xb3},
+ {0x3808, 0x0a},
+ {0x3809, 0x20},
+ {0x380a, 0x07},
+ {0x380b, 0x98},
+ {0x380c, 0x02},
+ {0x380d, 0xee},
+ {0x380e, 0x07},
+ {0x380f, 0xe4},
+ {0x3811, 0x10},
+ {0x3813, 0x0d},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x381e, 0x02},
+ {0x3820, 0x88},
+ {0x3821, 0x01},
+ {0x3832, 0x04},
+ {0x3c80, 0x01},
+ {0x3c82, 0x00},
+ {0x3c83, 0xc8},
+ {0x3c8c, 0x0f},
+ {0x3c8d, 0xa0},
+ {0x3c90, 0x07},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0xd0},
+ {0x3c95, 0x50},
+ {0x3c96, 0x35},
+ {0x3c97, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x02},
+ {0x4009, 0x0d},
+ {0x400f, 0x80},
+ {0x4013, 0x02},
+ {0x4040, 0x00},
+ {0x4041, 0x07},
+ {0x404c, 0x50},
+ {0x404e, 0x20},
+ {0x4500, 0x06},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x4819, 0x70},
+ {0x4825, 0x32},
+ {0x4826, 0x32},
+ {0x482a, 0x06},
+ {0x4833, 0x08},
+ {0x4837, 0x0d},
+ {0x5000, 0x77},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x5b05, 0x6c},
+ {0x5e10, 0xfc},
+ {0x3500, 0x00},
+ {0x3501, 0x3E},
+ {0x3502, 0x60},
+ {0x3503, 0x08},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x3832, 0x48},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x4003, 0x40},
+ {0x3107, 0x01},
+ {0x3c80, 0x08},
+ {0x3c83, 0xb1},
+ {0x3c8c, 0x10},
+ {0x3c8d, 0x00},
+ {0x3c90, 0x00},
+ {0x3c94, 0x00},
+ {0x3c95, 0x00},
+ {0x3c96, 0x00},
+ {0x37cb, 0x09},
+ {0x37cc, 0x15},
+ {0x37cd, 0x1f},
+ {0x37ce, 0x1f},
+};
+
+static const struct ov5675_reg mode_1296x972_regs[] = {
+ {0x3002, 0x21},
+ {0x3107, 0x23},
+ {0x3501, 0x20},
+ {0x3503, 0x0c},
+ {0x3508, 0x03},
+ {0x3509, 0x00},
+ {0x3600, 0x66},
+ {0x3602, 0x30},
+ {0x3610, 0xa5},
+ {0x3612, 0x93},
+ {0x3620, 0x80},
+ {0x3642, 0x0e},
+ {0x3661, 0x00},
+ {0x3662, 0x08},
+ {0x3664, 0xf3},
+ {0x3665, 0x9e},
+ {0x3667, 0xa5},
+ {0x366e, 0x55},
+ {0x366f, 0x55},
+ {0x3670, 0x11},
+ {0x3671, 0x11},
+ {0x3672, 0x11},
+ {0x3673, 0x11},
+ {0x3714, 0x28},
+ {0x371a, 0x3e},
+ {0x3733, 0x10},
+ {0x3734, 0x00},
+ {0x373d, 0x24},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x3766, 0x12},
+ {0x37a1, 0x14},
+ {0x37a8, 0x1c},
+ {0x37ab, 0x0f},
+ {0x37c2, 0x14},
+ {0x37cb, 0x00},
+ {0x37cc, 0x00},
+ {0x37cd, 0x00},
+ {0x37ce, 0x00},
+ {0x37d8, 0x02},
+ {0x37d9, 0x04},
+ {0x37dc, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0xf4},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x06},
+ {0x3807, 0xb3},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x02},
+ {0x380b, 0xd0},
+ {0x380c, 0x02},
+ {0x380d, 0xee},
+ {0x380e, 0x07},
+ {0x380f, 0xe4},
+ {0x3811, 0x10},
+ {0x3813, 0x09},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x03},
+ {0x3817, 0x01},
+ {0x381e, 0x02},
+ {0x3820, 0x8b},
+ {0x3821, 0x01},
+ {0x3832, 0x04},
+ {0x3c80, 0x01},
+ {0x3c82, 0x00},
+ {0x3c83, 0xc8},
+ {0x3c8c, 0x0f},
+ {0x3c8d, 0xa0},
+ {0x3c90, 0x07},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0xd0},
+ {0x3c95, 0x50},
+ {0x3c96, 0x35},
+ {0x3c97, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x00},
+ {0x4009, 0x07},
+ {0x400f, 0x80},
+ {0x4013, 0x02},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x404c, 0x50},
+ {0x404e, 0x20},
+ {0x4500, 0x06},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x4819, 0x70},
+ {0x4825, 0x32},
+ {0x4826, 0x32},
+ {0x482a, 0x06},
+ {0x4833, 0x08},
+ {0x4837, 0x0d},
+ {0x5000, 0x77},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x5b05, 0x6c},
+ {0x5e10, 0xfc},
+ {0x3500, 0x00},
+ {0x3501, 0x1F},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x3832, 0x48},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x4003, 0x40},
+ {0x3107, 0x01},
+ {0x3c80, 0x08},
+ {0x3c83, 0xb1},
+ {0x3c8c, 0x10},
+ {0x3c8d, 0x00},
+ {0x3c90, 0x00},
+ {0x3c94, 0x00},
+ {0x3c95, 0x00},
+ {0x3c96, 0x00},
+ {0x37cb, 0x09},
+ {0x37cc, 0x15},
+ {0x37cd, 0x1f},
+ {0x37ce, 0x1f},
+};
+
+static const char * const ov5675_test_pattern_menu[] = {
+ "Disabled",
+ "Standard Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Bottom-Top Darker Color Bar"
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV5675_LINK_FREQ_450MHZ,
+};
+
+static const struct ov5675_link_freq_config link_freq_configs[] = {
+ [OV5675_LINK_FREQ_900MBPS] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_900mbps),
+ .regs = mipi_data_rate_900mbps,
+ }
+ }
+};
+
+static const struct ov5675_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .hts = 1500,
+ .vts_def = OV5675_VTS_30FPS,
+ .vts_min = OV5675_VTS_30FPS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = OV5675_LINK_FREQ_900MBPS,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .hts = 1500,
+ .vts_def = OV5675_VTS_30FPS,
+ .vts_min = OV5675_VTS_30FPS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = OV5675_LINK_FREQ_900MBPS,
+ }
+};
+
+struct ov5675 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov5675_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV5675_DATA_LANES;
+
+ do_div(pixel_rate, OV5675_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OV5675_SCLK);
+
+ return ppl;
+}
+
+static int ov5675_read_reg(struct ov5675 *ov5675, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov5675_write_reg(struct ov5675 *ov5675, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int ov5675_write_reg_list(struct ov5675 *ov5675,
+ const struct ov5675_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov5675_write_reg(ov5675, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov5675_update_digital_gain(struct ov5675 *ov5675, u32 d_gain)
+{
+ int ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_MWB_R_GAIN,
+ OV5675_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_MWB_G_GAIN,
+ OV5675_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return ov5675_write_reg(ov5675, OV5675_REG_MWB_B_GAIN,
+ OV5675_REG_VALUE_16BIT, d_gain);
+}
+
+static int ov5675_test_pattern(struct ov5675 *ov5675, u32 pattern)
+{
+ if (pattern)
+ pattern = (pattern - 1) << OV5675_TEST_PATTERN_BAR_SHIFT |
+ OV5675_TEST_PATTERN_ENABLE;
+
+ return ov5675_write_reg(ov5675, OV5675_REG_TEST_PATTERN,
+ OV5675_REG_VALUE_08BIT, pattern);
+}
+
+static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5675 *ov5675 = container_of(ctrl->handler,
+ struct ov5675, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = (ov5675->cur_mode->height + ctrl->val -
+ OV5675_EXPOSURE_MAX_MARGIN) / 2;
+ __v4l2_ctrl_modify_range(ov5675->exposure,
+ ov5675->exposure->minimum,
+ exposure_max, ov5675->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov5675_write_reg(ov5675, OV5675_REG_ANALOG_GAIN,
+ OV5675_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov5675_update_digital_gain(ov5675, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ /* 3 least significant bits of exposure are fractional part */
+ ret = ov5675_write_reg(ov5675, OV5675_REG_EXPOSURE,
+ OV5675_REG_VALUE_24BIT, ctrl->val << 3);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov5675_write_reg(ov5675, OV5675_REG_VTS,
+ OV5675_REG_VALUE_16BIT,
+ ov5675->cur_mode->height + ctrl->val +
+ 10);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov5675_test_pattern(ov5675, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov5675_ctrl_ops = {
+ .s_ctrl = ov5675_set_ctrl,
+};
+
+static int ov5675_init_controls(struct ov5675 *ov5675)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &ov5675->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov5675->mutex;
+ ov5675->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (ov5675->link_freq)
+ ov5675->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov5675->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(OV5675_LINK_FREQ_900MBPS),
+ 1,
+ to_pixel_rate(OV5675_LINK_FREQ_900MBPS));
+ ov5675->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_VBLANK,
+ ov5675->cur_mode->vts_min - ov5675->cur_mode->height,
+ OV5675_VTS_MAX - ov5675->cur_mode->height, 1,
+ ov5675->cur_mode->vts_def - ov5675->cur_mode->height);
+ h_blank = to_pixels_per_line(ov5675->cur_mode->hts,
+ ov5675->cur_mode->link_freq_index) - ov5675->cur_mode->width;
+ ov5675->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (ov5675->hblank)
+ ov5675->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV5675_ANAL_GAIN_MIN, OV5675_ANAL_GAIN_MAX,
+ OV5675_ANAL_GAIN_STEP, OV5675_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV5675_DGTL_GAIN_MIN, OV5675_DGTL_GAIN_MAX,
+ OV5675_DGTL_GAIN_STEP, OV5675_DGTL_GAIN_DEFAULT);
+ exposure_max = (ov5675->cur_mode->vts_def -
+ OV5675_EXPOSURE_MAX_MARGIN) / 2;
+ ov5675->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV5675_EXPOSURE_MIN, exposure_max,
+ OV5675_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov5675_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov5675_test_pattern_menu) - 1,
+ 0, 0, ov5675_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov5675->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov5675_update_pad_format(const struct ov5675_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov5675_start_streaming(struct ov5675 *ov5675)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ const struct ov5675_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = ov5675->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov5675_write_reg_list(ov5675, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov5675->cur_mode->reg_list;
+ ret = ov5675_write_reg_list(ov5675, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov5675->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
+ OV5675_REG_VALUE_08BIT, OV5675_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ov5675_stop_streaming(struct ov5675 *ov5675)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+
+ if (ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
+ OV5675_REG_VALUE_08BIT, OV5675_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5675 *ov5675 = to_ov5675(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov5675->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov5675->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&ov5675->mutex);
+ return ret;
+ }
+
+ ret = ov5675_start_streaming(ov5675);
+ if (ret) {
+ enable = 0;
+ ov5675_stop_streaming(ov5675);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov5675_stop_streaming(ov5675);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov5675->streaming = enable;
+ mutex_unlock(&ov5675->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov5675_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5675 *ov5675 = to_ov5675(sd);
+
+ mutex_lock(&ov5675->mutex);
+ if (ov5675->streaming)
+ ov5675_stop_streaming(ov5675);
+
+ mutex_unlock(&ov5675->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov5675_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5675 *ov5675 = to_ov5675(sd);
+ int ret;
+
+ mutex_lock(&ov5675->mutex);
+ if (ov5675->streaming) {
+ ret = ov5675_start_streaming(ov5675);
+ if (ret) {
+ ov5675->streaming = false;
+ ov5675_stop_streaming(ov5675);
+ mutex_unlock(&ov5675->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&ov5675->mutex);
+
+ return 0;
+}
+
+static int ov5675_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov5675 *ov5675 = to_ov5675(sd);
+ const struct ov5675_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov5675->mutex);
+ ov5675_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ ov5675->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov5675->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov5675->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov5675->vblank,
+ mode->vts_min - mode->height,
+ OV5675_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov5675->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(ov5675->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&ov5675->mutex);
+
+ return 0;
+}
+
+static int ov5675_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov5675 *ov5675 = to_ov5675(sd);
+
+ mutex_lock(&ov5675->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd, cfg,
+ fmt->pad);
+ else
+ ov5675_update_pad_format(ov5675->cur_mode, &fmt->format);
+
+ mutex_unlock(&ov5675->mutex);
+
+ return 0;
+}
+
+static int ov5675_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov5675_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov5675_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov5675 *ov5675 = to_ov5675(sd);
+
+ mutex_lock(&ov5675->mutex);
+ ov5675_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&ov5675->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov5675_video_ops = {
+ .s_stream = ov5675_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov5675_pad_ops = {
+ .set_fmt = ov5675_set_format,
+ .get_fmt = ov5675_get_format,
+ .enum_mbus_code = ov5675_enum_mbus_code,
+ .enum_frame_size = ov5675_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov5675_subdev_ops = {
+ .video = &ov5675_video_ops,
+ .pad = &ov5675_pad_ops,
+};
+
+static const struct media_entity_operations ov5675_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov5675_internal_ops = {
+ .open = ov5675_open,
+};
+
+static int ov5675_identify_module(struct ov5675 *ov5675)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
+ int ret;
+ u32 val;
+
+ ret = ov5675_read_reg(ov5675, OV5675_REG_CHIP_ID,
+ OV5675_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV5675_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV5675_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov5675_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != OV5675_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV5675_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov5675_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5675 *ov5675 = to_ov5675(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov5675->mutex);
+
+ return 0;
+}
+
+static int ov5675_probe(struct i2c_client *client)
+{
+ struct ov5675 *ov5675;
+ int ret;
+
+ ret = ov5675_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ ov5675 = devm_kzalloc(&client->dev, sizeof(*ov5675), GFP_KERNEL);
+ if (!ov5675)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
+ ret = ov5675_identify_module(ov5675);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&ov5675->mutex);
+ ov5675->cur_mode = &supported_modes[0];
+ ret = ov5675_init_controls(ov5675);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov5675->sd.internal_ops = &ov5675_internal_ops;
+ ov5675->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5675->sd.entity.ops = &ov5675_subdev_entity_ops;
+ ov5675->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov5675->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov5675->sd.entity, 1, &ov5675->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&ov5675->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov5675->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov5675->sd.ctrl_handler);
+ mutex_destroy(&ov5675->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ov5675_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov5675_suspend, ov5675_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov5675_acpi_ids[] = {
+ {"OVTI5675"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov5675_acpi_ids);
+#endif
+
+static struct i2c_driver ov5675_i2c_driver = {
+ .driver = {
+ .name = "ov5675",
+ .pm = &ov5675_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov5675_acpi_ids),
+ },
+ .probe_new = ov5675_probe,
+ .remove = ov5675_remove,
+};
+
+module_i2c_driver(ov5675_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV5675 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index e65a94353175..34b7046d9702 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -823,9 +823,6 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
-#else
- mutex_unlock(&ov5695->mutex);
- return -ENOTTY;
#endif
} else {
ov5695->cur_mode = mode;
@@ -856,7 +853,7 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
#else
mutex_unlock(&ov5695->mutex);
- return -ENOTTY;
+ return -EINVAL;
#endif
} else {
fmt->format.width = mode->width;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 53385c277792..b42b289faaef 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1110,10 +1110,8 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
*mbus_fmt = format->format;
- return 0;
-#else
- return -ENOTTY;
#endif
+ return 0;
}
ret = ov7670_try_fmt_internal(sd, &format->format, &info->fmt, &info->wsize);
@@ -1146,7 +1144,7 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
format->format = *mbus_fmt;
return 0;
#else
- return -ENOTTY;
+ return -EINVAL;
#endif
} else {
format->format = info->format;
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 2e9a758736a1..2cc6a678069a 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1352,8 +1352,7 @@ static const struct v4l2_subdev_ops ov772x_subdev_ops = {
* i2c_driver function
*/
-static int ov772x_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
+static int ov772x_probe(struct i2c_client *client)
{
struct ov772x_priv *priv;
int ret;
@@ -1486,7 +1485,7 @@ static struct i2c_driver ov772x_i2c_driver = {
.name = "ov772x",
.of_match_table = ov772x_of_match,
},
- .probe = ov772x_probe,
+ .probe_new = ov772x_probe,
.remove = ov772x_remove,
.id_table = ov772x_id,
};
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 70bb870b1d08..732655fe4ba3 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -827,13 +827,9 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
*mbus_fmt = format->format;
-
+#endif
mutex_unlock(&ov7740->mutex);
return 0;
-#else
- ret = -ENOTTY;
- goto error;
-#endif
}
ret = ov7740_try_fmt_internal(sd, &format->format, &ovfmt, &fsize);
@@ -868,7 +864,7 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
format->format = *mbus_fmt;
ret = 0;
#else
- ret = -ENOTTY;
+ ret = -EINVAL;
#endif
} else {
format->format = ov7740->format;
@@ -1066,8 +1062,7 @@ static const struct regmap_config ov7740_regmap_config = {
.max_register = OV7740_MAX_REGISTER,
};
-static int ov7740_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov7740_probe(struct i2c_client *client)
{
struct ov7740 *ov7740;
struct v4l2_subdev *sd;
@@ -1229,7 +1224,7 @@ static struct i2c_driver ov7740_i2c_driver = {
.pm = &ov7740_pm_ops,
.of_match_table = of_match_ptr(ov7740_of_match),
},
- .probe = ov7740_probe,
+ .probe_new = ov7740_probe,
.remove = ov7740_remove,
.id_table = ov7740_id,
};
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index cd347d6b7b9d..8655842af275 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1106,7 +1106,10 @@ static int ov8856_check_hwcfg(struct device *dev)
if (!fwnode)
return -ENXIO;
- fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret)
+ return ret;
+
if (mclk != OV8856_MCLK) {
dev_err(dev, "external clock %d is not supported", mclk);
return -EINVAL;
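
Editor's note: the ov8856 fix checks the return value of fwnode_property_read_u32(); without it, a missing "clock-frequency" property would leave mclk uninitialized and the comparison against OV8856_MCLK would read garbage. The general pattern, sketched with placeholder names and the device_property_* wrapper rather than the driver's exact call:

#include <linux/device.h>
#include <linux/property.h>

static int example_get_mclk(struct device *dev, u32 expected, u32 *mclk)
{
	int ret;

	ret = device_property_read_u32(dev, "clock-frequency", mclk);
	if (ret)
		return ret;	/* property missing or malformed */

	if (*mclk != expected) {
		dev_err(dev, "external clock %u is not supported\n", *mclk);
		return -EINVAL;
	}

	return 0;
}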
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 30ab2225fbd0..4fe68aa55789 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -703,6 +703,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
for (m = 6; m >= 0; m--)
if (gain >= (1 << m) * 16)
break;
+
+ /* Sanity check: don't adjust the gain with a negative value */
+ if (m < 0)
+ return -EINVAL;
+
rgain = (gain - ((1 << m) * 16)) / (1 << m);
rgain |= (((1 << m) - 1) << 4);
@@ -1485,8 +1490,7 @@ out:
return ret;
}
-static int ov965x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov965x_probe(struct i2c_client *client)
{
const struct ov9650_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
@@ -1613,7 +1617,7 @@ static struct i2c_driver ov965x_i2c_driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ov965x_of_match),
},
- .probe = ov965x_probe,
+ .probe_new = ov965x_probe,
.remove = ov965x_remove,
.id_table = ov965x_id,
};
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 7633aebd8c06..5b4c4a3547c9 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1650,8 +1650,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
return 0;
}
-static int s5c73m3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s5c73m3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct v4l2_subdev *sd;
@@ -1806,7 +1805,7 @@ static struct i2c_driver s5c73m3_i2c_driver = {
.of_match_table = of_match_ptr(s5c73m3_of_match),
.name = DRIVER_NAME,
},
- .probe = s5c73m3_probe,
+ .probe_new = s5c73m3_probe,
.remove = s5c73m3_remove,
.id_table = s5c73m3_id,
};
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 8e6de06b3e72..cdfe008ba39f 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1946,8 +1946,7 @@ static int s5k5baf_configure_regulators(struct s5k5baf *state)
return ret;
}
-static int s5k5baf_probe(struct i2c_client *c,
- const struct i2c_device_id *id)
+static int s5k5baf_probe(struct i2c_client *c)
{
struct s5k5baf *state;
int ret;
@@ -2046,7 +2045,7 @@ static struct i2c_driver s5k5baf_i2c_driver = {
.of_match_table = s5k5baf_of_match,
.name = S5K5BAF_DRIVER_NAME
},
- .probe = s5k5baf_probe,
+ .probe_new = s5k5baf_probe,
.remove = s5k5baf_remove,
.id_table = s5k5baf_id,
};
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 3b7721f81be2..bc6cc5a558db 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -275,8 +275,7 @@ static const struct v4l2_subdev_ops s5k6a3_subdev_ops = {
.pad = &s5k6a3_pad_ops,
};
-static int s5k6a3_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int s5k6a3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct s5k6a3 *sensor;
@@ -378,7 +377,7 @@ static struct i2c_driver s5k6a3_driver = {
.of_match_table = of_match_ptr(s5k6a3_of_match),
.name = S5K6A3_DRV_NAME,
},
- .probe = s5k6a3_probe,
+ .probe_new = s5k6a3_probe,
.remove = s5k6a3_remove,
.id_table = s5k6a3_ids,
};
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 2d78e846d822..9adf8e034e7d 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2847,8 +2847,7 @@ out_err:
return NULL;
}
-static int smiapp_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int smiapp_probe(struct i2c_client *client)
{
struct smiapp_sensor *sensor;
struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
@@ -3172,7 +3171,7 @@ static struct i2c_driver smiapp_i2c_driver = {
.name = SMIAPP_NAME,
.pm = &smiapp_pm_ops,
},
- .probe = smiapp_probe,
+ .probe_new = smiapp_probe,
.remove = smiapp_remove,
.id_table = smiapp_id_table,
};
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index bc2e35e5ce61..dbbab75f135e 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2026,8 +2026,7 @@ static inline int tc358743_probe_of(struct tc358743_state *state)
}
#endif
-static int tc358743_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tc358743_probe(struct i2c_client *client)
{
static struct v4l2_dv_timings default_timing =
V4L2_DV_BT_CEA_640X480P59_94;
@@ -2222,7 +2221,7 @@ static struct i2c_driver tc358743_driver = {
.name = "tc358743",
.of_match_table = of_match_ptr(tc358743_of_match),
},
- .probe = tc358743_probe,
+ .probe_new = tc358743_probe,
.remove = tc358743_remove,
.id_table = tc358743_id,
};
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index a62ede096636..5e68182001ec 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2691,7 +2691,13 @@ static int tda1997x_probe(struct i2c_client *client,
}
ret = 0x34 + ((io_read(sd, REG_SLAVE_ADDR)>>4) & 0x03);
- state->client_cec = i2c_new_dummy(client->adapter, ret);
+ state->client_cec = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter, ret);
+ if (IS_ERR(state->client_cec)) {
+ ret = PTR_ERR(state->client_cec);
+ goto err_free_mutex;
+ }
+
v4l_info(client, "CEC slave address 0x%02x\n", ret);
ret = tda1997x_core_init(sd);
@@ -2798,7 +2804,6 @@ static int tda1997x_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&state->hdl);
regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
- i2c_unregister_device(state->client_cec);
cancel_delayed_work(&state->delayed_work_enable_hpd);
mutex_destroy(&state->page_lock);
mutex_destroy(&state->lock);
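
Editor's note: the tda1997x change switches to devm_i2c_new_dummy_device(), so the dummy CEC client is released automatically when the driver unbinds, which is why the explicit i2c_unregister_device() call disappears from tda1997x_remove(). A minimal sketch of the managed variant, with placeholder names and a simplified error path:

#include <linux/err.h>
#include <linux/i2c.h>

static int example_probe(struct i2c_client *client)
{
	struct i2c_client *aux;

	/* Managed allocation: no matching unregister call is needed in
	 * remove(); the dummy client is torn down when the device unbinds. */
	aux = devm_i2c_new_dummy_device(&client->dev, client->adapter, 0x34);
	if (IS_ERR(aux))
		return PTR_ERR(aux);

	return 0;
}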
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index f5ee28058ea2..c52fe84cba1b 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -436,8 +436,7 @@ static const struct v4l2_subdev_ops ths8200_ops = {
.pad = &ths8200_pad_ops,
};
-static int ths8200_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ths8200_probe(struct i2c_client *client)
{
struct ths8200_state *state;
struct v4l2_subdev *sd;
@@ -502,7 +501,7 @@ static struct i2c_driver ths8200_driver = {
.name = "ths8200",
.of_match_table = of_match_ptr(ths8200_of_match),
},
- .probe = ths8200_probe,
+ .probe_new = ths8200_probe,
.remove = ths8200_remove,
.id_table = ths8200_id,
};
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index eaddd977ba40..edad49cebcdf 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1636,11 +1636,13 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
dev_err(decoder->sd.dev,
"missing type property in node %pOFn\n",
child);
+ of_node_put(child);
goto err_connector;
}
if (input_type >= TVP5150_INPUT_NUM) {
ret = -EINVAL;
+ of_node_put(child);
goto err_connector;
}
@@ -1651,6 +1653,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
dev_err(decoder->sd.dev,
"input %s with same type already exists\n",
input->name);
+ of_node_put(child);
ret = -EINVAL;
goto err_connector;
}
@@ -1672,6 +1675,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
dev_err(decoder->sd.dev,
"missing label property in node %pOFn\n",
child);
+ of_node_put(child);
goto err_connector;
}
@@ -1691,8 +1695,7 @@ static const char * const tvp5150_test_patterns[2] = {
"Black screen"
};
-static int tvp5150_probe(struct i2c_client *c,
- const struct i2c_device_id *id)
+static int tvp5150_probe(struct i2c_client *c)
{
struct tvp5150 *core;
struct v4l2_subdev *sd;
@@ -1841,7 +1844,7 @@ static struct i2c_driver tvp5150_driver = {
.of_match_table = of_match_ptr(tvp5150_of_match),
.name = "tvp5150",
},
- .probe = tvp5150_probe,
+ .probe_new = tvp5150_probe,
.remove = tvp5150_remove,
.id_table = tvp5150_id,
};
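
Editor's note: the tvp5150 hunks add of_node_put(child) on every early exit from the connector-parsing loop. for_each_child_of_node() takes a reference on each child it hands out and only drops it when the loop advances, so any goto or return out of the loop body must drop the reference by hand. A minimal sketch of the rule, with a hypothetical parser:

#include <linux/of.h>

static int example_parse_children(struct device_node *np)
{
	struct device_node *child;
	u32 type;
	int ret;

	for_each_child_of_node(np, child) {
		ret = of_property_read_u32(child, "type", &type);
		if (ret) {
			/* Leaving the loop early: drop the reference that
			 * for_each_child_of_node() took on this child. */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}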
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 1b8175cab017..de313b1306da 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -930,7 +930,7 @@ done:
* Returns zero when successful, -EINVAL if register read fails or
* -EIO if i2c access is not available.
*/
-static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
+static int tvp7002_probe(struct i2c_client *c)
{
struct tvp7002_config *pdata = tvp7002_get_pdata(c);
struct v4l2_subdev *sd;
@@ -1075,7 +1075,7 @@ static struct i2c_driver tvp7002_driver = {
.of_match_table = of_match_ptr(tvp7002_of_match),
.name = TVP7002_MODULE_NAME,
},
- .probe = tvp7002_probe,
+ .probe_new = tvp7002_probe,
.remove = tvp7002_remove,
.id_table = tvp7002_id,
};
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 612d1c0010c1..a359da7773a9 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -503,77 +503,65 @@ static const unsigned int BTTV_TVNORMS = ARRAY_SIZE(bttv_tvnorms);
packed pixel formats must come first */
static const struct bttv_format formats[] = {
{
- .name = "8 bpp, gray",
.fourcc = V4L2_PIX_FMT_GREY,
.btformat = BT848_COLOR_FMT_Y8,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "8 bpp, dithered color",
.fourcc = V4L2_PIX_FMT_HI240,
.btformat = BT848_COLOR_FMT_RGB8,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED | FORMAT_FLAGS_DITHER,
},{
- .name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.btformat = BT848_COLOR_FMT_RGB15,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.btformat = BT848_COLOR_FMT_RGB15,
.btswap = 0x03, /* byteswap */
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.btformat = BT848_COLOR_FMT_RGB16,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.btformat = BT848_COLOR_FMT_RGB16,
.btswap = 0x03, /* byteswap */
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.btformat = BT848_COLOR_FMT_RGB24,
.depth = 24,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.btformat = BT848_COLOR_FMT_RGB32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
.btformat = BT848_COLOR_FMT_RGB32,
.btswap = 0x0f, /* byte+word swap */
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.btformat = BT848_COLOR_FMT_YUY2,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.btformat = BT848_COLOR_FMT_YUY2,
.btswap = 0x03, /* byteswap */
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "4:2:2, planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.btformat = BT848_COLOR_FMT_YCrCb422,
.depth = 16,
@@ -581,7 +569,6 @@ static const struct bttv_format formats[] = {
.hshift = 1,
.vshift = 0,
},{
- .name = "4:2:0, planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV420,
.btformat = BT848_COLOR_FMT_YCrCb422,
.depth = 12,
@@ -589,7 +576,6 @@ static const struct bttv_format formats[] = {
.hshift = 1,
.vshift = 1,
},{
- .name = "4:2:0, planar, Y-Cr-Cb",
.fourcc = V4L2_PIX_FMT_YVU420,
.btformat = BT848_COLOR_FMT_YCrCb422,
.depth = 12,
@@ -597,7 +583,6 @@ static const struct bttv_format formats[] = {
.hshift = 1,
.vshift = 1,
},{
- .name = "4:1:1, planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV411P,
.btformat = BT848_COLOR_FMT_YCrCb411,
.depth = 12,
@@ -605,7 +590,6 @@ static const struct bttv_format formats[] = {
.hshift = 2,
.vshift = 0,
},{
- .name = "4:1:0, planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV410,
.btformat = BT848_COLOR_FMT_YCrCb411,
.depth = 9,
@@ -613,7 +597,6 @@ static const struct bttv_format formats[] = {
.hshift = 2,
.vshift = 2,
},{
- .name = "4:1:0, planar, Y-Cr-Cb",
.fourcc = V4L2_PIX_FMT_YVU410,
.btformat = BT848_COLOR_FMT_YCrCb411,
.depth = 9,
@@ -621,7 +604,6 @@ static const struct bttv_format formats[] = {
.hshift = 2,
.vshift = 2,
},{
- .name = "raw scanlines",
.fourcc = -1,
.btformat = BT848_COLOR_FMT_RAW,
.depth = 8,
@@ -2500,7 +2482,6 @@ static int bttv_enum_fmt_cap_ovr(struct v4l2_fmtdesc *f)
return -EINVAL;
f->pixelformat = formats[i].fourcc;
- strscpy(f->description, formats[i].name, sizeof(f->description));
return i;
}
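
With the .name strings gone, the bttv debug paths above log the raw fourcc as hex. If a human-readable form is ever wanted in such messages, the fourcc can be unpacked into its four ASCII characters; a minimal sketch (hypothetical helper, not part of this patch):

static void fourcc_to_string(u32 fourcc, char buf[5])
{
	/* V4L2 fourccs are little-endian: v4l2_fourcc('Y','U','Y','V') == 0x56595559 */
	buf[0] = fourcc & 0xff;
	buf[1] = (fourcc >> 8) & 0xff;
	buf[2] = (fourcc >> 16) & 0xff;
	buf[3] = (fourcc >> 24) & 0xff;
	buf[4] = '\0';
}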
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 9adfac4d5187..492bc85c2700 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -84,7 +84,7 @@ static void ir_enltv_handle_key(struct bttv *btv)
data = ir_extract_bits(gpio, ir->mask_keycode);
/* Check if it is keyup */
- keyup = (gpio & ir->mask_keyup) ? 1 << 31 : 0;
+ keyup = (gpio & ir->mask_keyup) ? 1UL << 31 : 0;
if ((ir->last_gpio & 0x7f) != data) {
dprintk("gpio=0x%x code=%d | %s\n",
@@ -95,7 +95,7 @@ static void ir_enltv_handle_key(struct bttv *btv)
if (keyup)
rc_keyup(ir->dev);
} else {
- if ((ir->last_gpio & 1 << 31) == keyup)
+ if ((ir->last_gpio & 1UL << 31) == keyup)
return;
dprintk("(cnt) gpio=0x%x code=%d | %s\n",
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 6b59ca337c7f..fc8708047be8 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -699,9 +699,9 @@ bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf)
const struct bttv_tvnorm *tvnorm = bttv_tvnorms + buf->tvnorm;
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
- dprintk("%d: buffer field: %s format: %s size: %dx%d\n",
+ dprintk("%d: buffer field: %s format: 0x%08x size: %dx%d\n",
btv->c.nr, v4l2_field_names[buf->vb.field],
- buf->fmt->name, buf->vb.width, buf->vb.height);
+ buf->fmt->fourcc, buf->vb.width, buf->vb.height);
/* packed pixel modes */
if (buf->fmt->flags & FORMAT_FLAGS_PACKED) {
@@ -860,9 +860,9 @@ bttv_overlay_risc(struct bttv *btv,
struct bttv_buffer *buf)
{
/* check interleave, bottom+top fields */
- dprintk("%d: overlay fields: %s format: %s size: %dx%d\n",
+ dprintk("%d: overlay fields: %s format: 0x%08x size: %dx%d\n",
btv->c.nr, v4l2_field_names[buf->vb.field],
- fmt->name, ov->w.width, ov->w.height);
+ fmt->fourcc, ov->w.width, ov->w.height);
/* calculate geometry */
bttv_calc_geo(btv,&buf->geo,ov->w.width,ov->w.height,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index b159d6ddbfcf..4abf43657846 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -99,7 +99,6 @@ struct bttv_tvnorm {
extern const struct bttv_tvnorm bttv_tvnorms[];
struct bttv_format {
- char *name;
int fourcc; /* video4linux 2 */
int btformat; /* BT848_COLOR_FMT_* */
int btswap; /* BT848_COLOR_CTL_* */
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 64df9d491941..02ebd43e672e 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -393,7 +393,7 @@ static struct mt352_config advbt771_samsung_tdtc9251dh0_config = {
.demod_init = advbt771_samsung_tdtc9251dh0_demod_init,
};
-static struct dst_config dst_config = {
+static const struct dst_config dst_config = {
.demod_address = 0x55,
};
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 4885e833c052..0695078ef812 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -186,20 +186,16 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
{
struct pci_dev *pci_dev = cobalt->pci_dev;
struct pci_dev *pci_bus_dev = cobalt->pci_dev->bus->self;
- int offset;
- int bus_offset;
u32 capa;
u16 stat, ctrl;
- offset = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
- bus_offset = pci_find_capability(pci_bus_dev, PCI_CAP_ID_EXP);
- if (!offset || !bus_offset)
+ if (!pci_is_pcie(pci_dev) || !pci_is_pcie(pci_bus_dev))
return;
/* Device */
- pci_read_config_dword(pci_dev, offset + PCI_EXP_DEVCAP, &capa);
- pci_read_config_word(pci_dev, offset + PCI_EXP_DEVCTL, &ctrl);
- pci_read_config_word(pci_dev, offset + PCI_EXP_DEVSTA, &stat);
+ pcie_capability_read_dword(pci_dev, PCI_EXP_DEVCAP, &capa);
+ pcie_capability_read_word(pci_dev, PCI_EXP_DEVCTL, &ctrl);
+ pcie_capability_read_word(pci_dev, PCI_EXP_DEVSTA, &stat);
cobalt_info("PCIe device capability 0x%08x: Max payload %d\n",
capa, get_payload_size(capa & PCI_EXP_DEVCAP_PAYLOAD));
cobalt_info("PCIe device control 0x%04x: Max payload %d. Max read request %d\n",
@@ -209,9 +205,9 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
cobalt_info("PCIe device status 0x%04x\n", stat);
/* Link */
- pci_read_config_dword(pci_dev, offset + PCI_EXP_LNKCAP, &capa);
- pci_read_config_word(pci_dev, offset + PCI_EXP_LNKCTL, &ctrl);
- pci_read_config_word(pci_dev, offset + PCI_EXP_LNKSTA, &stat);
+ pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &capa);
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKCTL, &ctrl);
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
(capa & PCI_EXP_LNKCAP_MLW) >> 4);
@@ -221,15 +217,15 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
(stat & PCI_EXP_LNKSTA_NLW) >> 4);
/* Bus */
- pci_read_config_dword(pci_bus_dev, bus_offset + PCI_EXP_LNKCAP, &capa);
+ pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
(capa & PCI_EXP_LNKCAP_MLW) >> 4);
/* Slot */
- pci_read_config_dword(pci_dev, offset + PCI_EXP_SLTCAP, &capa);
- pci_read_config_word(pci_dev, offset + PCI_EXP_SLTCTL, &ctrl);
- pci_read_config_word(pci_dev, offset + PCI_EXP_SLTSTA, &stat);
+ pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+ pcie_capability_read_word(pci_dev, PCI_EXP_SLTCTL, &ctrl);
+ pcie_capability_read_word(pci_dev, PCI_EXP_SLTSTA, &stat);
cobalt_info("PCIe slot capability 0x%08x\n", capa);
cobalt_info("PCIe slot control 0x%04x\n", ctrl);
cobalt_info("PCIe slot status 0x%04x\n", stat);
@@ -238,26 +234,22 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
{
struct pci_dev *pci_dev = cobalt->pci_dev;
- unsigned offset;
u16 link;
- offset = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
- if (!offset)
+ if (!pci_is_pcie(pci_dev))
return 0;
- pci_read_config_word(pci_dev, offset + PCI_EXP_LNKSTA, &link);
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
return (link & PCI_EXP_LNKSTA_NLW) >> 4;
}
static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
{
struct pci_dev *pci_dev = cobalt->pci_dev->bus->self;
- unsigned offset;
u32 link;
- offset = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
- if (!offset)
+ if (!pci_is_pcie(pci_dev))
return 0;
- pci_read_config_dword(pci_dev, offset + PCI_EXP_LNKCAP, &link);
+ pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
return (link & PCI_EXP_LNKCAP_MLW) >> 4;
}
@@ -592,7 +584,7 @@ static int cobalt_subdevs_hsma_init(struct cobalt *cobalt)
.cec_clk = 12000000,
};
static struct i2c_board_info adv7511_info = {
- .type = "adv7511",
+ .type = "adv7511-v4l2",
.addr = 0x39, /* 0x39 or 0x3d */
.platform_data = &adv7511_pdata,
};
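
The pcie_capability_*() accessors resolve the PCI Express capability offset internally, so the manual pci_find_capability() walk becomes unnecessary; a minimal before/after sketch (hypothetical pdev, error handling omitted):

u16 lnksta;
int pos;

/* old pattern: look up the capability, then read relative to it */
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos)
	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);

/* new pattern: pci_is_pcie() gates the block, the helper finds the offset */
if (pci_is_pcie(pdev))
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);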
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index 429bee4ef79c..bca68572b324 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -11,6 +11,7 @@
#ifndef COBALT_DRIVER_H
#define COBALT_DRIVER_H
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -61,37 +62,37 @@
#define COBALT_CLK 50000000
/* System status register */
-#define COBALT_SYSSTAT_DIP0_MSK (1 << 0)
-#define COBALT_SYSSTAT_DIP1_MSK (1 << 1)
-#define COBALT_SYSSTAT_HSMA_PRSNTN_MSK (1 << 2)
-#define COBALT_SYSSTAT_FLASH_RDYBSYN_MSK (1 << 3)
-#define COBALT_SYSSTAT_VI0_5V_MSK (1 << 4)
-#define COBALT_SYSSTAT_VI0_INT1_MSK (1 << 5)
-#define COBALT_SYSSTAT_VI0_INT2_MSK (1 << 6)
-#define COBALT_SYSSTAT_VI0_LOST_DATA_MSK (1 << 7)
-#define COBALT_SYSSTAT_VI1_5V_MSK (1 << 8)
-#define COBALT_SYSSTAT_VI1_INT1_MSK (1 << 9)
-#define COBALT_SYSSTAT_VI1_INT2_MSK (1 << 10)
-#define COBALT_SYSSTAT_VI1_LOST_DATA_MSK (1 << 11)
-#define COBALT_SYSSTAT_VI2_5V_MSK (1 << 12)
-#define COBALT_SYSSTAT_VI2_INT1_MSK (1 << 13)
-#define COBALT_SYSSTAT_VI2_INT2_MSK (1 << 14)
-#define COBALT_SYSSTAT_VI2_LOST_DATA_MSK (1 << 15)
-#define COBALT_SYSSTAT_VI3_5V_MSK (1 << 16)
-#define COBALT_SYSSTAT_VI3_INT1_MSK (1 << 17)
-#define COBALT_SYSSTAT_VI3_INT2_MSK (1 << 18)
-#define COBALT_SYSSTAT_VI3_LOST_DATA_MSK (1 << 19)
-#define COBALT_SYSSTAT_VIHSMA_5V_MSK (1 << 20)
-#define COBALT_SYSSTAT_VIHSMA_INT1_MSK (1 << 21)
-#define COBALT_SYSSTAT_VIHSMA_INT2_MSK (1 << 22)
-#define COBALT_SYSSTAT_VIHSMA_LOST_DATA_MSK (1 << 23)
-#define COBALT_SYSSTAT_VOHSMA_INT1_MSK (1 << 24)
-#define COBALT_SYSSTAT_VOHSMA_PLL_LOCKED_MSK (1 << 25)
-#define COBALT_SYSSTAT_VOHSMA_LOST_DATA_MSK (1 << 26)
-#define COBALT_SYSSTAT_AUD_PLL_LOCKED_MSK (1 << 28)
-#define COBALT_SYSSTAT_AUD_IN_LOST_DATA_MSK (1 << 29)
-#define COBALT_SYSSTAT_AUD_OUT_LOST_DATA_MSK (1 << 30)
-#define COBALT_SYSSTAT_PCIE_SMBCLK_MSK (1 << 31)
+#define COBALT_SYSSTAT_DIP0_MSK BIT(0)
+#define COBALT_SYSSTAT_DIP1_MSK BIT(1)
+#define COBALT_SYSSTAT_HSMA_PRSNTN_MSK BIT(2)
+#define COBALT_SYSSTAT_FLASH_RDYBSYN_MSK BIT(3)
+#define COBALT_SYSSTAT_VI0_5V_MSK BIT(4)
+#define COBALT_SYSSTAT_VI0_INT1_MSK BIT(5)
+#define COBALT_SYSSTAT_VI0_INT2_MSK BIT(6)
+#define COBALT_SYSSTAT_VI0_LOST_DATA_MSK BIT(7)
+#define COBALT_SYSSTAT_VI1_5V_MSK BIT(8)
+#define COBALT_SYSSTAT_VI1_INT1_MSK BIT(9)
+#define COBALT_SYSSTAT_VI1_INT2_MSK BIT(10)
+#define COBALT_SYSSTAT_VI1_LOST_DATA_MSK BIT(11)
+#define COBALT_SYSSTAT_VI2_5V_MSK BIT(12)
+#define COBALT_SYSSTAT_VI2_INT1_MSK BIT(13)
+#define COBALT_SYSSTAT_VI2_INT2_MSK BIT(14)
+#define COBALT_SYSSTAT_VI2_LOST_DATA_MSK BIT(15)
+#define COBALT_SYSSTAT_VI3_5V_MSK BIT(16)
+#define COBALT_SYSSTAT_VI3_INT1_MSK BIT(17)
+#define COBALT_SYSSTAT_VI3_INT2_MSK BIT(18)
+#define COBALT_SYSSTAT_VI3_LOST_DATA_MSK BIT(19)
+#define COBALT_SYSSTAT_VIHSMA_5V_MSK BIT(20)
+#define COBALT_SYSSTAT_VIHSMA_INT1_MSK BIT(21)
+#define COBALT_SYSSTAT_VIHSMA_INT2_MSK BIT(22)
+#define COBALT_SYSSTAT_VIHSMA_LOST_DATA_MSK BIT(23)
+#define COBALT_SYSSTAT_VOHSMA_INT1_MSK BIT(24)
+#define COBALT_SYSSTAT_VOHSMA_PLL_LOCKED_MSK BIT(25)
+#define COBALT_SYSSTAT_VOHSMA_LOST_DATA_MSK BIT(26)
+#define COBALT_SYSSTAT_AUD_PLL_LOCKED_MSK BIT(28)
+#define COBALT_SYSSTAT_AUD_IN_LOST_DATA_MSK BIT(29)
+#define COBALT_SYSSTAT_AUD_OUT_LOST_DATA_MSK BIT(30)
+#define COBALT_SYSSTAT_PCIE_SMBCLK_MSK BIT(31)
/* Cobalt memory map */
#define COBALT_I2C_0_BASE 0x0
diff --git a/drivers/media/pci/cobalt/cobalt-flash.c b/drivers/media/pci/cobalt/cobalt-flash.c
index ef96e0f956d2..1d3c64b4cf6d 100644
--- a/drivers/media/pci/cobalt/cobalt-flash.c
+++ b/drivers/media/pci/cobalt/cobalt-flash.c
@@ -69,7 +69,7 @@ static void flash_copy_to(struct map_info *map, unsigned long to,
pr_info("%s: offset 0x%x: length %zu\n", __func__, dest, len);
while (len) {
- u16 data = 0xffff;
+ u16 data;
do {
data = *src << (8 * (dest & 1));
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 39dabd4da60f..c5207501d5e0 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -688,15 +688,12 @@ static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
{
switch (f->index) {
case 0:
- strscpy(f->description, "YUV 4:2:2", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_YUYV;
break;
case 1:
- strscpy(f->description, "RGB24", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_RGB24;
break;
case 2:
- strscpy(f->description, "RGB32", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_BGR32;
break;
default:
@@ -788,7 +785,6 @@ static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
pix->sizeimage = pix->bytesperline * pix->height;
pix->field = V4L2_FIELD_NONE;
- pix->priv = 0;
return 0;
}
@@ -893,11 +889,9 @@ static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
{
switch (f->index) {
case 0:
- strscpy(f->description, "YUV 4:2:2", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_YUYV;
break;
case 1:
- strscpy(f->description, "RGB32", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_BGR32;
break;
default:
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index d9ffc9c359ca..85f3e7307538 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -78,7 +78,7 @@ static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
return 0;
}
for (i = 0; i < 32; i++) {
- if ((1 << i) & set)
+ if (BIT(i) & set)
return 1 << i;
}
return 0;
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index 967ae2939099..162480ec68ca 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -6,7 +6,7 @@
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
-#include <stdarg.h>
+#include <linux/bitops.h>
#include "cx18-driver.h"
#include "cx18-io.h"
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 82f96a4091ac..2327fe612610 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1339,7 +1339,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index b254473db9a3..8098b15493de 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -67,7 +67,6 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
#define FORMAT_FLAGS_PACKED 0x01
static struct cx23885_fmt formats[] = {
{
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
@@ -411,9 +410,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
default:
BUG();
}
- dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+ dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index,
- dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
+ dev->width, dev->height, dev->fmt->depth, dev->fmt->fourcc,
(unsigned long)buf->risc.dma);
return 0;
}
@@ -647,8 +646,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
- strscpy(f->description, formats[f->index].name,
- sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 9da66fdd5a0d..a95a2e4c6a0d 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -127,7 +127,6 @@
V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
struct cx23885_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index de7641170478..a10261da0db6 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -35,12 +35,10 @@ MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
static const struct cx25821_fmt formats[] = {
{
- .name = "4:1:1, packed, Y41P",
.fourcc = V4L2_PIX_FMT_Y41P,
.depth = 12,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
@@ -215,9 +213,9 @@ static int cx25821_buffer_prepare(struct vb2_buffer *vb)
break;
}
- dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+ dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index, chan->width, chan->height,
- chan->fmt->depth, chan->fmt->name,
+ chan->fmt->depth, chan->fmt->fourcc,
(unsigned long)buf->risc.dma);
return ret;
@@ -311,7 +309,6 @@ static int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
- strscpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 47dbaae78509..017307984094 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -83,7 +83,6 @@
#define VID_CHANNEL_NUM 8
struct cx25821_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 200d68827073..d3da7f4297af 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -805,9 +805,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index e59a74514c7c..dcc0f02aeb70 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -69,62 +69,52 @@ MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
static const struct cx8800_fmt formats[] = {
{
- .name = "8 bpp, gray",
.fourcc = V4L2_PIX_FMT_GREY,
.cxformat = ColorFormatY8,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.cxformat = ColorFormatRGB15,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.cxformat = ColorFormatRGB15 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.cxformat = ColorFormatRGB16,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.cxformat = ColorFormatRGB16 | ColorFormatBSWAP,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.cxformat = ColorFormatRGB24,
.depth = 24,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.cxformat = ColorFormatRGB32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
.cxformat = ColorFormatRGB32 | ColorFormatBSWAP |
ColorFormatWSWAP,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.cxformat = ColorFormatYUY2,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.cxformat = ColorFormatYUY2 | ColorFormatBSWAP,
.depth = 16,
@@ -489,9 +479,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
break;
}
dprintk(2,
- "[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.vb2_buf.index,
- core->width, core->height, dev->fmt->depth, dev->fmt->name,
+ "[%p/%d] %s - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
+ buf, buf->vb.vb2_buf.index, __func__,
+ core->width, core->height, dev->fmt->depth, dev->fmt->fourcc,
(unsigned long)buf->risc.dma);
return 0;
}
@@ -829,7 +819,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
- strscpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index a70a50dc3edf..744a22328ebc 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -99,7 +99,6 @@ static inline unsigned int norm_maxh(v4l2_std_id norm)
/* static data */
struct cx8800_fmt {
- const char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index b4cdda50e742..7480f0d3ad0f 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -306,7 +306,6 @@ static int dt3155_enum_fmt_vid_cap(struct file *filp,
if (f->index)
return -EINVAL;
f->pixelformat = V4L2_PIX_FMT_GREY;
- strscpy(f->description, "8-bit Greyscale", sizeof(f->description));
return 0;
}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index c1d133e17e4b..1adfdc7ab0db 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1475,57 +1475,66 @@ static const struct v4l2_async_notifier_operations cio2_async_ops = {
.complete = cio2_notifier_complete,
};
-static int cio2_fwnode_parse(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
+static int cio2_parse_firmware(struct cio2_device *cio2)
{
- struct sensor_async_subdev *s_asd =
- container_of(asd, struct sensor_async_subdev, asd);
+ unsigned int i;
+ int ret;
- if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) {
- dev_err(dev, "Only CSI2 bus type is currently supported\n");
- return -EINVAL;
- }
+ for (i = 0; i < CIO2_NUM_PORTS; i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct sensor_async_subdev *s_asd = NULL;
+ struct fwnode_handle *ep;
- s_asd->csi2.port = vep->base.port;
- s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;
+ ep = fwnode_graph_get_endpoint_by_id(
+ dev_fwnode(&cio2->pci_dev->dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
- return 0;
-}
+ if (!ep)
+ continue;
-static int cio2_notifier_init(struct cio2_device *cio2)
-{
- int ret;
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
- v4l2_async_notifier_init(&cio2->notifier);
+ s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
+ if (!s_asd) {
+ ret = -ENOMEM;
+ goto err_parse;
+ }
- ret = v4l2_async_notifier_parse_fwnode_endpoints(
- &cio2->pci_dev->dev, &cio2->notifier,
- sizeof(struct sensor_async_subdev),
- cio2_fwnode_parse);
- if (ret < 0)
- return ret;
+ s_asd->csi2.port = vep.base.port;
+ s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
- if (list_empty(&cio2->notifier.asd_list))
- return -ENODEV; /* no endpoint */
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &cio2->notifier, ep, &s_asd->asd);
+ if (ret)
+ goto err_parse;
+
+ fwnode_handle_put(ep);
+
+ continue;
+err_parse:
+ fwnode_handle_put(ep);
+ kfree(s_asd);
+ return ret;
+ }
+
+ /*
+ * Proceed even without sensors connected to allow the device to
+ * suspend.
+ */
cio2->notifier.ops = &cio2_async_ops;
ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
- if (ret) {
+ if (ret)
dev_err(&cio2->pci_dev->dev,
"failed to register async notifier : %d\n", ret);
- v4l2_async_notifier_cleanup(&cio2->notifier);
- }
return ret;
}
-static void cio2_notifier_exit(struct cio2_device *cio2)
-{
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
-}
-
/**************** Queue initialization ****************/
static const struct media_entity_operations cio2_media_ops = {
.link_validate = v4l2_subdev_link_validate,
@@ -1809,17 +1818,18 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
if (r)
goto fail_v4l2_device_unregister;
+ v4l2_async_notifier_init(&cio2->notifier);
+
/* Register notifier for subdevices we care */
- r = cio2_notifier_init(cio2);
- /* Proceed without sensors connected to allow the device to suspend. */
- if (r && r != -ENODEV)
- goto fail_cio2_queue_exit;
+ r = cio2_parse_firmware(cio2);
+ if (r)
+ goto fail_clean_notifier;
r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
IRQF_SHARED, CIO2_NAME, cio2);
if (r) {
dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
- goto fail;
+ goto fail_clean_notifier;
}
pm_runtime_put_noidle(&pci_dev->dev);
@@ -1827,9 +1837,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return 0;
-fail:
- cio2_notifier_exit(cio2);
-fail_cio2_queue_exit:
+fail_clean_notifier:
+ v4l2_async_notifier_unregister(&cio2->notifier);
+ v4l2_async_notifier_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1848,7 +1858,8 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
media_device_unregister(&cio2->media_dev);
- cio2_notifier_exit(cio2);
+ v4l2_async_notifier_unregister(&cio2->notifier);
+ v4l2_async_notifier_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -2000,8 +2011,7 @@ static int __maybe_unused cio2_suspend(struct device *dev)
static int __maybe_unused cio2_resume(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
+ struct cio2_device *cio2 = dev_get_drvdata(dev);
int r = 0;
struct cio2_queue *q = cio2->cur_queue;
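
The reworked probe/remove paths follow the usual v4l2 async-notifier lifecycle; a rough outline of the ordering the patch relies on (names generic, error handling omitted):

v4l2_async_notifier_init(&notifier);		/* before adding subdevs */
/* one v4l2_async_notifier_add_fwnode_remote_subdev() call per endpoint */
notifier.ops = &my_async_ops;
ret = v4l2_async_notifier_register(&v4l2_dev, &notifier);

/* teardown, also taken on any probe error after init: */
v4l2_async_notifier_unregister(&notifier);
v4l2_async_notifier_cleanup(&notifier);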
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index dd727098daf4..3f3f40ea890b 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -910,7 +910,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
/* check which i2c devices are actually found */
for (i = 0; i < 32; i++) {
- u32 device = 1 << i;
+ u32 device = BIT(i);
if (!(device & hw))
continue;
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 5595f6a274e7..137853944e46 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -73,8 +73,8 @@ static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
return 0;
}
for (i = 0; i < 32; i++) {
- if ((1 << i) & set)
- return 1 << i;
+ if (BIT(i) & set)
+ return BIT(i);
}
return 0;
}
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index 7d2f45e2b83c..b8b0703a1c82 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -10,20 +10,20 @@
#ifndef IVTV_IRQ_H
#define IVTV_IRQ_H
-#define IVTV_IRQ_ENC_START_CAP (0x1 << 31)
-#define IVTV_IRQ_ENC_EOS (0x1 << 30)
-#define IVTV_IRQ_ENC_VBI_CAP (0x1 << 29)
-#define IVTV_IRQ_ENC_VIM_RST (0x1 << 28)
-#define IVTV_IRQ_ENC_DMA_COMPLETE (0x1 << 27)
-#define IVTV_IRQ_ENC_PIO_COMPLETE (0x1 << 25)
-#define IVTV_IRQ_DEC_AUD_MODE_CHG (0x1 << 24)
-#define IVTV_IRQ_DEC_DATA_REQ (0x1 << 22)
-#define IVTV_IRQ_DEC_DMA_COMPLETE (0x1 << 20)
-#define IVTV_IRQ_DEC_VBI_RE_INSERT (0x1 << 19)
-#define IVTV_IRQ_DMA_ERR (0x1 << 18)
-#define IVTV_IRQ_DMA_WRITE (0x1 << 17)
-#define IVTV_IRQ_DMA_READ (0x1 << 16)
-#define IVTV_IRQ_DEC_VSYNC (0x1 << 10)
+#define IVTV_IRQ_ENC_START_CAP BIT(31)
+#define IVTV_IRQ_ENC_EOS BIT(30)
+#define IVTV_IRQ_ENC_VBI_CAP BIT(29)
+#define IVTV_IRQ_ENC_VIM_RST BIT(28)
+#define IVTV_IRQ_ENC_DMA_COMPLETE BIT(27)
+#define IVTV_IRQ_ENC_PIO_COMPLETE BIT(25)
+#define IVTV_IRQ_DEC_AUD_MODE_CHG BIT(24)
+#define IVTV_IRQ_DEC_DATA_REQ BIT(22)
+#define IVTV_IRQ_DEC_DMA_COMPLETE BIT(20)
+#define IVTV_IRQ_DEC_VBI_RE_INSERT BIT(19)
+#define IVTV_IRQ_DMA_ERR BIT(18)
+#define IVTV_IRQ_DMA_WRITE BIT(17)
+#define IVTV_IRQ_DMA_READ BIT(16)
+#define IVTV_IRQ_DEC_VSYNC BIT(10)
/* IRQ Masks */
#define IVTV_IRQ_MASK_INIT (IVTV_IRQ_DMA_ERR|IVTV_IRQ_ENC_DMA_COMPLETE|\
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.c b/drivers/media/pci/ivtv/ivtv-mailbox.c
index 8393675c3f46..d3fdaaa903f1 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.c
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.c
@@ -10,8 +10,6 @@
#include "ivtv-driver.h"
#include "ivtv-mailbox.h"
-#include <stdarg.h>
-
/* Firmware mailbox flags*/
#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE 0x00000002
diff --git a/drivers/media/pci/mantis/mantis_reg.h b/drivers/media/pci/mantis/mantis_reg.h
index 67a80e42b5c7..a1e66ef6ac2f 100644
--- a/drivers/media/pci/mantis/mantis_reg.h
+++ b/drivers/media/pci/mantis/mantis_reg.h
@@ -14,44 +14,44 @@
#define MANTIS_INT_MASK 0x04
#define MANTIS_INT_RISCSTAT (0x0f << 28)
-#define MANTIS_INT_RISCEN (0x01 << 27)
-#define MANTIS_INT_I2CRACK (0x01 << 26)
+#define MANTIS_INT_RISCEN BIT(27)
+#define MANTIS_INT_I2CRACK BIT(26)
/* #define MANTIS_INT_GPIF (0xff << 12) */
-#define MANTIS_INT_PCMCIA7 (0x01 << 19)
-#define MANTIS_INT_PCMCIA6 (0x01 << 18)
-#define MANTIS_INT_PCMCIA5 (0x01 << 17)
-#define MANTIS_INT_PCMCIA4 (0x01 << 16)
-#define MANTIS_INT_PCMCIA3 (0x01 << 15)
-#define MANTIS_INT_PCMCIA2 (0x01 << 14)
-#define MANTIS_INT_PCMCIA1 (0x01 << 13)
-#define MANTIS_INT_PCMCIA0 (0x01 << 12)
-#define MANTIS_INT_IRQ1 (0x01 << 11)
-#define MANTIS_INT_IRQ0 (0x01 << 10)
-#define MANTIS_INT_OCERR (0x01 << 8)
-#define MANTIS_INT_PABORT (0x01 << 7)
-#define MANTIS_INT_RIPERR (0x01 << 6)
-#define MANTIS_INT_PPERR (0x01 << 5)
-#define MANTIS_INT_FTRGT (0x01 << 3)
-#define MANTIS_INT_RISCI (0x01 << 1)
-#define MANTIS_INT_I2CDONE (0x01 << 0)
+#define MANTIS_INT_PCMCIA7 BIT(19)
+#define MANTIS_INT_PCMCIA6 BIT(18)
+#define MANTIS_INT_PCMCIA5 BIT(17)
+#define MANTIS_INT_PCMCIA4 BIT(16)
+#define MANTIS_INT_PCMCIA3 BIT(15)
+#define MANTIS_INT_PCMCIA2 BIT(14)
+#define MANTIS_INT_PCMCIA1 BIT(13)
+#define MANTIS_INT_PCMCIA0 BIT(12)
+#define MANTIS_INT_IRQ1 BIT(11)
+#define MANTIS_INT_IRQ0 BIT(10)
+#define MANTIS_INT_OCERR BIT(8)
+#define MANTIS_INT_PABORT BIT(7)
+#define MANTIS_INT_RIPERR BIT(6)
+#define MANTIS_INT_PPERR BIT(5)
+#define MANTIS_INT_FTRGT BIT(3)
+#define MANTIS_INT_RISCI BIT(1)
+#define MANTIS_INT_I2CDONE BIT(0)
/* DMA */
#define MANTIS_DMA_CTL 0x08
#define MANTIS_GPIF_RD (0xff << 24)
#define MANTIS_GPIF_WR (0xff << 16)
-#define MANTIS_CPU_DO (0x01 << 10)
-#define MANTIS_DRV_DO (0x01 << 9)
-#define MANTIS_I2C_RD (0x01 << 7)
-#define MANTIS_I2C_WR (0x01 << 6)
-#define MANTIS_DCAP_MODE (0x01 << 5)
+#define MANTIS_CPU_DO BIT(10)
+#define MANTIS_DRV_DO BIT(9)
+#define MANTIS_I2C_RD BIT(7)
+#define MANTIS_I2C_WR BIT(6)
+#define MANTIS_DCAP_MODE BIT(5)
#define MANTIS_FIFO_TP_4 (0x00 << 3)
#define MANTIS_FIFO_TP_8 (0x01 << 3)
#define MANTIS_FIFO_TP_16 (0x02 << 3)
-#define MANTIS_FIFO_EN (0x01 << 2)
-#define MANTIS_DCAP_EN (0x01 << 1)
-#define MANTIS_RISC_EN (0x01 << 0)
+#define MANTIS_FIFO_EN BIT(2)
+#define MANTIS_DCAP_EN BIT(1)
+#define MANTIS_RISC_EN BIT(0)
/* DEBUG */
#define MANTIS_DEBUGREG 0x0c
@@ -68,8 +68,8 @@
#define MANTIS_I2C_RATE_2 (0x01 << 6)
#define MANTIS_I2C_RATE_3 (0x02 << 6)
#define MANTIS_I2C_RATE_4 (0x03 << 6)
-#define MANTIS_I2C_STOP (0x01 << 5)
-#define MANTIS_I2C_PGMODE (0x01 << 3)
+#define MANTIS_I2C_STOP BIT(5)
+#define MANTIS_I2C_PGMODE BIT(3)
/* DATA */
#define MANTIS_CMD_DATA_R1 0x20
@@ -85,77 +85,77 @@
#define MANTIS_CMD_DATA_4 (0xff << 0)
#define MANTIS_CONTROL 0x28
-#define MANTIS_DET (0x01 << 7)
-#define MANTIS_DAT_CF_EN (0x01 << 6)
+#define MANTIS_DET BIT(7)
+#define MANTIS_DAT_CF_EN BIT(6)
#define MANTIS_ACS (0x03 << 4)
-#define MANTIS_VCCEN (0x01 << 3)
-#define MANTIS_BYPASS (0x01 << 2)
-#define MANTIS_MRST (0x01 << 1)
-#define MANTIS_CRST_INT (0x01 << 0)
+#define MANTIS_VCCEN BIT(3)
+#define MANTIS_BYPASS BIT(2)
+#define MANTIS_MRST BIT(1)
+#define MANTIS_CRST_INT BIT(0)
#define MANTIS_GPIF_CFGSLA 0x84
#define MANTIS_GPIF_WAITSMPL (0x07 << 28)
-#define MANTIS_GPIF_BYTEADDRSUB (0x01 << 25)
-#define MANTIS_GPIF_WAITPOL (0x01 << 24)
+#define MANTIS_GPIF_BYTEADDRSUB BIT(25)
+#define MANTIS_GPIF_WAITPOL BIT(24)
#define MANTIS_GPIF_NCDELAY (0x07 << 20)
#define MANTIS_GPIF_RW2CSDELAY (0x07 << 16)
-#define MANTIS_GPIF_SLFTIMEDMODE (0x01 << 15)
+#define MANTIS_GPIF_SLFTIMEDMODE BIT(15)
#define MANTIS_GPIF_SLFTIMEDDELY (0x7f << 8)
#define MANTIS_GPIF_DEVTYPE (0x07 << 4)
-#define MANTIS_GPIF_BIGENDIAN (0x01 << 3)
+#define MANTIS_GPIF_BIGENDIAN BIT(3)
#define MANTIS_GPIF_FETCHCMD (0x03 << 1)
-#define MANTIS_GPIF_HWORDDEV (0x01 << 0)
+#define MANTIS_GPIF_HWORDDEV BIT(0)
#define MANTIS_GPIF_WSTOPER 0x90
-#define MANTIS_GPIF_WSTOPERWREN3 (0x01 << 31)
-#define MANTIS_GPIF_PARBOOTN (0x01 << 29)
+#define MANTIS_GPIF_WSTOPERWREN3 BIT(31)
+#define MANTIS_GPIF_PARBOOTN BIT(29)
#define MANTIS_GPIF_WSTOPERSLID3 (0x1f << 24)
-#define MANTIS_GPIF_WSTOPERWREN2 (0x01 << 23)
+#define MANTIS_GPIF_WSTOPERWREN2 BIT(23)
#define MANTIS_GPIF_WSTOPERSLID2 (0x1f << 16)
-#define MANTIS_GPIF_WSTOPERWREN1 (0x01 << 15)
+#define MANTIS_GPIF_WSTOPERWREN1 BIT(15)
#define MANTIS_GPIF_WSTOPERSLID1 (0x1f << 8)
-#define MANTIS_GPIF_WSTOPERWREN0 (0x01 << 7)
+#define MANTIS_GPIF_WSTOPERWREN0 BIT(7)
#define MANTIS_GPIF_WSTOPERSLID0 (0x1f << 0)
#define MANTIS_GPIF_CS2RW 0x94
-#define MANTIS_GPIF_CS2RWWREN3 (0x01 << 31)
+#define MANTIS_GPIF_CS2RWWREN3 BIT(31)
#define MANTIS_GPIF_CS2RWDELY3 (0x3f << 24)
-#define MANTIS_GPIF_CS2RWWREN2 (0x01 << 23)
+#define MANTIS_GPIF_CS2RWWREN2 BIT(23)
#define MANTIS_GPIF_CS2RWDELY2 (0x3f << 16)
-#define MANTIS_GPIF_CS2RWWREN1 (0x01 << 15)
+#define MANTIS_GPIF_CS2RWWREN1 BIT(15)
#define MANTIS_GPIF_CS2RWDELY1 (0x3f << 8)
-#define MANTIS_GPIF_CS2RWWREN0 (0x01 << 7)
+#define MANTIS_GPIF_CS2RWWREN0 BIT(7)
#define MANTIS_GPIF_CS2RWDELY0 (0x3f << 0)
#define MANTIS_GPIF_IRQCFG 0x98
-#define MANTIS_GPIF_IRQPOL (0x01 << 8)
-#define MANTIS_MASK_WRACK (0x01 << 7)
-#define MANTIS_MASK_BRRDY (0x01 << 6)
-#define MANTIS_MASK_OVFLW (0x01 << 5)
-#define MANTIS_MASK_OTHERR (0x01 << 4)
-#define MANTIS_MASK_WSTO (0x01 << 3)
-#define MANTIS_MASK_EXTIRQ (0x01 << 2)
-#define MANTIS_MASK_PLUGIN (0x01 << 1)
-#define MANTIS_MASK_PLUGOUT (0x01 << 0)
+#define MANTIS_GPIF_IRQPOL BIT(8)
+#define MANTIS_MASK_WRACK BIT(7)
+#define MANTIS_MASK_BRRDY BIT(6)
+#define MANTIS_MASK_OVFLW BIT(5)
+#define MANTIS_MASK_OTHERR BIT(4)
+#define MANTIS_MASK_WSTO BIT(3)
+#define MANTIS_MASK_EXTIRQ BIT(2)
+#define MANTIS_MASK_PLUGIN BIT(1)
+#define MANTIS_MASK_PLUGOUT BIT(0)
#define MANTIS_GPIF_STATUS 0x9c
-#define MANTIS_SBUF_KILLOP (0x01 << 15)
-#define MANTIS_SBUF_OPDONE (0x01 << 14)
-#define MANTIS_SBUF_EMPTY (0x01 << 13)
-#define MANTIS_GPIF_DETSTAT (0x01 << 9)
-#define MANTIS_GPIF_INTSTAT (0x01 << 8)
-#define MANTIS_GPIF_WRACK (0x01 << 7)
-#define MANTIS_GPIF_BRRDY (0x01 << 6)
-#define MANTIS_SBUF_OVFLW (0x01 << 5)
-#define MANTIS_GPIF_OTHERR (0x01 << 4)
-#define MANTIS_SBUF_WSTO (0x01 << 3)
-#define MANTIS_GPIF_EXTIRQ (0x01 << 2)
-#define MANTIS_CARD_PLUGIN (0x01 << 1)
-#define MANTIS_CARD_PLUGOUT (0x01 << 0)
+#define MANTIS_SBUF_KILLOP BIT(15)
+#define MANTIS_SBUF_OPDONE BIT(14)
+#define MANTIS_SBUF_EMPTY BIT(13)
+#define MANTIS_GPIF_DETSTAT BIT(9)
+#define MANTIS_GPIF_INTSTAT BIT(8)
+#define MANTIS_GPIF_WRACK BIT(7)
+#define MANTIS_GPIF_BRRDY BIT(6)
+#define MANTIS_SBUF_OVFLW BIT(5)
+#define MANTIS_GPIF_OTHERR BIT(4)
+#define MANTIS_SBUF_WSTO BIT(3)
+#define MANTIS_GPIF_EXTIRQ BIT(2)
+#define MANTIS_CARD_PLUGIN BIT(1)
+#define MANTIS_CARD_PLUGOUT BIT(0)
#define MANTIS_GPIF_BRADDR 0xa0
-#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
-#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_PCMCIAREG BIT(27)
+#define MANTIS_GPIF_PCMCIAIOM BIT(26)
#define MANTIS_GPIF_BR_ADDR (0xfffffff << 0)
#define MANTIS_GPIF_BRBYTES 0xa4
@@ -167,9 +167,9 @@
#define MANTIS_CARD_RESET 0xac
#define MANTIS_GPIF_ADDR 0xb0
-#define MANTIS_GPIF_HIFRDWRN (0x01 << 31)
-#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
-#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_HIFRDWRN BIT(31)
+#define MANTIS_GPIF_PCMCIAREG BIT(27)
+#define MANTIS_GPIF_PCMCIAIOM BIT(26)
#define MANTIS_GPIF_HIFADDR (0xfffffff << 0)
#define MANTIS_GPIF_DOUT 0xb4
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 8218810c899e..0e61c81356ef 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1104,12 +1104,9 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh,
if (f->index == 0) {
/* standard YUV 422 capture */
f->flags = 0;
- strscpy(f->description, "YUV422", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_YUYV;
} else {
/* compressed MJPEG capture */
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- strscpy(f->description, "MJPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MJPEG;
}
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index b75ab7d29226..af15ca1c501b 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -854,8 +854,6 @@ static int create_ring_buffer(struct pci_dev *pci_dev,
if (!Head)
return -ENOMEM;
- memset(Head, 0, MemSize);
-
PARingBufferCur = PARingBufferHead;
Cur = Head;
@@ -907,8 +905,6 @@ static int AllocateRingBuffers(struct pci_dev *pci_dev,
if (SCListMem == NULL)
return -ENOMEM;
- memset(SCListMem, 0, SCListMemSize);
-
pRingBuffer->SCListMem = SCListMem;
pRingBuffer->PASCListMem = PASCListMem;
pRingBuffer->SCListMemSize = SCListMemSize;
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index e51c80bc4646..72b191cfeb54 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1217,8 +1217,7 @@ static void pt1_i2c_init(struct pt1 *pt1)
static int pt1_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pt1 *pt1 = pci_get_drvdata(pdev);
+ struct pt1 *pt1 = dev_get_drvdata(dev);
pt1_init_streams(pt1);
pt1_disable_ram(pt1);
@@ -1230,8 +1229,7 @@ static int pt1_suspend(struct device *dev)
static int pt1_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pt1 *pt1 = pci_get_drvdata(pdev);
+ struct pt1 *pt1 = dev_get_drvdata(dev);
int ret;
int i;
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 7a7afae4c84c..c0bc86793355 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -626,8 +626,7 @@ static void pt3_cleanup_adapter(struct pt3_board *pt3, int index)
static int pt3_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pt3_board *pt3 = pci_get_drvdata(pdev);
+ struct pt3_board *pt3 = dev_get_drvdata(dev);
int i;
struct pt3_adapter *adap;
@@ -646,8 +645,7 @@ static int pt3_suspend(struct device *dev)
static int pt3_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pt3_board *pt3 = pci_get_drvdata(pdev);
+ struct pt3_board *pt3 = dev_get_drvdata(dev);
int i, ret;
struct pt3_adapter *adap;
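
pci_set_drvdata() stores its pointer in the PCI device's embedded struct device, so dev PM callbacks can fetch it without bouncing through to_pci_dev(); a minimal sketch (hypothetical foo driver, not from this patch):

static int foo_suspend(struct device *dev)
{
	/* same pointer as pci_get_drvdata(to_pci_dev(dev)) */
	struct foo_priv *priv = dev_get_drvdata(dev);

	return foo_stop_hw(priv);
}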
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index eb8377a95023..f359cd5c006a 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1264,6 +1264,20 @@ static int dvb_init(struct saa7134_dev *dev)
&medion_cardbus,
&dev->i2c_adap);
if (fe0->dvb.frontend) {
+ /*
+ * The TV tuner on this board is actually NOT
+ * behind the demod i2c gate.
+ * However, the demod EEPROM is indeed there and it
+ * conflicts with the SAA7134 chip config EEPROM
+ * if the i2c gate is open (since they share the same
+ * bus address), resulting in the card PCI SVID / SSID
+ * being garbage after a reboot from time to time.
+ *
+ * Let's just leave the gate permanently closed -
+ * saa7134_i2c_eeprom_md7134_gate() will close it for
+ * us at probe time if it was open for some reason.
+ */
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&dev->i2c_adap, medion_cardbus.tuner_address,
TUNER_PHILIPS_FMD1216ME_MK3);
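
Clearing the op works because DVB tuner attach/tuning code only toggles the gate when the callback is present, roughly as below; with i2c_gate_ctrl set to NULL the gate stays shut and the two EEPROMs never share the bus:

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);	/* open the demod gate before tuner I/O */
	/* ... tuner register access ... */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);	/* close it again */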
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 1a41a56afec6..cb65d345fd3e 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -91,9 +91,7 @@ static int empress_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "MPEG TS", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 493b1858815f..04e85765373e 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -342,7 +342,11 @@ static const struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+/*
+ * On Medion 7134, reading the SAA7134 chip config EEPROM needs the DVB-T
+ * demod i2c gate closed due to an address clash between this EEPROM
+ * and the demod one.
+ */
static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
{
u8 subaddr = 0x7, dmdregval;
@@ -359,14 +363,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
if ((ret == 2) && (dmdregval & 0x2)) {
- pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ pr_debug("%s: DVB-T demod i2c gate was left open\n",
dev->name);
data[0] = subaddr;
data[1] = (dmdregval & ~0x2);
if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
- pr_err("%s: EEPROM i2c gate open failure\n",
- dev->name);
+ pr_err("%s: EEPROM i2c gate close failure\n",
+ dev->name);
}
}
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 5beff534d5e1..79e1afb71075 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -319,7 +319,6 @@ static int tvaudio_checkcarrier(struct saa7134_dev *dev, struct mainscan *scan)
__s32 left,right,value;
if (!(dev->tvnorm->id & scan->std)) {
- value = 0;
audio_dbg(1, "skipping %d.%03d MHz [%4s]\n",
scan->carr / 1000, scan->carr % 1000, scan->name);
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 606df51bb636..342cabf48064 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -90,70 +90,58 @@ static int video_out[][9] = {
static struct saa7134_format formats[] = {
{
- .name = "8 bpp gray",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8,
.pm = 0x06,
},{
- .name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = 16,
.pm = 0x13 | 0x80,
},{
- .name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.depth = 16,
.pm = 0x13 | 0x80,
.bswap = 1,
},{
- .name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.pm = 0x10 | 0x80,
},{
- .name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.pm = 0x10 | 0x80,
.bswap = 1,
},{
- .name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.depth = 24,
.pm = 0x11,
},{
- .name = "24 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 24,
.pm = 0x11,
.bswap = 1,
},{
- .name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.depth = 32,
.pm = 0x12,
},{
- .name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.pm = 0x12,
.bswap = 1,
.wswap = 1,
},{
- .name = "4:2:2 packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.pm = 0x00,
.bswap = 1,
.yuv = 1,
},{
- .name = "4:2:2 packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.pm = 0x00,
.yuv = 1,
},{
- .name = "4:2:2 planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16,
.pm = 0x09,
@@ -162,7 +150,6 @@ static struct saa7134_format formats[] = {
.hshift = 1,
.vshift = 0,
},{
- .name = "4:2:0 planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.pm = 0x0a,
@@ -171,7 +158,6 @@ static struct saa7134_format formats[] = {
.hshift = 1,
.vshift = 1,
},{
- .name = "4:2:0 planar, Y-Cb-Cr",
.fourcc = V4L2_PIX_FMT_YVU420,
.depth = 12,
.pm = 0x0a,
@@ -720,10 +706,10 @@ static int start_preview(struct saa7134_dev *dev)
return err;
dev->ovfield = dev->win.field;
- video_dbg("start_preview %dx%d+%d+%d %s field=%s\n",
- dev->win.w.width, dev->win.w.height,
- dev->win.w.left, dev->win.w.top,
- dev->ovfmt->name, v4l2_field_names[dev->ovfield]);
+ video_dbg("%s %dx%d+%d+%d 0x%08x field=%s\n", __func__,
+ dev->win.w.width, dev->win.w.height,
+ dev->win.w.left, dev->win.w.top,
+ dev->ovfmt->fourcc, v4l2_field_names[dev->ovfield]);
/* setup window + clipping */
set_size(dev, TASK_B, dev->win.w.width, dev->win.w.height,
@@ -1780,9 +1766,6 @@ static int saa7134_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index >= FORMATS)
return -EINVAL;
- strscpy(f->description, formats[f->index].name,
- sizeof(f->description));
-
f->pixelformat = formats[f->index].fourcc;
return 0;
@@ -1799,9 +1782,6 @@ static int saa7134_enum_fmt_vid_overlay(struct file *file, void *priv,
if ((f->index >= FORMATS) || formats[f->index].planar)
return -EINVAL;
- strscpy(f->description, formats[f->index].name,
- sizeof(f->description));
-
f->pixelformat = formats[f->index].fourcc;
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 6324f174c6f9..77c325e64a97 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -98,7 +98,6 @@ struct saa7134_tvaudio {
};
struct saa7134_format {
- char *name;
unsigned int fourcc;
unsigned int depth;
unsigned int pm;
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index dca20a3d98e2..f96226930670 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -292,6 +292,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
+ saa7146_vv_release(dev);
+ i2c_del_adapter(&hexium->i2c_adapter);
+ kfree(hexium);
return ret;
}
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 43fdaa2d32bd..3fca7257a720 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -503,7 +503,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c
index 5caeca8b5dd0..526d67cf9942 100644
--- a/drivers/media/pci/solo6x10/solo6x10-gpio.c
+++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c
@@ -39,13 +39,13 @@ static void solo_gpio_mode(struct solo_dev *solo_dev,
ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1);
for (port = 0; port < 16; port++) {
- if (!((1 << (port + 16)) & port_mask))
+ if (!((1UL << (port + 16)) & port_mask))
continue;
if (!mode)
- ret &= ~(1 << port);
+ ret &= ~(1UL << port);
else
- ret |= 1 << port;
+ ret |= 1UL << port;
}
/* Enable GPIO[31:16] */
diff --git a/drivers/media/pci/solo6x10/solo6x10-regs.h b/drivers/media/pci/solo6x10/solo6x10-regs.h
index d88cc02d01d3..804505d01b25 100644
--- a/drivers/media/pci/solo6x10/solo6x10-regs.h
+++ b/drivers/media/pci/solo6x10/solo6x10-regs.h
@@ -12,6 +12,8 @@
#ifndef __SOLO6X10_REGISTERS_H
#define __SOLO6X10_REGISTERS_H
+#include <linux/bitops.h>
+
#include "solo6x10-offsets.h"
/* Global 6010 system configuration */
@@ -32,17 +34,17 @@
#define SOLO_DMA_CTRL_REFRESH_CYCLE(n) ((n)<<8)
/* 0=16/32MB, 1=32/64MB, 2=64/128MB, 3=128/256MB */
#define SOLO_DMA_CTRL_SDRAM_SIZE(n) ((n)<<6)
-#define SOLO_DMA_CTRL_SDRAM_CLK_INVERT (1<<5)
-#define SOLO_DMA_CTRL_STROBE_SELECT (1<<4)
-#define SOLO_DMA_CTRL_READ_DATA_SELECT (1<<3)
-#define SOLO_DMA_CTRL_READ_CLK_SELECT (1<<2)
+#define SOLO_DMA_CTRL_SDRAM_CLK_INVERT BIT(5)
+#define SOLO_DMA_CTRL_STROBE_SELECT BIT(4)
+#define SOLO_DMA_CTRL_READ_DATA_SELECT BIT(3)
+#define SOLO_DMA_CTRL_READ_CLK_SELECT BIT(2)
#define SOLO_DMA_CTRL_LATENCY(n) ((n)<<0)
/* Some things we set in this are undocumented. Why Softlogic?!?! */
#define SOLO_DMA_CTRL1 0x0008
#define SOLO_SYS_VCLK 0x000C
-#define SOLO_VCLK_INVERT (1<<22)
+#define SOLO_VCLK_INVERT BIT(22)
/* 0=sys_clk/4, 1=sys_clk/2, 2=clk_in/2 of system input */
#define SOLO_VCLK_SELECT(n) ((n)<<20)
#define SOLO_VCLK_VIN1415_DELAY(n) ((n)<<14)
@@ -56,22 +58,22 @@
#define SOLO_IRQ_STAT 0x0010
#define SOLO_IRQ_MASK 0x0014
-#define SOLO_IRQ_P2M(n) (1<<((n)+17))
-#define SOLO_IRQ_GPIO (1<<16)
-#define SOLO_IRQ_VIDEO_LOSS (1<<15)
-#define SOLO_IRQ_VIDEO_IN (1<<14)
-#define SOLO_IRQ_MOTION (1<<13)
-#define SOLO_IRQ_ATA_CMD (1<<12)
-#define SOLO_IRQ_ATA_DIR (1<<11)
-#define SOLO_IRQ_PCI_ERR (1<<10)
-#define SOLO_IRQ_PS2_1 (1<<9)
-#define SOLO_IRQ_PS2_0 (1<<8)
-#define SOLO_IRQ_SPI (1<<7)
-#define SOLO_IRQ_IIC (1<<6)
-#define SOLO_IRQ_UART(n) (1<<((n) + 4))
-#define SOLO_IRQ_G723 (1<<3)
-#define SOLO_IRQ_DECODER (1<<1)
-#define SOLO_IRQ_ENCODER (1<<0)
+#define SOLO_IRQ_P2M(n) BIT((n) + 17)
+#define SOLO_IRQ_GPIO BIT(16)
+#define SOLO_IRQ_VIDEO_LOSS BIT(15)
+#define SOLO_IRQ_VIDEO_IN BIT(14)
+#define SOLO_IRQ_MOTION BIT(13)
+#define SOLO_IRQ_ATA_CMD BIT(12)
+#define SOLO_IRQ_ATA_DIR BIT(11)
+#define SOLO_IRQ_PCI_ERR BIT(10)
+#define SOLO_IRQ_PS2_1 BIT(9)
+#define SOLO_IRQ_PS2_0 BIT(8)
+#define SOLO_IRQ_SPI BIT(7)
+#define SOLO_IRQ_IIC BIT(6)
+#define SOLO_IRQ_UART(n) BIT((n) + 4)
+#define SOLO_IRQ_G723 BIT(3)
+#define SOLO_IRQ_DECODER BIT(1)
+#define SOLO_IRQ_ENCODER BIT(0)
#define SOLO_CHIP_OPTION 0x001C
#define SOLO_CHIP_ID_MASK 0x00000007
@@ -79,11 +81,11 @@
#define SOLO_PLL_CONFIG 0x0020 /* 6110 Only */
#define SOLO_EEPROM_CTRL 0x0060
-#define SOLO_EEPROM_ACCESS_EN (1<<7)
-#define SOLO_EEPROM_CS (1<<3)
-#define SOLO_EEPROM_CLK (1<<2)
-#define SOLO_EEPROM_DO (1<<1)
-#define SOLO_EEPROM_DI (1<<0)
+#define SOLO_EEPROM_ACCESS_EN BIT(7)
+#define SOLO_EEPROM_CS BIT(3)
+#define SOLO_EEPROM_CLK BIT(2)
+#define SOLO_EEPROM_DO BIT(1)
+#define SOLO_EEPROM_DI BIT(0)
#define SOLO_EEPROM_ENABLE (SOLO_EEPROM_ACCESS_EN | SOLO_EEPROM_CS)
#define SOLO_PCI_ERR 0x0070
@@ -102,13 +104,13 @@
#define SOLO_P2M_CONFIG(n) (0x0080 + ((n)*0x20))
#define SOLO_P2M_DMA_INTERVAL(n) ((n)<<6)/* N*32 clocks */
-#define SOLO_P2M_CSC_BYTE_REORDER (1<<5) /* BGR -> RGB */
+#define SOLO_P2M_CSC_BYTE_REORDER BIT(5) /* BGR -> RGB */
/* 0:r=[14:10] g=[9:5] b=[4:0], 1:r=[15:11] g=[10:5] b=[4:0] */
-#define SOLO_P2M_CSC_16BIT_565 (1<<4)
-#define SOLO_P2M_UV_SWAP (1<<3)
-#define SOLO_P2M_PCI_MASTER_MODE (1<<2)
-#define SOLO_P2M_DESC_INTR_OPT (1<<1) /* 1:Empty, 0:Each */
-#define SOLO_P2M_DESC_MODE (1<<0)
+#define SOLO_P2M_CSC_16BIT_565 BIT(4)
+#define SOLO_P2M_UV_SWAP BIT(3)
+#define SOLO_P2M_PCI_MASTER_MODE BIT(2)
+#define SOLO_P2M_DESC_INTR_OPT BIT(1) /* 1:Empty, 0:Each */
+#define SOLO_P2M_DESC_MODE BIT(0)
#define SOLO_P2M_DES_ADR(n) (0x0084 + ((n)*0x20))
@@ -116,7 +118,7 @@
#define SOLO_P2M_UPDATE_ID(n) ((n)<<0)
#define SOLO_P2M_STATUS(n) (0x008C + ((n)*0x20))
-#define SOLO_P2M_COMMAND_DONE (1<<8)
+#define SOLO_P2M_COMMAND_DONE BIT(8)
#define SOLO_P2M_CURRENT_ID(stat) (0xff & (stat))
#define SOLO_P2M_CONTROL(n) (0x0090 + ((n)*0x20))
@@ -129,13 +131,13 @@
#define SOLO_P2M_BURST_128 2
#define SOLO_P2M_BURST_64 3
#define SOLO_P2M_BURST_32 4
-#define SOLO_P2M_CSC_16BIT (1<<6) /* 0:24bit, 1:16bit */
+#define SOLO_P2M_CSC_16BIT BIT(6) /* 0:24bit, 1:16bit */
/* 0:Y[0]<-0(OFF), 1:Y[0]<-1(ON), 2:Y[0]<-G[0], 3:Y[0]<-Bit[15] */
#define SOLO_P2M_ALPHA_MODE(n) ((n)<<4)
-#define SOLO_P2M_CSC_ON (1<<3)
-#define SOLO_P2M_INTERRUPT_REQ (1<<2)
-#define SOLO_P2M_WRITE (1<<1)
-#define SOLO_P2M_TRANS_ON (1<<0)
+#define SOLO_P2M_CSC_ON BIT(3)
+#define SOLO_P2M_INTERRUPT_REQ BIT(2)
+#define SOLO_P2M_WRITE BIT(1)
+#define SOLO_P2M_TRANS_ON BIT(0)
#define SOLO_P2M_EXT_CFG(n) (0x0094 + ((n)*0x20))
#define SOLO_P2M_EXT_INC(n) ((n)<<20)
@@ -157,9 +159,9 @@
#define SOLO_VI_PROG_MASK(n) ((n)<<0)
#define SOLO_VI_FMT_CFG 0x0114
-#define SOLO_VI_FMT_CHECK_VCOUNT (1<<31)
-#define SOLO_VI_FMT_CHECK_HCOUNT (1<<30)
-#define SOLO_VI_FMT_TEST_SIGNAL (1<<28)
+#define SOLO_VI_FMT_CHECK_VCOUNT BIT(31)
+#define SOLO_VI_FMT_CHECK_HCOUNT BIT(30)
+#define SOLO_VI_FMT_TEST_SIGNAL BIT(28)
#define SOLO_VI_PAGE_SW 0x0118
#define SOLO_FI_INV_DISP_LIVE(n) ((n)<<8)
@@ -171,7 +173,7 @@
#define SOLO_VI_ACT_I_P 0x011C
#define SOLO_VI_ACT_I_S 0x0120
#define SOLO_VI_ACT_P 0x0124
-#define SOLO_VI_FI_INVERT (1<<31)
+#define SOLO_VI_FI_INVERT BIT(31)
#define SOLO_VI_H_START(n) ((n)<<21)
#define SOLO_VI_V_START(n) ((n)<<11)
#define SOLO_VI_V_STOP(n) ((n)<<0)
@@ -184,8 +186,8 @@
#define DISP_PAGE(stat) ((stat) & 0x07)
#define SOLO_VI_PB_CONFIG 0x0130
-#define SOLO_VI_PB_USER_MODE (1<<1)
-#define SOLO_VI_PB_PAL (1<<0)
+#define SOLO_VI_PB_USER_MODE BIT(1)
+#define SOLO_VI_PB_PAL BIT(0)
#define SOLO_VI_PB_RANGE_HV 0x0134
#define SOLO_VI_PB_HSIZE(h) ((h)<<12)
#define SOLO_VI_PB_VSIZE(v) ((v)<<0)
@@ -226,35 +228,35 @@
#define SOLO_VI_MOT_CTRL 0x0264
#define SOLO_VI_MOTION_FRAME_COUNT(n) ((n)<<24)
#define SOLO_VI_MOTION_SAMPLE_LENGTH(n) ((n)<<16)
-#define SOLO_VI_MOTION_INTR_START_STOP (1<<15)
-#define SOLO_VI_MOTION_FREEZE_DATA (1<<14)
+#define SOLO_VI_MOTION_INTR_START_STOP BIT(15)
+#define SOLO_VI_MOTION_FREEZE_DATA BIT(14)
#define SOLO_VI_MOTION_SAMPLE_COUNT(n) ((n)<<0)
#define SOLO_VI_MOT_CLEAR 0x0268
#define SOLO_VI_MOT_STATUS 0x026C
#define SOLO_VI_MOTION_CNT(n) ((n)<<0)
#define SOLO_VI_MOTION_BORDER 0x0270
#define SOLO_VI_MOTION_BAR 0x0274
-#define SOLO_VI_MOTION_Y_SET (1<<29)
-#define SOLO_VI_MOTION_Y_ADD (1<<28)
-#define SOLO_VI_MOTION_CB_SET (1<<27)
-#define SOLO_VI_MOTION_CB_ADD (1<<26)
-#define SOLO_VI_MOTION_CR_SET (1<<25)
-#define SOLO_VI_MOTION_CR_ADD (1<<24)
+#define SOLO_VI_MOTION_Y_SET BIT(29)
+#define SOLO_VI_MOTION_Y_ADD BIT(28)
+#define SOLO_VI_MOTION_CB_SET BIT(27)
+#define SOLO_VI_MOTION_CB_ADD BIT(26)
+#define SOLO_VI_MOTION_CR_SET BIT(25)
+#define SOLO_VI_MOTION_CR_ADD BIT(24)
#define SOLO_VI_MOTION_Y_VALUE(v) ((v)<<16)
#define SOLO_VI_MOTION_CB_VALUE(v) ((v)<<8)
#define SOLO_VI_MOTION_CR_VALUE(v) ((v)<<0)
#define SOLO_VO_FMT_ENC 0x0300
-#define SOLO_VO_SCAN_MODE_PROGRESSIVE (1<<31)
-#define SOLO_VO_FMT_TYPE_PAL (1<<30)
+#define SOLO_VO_SCAN_MODE_PROGRESSIVE BIT(31)
+#define SOLO_VO_FMT_TYPE_PAL BIT(30)
#define SOLO_VO_FMT_TYPE_NTSC 0
-#define SOLO_VO_USER_SET (1<<29)
+#define SOLO_VO_USER_SET BIT(29)
-#define SOLO_VO_FI_CHANGE (1<<20)
-#define SOLO_VO_USER_COLOR_SET_VSYNC (1<<19)
-#define SOLO_VO_USER_COLOR_SET_HSYNC (1<<18)
-#define SOLO_VO_USER_COLOR_SET_NAH (1<<17)
-#define SOLO_VO_USER_COLOR_SET_NAV (1<<16)
+#define SOLO_VO_FI_CHANGE BIT(20)
+#define SOLO_VO_USER_COLOR_SET_VSYNC BIT(19)
+#define SOLO_VO_USER_COLOR_SET_HSYNC BIT(18)
+#define SOLO_VO_USER_COLOR_SET_NAH BIT(17)
+#define SOLO_VO_USER_COLOR_SET_NAV BIT(16)
#define SOLO_VO_NA_COLOR_Y(Y) ((Y)<<8)
#define SOLO_VO_NA_COLOR_CB(CB) (((CB)/16)<<4)
#define SOLO_VO_NA_COLOR_CR(CR) (((CR)/16)<<0)
@@ -270,32 +272,32 @@
#define SOLO_VO_V_STOP(n) ((n)<<0)
#define SOLO_VO_RANGE_HV 0x030C
-#define SOLO_VO_SYNC_INVERT (1<<24)
-#define SOLO_VO_HSYNC_INVERT (1<<23)
-#define SOLO_VO_VSYNC_INVERT (1<<22)
+#define SOLO_VO_SYNC_INVERT BIT(24)
+#define SOLO_VO_HSYNC_INVERT BIT(23)
+#define SOLO_VO_VSYNC_INVERT BIT(22)
#define SOLO_VO_H_LEN(n) ((n)<<11)
#define SOLO_VO_V_LEN(n) ((n)<<0)
#define SOLO_VO_DISP_CTRL 0x0310
-#define SOLO_VO_DISP_ON (1<<31)
+#define SOLO_VO_DISP_ON BIT(31)
#define SOLO_VO_DISP_ERASE_COUNT(n) ((n&0xf)<<24)
-#define SOLO_VO_DISP_DOUBLE_SCAN (1<<22)
-#define SOLO_VO_DISP_SINGLE_PAGE (1<<21)
+#define SOLO_VO_DISP_DOUBLE_SCAN BIT(22)
+#define SOLO_VO_DISP_SINGLE_PAGE BIT(21)
#define SOLO_VO_DISP_BASE(n) (((n)>>16) & 0xffff)
#define SOLO_VO_DISP_ERASE 0x0314
-#define SOLO_VO_DISP_ERASE_ON (1<<0)
+#define SOLO_VO_DISP_ERASE_ON BIT(0)
#define SOLO_VO_ZOOM_CTRL 0x0318
-#define SOLO_VO_ZOOM_VER_ON (1<<24)
-#define SOLO_VO_ZOOM_HOR_ON (1<<23)
-#define SOLO_VO_ZOOM_V_COMP (1<<22)
+#define SOLO_VO_ZOOM_VER_ON BIT(24)
+#define SOLO_VO_ZOOM_HOR_ON BIT(23)
+#define SOLO_VO_ZOOM_V_COMP BIT(22)
#define SOLO_VO_ZOOM_SX(h) (((h)/2)<<11)
#define SOLO_VO_ZOOM_SY(v) (((v)/2)<<0)
#define SOLO_VO_FREEZE_CTRL 0x031C
-#define SOLO_VO_FREEZE_ON (1<<1)
-#define SOLO_VO_FREEZE_INTERPOLATION (1<<0)
+#define SOLO_VO_FREEZE_ON BIT(1)
+#define SOLO_VO_FREEZE_INTERPOLATION BIT(0)
#define SOLO_VO_BKG_COLOR 0x0320
#define SOLO_BG_Y(y) ((y)<<16)
@@ -334,8 +336,8 @@
#define SOLO_VO_EXPANSION(id) (0x0250+((id)*4))
#define SOLO_OSG_CONFIG 0x03E0
-#define SOLO_VO_OSG_ON (1<<31)
-#define SOLO_VO_OSG_COLOR_MUTE (1<<28)
+#define SOLO_VO_OSG_ON BIT(31)
+#define SOLO_VO_OSG_COLOR_MUTE BIT(28)
#define SOLO_VO_OSG_ALPHA_RATE(n) ((n)<<22)
#define SOLO_VO_OSG_ALPHA_BG_RATE(n) ((n)<<16)
#define SOLO_VO_OSG_BASE(offset) (((offset)>>16)&0xffff)
@@ -345,8 +347,8 @@
#define SOLO_OSG_ERASE_OFF (0x00)
#define SOLO_VO_OSG_BLINK 0x03E8
-#define SOLO_VO_OSG_BLINK_ON (1<<1)
-#define SOLO_VO_OSG_BLINK_INTREVAL18 (1<<0)
+#define SOLO_VO_OSG_BLINK_ON BIT(1)
+#define SOLO_VO_OSG_BLINK_INTREVAL18 BIT(0)
#define SOLO_CAP_BASE 0x0400
#define SOLO_CAP_MAX_PAGE(n) ((n)<<16)
@@ -374,19 +376,19 @@
#define SOLO_VE_CFG0 0x0610
-#define SOLO_VE_TWO_PAGE_MODE (1<<31)
+#define SOLO_VE_TWO_PAGE_MODE BIT(31)
#define SOLO_VE_INTR_CTRL(n) ((n)<<24)
#define SOLO_VE_BLOCK_SIZE(n) ((n)<<16)
#define SOLO_VE_BLOCK_BASE(n) ((n)<<0)
#define SOLO_VE_CFG1 0x0614
#define SOLO_VE_BYTE_ALIGN(n) ((n)<<24)
-#define SOLO_VE_INSERT_INDEX (1<<18)
+#define SOLO_VE_INSERT_INDEX BIT(18)
#define SOLO_VE_MOTION_MODE(n) ((n)<<16)
#define SOLO_VE_MOTION_BASE(n) ((n)<<0)
#define SOLO_VE_MPEG_SIZE_H(n) ((n)<<28) /* 6110 Only */
#define SOLO_VE_JPEG_SIZE_H(n) ((n)<<20) /* 6110 Only */
-#define SOLO_VE_INSERT_INDEX_JPEG (1<<19) /* 6110 Only */
+#define SOLO_VE_INSERT_INDEX_JPEG BIT(19) /* 6110 Only */
#define SOLO_VE_WMRK_POLY 0x061C
#define SOLO_VE_VMRK_INIT_KEY 0x0620
@@ -394,8 +396,8 @@
#define SOLO_VE_ENCRYP_POLY 0x0628
#define SOLO_VE_ENCRYP_INIT 0x062C
#define SOLO_VE_ATTR 0x0630
-#define SOLO_VE_LITTLE_ENDIAN (1<<31)
-#define SOLO_COMP_ATTR_RN (1<<30)
+#define SOLO_VE_LITTLE_ENDIAN BIT(31)
+#define SOLO_COMP_ATTR_RN BIT(30)
#define SOLO_COMP_ATTR_FCODE(n) ((n)<<27)
#define SOLO_COMP_TIME_INC(n) ((n)<<25)
#define SOLO_COMP_TIME_WIDTH(n) ((n)<<21)
@@ -416,9 +418,9 @@
#define SOLO_VE_OSD_BASE 0x0694
#define SOLO_VE_OSD_CLR 0x0698
#define SOLO_VE_OSD_OPT 0x069C
-#define SOLO_VE_OSD_V_DOUBLE (1<<16) /* 6110 Only */
-#define SOLO_VE_OSD_H_SHADOW (1<<15)
-#define SOLO_VE_OSD_V_SHADOW (1<<14)
+#define SOLO_VE_OSD_V_DOUBLE BIT(16) /* 6110 Only */
+#define SOLO_VE_OSD_H_SHADOW BIT(15)
+#define SOLO_VE_OSD_V_SHADOW BIT(14)
#define SOLO_VE_OSD_H_OFFSET(n) ((n & 0x7f)<<7)
#define SOLO_VE_OSD_V_OFFSET(n) (n & 0x7f)
@@ -435,18 +437,18 @@
#define SOLO_VE_JPEG_QUE(n) (0x0A04+((n)*8))
#define SOLO_VD_CFG0 0x0900
-#define SOLO_VD_CFG_NO_WRITE_NO_WINDOW (1<<24)
-#define SOLO_VD_CFG_BUSY_WIAT_CODE (1<<23)
-#define SOLO_VD_CFG_BUSY_WIAT_REF (1<<22)
-#define SOLO_VD_CFG_BUSY_WIAT_RES (1<<21)
-#define SOLO_VD_CFG_BUSY_WIAT_MS (1<<20)
-#define SOLO_VD_CFG_SINGLE_MODE (1<<18)
-#define SOLO_VD_CFG_SCAL_MANUAL (1<<17)
-#define SOLO_VD_CFG_USER_PAGE_CTRL (1<<16)
-#define SOLO_VD_CFG_LITTLE_ENDIAN (1<<15)
-#define SOLO_VD_CFG_START_FI (1<<14)
-#define SOLO_VD_CFG_ERR_LOCK (1<<13)
-#define SOLO_VD_CFG_ERR_INT_ENA (1<<12)
+#define SOLO_VD_CFG_NO_WRITE_NO_WINDOW BIT(24)
+#define SOLO_VD_CFG_BUSY_WIAT_CODE BIT(23)
+#define SOLO_VD_CFG_BUSY_WIAT_REF BIT(22)
+#define SOLO_VD_CFG_BUSY_WIAT_RES BIT(21)
+#define SOLO_VD_CFG_BUSY_WIAT_MS BIT(20)
+#define SOLO_VD_CFG_SINGLE_MODE BIT(18)
+#define SOLO_VD_CFG_SCAL_MANUAL BIT(17)
+#define SOLO_VD_CFG_USER_PAGE_CTRL BIT(16)
+#define SOLO_VD_CFG_LITTLE_ENDIAN BIT(15)
+#define SOLO_VD_CFG_START_FI BIT(14)
+#define SOLO_VD_CFG_ERR_LOCK BIT(13)
+#define SOLO_VD_CFG_ERR_INT_ENA BIT(12)
#define SOLO_VD_CFG_TIME_WIDTH(n) ((n)<<8)
#define SOLO_VD_CFG_DCT_INTERVAL(n) ((n)<<0)
@@ -459,37 +461,37 @@
#define SOLO_VD_CODE_ADR 0x090C
#define SOLO_VD_CTRL 0x0910
-#define SOLO_VD_OPER_ON (1<<31)
+#define SOLO_VD_OPER_ON BIT(31)
#define SOLO_VD_MAX_ITEM(n) ((n)<<0)
#define SOLO_VD_STATUS0 0x0920
-#define SOLO_VD_STATUS0_INTR_ACK (1<<22)
-#define SOLO_VD_STATUS0_INTR_EMPTY (1<<21)
-#define SOLO_VD_STATUS0_INTR_ERR (1<<20)
+#define SOLO_VD_STATUS0_INTR_ACK BIT(22)
+#define SOLO_VD_STATUS0_INTR_EMPTY BIT(21)
+#define SOLO_VD_STATUS0_INTR_ERR BIT(20)
#define SOLO_VD_STATUS1 0x0924
#define SOLO_VD_IDX0 0x0930
-#define SOLO_VD_IDX_INTERLACE (1<<30)
+#define SOLO_VD_IDX_INTERLACE BIT(30)
#define SOLO_VD_IDX_CHANNEL(n) ((n)<<24)
#define SOLO_VD_IDX_SIZE(n) ((n)<<0)
#define SOLO_VD_IDX1 0x0934
#define SOLO_VD_IDX_SRC_SCALE(n) ((n)<<28)
#define SOLO_VD_IDX_WINDOW(n) ((n)<<24)
-#define SOLO_VD_IDX_DEINTERLACE (1<<16)
+#define SOLO_VD_IDX_DEINTERLACE BIT(16)
#define SOLO_VD_IDX_H_BLOCK(n) ((n)<<8)
#define SOLO_VD_IDX_V_BLOCK(n) ((n)<<0)
#define SOLO_VD_IDX2 0x0938
-#define SOLO_VD_IDX_REF_BASE_SIDE (1<<31)
+#define SOLO_VD_IDX_REF_BASE_SIDE BIT(31)
#define SOLO_VD_IDX_REF_BASE(n) (((n)>>16)&0xffff)
#define SOLO_VD_IDX3 0x093C
#define SOLO_VD_IDX_DISP_SCALE(n) ((n)<<28)
-#define SOLO_VD_IDX_INTERLACE_WR (1<<27)
-#define SOLO_VD_IDX_INTERPOL (1<<26)
-#define SOLO_VD_IDX_HOR2X (1<<25)
+#define SOLO_VD_IDX_INTERLACE_WR BIT(27)
+#define SOLO_VD_IDX_INTERPOL BIT(26)
+#define SOLO_VD_IDX_HOR2X BIT(25)
#define SOLO_VD_IDX_OFFSET_X(n) ((n)<<12)
#define SOLO_VD_IDX_OFFSET_Y(n) ((n)<<0)
@@ -511,21 +513,21 @@
#define SOLO_IIC_CFG 0x0B20
-#define SOLO_IIC_ENABLE (1<<8)
+#define SOLO_IIC_ENABLE BIT(8)
#define SOLO_IIC_PRESCALE(n) ((n)<<0)
#define SOLO_IIC_CTRL 0x0B24
-#define SOLO_IIC_AUTO_CLEAR (1<<20)
-#define SOLO_IIC_STATE_RX_ACK (1<<19)
-#define SOLO_IIC_STATE_BUSY (1<<18)
-#define SOLO_IIC_STATE_SIG_ERR (1<<17)
-#define SOLO_IIC_STATE_TRNS (1<<16)
+#define SOLO_IIC_AUTO_CLEAR BIT(20)
+#define SOLO_IIC_STATE_RX_ACK BIT(19)
+#define SOLO_IIC_STATE_BUSY BIT(18)
+#define SOLO_IIC_STATE_SIG_ERR BIT(17)
+#define SOLO_IIC_STATE_TRNS BIT(16)
#define SOLO_IIC_CH_SET(n) ((n)<<5)
-#define SOLO_IIC_ACK_EN (1<<4)
-#define SOLO_IIC_START (1<<3)
-#define SOLO_IIC_STOP (1<<2)
-#define SOLO_IIC_READ (1<<1)
-#define SOLO_IIC_WRITE (1<<0)
+#define SOLO_IIC_ACK_EN BIT(4)
+#define SOLO_IIC_START BIT(3)
+#define SOLO_IIC_STOP BIT(2)
+#define SOLO_IIC_READ BIT(1)
+#define SOLO_IIC_WRITE BIT(0)
#define SOLO_IIC_TXD 0x0B28
#define SOLO_IIC_RXD 0x0B2C
@@ -535,15 +537,15 @@
*/
#define SOLO_UART_CONTROL(n) (0x0BA0 + ((n)*0x20))
#define SOLO_UART_CLK_DIV(n) ((n)<<24)
-#define SOLO_MODEM_CTRL_EN (1<<20)
-#define SOLO_PARITY_ERROR_DROP (1<<18)
-#define SOLO_IRQ_ERR_EN (1<<17)
-#define SOLO_IRQ_RX_EN (1<<16)
-#define SOLO_IRQ_TX_EN (1<<15)
-#define SOLO_RX_EN (1<<14)
-#define SOLO_TX_EN (1<<13)
-#define SOLO_UART_HALF_DUPLEX (1<<12)
-#define SOLO_UART_LOOPBACK (1<<11)
+#define SOLO_MODEM_CTRL_EN BIT(20)
+#define SOLO_PARITY_ERROR_DROP BIT(18)
+#define SOLO_IRQ_ERR_EN BIT(17)
+#define SOLO_IRQ_RX_EN BIT(16)
+#define SOLO_IRQ_TX_EN BIT(15)
+#define SOLO_RX_EN BIT(14)
+#define SOLO_TX_EN BIT(13)
+#define SOLO_UART_HALF_DUPLEX BIT(12)
+#define SOLO_UART_LOOPBACK BIT(11)
#define SOLO_BAUDRATE_230400 ((0<<9)|(0<<6))
#define SOLO_BAUDRATE_115200 ((0<<9)|(1<<6))
@@ -569,12 +571,12 @@
#define SOLO_UART_PARITY_ODD (3<<0)
#define SOLO_UART_STATUS(n) (0x0BA4 + ((n)*0x20))
-#define SOLO_UART_CTS (1<<15)
-#define SOLO_UART_RX_BUSY (1<<14)
-#define SOLO_UART_OVERRUN (1<<13)
-#define SOLO_UART_FRAME_ERR (1<<12)
-#define SOLO_UART_PARITY_ERR (1<<11)
-#define SOLO_UART_TX_BUSY (1<<5)
+#define SOLO_UART_CTS BIT(15)
+#define SOLO_UART_RX_BUSY BIT(14)
+#define SOLO_UART_OVERRUN BIT(13)
+#define SOLO_UART_FRAME_ERR BIT(12)
+#define SOLO_UART_PARITY_ERR BIT(11)
+#define SOLO_UART_TX_BUSY BIT(5)
#define SOLO_UART_RX_BUFF_CNT(stat) (((stat)>>6) & 0x1f)
#define SOLO_UART_RX_BUFF_SIZE 8
@@ -582,9 +584,9 @@
#define SOLO_UART_TX_BUFF_SIZE 8
#define SOLO_UART_TX_DATA(n) (0x0BA8 + ((n)*0x20))
-#define SOLO_UART_TX_DATA_PUSH (1<<8)
+#define SOLO_UART_TX_DATA_PUSH BIT(8)
#define SOLO_UART_RX_DATA(n) (0x0BAC + ((n)*0x20))
-#define SOLO_UART_RX_DATA_POP (1<<8)
+#define SOLO_UART_RX_DATA_POP BIT(8)
#define SOLO_TIMER_CLOCK_NUM 0x0be0
#define SOLO_TIMER_USEC 0x0be8
@@ -592,19 +594,19 @@
#define SOLO_TIMER_USEC_LSB 0x0d20 /* 6110 Only */
#define SOLO_AUDIO_CONTROL 0x0D00
-#define SOLO_AUDIO_ENABLE (1<<31)
-#define SOLO_AUDIO_MASTER_MODE (1<<30)
-#define SOLO_AUDIO_I2S_MODE (1<<29)
-#define SOLO_AUDIO_I2S_LR_SWAP (1<<27)
-#define SOLO_AUDIO_I2S_8BIT (1<<26)
+#define SOLO_AUDIO_ENABLE BIT(31)
+#define SOLO_AUDIO_MASTER_MODE BIT(30)
+#define SOLO_AUDIO_I2S_MODE BIT(29)
+#define SOLO_AUDIO_I2S_LR_SWAP BIT(27)
+#define SOLO_AUDIO_I2S_8BIT BIT(26)
#define SOLO_AUDIO_I2S_MULTI(n) ((n)<<24)
-#define SOLO_AUDIO_MIX_9TO0 (1<<23)
+#define SOLO_AUDIO_MIX_9TO0 BIT(23)
#define SOLO_AUDIO_DEC_9TO0_VOL(n) ((n)<<20)
-#define SOLO_AUDIO_MIX_19TO10 (1<<19)
+#define SOLO_AUDIO_MIX_19TO10 BIT(19)
#define SOLO_AUDIO_DEC_19TO10_VOL(n) ((n)<<16)
#define SOLO_AUDIO_MODE(n) ((n)<<0)
#define SOLO_AUDIO_SAMPLE 0x0D04
-#define SOLO_AUDIO_EE_MODE_ON (1<<30)
+#define SOLO_AUDIO_EE_MODE_ON BIT(30)
#define SOLO_AUDIO_EE_ENC_CH(ch) ((ch)<<25)
#define SOLO_AUDIO_BITRATE(n) ((n)<<16)
#define SOLO_AUDIO_CLK_DIV(n) ((n)<<0)
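
For reference, the BIT() helper these register hunks convert to is the generic kernel macro from include/linux/bits.h; below is a minimal userspace sketch of the equivalence (the printf harness is illustrative only, not part of the driver).

/* Sketch: BIT(n) expands to an unsigned long shift, so BIT(31) avoids the
 * signed-overflow pitfall of (1<<31) on a 32-bit int. */
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

int main(void)
{
	/* e.g. SOLO_VO_DISP_ON: formerly (1<<31), now BIT(31) */
	printf("0x%lx\n", BIT(31));	/* prints 0x80000000 */
	return 0;
}
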
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 609100a46ff8..476d7f3b32d6 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -822,25 +822,18 @@ static int solo_enc_enum_fmt_cap(struct file *file, void *priv,
switch (dev_type) {
case SOLO_DEV_6010:
f->pixelformat = V4L2_PIX_FMT_MPEG4;
- strscpy(f->description, "MPEG-4 part 2",
- sizeof(f->description));
break;
case SOLO_DEV_6110:
f->pixelformat = V4L2_PIX_FMT_H264;
- strscpy(f->description, "H.264", sizeof(f->description));
break;
}
break;
case 1:
f->pixelformat = V4L2_PIX_FMT_MJPEG;
- strscpy(f->description, "MJPEG", sizeof(f->description));
break;
default:
return -EINVAL;
}
-
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
-
return 0;
}
@@ -886,7 +879,6 @@ static int solo_enc_try_fmt_cap(struct file *file, void *priv,
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
pix->sizeimage = FRAME_BUF_SIZE;
pix->bytesperline = 0;
- pix->priv = 0;
return 0;
}
@@ -941,7 +933,6 @@ static int solo_enc_get_fmt_cap(struct file *file, void *priv,
V4L2_FIELD_NONE;
pix->sizeimage = FRAME_BUF_SIZE;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- pix->priv = 0;
return 0;
}
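
The dropped strscpy(f->description, ...) calls here (and in several later hunks) rely on the V4L2 core now generating the description string from the fourcc in v4l2-ioctl.c, so an enum handler only has to report the pixel format. A hedged sketch of what such a handler reduces to (handler name and format list are illustrative, not from any driver in this series):

#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

/* Sketch only: minimal ENUM_FMT handler once the core fills f->description. */
static int demo_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	static const u32 demo_formats[] = {
		V4L2_PIX_FMT_UYVY,
		V4L2_PIX_FMT_NV12,
	};

	if (f->index >= ARRAY_SIZE(demo_formats))
		return -EINVAL;
	f->pixelformat = demo_formats[f->index];
	return 0;
}
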
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index a968f75920b5..78792067e920 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -458,8 +458,6 @@ static int solo_enum_fmt_cap(struct file *file, void *priv,
return -EINVAL;
f->pixelformat = V4L2_PIX_FMT_UYVY;
- strscpy(f->description, "UYUV 4:2:2 Packed", sizeof(f->description));
-
return 0;
}
@@ -479,7 +477,6 @@ static int solo_try_fmt_cap(struct file *file, void *priv,
pix->field = V4L2_FIELD_INTERLACED;
pix->pixelformat = V4L2_PIX_FMT_UYVY;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- pix->priv = 0;
return 0;
}
@@ -509,7 +506,6 @@ static int solo_get_fmt_cap(struct file *file, void *priv,
pix->sizeimage = solo_image_size(solo_dev);
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
pix->bytesperline = solo_bytesperline(solo_dev);
- pix->priv = 0;
return 0;
}
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index e52e29814378..fd3de3bb0c89 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -560,9 +560,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "4:2:2, packed, UYVY", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_UYVY;
- f->flags = 0;
return 0;
}
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 8c2442a11f07..e8a8ec5405e2 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -14,7 +14,6 @@
/* for debugging ARM communication: */
//#define COM_DEBUG
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index 432789a3c312..a851ba328e4a 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -37,12 +37,10 @@ void av7110_ir_handler(struct av7110 *av7110, u32 ircom)
proto = RC_PROTO_RC5;
break;
- case IR_RCMM: /* RCMM: ? bits device address, ? bits command */
- command = ircom & 0xff;
- addr = (ircom >> 8) & 0x1f;
- scancode = ircom;
+	case IR_RCMM:	/* RCMM: 32-bit scancode */
+ scancode = ircom & ~0x8000;
toggle = ircom & 0x8000;
- proto = RC_PROTO_UNKNOWN;
+ proto = RC_PROTO_RCMM32;
break;
case IR_RC5_EXT:
@@ -83,9 +81,9 @@ static int change_protocol(struct rc_dev *rcdev, u64 *rc_type)
struct av7110 *av7110 = rcdev->priv;
u32 ir_config;
- if (*rc_type & RC_PROTO_BIT_UNKNOWN) {
+ if (*rc_type & RC_PROTO_BIT_RCMM32) {
ir_config = IR_RCMM;
- *rc_type = RC_PROTO_UNKNOWN;
+ *rc_type = RC_PROTO_BIT_RCMM32;
} else if (*rc_type & RC_PROTO_BIT_RC5) {
if (FW_VERSION(av7110->arm_app) >= 0x2620)
ir_config = IR_RC5_EXT;
@@ -133,7 +131,7 @@ int av7110_ir_init(struct av7110 *av7110)
}
rcdev->dev.parent = &pci->dev;
- rcdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_UNKNOWN;
+ rcdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RCMM32;
rcdev->change_protocol = change_protocol;
rcdev->map_name = RC_MAP_HAUPPAUGE;
rcdev->priv = av7110;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 8e0952d65ad4..2fb82d50c53e 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -34,53 +34,43 @@
*/
static const struct tw68_format formats[] = {
{
- .name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = 16,
.twformat = ColorFormatRGB15,
}, {
- .name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.depth = 16,
.twformat = ColorFormatRGB15 | ColorFormatBSWAP,
}, {
- .name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.twformat = ColorFormatRGB16,
}, {
- .name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.twformat = ColorFormatRGB16 | ColorFormatBSWAP,
}, {
- .name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.depth = 24,
.twformat = ColorFormatRGB24,
}, {
- .name = "24 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 24,
.twformat = ColorFormatRGB24 | ColorFormatBSWAP,
}, {
- .name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.depth = 32,
.twformat = ColorFormatRGB32,
}, {
- .name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.twformat = ColorFormatRGB32 | ColorFormatBSWAP |
ColorFormatWSWAP,
}, {
- .name = "4:2:2 packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.twformat = ColorFormatYUY2,
}, {
- .name = "4:2:2 packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.twformat = ColorFormatYUY2 | ColorFormatBSWAP,
@@ -592,7 +582,6 @@ static int tw68_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
return 0;
}
@@ -774,9 +763,6 @@ static int tw68_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index >= FORMATS)
return -EINVAL;
- strscpy(f->description, formats[f->index].name,
- sizeof(f->description));
-
f->pixelformat = formats[f->index].fourcc;
return 0;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 7021290d726a..a1f422d6e600 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -85,7 +85,6 @@ struct tw68_tvnorm {
};
struct tw68_format {
- char *name;
u32 fourcc;
u32 depth;
u32 twformat;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 8a19654b393a..83a785010753 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -16,7 +16,7 @@ source "drivers/media/platform/marvell-ccic/Kconfig"
config VIDEO_VIA_CAMERA
tristate "VIAFB camera controller support"
depends on FB_VIA
- select VIDEOBUF_DMA_SG
+ select VIDEOBUF2_DMA_SG
select VIDEO_OV7670
help
Driver support for the integrated camera controller in VIA
@@ -121,7 +121,7 @@ config VIDEO_S3C_CAMIF
config VIDEO_STM32_DCMI
tristate "STM32 Digital Camera Memory Interface (DCMI) support"
- depends on VIDEO_V4L2 && OF
+ depends on VIDEO_V4L2 && OF && MEDIA_CONTROLLER
depends on ARCH_STM32 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
@@ -146,7 +146,7 @@ source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
source "drivers/media/platform/rcar-vin/Kconfig"
source "drivers/media/platform/atmel/Kconfig"
-source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
+source "drivers/media/platform/sunxi/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 7cbbd925124c..6ee7eb0d36f4 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -100,4 +100,4 @@ obj-y += meson/
obj-y += cros-ec-cec/
-obj-$(CONFIG_VIDEO_SUN6I_CSI) += sunxi/sun6i-csi/
+obj-y += sunxi/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index fe7b937eb5f2..2b42ba1f5949 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -76,7 +76,6 @@ struct bus_format {
/*
* struct vpfe_fmt - VPFE media bus format information
- * @name: V4L2 format description
* @code: V4L2 media bus format code
* @shifted: V4L2 media bus format code for the same pixel layout but
* shifted to be 8 bits per pixel. =0 if format is not shiftable.
@@ -86,7 +85,6 @@ struct bus_format {
* @supported: Indicates format supported by subdev
*/
struct vpfe_fmt {
- const char *name;
u32 fourcc;
u32 code;
struct bus_format l;
@@ -97,7 +95,6 @@ struct vpfe_fmt {
static struct vpfe_fmt formats[] = {
{
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
.l.width = 10,
@@ -106,7 +103,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 2,
.supported = false,
}, {
- .name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
.l.width = 10,
@@ -115,7 +111,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 2,
.supported = false,
}, {
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
.l.width = 10,
@@ -124,7 +119,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 2,
.supported = false,
}, {
- .name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
.l.width = 10,
@@ -133,7 +127,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 2,
.supported = false,
}, {
- .name = "RAW8 BGGR",
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.l.width = 10,
@@ -142,7 +135,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 1,
.supported = false,
}, {
- .name = "RAW8 GBRG",
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
.l.width = 10,
@@ -151,7 +143,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 1,
.supported = false,
}, {
- .name = "RAW8 GRBG",
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
.l.width = 10,
@@ -160,7 +151,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 1,
.supported = false,
}, {
- .name = "RAW8 RGGB",
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
.l.width = 10,
@@ -169,7 +159,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 1,
.supported = false,
}, {
- .name = "RGB565 (LE)",
.fourcc = V4L2_PIX_FMT_RGB565,
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.l.width = 10,
@@ -178,7 +167,6 @@ static struct vpfe_fmt formats[] = {
.s.bpp = 2,
.supported = false,
}, {
- .name = "RGB565 (BE)",
.fourcc = V4L2_PIX_FMT_RGB565X,
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
.l.width = 10,
@@ -1412,10 +1400,6 @@ static int vpfe_querycap(struct file *file, void *priv,
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", vpfe->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1540,12 +1524,10 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
- f->type = vpfe->fmt.type;
- vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
- f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
+ vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s\n",
+ f->index, fmt->code, print_fourcc(fmt->fourcc));
return 0;
}
@@ -2393,6 +2375,8 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = q;
vdev->lock = &vpfe->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
video_set_drvdata(vdev, vpfe);
err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
if (err) {
@@ -2505,10 +2489,9 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
&vpfe->notifier, of_fwnode_handle(rem),
sizeof(struct v4l2_async_subdev));
- if (IS_ERR(pdata->asd[i])) {
- of_node_put(rem);
+ of_node_put(rem);
+ if (IS_ERR(pdata->asd[i]))
goto cleanup;
- }
}
of_node_put(endpoint);
@@ -2557,7 +2540,6 @@ static int vpfe_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
ret = -ENODEV;
goto probe_out_cleanup;
}
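
This file also shows the recurring device_caps migration in this series: querycap stops filling cap->device_caps and cap->capabilities, and the driver instead sets video_device::device_caps once before registration so the core can populate struct v4l2_capability. A minimal hedged sketch of the pattern (driver names and strings are illustrative):

#include <linux/string.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

/* Sketch: querycap reports identity only; capabilities come from device_caps. */
static int demo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strscpy(cap->driver, "demo", sizeof(cap->driver));
	strscpy(cap->card, "Demo Capture", sizeof(cap->card));
	strscpy(cap->bus_info, "platform:demo", sizeof(cap->bus_info));
	return 0;
}

static int demo_register(struct video_device *vdev)
{
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_READWRITE;
	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}
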
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 17d7aa426788..4678285f34c6 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -65,12 +65,6 @@ struct vpfe_hw_if_param {
#define VPFE_MAX_SUBDEV 1
#define VPFE_MAX_INPUTS 1
-struct vpfe_pixel_format {
- struct v4l2_fmtdesc fmtdesc;
- /* bytes per pixel */
- int bpp;
-};
-
struct vpfe_std_info {
int active_pixels;
int active_lines;
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
index 4a0ed29723e8..0746c48ec23f 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -66,13 +66,13 @@
#define VPFE_PIX_FMT_MASK 3
#define VPFE_PIX_FMT_SHIFT 12
#define VPFE_VP2SDR_DISABLE 0xfffbffff
-#define VPFE_WEN_ENABLE (1 << 17)
+#define VPFE_WEN_ENABLE BIT(17)
#define VPFE_SDR2RSZ_DISABLE 0xfff7ffff
-#define VPFE_VDHDEN_ENABLE (1 << 16)
-#define VPFE_LPF_ENABLE (1 << 14)
-#define VPFE_ALAW_ENABLE (1 << 3)
+#define VPFE_VDHDEN_ENABLE BIT(16)
+#define VPFE_LPF_ENABLE BIT(14)
+#define VPFE_ALAW_ENABLE BIT(3)
#define VPFE_ALAW_GAMMA_WD_MASK 7
-#define VPFE_BLK_CLAMP_ENABLE (1 << 31)
+#define VPFE_BLK_CLAMP_ENABLE BIT(31)
#define VPFE_BLK_SGAIN_MASK 0x1f
#define VPFE_BLK_ST_PXL_MASK 0x7fff
#define VPFE_BLK_ST_PXL_SHIFT 10
@@ -85,8 +85,8 @@
#define VPFE_BLK_COMP_GB_COMP_SHIFT 8
#define VPFE_BLK_COMP_GR_COMP_SHIFT 16
#define VPFE_BLK_COMP_R_COMP_SHIFT 24
-#define VPFE_LATCH_ON_VSYNC_DISABLE (1 << 15)
-#define VPFE_DATA_PACK_ENABLE (1 << 11)
+#define VPFE_LATCH_ON_VSYNC_DISABLE BIT(15)
+#define VPFE_DATA_PACK_ENABLE BIT(11)
#define VPFE_HORZ_INFO_SPH_SHIFT 16
#define VPFE_VERT_START_SLV0_SHIFT 16
#define VPFE_VDINT_VDINT0_SHIFT 16
@@ -114,15 +114,15 @@
#define VPFE_SYN_FLDMODE_MASK 1
#define VPFE_SYN_FLDMODE_SHIFT 7
#define VPFE_REC656IF_BT656_EN 3
-#define VPFE_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define VPFE_SYN_MODE_VD_POL_NEGATIVE BIT(2)
#define VPFE_CCDCFG_Y8POS_SHIFT 11
-#define VPFE_CCDCFG_BW656_10BIT (1 << 5)
+#define VPFE_CCDCFG_BW656_10BIT BIT(5)
#define VPFE_SDOFST_FIELD_INTERLEAVED 0x249
#define VPFE_NO_CULLING 0xffff00ff
-#define VPFE_VDINT0 (1 << 0)
-#define VPFE_VDINT1 (1 << 1)
-#define VPFE_VDINT2 (1 << 2)
-#define VPFE_DMA_CNTL_OVERFLOW (1 << 31)
+#define VPFE_VDINT0 BIT(0)
+#define VPFE_VDINT1 BIT(1)
+#define VPFE_VDINT2 BIT(2)
+#define VPFE_DMA_CNTL_OVERFLOW BIT(31)
#define VPFE_CONFIG_PCLK_INV_SHIFT 0
#define VPFE_CONFIG_PCLK_INV_MASK 1
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index f899ac3b4a61..eb12f3793062 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -630,7 +630,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
}
if (hsync_counter < 0 || vsync_counter < 0) {
- u32 ctrl;
+ u32 ctrl = 0;
if (hsync_counter < 0) {
ctrl = VE_CTRL_HSYNC_POL;
@@ -650,7 +650,8 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
V4L2_DV_VSYNC_POS_POL;
}
- aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ if (ctrl)
+ aspeed_video_update(video, VE_CTRL, 0, ctrl);
}
}
@@ -1624,6 +1625,7 @@ static int aspeed_video_init(struct aspeed_video *video)
if (!aspeed_video_alloc_buf(video, &video->jpeg,
VE_JPEG_HEADER_SIZE)) {
dev_err(dev, "Failed to allocate DMA for JPEG header\n");
+ rc = -ENOMEM;
goto err_release_reserved_mem;
}
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index d7d94c1a39d3..428f117caa59 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -493,7 +493,7 @@ static void stop_streaming(struct vb2_queue *vq)
spin_unlock_irq(&isi->irqlock);
if (!isi->enable_preview_path) {
- timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
+ timeout = jiffies + (FRAME_INTERVAL_MILLI_SEC * HZ) / 1000;
/* Wait until the end of the current frame. */
while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
time_before(jiffies, timeout))
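
The atmel-isi change above is a units fix: FRAME_INTERVAL_MILLI_SEC is in milliseconds, so multiplying by HZ alone produced a wait roughly 1000 times too long; dividing by 1000 yields the intended jiffies count. An equivalent formulation using the standard helper, as a hedged sketch (the wrapper name is hypothetical):

#include <linux/jiffies.h>

/* Sketch: same millisecond-to-jiffies conversion via msecs_to_jiffies(). */
static inline unsigned long isi_frame_timeout(unsigned int interval_ms)
{
	return jiffies + msecs_to_jiffies(interval_ms);
}
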
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index 266df14da2d5..78381651238d 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -160,11 +160,8 @@ static int atmel_isc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- dev_err(dev, "failed to get irq: %d\n", ret);
- return ret;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, isc_interrupt, 0,
ATMEL_ISC_NAME, isc);
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index 5042d053b94e..e4d08acfbb49 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -2,7 +2,7 @@
/*
* Driver for Cadence MIPI-CSI2 TX Controller
*
- * Copyright (C) 2017-2018 Cadence Design Systems Inc.
+ * Copyright (C) 2017-2019 Cadence Design Systems Inc.
*/
#include <linux/clk.h>
@@ -52,6 +52,17 @@
#define CSI2TX_STREAM_IF_CFG_REG(n) (0x100 + (n) * 4)
#define CSI2TX_STREAM_IF_CFG_FILL_LEVEL(n) ((n) & 0x1f)
+/* CSI2TX V2 Registers */
+#define CSI2TX_V2_DPHY_CFG_REG 0x28
+#define CSI2TX_V2_DPHY_CFG_RESET BIT(16)
+#define CSI2TX_V2_DPHY_CFG_CLOCK_MODE BIT(10)
+#define CSI2TX_V2_DPHY_CFG_MODE_MASK GENMASK(9, 8)
+#define CSI2TX_V2_DPHY_CFG_MODE_LPDT (2 << 8)
+#define CSI2TX_V2_DPHY_CFG_MODE_HS (1 << 8)
+#define CSI2TX_V2_DPHY_CFG_MODE_ULPS (0 << 8)
+#define CSI2TX_V2_DPHY_CFG_CLK_ENABLE BIT(4)
+#define CSI2TX_V2_DPHY_CFG_LANE_ENABLE(n) BIT(n)
+
#define CSI2TX_LANES_MAX 4
#define CSI2TX_STREAMS_MAX 4
@@ -70,6 +81,13 @@ struct csi2tx_fmt {
u32 bpp;
};
+struct csi2tx_priv;
+
+/* CSI2TX Variant Operations */
+struct csi2tx_vops {
+ void (*dphy_setup)(struct csi2tx_priv *csi2tx);
+};
+
struct csi2tx_priv {
struct device *dev;
unsigned int count;
@@ -82,6 +100,8 @@ struct csi2tx_priv {
void __iomem *base;
+ struct csi2tx_vops *vops;
+
struct clk *esc_clk;
struct clk *p_clk;
struct clk *pixel_clk[CSI2TX_STREAMS_MAX];
@@ -209,53 +229,92 @@ static const struct v4l2_subdev_pad_ops csi2tx_pad_ops = {
.set_fmt = csi2tx_set_pad_format,
};
-static void csi2tx_reset(struct csi2tx_priv *csi2tx)
+/* Set Wake Up value in the D-PHY */
+static void csi2tx_dphy_set_wakeup(struct csi2tx_priv *csi2tx)
{
- writel(CSI2TX_CONFIG_SRST_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
-
- udelay(10);
+ writel(CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(32),
+ csi2tx->base + CSI2TX_DPHY_CLK_WAKEUP_REG);
}
-static int csi2tx_start(struct csi2tx_priv *csi2tx)
+/*
+ * Finishes the D-PHY initialization
+ * @reg: D-PHY configuration value to be used
+ */
+static void csi2tx_dphy_init_finish(struct csi2tx_priv *csi2tx, u32 reg)
{
- struct media_entity *entity = &csi2tx->subdev.entity;
- struct media_link *link;
unsigned int i;
- u32 reg;
- csi2tx_reset(csi2tx);
+ udelay(10);
- writel(CSI2TX_CONFIG_CFG_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
+ /* Enable our (clock and data) lanes */
+ reg |= CSI2TX_DPHY_CFG_CLK_ENABLE;
+ for (i = 0; i < csi2tx->num_lanes; i++)
+ reg |= CSI2TX_DPHY_CFG_LANE_ENABLE(csi2tx->lanes[i] - 1);
+ writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG);
udelay(10);
- /* Configure our PPI interface with the D-PHY */
- writel(CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(32),
- csi2tx->base + CSI2TX_DPHY_CLK_WAKEUP_REG);
+ /* Switch to HS mode */
+ reg &= ~CSI2TX_DPHY_CFG_MODE_MASK;
+ writel(reg | CSI2TX_DPHY_CFG_MODE_HS,
+ csi2tx->base + CSI2TX_DPHY_CFG_REG);
+}
+
+/* Configures D-PHY in CSIv1.3 */
+static void csi2tx_dphy_setup(struct csi2tx_priv *csi2tx)
+{
+ u32 reg;
+ unsigned int i;
+
+ csi2tx_dphy_set_wakeup(csi2tx);
/* Put our lanes (clock and data) out of reset */
reg = CSI2TX_DPHY_CFG_CLK_RESET | CSI2TX_DPHY_CFG_MODE_LPDT;
for (i = 0; i < csi2tx->num_lanes; i++)
- reg |= CSI2TX_DPHY_CFG_LANE_RESET(csi2tx->lanes[i]);
+ reg |= CSI2TX_DPHY_CFG_LANE_RESET(csi2tx->lanes[i] - 1);
writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG);
- udelay(10);
+ csi2tx_dphy_init_finish(csi2tx, reg);
+}
- /* Enable our (clock and data) lanes */
- reg |= CSI2TX_DPHY_CFG_CLK_ENABLE;
- for (i = 0; i < csi2tx->num_lanes; i++)
- reg |= CSI2TX_DPHY_CFG_LANE_ENABLE(csi2tx->lanes[i]);
- writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG);
+/* Configures D-PHY in CSIv2 */
+static void csi2tx_v2_dphy_setup(struct csi2tx_priv *csi2tx)
+{
+ u32 reg;
+
+ csi2tx_dphy_set_wakeup(csi2tx);
+
+ /* Put our lanes (clock and data) out of reset */
+ reg = CSI2TX_V2_DPHY_CFG_RESET | CSI2TX_V2_DPHY_CFG_MODE_LPDT;
+ writel(reg, csi2tx->base + CSI2TX_V2_DPHY_CFG_REG);
+
+ csi2tx_dphy_init_finish(csi2tx, reg);
+}
+
+static void csi2tx_reset(struct csi2tx_priv *csi2tx)
+{
+ writel(CSI2TX_CONFIG_SRST_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
udelay(10);
+}
- /* Switch to HS mode */
- reg &= ~CSI2TX_DPHY_CFG_MODE_MASK;
- writel(reg | CSI2TX_DPHY_CFG_MODE_HS,
- csi2tx->base + CSI2TX_DPHY_CFG_REG);
+static int csi2tx_start(struct csi2tx_priv *csi2tx)
+{
+ struct media_entity *entity = &csi2tx->subdev.entity;
+ struct media_link *link;
+ unsigned int i;
+
+ csi2tx_reset(csi2tx);
+
+ writel(CSI2TX_CONFIG_CFG_REQ, csi2tx->base + CSI2TX_CONFIG_REG);
udelay(10);
+ if (csi2tx->vops && csi2tx->vops->dphy_setup) {
+ csi2tx->vops->dphy_setup(csi2tx);
+ udelay(10);
+ }
+
/*
* Create a static mapping between the CSI virtual channels
* and the input streams.
@@ -434,7 +493,7 @@ static int csi2tx_check_lanes(struct csi2tx_priv *csi2tx)
{
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
struct device_node *ep;
- int ret;
+ int ret, i;
ep = of_graph_get_endpoint_by_regs(csi2tx->dev->of_node, 0, 0);
if (!ep)
@@ -461,6 +520,15 @@ static int csi2tx_check_lanes(struct csi2tx_priv *csi2tx)
goto out;
}
+ for (i = 0; i < csi2tx->num_lanes; i++) {
+ if (v4l2_ep.bus.mipi_csi2.data_lanes[i] < 1) {
+ dev_err(csi2tx->dev, "Invalid lane[%d] number: %u\n",
+ i, v4l2_ep.bus.mipi_csi2.data_lanes[i]);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
memcpy(csi2tx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
sizeof(csi2tx->lanes));
@@ -469,9 +537,35 @@ out:
return ret;
}
+static const struct csi2tx_vops csi2tx_vops = {
+ .dphy_setup = csi2tx_dphy_setup,
+};
+
+static const struct csi2tx_vops csi2tx_v2_vops = {
+ .dphy_setup = csi2tx_v2_dphy_setup,
+};
+
+static const struct of_device_id csi2tx_of_table[] = {
+ {
+ .compatible = "cdns,csi2tx",
+ .data = &csi2tx_vops
+ },
+ {
+ .compatible = "cdns,csi2tx-1.3",
+ .data = &csi2tx_vops
+ },
+ {
+ .compatible = "cdns,csi2tx-2.1",
+ .data = &csi2tx_v2_vops
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, csi2tx_of_table);
+
static int csi2tx_probe(struct platform_device *pdev)
{
struct csi2tx_priv *csi2tx;
+ const struct of_device_id *of_id;
unsigned int i;
int ret;
@@ -486,6 +580,9 @@ static int csi2tx_probe(struct platform_device *pdev)
if (ret)
goto err_free_priv;
+ of_id = of_match_node(csi2tx_of_table, pdev->dev.of_node);
+ csi2tx->vops = (struct csi2tx_vops *)of_id->data;
+
v4l2_subdev_init(&csi2tx->subdev, &csi2tx_subdev_ops);
csi2tx->subdev.owner = THIS_MODULE;
csi2tx->subdev.dev = &pdev->dev;
@@ -543,12 +640,6 @@ static int csi2tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id csi2tx_of_table[] = {
- { .compatible = "cdns,csi2tx" },
- { },
-};
-MODULE_DEVICE_TABLE(of, csi2tx_of_table);
-
static struct platform_driver csi2tx_driver = {
.probe = csi2tx_probe,
.remove = csi2tx_remove,
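
The csi2tx rework above keys the per-variant D-PHY setup off the compatible string by stashing an ops structure in of_device_id::data. A hedged sketch of that general pattern, here using the of_device_get_match_data() helper instead of the open-coded of_match_node() in the patch (all identifiers are illustrative):

#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_vops {
	void (*dphy_setup)(struct platform_device *pdev);
};

static void demo_v1_dphy_setup(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "v1.3 D-PHY bring-up\n");
}

static void demo_v2_dphy_setup(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "v2.x D-PHY bring-up\n");
}

static const struct demo_vops demo_v1_vops = { .dphy_setup = demo_v1_dphy_setup };
static const struct demo_vops demo_v2_vops = { .dphy_setup = demo_v2_dphy_setup };

static const struct of_device_id demo_of_table[] = {
	{ .compatible = "vendor,demo-1.3", .data = &demo_v1_vops },
	{ .compatible = "vendor,demo-2.1", .data = &demo_v2_vops },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	/* of_device_get_match_data() returns the .data of the matched entry */
	const struct demo_vops *vops = of_device_get_match_data(&pdev->dev);

	if (vops && vops->dphy_setup)
		vops->dphy_setup(pdev);
	return 0;
}
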
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 01428de2596e..73222c0615c0 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -390,9 +390,6 @@ static int coda_querycap(struct file *file, void *priv,
strscpy(cap->card, coda_product_name(ctx->dev->devtype->product),
sizeof(cap->card));
strscpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -2699,6 +2696,7 @@ static int coda_register_device(struct coda_dev *dev, int i)
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
video_set_drvdata(vfd, dev);
/* Not applicable, use the selection API instead */
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 068df9888dbf..76ab83f55cc0 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -206,10 +206,10 @@ static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
*/
struct cec_dmi_match {
- char *sys_vendor;
- char *product_name;
- char *devname;
- char *conn;
+ const char *sys_vendor;
+ const char *product_name;
+ const char *devname;
+ const char *conn;
};
static const struct cec_dmi_match cec_dmi_match_table[] = {
@@ -217,8 +217,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Fizz", "0000:00:02.0", "Port B" },
};
-static int cros_ec_cec_get_notifier(struct device *dev,
- struct cec_notifier **notify)
+static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
+ const char **conn)
{
int i;
@@ -233,26 +233,25 @@ static int cros_ec_cec_get_notifier(struct device *dev,
d = bus_find_device_by_name(&pci_bus_type, NULL,
m->devname);
if (!d)
- return -EPROBE_DEFER;
-
- *notify = cec_notifier_get_conn(d, m->conn);
+ return ERR_PTR(-EPROBE_DEFER);
put_device(d);
- return 0;
+ *conn = m->conn;
+ return d;
}
}
/* Hardware support must be added in the cec_dmi_match_table */
dev_warn(dev, "CEC notifier not configured for this hardware\n");
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
#else
-static int cros_ec_cec_get_notifier(struct device *dev,
- struct cec_notifier **notify)
+static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
+ const char **conn)
{
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
#endif
@@ -262,8 +261,14 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
struct cros_ec_device *cros_ec = ec_dev->ec_dev;
struct cros_ec_cec *cros_ec_cec;
+ struct device *hdmi_dev;
+ const char *conn = NULL;
int ret;
+ hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conn);
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
+
cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec),
GFP_KERNEL);
if (!cros_ec_cec)
@@ -272,10 +277,6 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cros_ec_cec);
cros_ec_cec->cros_ec = cros_ec;
- ret = cros_ec_cec_get_notifier(&pdev->dev, &cros_ec_cec->notify);
- if (ret)
- return ret;
-
ret = device_init_wakeup(&pdev->dev, 1);
if (ret) {
dev_err(&pdev->dev, "failed to initialize wakeup\n");
@@ -283,29 +284,39 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
}
cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
- DRV_NAME, CEC_CAP_DEFAULTS, 1);
+ DRV_NAME,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
if (IS_ERR(cros_ec_cec->adap))
return PTR_ERR(cros_ec_cec->adap);
+ cros_ec_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, conn,
+ cros_ec_cec->adap);
+ if (!cros_ec_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_adapter;
+ }
+
/* Get CEC events from the EC. */
cros_ec_cec->notifier.notifier_call = cros_ec_cec_event;
ret = blocking_notifier_chain_register(&cros_ec->event_notifier,
&cros_ec_cec->notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register notifier\n");
- cec_delete_adapter(cros_ec_cec->adap);
- return ret;
+ goto out_probe_notify;
}
ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
- if (ret < 0) {
- cec_delete_adapter(cros_ec_cec->adap);
- return ret;
- }
-
- cec_register_cec_notifier(cros_ec_cec->adap, cros_ec_cec->notify);
+ if (ret < 0)
+ goto out_probe_notify;
return 0;
+
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+out_probe_adapter:
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
}
static int cros_ec_cec_remove(struct platform_device *pdev)
@@ -323,11 +334,9 @@ static int cros_ec_cec_remove(struct platform_device *pdev)
return ret;
}
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
cec_unregister_adapter(cros_ec_cec->adap);
- if (cros_ec_cec->notify)
- cec_notifier_put(cros_ec_cec->notify);
-
return 0;
}
diff --git a/drivers/media/platform/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
index 3ae301320313..c4894f6a254e 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/platform/davinci/dm644x_ccdc_regs.h
@@ -66,13 +66,13 @@
#define CCDC_PIX_FMT_MASK 3
#define CCDC_PIX_FMT_SHIFT 12
#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF
-#define CCDC_WEN_ENABLE (1 << 17)
+#define CCDC_WEN_ENABLE BIT(17)
#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF
-#define CCDC_VDHDEN_ENABLE (1 << 16)
-#define CCDC_LPF_ENABLE (1 << 14)
-#define CCDC_ALAW_ENABLE (1 << 3)
+#define CCDC_VDHDEN_ENABLE BIT(16)
+#define CCDC_LPF_ENABLE BIT(14)
+#define CCDC_ALAW_ENABLE BIT(3)
#define CCDC_ALAW_GAMMA_WD_MASK 7
-#define CCDC_BLK_CLAMP_ENABLE (1 << 31)
+#define CCDC_BLK_CLAMP_ENABLE BIT(31)
#define CCDC_BLK_SGAIN_MASK 0x1F
#define CCDC_BLK_ST_PXL_MASK 0x7FFF
#define CCDC_BLK_ST_PXL_SHIFT 10
@@ -85,11 +85,11 @@
#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
#define CCDC_BLK_COMP_GR_COMP_SHIFT 16
#define CCDC_BLK_COMP_R_COMP_SHIFT 24
-#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
-#define CCDC_FPC_ENABLE (1 << 15)
+#define CCDC_LATCH_ON_VSYNC_DISABLE BIT(15)
+#define CCDC_FPC_ENABLE BIT(15)
#define CCDC_FPC_DISABLE 0
#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
-#define CCDC_DATA_PACK_ENABLE (1 << 11)
+#define CCDC_DATA_PACK_ENABLE BIT(11)
#define CCDC_FMTCFG_VPIN_MASK 7
#define CCDC_FMTCFG_VPIN_SHIFT 12
#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
@@ -132,9 +132,9 @@
#define CCDC_SYN_FLDMODE_MASK 1
#define CCDC_SYN_FLDMODE_SHIFT 7
#define CCDC_REC656IF_BT656_EN 3
-#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define CCDC_SYN_MODE_VD_POL_NEGATIVE BIT(2)
#define CCDC_CCDCFG_Y8POS_SHIFT 11
-#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
+#define CCDC_CCDCFG_BW656_10BIT BIT(5)
#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
#define CCDC_NO_CULLING 0xffff00ff
#endif
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 000b191c42d8..ae419958e420 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -19,10 +19,6 @@
#include <asm/pgtable.h>
-#ifdef CONFIG_ARCH_DAVINCI
-#include <mach/cputype.h>
-#endif
-
#include <media/v4l2-dev.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -633,8 +629,6 @@ static int vpbe_display_querycap(struct file *file, void *priv,
struct vpbe_layer *layer = video_drvdata(file);
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
snprintf(cap->driver, sizeof(cap->driver), "%s",
dev_name(vpbe_dev->pdev));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
@@ -792,7 +786,6 @@ static int vpbe_display_enum_fmt(struct file *file, void *priv,
{
struct vpbe_layer *layer = video_drvdata(file);
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- unsigned int index = 0;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_ENUM_FMT, layer id = %d\n",
@@ -803,19 +796,10 @@ static int vpbe_display_enum_fmt(struct file *file, void *priv,
}
/* Fill in the information about format */
- index = fmt->index;
- memset(fmt, 0, sizeof(*fmt));
- fmt->index = index;
- fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- if (index == 0) {
- strscpy(fmt->description, "YUV 4:2:2 - UYVY",
- sizeof(fmt->description));
+ if (fmt->index == 0)
fmt->pixelformat = V4L2_PIX_FMT_UYVY;
- } else {
- strscpy(fmt->description, "Y/CbCr 4:2:0",
- sizeof(fmt->description));
+ else
fmt->pixelformat = V4L2_PIX_FMT_NV12;
- }
return 0;
}
@@ -1319,6 +1303,7 @@ static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
vbd->lock = &vpbe_display_layer->opslock;
vbd->vfl_dir = VFL_DIR_TX;
+ vbd->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
if (disp_dev->vpbe_dev->current_timings.timings_type &
VPBE_ENC_STD)
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 491842ef33c5..91b571a0ac2c 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -16,11 +16,6 @@
#include <linux/clk.h>
#include <linux/slab.h>
-#ifdef CONFIG_ARCH_DAVINCI
-#include <mach/cputype.h>
-#include <mach/hardware.h>
-#endif
-
#include <media/davinci/vpss.h>
#include <media/v4l2-device.h>
#include <media/davinci/vpbe_types.h>
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 425f91f07165..8caa084e5704 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -14,11 +14,6 @@
#include <linux/videodev2.h>
#include <linux/slab.h>
-#ifdef CONFIG_ARCH_DAVINCI
-#include <mach/hardware.h>
-#include <mach/mux.h>
-#endif
-
#include <linux/platform_data/i2c-davinci.h>
#include <linux/io.h>
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 295fbf1a49cf..916ed743d716 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -119,57 +119,27 @@ static const struct vpfe_standard vpfe_standards[] = {
/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
{
- .fmtdesc = {
- .index = 0,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "Bayer GrRBGb 8bit A-Law compr.",
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- },
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
.bpp = 1,
},
{
- .fmtdesc = {
- .index = 1,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "Bayer GrRBGb - 16bit",
- .pixelformat = V4L2_PIX_FMT_SBGGR16,
- },
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
.bpp = 2,
},
{
- .fmtdesc = {
- .index = 2,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "Bayer GrRBGb 8bit DPCM compr.",
- .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
- },
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
.bpp = 1,
},
{
- .fmtdesc = {
- .index = 3,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "YCbCr 4:2:2 Interleaved UYVY",
- .pixelformat = V4L2_PIX_FMT_UYVY,
- },
+ .pixelformat = V4L2_PIX_FMT_UYVY,
.bpp = 2,
},
{
- .fmtdesc = {
- .index = 4,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "YCbCr 4:2:2 Interleaved YUYV",
- .pixelformat = V4L2_PIX_FMT_YUYV,
- },
+ .pixelformat = V4L2_PIX_FMT_YUYV,
.bpp = 2,
},
{
- .fmtdesc = {
- .index = 5,
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .description = "Y/CbCr 4:2:0 - Semi planar",
- .pixelformat = V4L2_PIX_FMT_NV12,
- },
+ .pixelformat = V4L2_PIX_FMT_NV12,
.bpp = 1,
},
};
@@ -183,7 +153,7 @@ static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
int i;
for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
- if (pix_format == vpfe_pix_fmts[i].fmtdesc.pixelformat)
+ if (pix_format == vpfe_pix_fmts[i].pixelformat)
return &vpfe_pix_fmts[i];
}
return NULL;
@@ -782,7 +752,7 @@ static const struct vpfe_pixel_format *
temp = 0;
found = 0;
while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
- if (vpfe_pix_fmt->fmtdesc.pixelformat == pix) {
+ if (vpfe_pix_fmt->pixelformat == pix) {
found = 1;
break;
}
@@ -877,8 +847,6 @@ static int vpfe_querycap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
strscpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
strscpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
@@ -901,7 +869,6 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
const struct vpfe_pixel_format *pix_fmt;
- int temp_index;
u32 pix;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
@@ -912,9 +879,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
/* Fill in the information about format */
pix_fmt = vpfe_lookup_pix_format(pix);
if (pix_fmt) {
- temp_index = fmt->index;
- *fmt = pix_fmt->fmtdesc;
- fmt->index = temp_index;
+		fmt->pixelformat = pix_fmt->pixelformat;
return 0;
}
return -EINVAL;
@@ -1785,6 +1750,7 @@ static int vpfe_probe(struct platform_device *pdev)
vfd->ioctl_ops = &vpfe_ioctl_ops;
vfd->tvnorms = 0;
vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
snprintf(vfd->name, sizeof(vfd->name),
"%s_V%d.%d.%d",
CAPTURE_DRV_NAME,
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index f0f7ef638c56..71f4fe882d13 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -938,17 +938,10 @@ static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
}
/* Fill in the information about format */
- if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) {
- fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- strscpy(fmt->description, "Raw Mode -Bayer Pattern GrRBGb",
- sizeof(fmt->description));
+ if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
- } else {
- fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- strscpy(fmt->description, "YCbCr4:2:2 Semi-Planar",
- sizeof(fmt->description));
+ else
fmt->pixelformat = V4L2_PIX_FMT_NV16;
- }
return 0;
}
@@ -979,7 +972,6 @@ static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
pixfmt->bytesperline = common->fmt.fmt.pix.width * 2;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
}
- pixfmt->priv = 0;
dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d pixelformat=0x%08x, field=%d, size=%d\n", __func__,
pixfmt->width, pixfmt->height,
@@ -1085,8 +1077,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_capture_config *config = vpif_dev->platform_data;
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(vpif_dev));
@@ -1473,6 +1463,7 @@ static int vpif_probe_complete(void)
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = q;
vdev->lock = &common->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
video_set_drvdata(&ch->video_dev, ch);
err = video_register_device(vdev,
VFL_TYPE_GRABBER, (j ? 1 : 0));
@@ -1511,6 +1502,7 @@ static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev)
{
struct device_node *endpoint = NULL;
+ struct device_node *rem = NULL;
struct vpif_capture_config *pdata;
struct vpif_subdev_info *sdinfo;
struct vpif_capture_chan_config *chan;
@@ -1541,7 +1533,6 @@ vpif_capture_get_pdata(struct platform_device *pdev)
for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
- struct device_node *rem;
unsigned int flags;
int err;
@@ -1554,7 +1545,6 @@ vpif_capture_get_pdata(struct platform_device *pdev)
if (!rem) {
dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
endpoint);
- of_node_put(endpoint);
goto done;
}
@@ -1564,11 +1554,8 @@ vpif_capture_get_pdata(struct platform_device *pdev)
VPIF_CAPTURE_NUM_CHANNELS,
sizeof(*chan->inputs),
GFP_KERNEL);
- if (!chan->inputs) {
- of_node_put(rem);
- of_node_put(endpoint);
+ if (!chan->inputs)
goto err_cleanup;
- }
chan->input_count++;
chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA;
@@ -1577,7 +1564,6 @@ vpif_capture_get_pdata(struct platform_device *pdev)
err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
&bus_cfg);
- of_node_put(endpoint);
if (err) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
of_node_put(rem);
@@ -1601,13 +1587,14 @@ vpif_capture_get_pdata(struct platform_device *pdev)
pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
&vpif_obj.notifier, of_fwnode_handle(rem),
sizeof(struct v4l2_async_subdev));
- if (IS_ERR(pdata->asd[i])) {
- of_node_put(rem);
+ if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
- }
+
+ of_node_put(rem);
}
done:
+ of_node_put(endpoint);
pdata->asd_sizes[0] = i;
pdata->subdev_count = i;
pdata->card_name = "DA850/OMAP-L138 Video Capture";
@@ -1615,6 +1602,8 @@ done:
return pdata;
err_cleanup:
+ of_node_put(rem);
+ of_node_put(endpoint);
v4l2_async_notifier_cleanup(&vpif_obj.notifier);
return NULL;
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index a69897c68a50..abbdbac08e6f 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -584,8 +584,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_display_config *config = vpif_dev->platform_data;
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(vpif_dev));
@@ -601,11 +599,7 @@ static int vpif_enum_fmt_vid_out(struct file *file, void *priv,
return -EINVAL;
/* Fill in the information about format */
- fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- strscpy(fmt->description, "YCbCr4:2:2 YC Planar",
- sizeof(fmt->description));
fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
- fmt->flags = 0;
return 0;
}
@@ -1218,6 +1212,7 @@ static int vpif_probe_complete(void)
vdev->vfl_dir = VFL_DIR_TX;
vdev->queue = q;
vdev->lock = &common->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
video_set_drvdata(&ch->video_dev, ch);
err = video_register_device(vdev, VFL_TYPE_GRABBER,
(j ? 3 : 2));
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 854869f0024e..f6650b45bc3d 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -27,21 +27,18 @@
static const struct gsc_fmt gsc_formats[] = {
{
- .name = "RGB565",
.pixelformat = V4L2_PIX_FMT_RGB565X,
.depth = { 16 },
.color = GSC_RGB,
.num_planes = 1,
.num_comp = 1,
}, {
- .name = "BGRX-8-8-8-8, 32 bpp",
.pixelformat = V4L2_PIX_FMT_BGR32,
.depth = { 32 },
.color = GSC_RGB,
.num_planes = 1,
.num_comp = 1,
}, {
- .name = "YUV 4:2:2 packed, YCbYCr",
.pixelformat = V4L2_PIX_FMT_YUYV,
.depth = { 16 },
.color = GSC_YUV422,
@@ -51,7 +48,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
- .name = "YUV 4:2:2 packed, CbYCrY",
.pixelformat = V4L2_PIX_FMT_UYVY,
.depth = { 16 },
.color = GSC_YUV422,
@@ -61,7 +57,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
}, {
- .name = "YUV 4:2:2 packed, CrYCbY",
.pixelformat = V4L2_PIX_FMT_VYUY,
.depth = { 16 },
.color = GSC_YUV422,
@@ -71,7 +66,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
}, {
- .name = "YUV 4:2:2 packed, YCrYCb",
.pixelformat = V4L2_PIX_FMT_YVYU,
.depth = { 16 },
.color = GSC_YUV422,
@@ -81,7 +75,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
}, {
- .name = "YUV 4:4:4 planar, YCbYCr",
.pixelformat = V4L2_PIX_FMT_YUV32,
.depth = { 32 },
.color = GSC_YUV444,
@@ -90,7 +83,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 1,
}, {
- .name = "YUV 4:2:2 planar, Y/Cb/Cr",
.pixelformat = V4L2_PIX_FMT_YUV422P,
.depth = { 16 },
.color = GSC_YUV422,
@@ -99,7 +91,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 3,
}, {
- .name = "YUV 4:2:2 planar, Y/CbCr",
.pixelformat = V4L2_PIX_FMT_NV16,
.depth = { 16 },
.color = GSC_YUV422,
@@ -108,7 +99,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
- .name = "YUV 4:2:2 non-contig, Y/CbCr",
.pixelformat = V4L2_PIX_FMT_NV16M,
.depth = { 8, 8 },
.color = GSC_YUV422,
@@ -117,7 +107,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 2,
.num_comp = 2,
}, {
- .name = "YUV 4:2:2 planar, Y/CrCb",
.pixelformat = V4L2_PIX_FMT_NV61,
.depth = { 16 },
.color = GSC_YUV422,
@@ -126,7 +115,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
- .name = "YUV 4:2:2 non-contig, Y/CrCb",
.pixelformat = V4L2_PIX_FMT_NV61M,
.depth = { 8, 8 },
.color = GSC_YUV422,
@@ -135,7 +123,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 2,
.num_comp = 2,
}, {
- .name = "YUV 4:2:0 planar, YCbCr",
.pixelformat = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
.color = GSC_YUV420,
@@ -144,7 +131,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 3,
}, {
- .name = "YUV 4:2:0 planar, YCrCb",
.pixelformat = V4L2_PIX_FMT_YVU420,
.depth = { 12 },
.color = GSC_YUV420,
@@ -154,7 +140,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_comp = 3,
}, {
- .name = "YUV 4:2:0 planar, Y/CbCr",
.pixelformat = V4L2_PIX_FMT_NV12,
.depth = { 12 },
.color = GSC_YUV420,
@@ -163,7 +148,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
- .name = "YUV 4:2:0 planar, Y/CrCb",
.pixelformat = V4L2_PIX_FMT_NV21,
.depth = { 12 },
.color = GSC_YUV420,
@@ -172,7 +156,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
- .name = "YUV 4:2:0 non-contig. 2p, Y/CrCb",
.pixelformat = V4L2_PIX_FMT_NV21M,
.depth = { 8, 4 },
.color = GSC_YUV420,
@@ -181,7 +164,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 2,
.num_comp = 2,
}, {
- .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.pixelformat = V4L2_PIX_FMT_NV12M,
.depth = { 8, 4 },
.color = GSC_YUV420,
@@ -190,7 +172,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 2,
.num_comp = 2,
}, {
- .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
.pixelformat = V4L2_PIX_FMT_YUV420M,
.depth = { 8, 2, 2 },
.color = GSC_YUV420,
@@ -199,7 +180,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 3,
.num_comp = 3,
}, {
- .name = "YUV 4:2:0 non-contig. 3p, Y/Cr/Cb",
.pixelformat = V4L2_PIX_FMT_YVU420M,
.depth = { 8, 2, 2 },
.color = GSC_YUV420,
@@ -208,7 +188,6 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 3,
.num_comp = 3,
}, {
- .name = "YUV 4:2:0 n.c. 2p, Y/CbCr tiled",
.pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
.depth = { 8, 4 },
.color = GSC_YUV420,
@@ -335,7 +314,6 @@ int gsc_enum_fmt(struct v4l2_fmtdesc *f)
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->pixelformat;
return 0;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 772183b090c2..8e5a9acb78aa 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -103,7 +103,6 @@ enum gsc_yuv_fmt {
/**
* struct gsc_fmt - the driver's internal color format data
* @mbus_code: Media Bus pixel code, -1 if not applicable
- * @name: format description
* @pixelformat: the fourcc code for this format, 0 if not applicable
* @yorder: Y/C order
* @corder: Chrominance order control
@@ -114,7 +113,6 @@ enum gsc_yuv_fmt {
*/
struct gsc_fmt {
u32 mbus_code;
- char *name;
u32 pixelformat;
u32 color;
u32 yorder;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 66510365dd5d..121d609ff856 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -738,10 +738,7 @@ static int fimc_cap_enum_fmt(struct file *file, void *priv,
f->index);
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
- if (fmt->fourcc == MEDIA_BUS_FMT_JPEG_1X8)
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 7006f54bfee2..cde60fbb23a8 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -36,7 +36,6 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
- .name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = FIMC_FMT_RGB565,
@@ -44,7 +43,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = { 32 },
.color = FIMC_FMT_RGB666,
@@ -52,7 +50,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "BGRA8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_BGR32,
.depth = { 32 },
.color = FIMC_FMT_RGB888,
@@ -60,7 +57,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
}, {
- .name = "ARGB1555",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = { 16 },
.color = FIMC_FMT_RGB555,
@@ -68,7 +64,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
}, {
- .name = "ARGB4444",
.fourcc = V4L2_PIX_FMT_RGB444,
.depth = { 16 },
.color = FIMC_FMT_RGB444,
@@ -76,11 +71,9 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
}, {
- .name = "YUV 4:4:4",
.mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
.flags = FMT_FLAGS_WRITEBACK,
}, {
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = { 16 },
.color = FIMC_FMT_YCBYCR422,
@@ -89,7 +82,6 @@ static struct fimc_fmt fimc_formats[] = {
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = { 16 },
.color = FIMC_FMT_CBYCRY422,
@@ -98,7 +90,6 @@ static struct fimc_fmt fimc_formats[] = {
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = { 16 },
.color = FIMC_FMT_CRYCBY422,
@@ -107,7 +98,6 @@ static struct fimc_fmt fimc_formats[] = {
.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = { 16 },
.color = FIMC_FMT_YCRYCB422,
@@ -116,7 +106,6 @@ static struct fimc_fmt fimc_formats[] = {
.mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
.flags = FMT_FLAGS_M2M | FMT_FLAGS_CAM,
}, {
- .name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = { 16 },
.color = FIMC_FMT_YCBYCR422,
@@ -124,7 +113,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = { 16 },
.color = FIMC_FMT_YCBYCR422,
@@ -132,7 +120,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = { 16 },
.color = FIMC_FMT_YCRYCB422,
@@ -140,7 +127,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 planar, YCbCr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
.color = FIMC_FMT_YCBCR420,
@@ -148,7 +134,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = { 12 },
.color = FIMC_FMT_YCBCR420,
@@ -156,7 +141,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -164,7 +148,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 2, 2 },
@@ -172,7 +155,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contig. 2p, tiled",
.fourcc = V4L2_PIX_FMT_NV12MT,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -180,7 +162,6 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "JPEG encoded data",
.fourcc = V4L2_PIX_FMT_JPEG,
.color = FIMC_FMT_JPEG,
.depth = { 8 },
@@ -189,7 +170,6 @@ static struct fimc_fmt fimc_formats[] = {
.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
.flags = FMT_FLAGS_CAM | FMT_FLAGS_COMPRESSED,
}, {
- .name = "S5C73MX interleaved UYVY/JPEG",
.fourcc = V4L2_PIX_FMT_S5C_UYVY_JPG,
.color = FIMC_FMT_YUYV_JPEG,
.depth = { 8 },
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index e043d55133a3..64148b7e0d98 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -341,7 +341,6 @@ static int fimc_is_alloc_cpu_memory(struct fimc_is *is)
return -ENOMEM;
is->memory.size = FIMC_IS_CPU_MEM_SIZE;
- memset(is->memory.vaddr, 0, is->memory.size);
dev_info(dev, "FIMC-IS CPU memory base: %#x\n", (u32)is->memory.paddr);
@@ -806,6 +805,7 @@ static int fimc_is_probe(struct platform_device *pdev)
return -ENODEV;
is->pmu_regs = of_iomap(node, 0);
+ of_node_put(node);
if (!is->pmu_regs)
return -ENOMEM;
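
Note (illustrative, not part of the patch): of_iomap() maps the node's registers but does not drop the reference taken when the node was looked up, hence the added of_node_put() right after the mapping. A minimal sketch of the pattern, with a hypothetical child-node name and helper:

        #include <linux/of.h>
        #include <linux/of_address.h>

        /* Sketch only: map a child node's registers and balance the OF refcount. */
        static void __iomem *map_child_regs(struct device *dev)
        {
                struct device_node *node;
                void __iomem *regs;

                node = of_get_child_by_name(dev->of_node, "pmu");       /* hypothetical node name */
                if (!node)
                        return NULL;
                regs = of_iomap(node, 0);
                of_node_put(node);      /* the mapping stays valid; only the node reference is dropped */
                return regs;
        }
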
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index a75f932a289a..378cc302e1f8 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -362,7 +362,6 @@ static int isp_video_enum_fmt(struct file *file, void *priv,
if (WARN_ON(fmt == NULL))
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 907b83e6649d..cde0d254ec1c 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -33,21 +33,18 @@ module_param_named(debug_isp, fimc_isp_debug, int, S_IRUGO | S_IWUSR);
static const struct fimc_fmt fimc_isp_formats[FIMC_ISP_NUM_FORMATS] = {
{
- .name = "RAW8 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG8,
.depth = { 8 },
.color = FIMC_FMT_RAW8,
.memplanes = 1,
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
}, {
- .name = "RAW10 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG10,
.depth = { 10 },
.color = FIMC_FMT_RAW10,
.memplanes = 1,
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
}, {
- .name = "RAW12 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG12,
.depth = { 12 },
.color = FIMC_FMT_RAW12,
diff --git a/drivers/media/platform/exynos4-is/fimc-lite-reg.h b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
index 48f2cf1148b8..c5656e902750 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite-reg.h
+++ b/drivers/media/platform/exynos4-is/fimc-lite-reg.h
@@ -6,6 +6,8 @@
#ifndef FIMC_LITE_REG_H_
#define FIMC_LITE_REG_H_
+#include <linux/bitops.h>
+
#include "fimc-lite.h"
/* Camera Source size */
@@ -27,27 +29,27 @@
/* User defined formats. x = 0...15 */
#define FLITE_REG_CIGCTRL_USER(x) ((0x30 + x - 1) << 24)
#define FLITE_REG_CIGCTRL_FMT_MASK (0x3f << 24)
-#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE (1 << 21)
-#define FLITE_REG_CIGCTRL_ODMA_DISABLE (1 << 20)
-#define FLITE_REG_CIGCTRL_SWRST_REQ (1 << 19)
-#define FLITE_REG_CIGCTRL_SWRST_RDY (1 << 18)
-#define FLITE_REG_CIGCTRL_SWRST (1 << 17)
-#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR (1 << 15)
-#define FLITE_REG_CIGCTRL_INVPOLPCLK (1 << 14)
-#define FLITE_REG_CIGCTRL_INVPOLVSYNC (1 << 13)
-#define FLITE_REG_CIGCTRL_INVPOLHREF (1 << 12)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE BIT(21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE BIT(20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ BIT(19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY BIT(18)
+#define FLITE_REG_CIGCTRL_SWRST BIT(17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR BIT(15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK BIT(14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC BIT(13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF BIT(12)
/* Interrupts mask bits (1 disables an interrupt) */
-#define FLITE_REG_CIGCTRL_IRQ_LASTEN (1 << 8)
-#define FLITE_REG_CIGCTRL_IRQ_ENDEN (1 << 7)
-#define FLITE_REG_CIGCTRL_IRQ_STARTEN (1 << 6)
-#define FLITE_REG_CIGCTRL_IRQ_OVFEN (1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN BIT(8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN BIT(7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN BIT(6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN BIT(5)
#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK (0xf << 5)
-#define FLITE_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI BIT(3)
/* Image Capture Enable */
#define FLITE_REG_CIIMGCPT 0x08
-#define FLITE_REG_CIIMGCPT_IMGCPTEN (1 << 31)
-#define FLITE_REG_CIIMGCPT_CPT_FREN (1 << 25)
+#define FLITE_REG_CIIMGCPT_IMGCPTEN BIT(31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN BIT(25)
#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT (1 << 18)
#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN (0 << 18)
@@ -56,10 +58,10 @@
/* Camera Window Offset */
#define FLITE_REG_CIWDOFST 0x10
-#define FLITE_REG_CIWDOFST_WINOFSEN (1 << 31)
-#define FLITE_REG_CIWDOFST_CLROVIY (1 << 31)
-#define FLITE_REG_CIWDOFST_CLROVFICB (1 << 15)
-#define FLITE_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FLITE_REG_CIWDOFST_WINOFSEN BIT(31)
+#define FLITE_REG_CIWDOFST_CLROVIY BIT(31)
+#define FLITE_REG_CIWDOFST_CLROVFICB BIT(15)
+#define FLITE_REG_CIWDOFST_CLROVFICR BIT(14)
#define FLITE_REG_CIWDOFST_OFST_MASK ((0x1fff << 16) | 0x1fff)
/* Camera Window Offset2 */
@@ -67,8 +69,8 @@
/* Camera Output DMA Format */
#define FLITE_REG_CIODMAFMT 0x18
-#define FLITE_REG_CIODMAFMT_RAW_CON (1 << 15)
-#define FLITE_REG_CIODMAFMT_PACK12 (1 << 14)
+#define FLITE_REG_CIODMAFMT_RAW_CON BIT(15)
+#define FLITE_REG_CIODMAFMT_PACK12 BIT(14)
#define FLITE_REG_CIODMAFMT_YCBYCR (0 << 4)
#define FLITE_REG_CIODMAFMT_YCRYCB (1 << 4)
#define FLITE_REG_CIODMAFMT_CBYCRY (2 << 4)
@@ -88,34 +90,34 @@
/* Camera Status */
#define FLITE_REG_CISTATUS 0x40
-#define FLITE_REG_CISTATUS_MIPI_VVALID (1 << 22)
-#define FLITE_REG_CISTATUS_MIPI_HVALID (1 << 21)
-#define FLITE_REG_CISTATUS_MIPI_DVALID (1 << 20)
-#define FLITE_REG_CISTATUS_ITU_VSYNC (1 << 14)
-#define FLITE_REG_CISTATUS_ITU_HREFF (1 << 13)
-#define FLITE_REG_CISTATUS_OVFIY (1 << 10)
-#define FLITE_REG_CISTATUS_OVFICB (1 << 9)
-#define FLITE_REG_CISTATUS_OVFICR (1 << 8)
-#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW (1 << 7)
-#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND (1 << 6)
-#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART (1 << 5)
-#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND (1 << 4)
-#define FLITE_REG_CISTATUS_IRQ_CAM (1 << 0)
+#define FLITE_REG_CISTATUS_MIPI_VVALID BIT(22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID BIT(21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID BIT(20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC BIT(14)
+#define FLITE_REG_CISTATUS_ITU_HREFF BIT(13)
+#define FLITE_REG_CISTATUS_OVFIY BIT(10)
+#define FLITE_REG_CISTATUS_OVFICB BIT(9)
+#define FLITE_REG_CISTATUS_OVFICR BIT(8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW BIT(7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND BIT(6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART BIT(5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND BIT(4)
+#define FLITE_REG_CISTATUS_IRQ_CAM BIT(0)
#define FLITE_REG_CISTATUS_IRQ_MASK (0xf << 4)
/* Camera Status2 */
#define FLITE_REG_CISTATUS2 0x44
-#define FLITE_REG_CISTATUS2_LASTCAPEND (1 << 1)
-#define FLITE_REG_CISTATUS2_FRMEND (1 << 0)
+#define FLITE_REG_CISTATUS2_LASTCAPEND BIT(1)
+#define FLITE_REG_CISTATUS2_FRMEND BIT(0)
/* Qos Threshold */
#define FLITE_REG_CITHOLD 0xf0
-#define FLITE_REG_CITHOLD_W_QOS_EN (1 << 30)
+#define FLITE_REG_CITHOLD_W_QOS_EN BIT(30)
/* Camera General Purpose */
#define FLITE_REG_CIGENERAL 0xfc
/* b0: 1 - camera B, 0 - camera A */
-#define FLITE_REG_CIGENERAL_CAM_B (1 << 0)
+#define FLITE_REG_CIGENERAL_CAM_B BIT(0)
#define FLITE_REG_CIFCNTSEQ 0x100
#define FLITE_REG_CIOSAN(x) (0x200 + (4 * (x)))
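
For reference (not part of the patch): BIT() expands to an unsigned-long shift, which is why the header now includes <linux/bitops.h> and why BIT(31) is safe where a plain (1 << 31) would shift a signed int into its sign bit. Roughly, as defined in the kernel headers, with a couple of illustrative masks (EXAMPLE_* names are placeholders):

        #define BIT(nr)                 (1UL << (nr))

        /* The two spellings below produce the same mask value, but the BIT()
         * form is unsigned and stays readable when several bits are OR'ed:
         */
        #define EXAMPLE_SWRST           BIT(17)                         /* 0x00020000 */
        #define EXAMPLE_IRQ_MASK        (BIT(8) | BIT(7) | BIT(6) | BIT(5))
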
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index c1f0aee02e5e..e87c6a09205b 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -39,7 +39,6 @@ module_param(debug, int, 0644);
static const struct fimc_fmt fimc_lite_formats[] = {
{
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.colorspace = V4L2_COLORSPACE_JPEG,
.depth = { 16 },
@@ -48,7 +47,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
- .name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.colorspace = V4L2_COLORSPACE_JPEG,
.depth = { 16 },
@@ -57,7 +55,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
- .name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.colorspace = V4L2_COLORSPACE_JPEG,
.depth = { 16 },
@@ -66,7 +63,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.colorspace = V4L2_COLORSPACE_JPEG,
.depth = { 16 },
@@ -75,7 +71,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
.flags = FMT_FLAGS_YUV,
}, {
- .name = "RAW8 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG8,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = { 8 },
@@ -84,7 +79,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.flags = FMT_FLAGS_RAW_BAYER,
}, {
- .name = "RAW10 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG10,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = { 16 },
@@ -93,7 +87,6 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.flags = FMT_FLAGS_RAW_BAYER,
}, {
- .name = "RAW12 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG12,
.colorspace = V4L2_COLORSPACE_SRGB,
.depth = { 16 },
@@ -667,7 +660,6 @@ static int fimc_lite_enum_fmt(struct file *file, void *priv,
return -EINVAL;
fmt = &fimc_lite_formats[f->index];
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 62e876fc3555..c70c2cbe3eb1 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -247,7 +247,6 @@ static int fimc_m2m_enum_fmt(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.h b/drivers/media/platform/exynos4-is/fimc-reg.h
index 03ba6c2bc84b..b81826d04936 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.h
+++ b/drivers/media/platform/exynos4-is/fimc-reg.h
@@ -8,12 +8,14 @@
#ifndef FIMC_REG_H_
#define FIMC_REG_H_
+#include <linux/bitops.h>
+
#include "fimc-core.h"
/* Input source format */
#define FIMC_REG_CISRCFMT 0x00
-#define FIMC_REG_CISRCFMT_ITU601_8BIT (1 << 31)
-#define FIMC_REG_CISRCFMT_ITU601_16BIT (1 << 29)
+#define FIMC_REG_CISRCFMT_ITU601_8BIT BIT(31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT BIT(29)
#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR (0 << 14)
#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB (1 << 14)
#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY (2 << 14)
@@ -21,45 +23,45 @@
/* Window offset */
#define FIMC_REG_CIWDOFST 0x04
-#define FIMC_REG_CIWDOFST_OFF_EN (1 << 31)
-#define FIMC_REG_CIWDOFST_CLROVFIY (1 << 30)
-#define FIMC_REG_CIWDOFST_CLROVRLB (1 << 29)
+#define FIMC_REG_CIWDOFST_OFF_EN BIT(31)
+#define FIMC_REG_CIWDOFST_CLROVFIY BIT(30)
+#define FIMC_REG_CIWDOFST_CLROVRLB BIT(29)
#define FIMC_REG_CIWDOFST_HOROFF_MASK (0x7ff << 16)
-#define FIMC_REG_CIWDOFST_CLROVFICB (1 << 15)
-#define FIMC_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FIMC_REG_CIWDOFST_CLROVFICB BIT(15)
+#define FIMC_REG_CIWDOFST_CLROVFICR BIT(14)
#define FIMC_REG_CIWDOFST_VEROFF_MASK (0xfff << 0)
/* Global control */
#define FIMC_REG_CIGCTRL 0x08
-#define FIMC_REG_CIGCTRL_SWRST (1 << 31)
-#define FIMC_REG_CIGCTRL_CAMRST_A (1 << 30)
-#define FIMC_REG_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define FIMC_REG_CIGCTRL_SWRST BIT(31)
+#define FIMC_REG_CIGCTRL_CAMRST_A BIT(30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A BIT(29)
#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL (0 << 27)
#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC (3 << 27)
#define FIMC_REG_CIGCTRL_TESTPAT_MASK (3 << 27)
#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT 27
-#define FIMC_REG_CIGCTRL_INVPOLPCLK (1 << 26)
-#define FIMC_REG_CIGCTRL_INVPOLVSYNC (1 << 25)
-#define FIMC_REG_CIGCTRL_INVPOLHREF (1 << 24)
-#define FIMC_REG_CIGCTRL_IRQ_OVFEN (1 << 22)
-#define FIMC_REG_CIGCTRL_HREF_MASK (1 << 21)
-#define FIMC_REG_CIGCTRL_IRQ_LEVEL (1 << 20)
-#define FIMC_REG_CIGCTRL_IRQ_CLR (1 << 19)
-#define FIMC_REG_CIGCTRL_IRQ_ENABLE (1 << 16)
-#define FIMC_REG_CIGCTRL_SHDW_DISABLE (1 << 12)
+#define FIMC_REG_CIGCTRL_INVPOLPCLK BIT(26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC BIT(25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF BIT(24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN BIT(22)
+#define FIMC_REG_CIGCTRL_HREF_MASK BIT(21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL BIT(20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR BIT(19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE BIT(16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE BIT(12)
/* 0 - selects Writeback A (LCD), 1 - selects Writeback B (LCD/ISP) */
-#define FIMC_REG_CIGCTRL_SELWB_A (1 << 10)
-#define FIMC_REG_CIGCTRL_CAM_JPEG (1 << 8)
-#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A (1 << 7)
-#define FIMC_REG_CIGCTRL_CAMIF_SELWB (1 << 6)
+#define FIMC_REG_CIGCTRL_SELWB_A BIT(10)
+#define FIMC_REG_CIGCTRL_CAM_JPEG BIT(8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A BIT(7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB BIT(6)
/* 0 - ITU601; 1 - ITU709 */
-#define FIMC_REG_CIGCTRL_CSC_ITU601_709 (1 << 5)
-#define FIMC_REG_CIGCTRL_INVPOLHSYNC (1 << 4)
-#define FIMC_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
-#define FIMC_REG_CIGCTRL_INVPOLFIELD (1 << 1)
-#define FIMC_REG_CIGCTRL_INTERLACE (1 << 0)
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709 BIT(5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC BIT(4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI BIT(3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD BIT(1)
+#define FIMC_REG_CIGCTRL_INTERLACE BIT(0)
/* Window offset 2 */
#define FIMC_REG_CIWDOFST2 0x14
@@ -73,7 +75,7 @@
/* Target image format */
#define FIMC_REG_CITRGFMT 0x48
-#define FIMC_REG_CITRGFMT_INROT90 (1 << 31)
+#define FIMC_REG_CITRGFMT_INROT90 BIT(31)
#define FIMC_REG_CITRGFMT_YCBCR420 (0 << 29)
#define FIMC_REG_CITRGFMT_YCBCR422 (1 << 29)
#define FIMC_REG_CITRGFMT_YCBCR422_1P (2 << 29)
@@ -86,7 +88,7 @@
#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
#define FIMC_REG_CITRGFMT_FLIP_180 (3 << 14)
#define FIMC_REG_CITRGFMT_FLIP_MASK (3 << 14)
-#define FIMC_REG_CITRGFMT_OUTROT90 (1 << 13)
+#define FIMC_REG_CITRGFMT_OUTROT90 BIT(13)
#define FIMC_REG_CITRGFMT_VSIZE_MASK (0xfff << 0)
/* Output DMA control */
@@ -96,7 +98,7 @@
#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB (1 << 0)
#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY (2 << 0)
#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY (3 << 0)
-#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE BIT(2)
#define FIMC_REG_CIOCTRL_YCBCR_3PLANE (0 << 3)
#define FIMC_REG_CIOCTRL_YCBCR_2PLANE (1 << 3)
#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
@@ -116,14 +118,14 @@
/* Main scaler control */
#define FIMC_REG_CISCCTRL 0x58
-#define FIMC_REG_CISCCTRL_SCALERBYPASS (1 << 31)
-#define FIMC_REG_CISCCTRL_SCALEUP_H (1 << 30)
-#define FIMC_REG_CISCCTRL_SCALEUP_V (1 << 29)
-#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE (1 << 28)
-#define FIMC_REG_CISCCTRL_CSCY2R_WIDE (1 << 27)
-#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
-#define FIMC_REG_CISCCTRL_INTERLACE (1 << 25)
-#define FIMC_REG_CISCCTRL_SCALERSTART (1 << 15)
+#define FIMC_REG_CISCCTRL_SCALERBYPASS BIT(31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H BIT(30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V BIT(29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE BIT(28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE BIT(27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO BIT(26)
+#define FIMC_REG_CISCCTRL_INTERLACE BIT(25)
+#define FIMC_REG_CISCCTRL_SCALERSTART BIT(15)
#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
@@ -132,8 +134,8 @@
#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
-#define FIMC_REG_CISCCTRL_RGB_EXT (1 << 10)
-#define FIMC_REG_CISCCTRL_ONE2ONE (1 << 9)
+#define FIMC_REG_CISCCTRL_RGB_EXT BIT(10)
+#define FIMC_REG_CISCCTRL_ONE2ONE BIT(9)
#define FIMC_REG_CISCCTRL_MHRATIO(x) ((x) << 16)
#define FIMC_REG_CISCCTRL_MVRATIO(x) ((x) << 0)
#define FIMC_REG_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
@@ -147,39 +149,39 @@
/* General status */
#define FIMC_REG_CISTATUS 0x64
-#define FIMC_REG_CISTATUS_OVFIY (1 << 31)
-#define FIMC_REG_CISTATUS_OVFICB (1 << 30)
-#define FIMC_REG_CISTATUS_OVFICR (1 << 29)
-#define FIMC_REG_CISTATUS_VSYNC (1 << 28)
+#define FIMC_REG_CISTATUS_OVFIY BIT(31)
+#define FIMC_REG_CISTATUS_OVFICB BIT(30)
+#define FIMC_REG_CISTATUS_OVFICR BIT(29)
+#define FIMC_REG_CISTATUS_VSYNC BIT(28)
#define FIMC_REG_CISTATUS_FRAMECNT_MASK (3 << 26)
#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT 26
-#define FIMC_REG_CISTATUS_WINOFF_EN (1 << 25)
-#define FIMC_REG_CISTATUS_IMGCPT_EN (1 << 22)
-#define FIMC_REG_CISTATUS_IMGCPT_SCEN (1 << 21)
-#define FIMC_REG_CISTATUS_VSYNC_A (1 << 20)
-#define FIMC_REG_CISTATUS_VSYNC_B (1 << 19)
-#define FIMC_REG_CISTATUS_OVRLB (1 << 18)
-#define FIMC_REG_CISTATUS_FRAME_END (1 << 17)
-#define FIMC_REG_CISTATUS_LASTCAPT_END (1 << 16)
-#define FIMC_REG_CISTATUS_VVALID_A (1 << 15)
-#define FIMC_REG_CISTATUS_VVALID_B (1 << 14)
+#define FIMC_REG_CISTATUS_WINOFF_EN BIT(25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN BIT(22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN BIT(21)
+#define FIMC_REG_CISTATUS_VSYNC_A BIT(20)
+#define FIMC_REG_CISTATUS_VSYNC_B BIT(19)
+#define FIMC_REG_CISTATUS_OVRLB BIT(18)
+#define FIMC_REG_CISTATUS_FRAME_END BIT(17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END BIT(16)
+#define FIMC_REG_CISTATUS_VVALID_A BIT(15)
+#define FIMC_REG_CISTATUS_VVALID_B BIT(14)
/* Indexes to the last and the currently processed buffer. */
#define FIMC_REG_CISTATUS2 0x68
/* Image capture control */
#define FIMC_REG_CIIMGCPT 0xc0
-#define FIMC_REG_CIIMGCPT_IMGCPTEN (1 << 31)
-#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC (1 << 30)
-#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
-#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN BIT(31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC BIT(30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE BIT(25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT BIT(18)
/* Frame capture sequence */
#define FIMC_REG_CICPTSEQ 0xc4
/* Image effect */
#define FIMC_REG_CIIMGEFF 0xd0
-#define FIMC_REG_CIIMGEFF_IE_ENABLE (1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_ENABLE BIT(30)
#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE (0 << 29)
#define FIMC_REG_CIIMGEFF_IE_SC_AFTER (1 << 29)
#define FIMC_REG_CIIMGEFF_FIN_BYPASS (0 << 26)
@@ -198,8 +200,8 @@
/* Real input DMA image size */
#define FIMC_REG_CIREAL_ISIZE 0xf8
-#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
-#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN BIT(31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS BIT(30)
/* Input DMA control */
#define FIMC_REG_MSCTRL 0xfc
@@ -215,7 +217,7 @@
#define FIMC_REG_MSCTRL_FLIP_X_MIRROR (1 << 13)
#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR (2 << 13)
#define FIMC_REG_MSCTRL_FLIP_180 (3 << 13)
-#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL (1 << 12)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL BIT(12)
#define FIMC_REG_MSCTRL_ORDER422_SHIFT 4
#define FIMC_REG_MSCTRL_ORDER422_CRYCBY (0 << 4)
#define FIMC_REG_MSCTRL_ORDER422_YCRYCB (1 << 4)
@@ -223,14 +225,14 @@
#define FIMC_REG_MSCTRL_ORDER422_YCBYCR (3 << 4)
#define FIMC_REG_MSCTRL_ORDER422_MASK (3 << 4)
#define FIMC_REG_MSCTRL_INPUT_EXTCAM (0 << 3)
-#define FIMC_REG_MSCTRL_INPUT_MEMORY (1 << 3)
-#define FIMC_REG_MSCTRL_INPUT_MASK (1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY BIT(3)
+#define FIMC_REG_MSCTRL_INPUT_MASK BIT(3)
#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
#define FIMC_REG_MSCTRL_INFORMAT_RGB (3 << 1)
#define FIMC_REG_MSCTRL_INFORMAT_MASK (3 << 1)
-#define FIMC_REG_MSCTRL_ENVID (1 << 0)
+#define FIMC_REG_MSCTRL_ENVID BIT(0)
#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
/* Output DMA Y/Cb/Cr offset */
@@ -277,10 +279,10 @@
/* SYSREG ISP Writeback register address offsets */
#define SYSREG_ISPBLK 0x020c
-#define SYSREG_ISPBLK_FIFORST_CAM_BLK (1 << 7)
+#define SYSREG_ISPBLK_FIFORST_CAM_BLK BIT(7)
#define SYSREG_CAMBLK 0x0218
-#define SYSREG_CAMBLK_FIFORST_ISP (1 << 15)
+#define SYSREG_CAMBLK_FIFORST_ISP BIT(15)
#define SYSREG_CAMBLK_ISPWB_FULL_EN (7 << 20)
/*
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index d53427a8db11..a838189d4490 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -501,6 +501,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
continue;
ret = fimc_md_parse_port_node(fmd, port, index);
+ of_node_put(port);
if (ret < 0) {
of_node_put(node);
goto cleanup;
@@ -542,6 +543,7 @@ static int __of_get_csis_id(struct device_node *np)
if (!np)
return -EINVAL;
of_property_read_u32(np, "reg", &reg);
+ of_node_put(np);
return reg - FIMC_INPUT_MIPI_CSI2_0;
}
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 3e9ac6066cf6..540151bbf58f 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* CSIS global control */
#define S5PCSIS_CTRL 0x00
#define S5PCSIS_CTRL_DPDN_DEFAULT (0 << 31)
-#define S5PCSIS_CTRL_DPDN_SWAP (1 << 31)
+#define S5PCSIS_CTRL_DPDN_SWAP (1UL << 31)
#define S5PCSIS_CTRL_ALIGN_32BIT (1 << 20)
#define S5PCSIS_CTRL_UPDATE_SHADOW (1 << 16)
#define S5PCSIS_CTRL_WCLK_EXTCLK (1 << 8)
@@ -65,7 +65,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* Interrupt mask */
#define S5PCSIS_INTMSK 0x10
-#define S5PCSIS_INTMSK_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTMSK_EVEN_BEFORE (1UL << 31)
#define S5PCSIS_INTMSK_EVEN_AFTER (1 << 30)
#define S5PCSIS_INTMSK_ODD_BEFORE (1 << 29)
#define S5PCSIS_INTMSK_ODD_AFTER (1 << 28)
@@ -83,7 +83,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* Interrupt source */
#define S5PCSIS_INTSRC 0x14
-#define S5PCSIS_INTSRC_EVEN_BEFORE (1 << 31)
+#define S5PCSIS_INTSRC_EVEN_BEFORE (1UL << 31)
#define S5PCSIS_INTSRC_EVEN_AFTER (1 << 30)
#define S5PCSIS_INTSRC_EVEN (0x3 << 30)
#define S5PCSIS_INTSRC_ODD_BEFORE (1 << 29)
@@ -803,10 +803,8 @@ static int s5pcsis_probe(struct platform_device *pdev)
return PTR_ERR(state->regs);
state->irq = platform_get_irq(pdev, 0);
- if (state->irq < 0) {
- dev_err(dev, "Failed to get irq\n");
+ if (state->irq < 0)
return state->irq;
- }
for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
state->supplies[i].supply = csis_supply_name[i];
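
Note (illustrative, not part of the patch): shifting the signed constant 1 left by 31 is undefined behaviour in C, so bit-31 register masks are respelled with an unsigned constant. A tiny sketch of the difference, using a hypothetical register accessor:

        #include <linux/io.h>

        #define CTRL_BIT31_BAD          (1 << 31)       /* int shifted into the sign bit: undefined */
        #define CTRL_BIT31_OK           (1UL << 31)     /* well-defined unsigned mask, 0x80000000 */

        static void example_set_bit31(void __iomem *reg)
        {
                u32 val = readl(reg);

                writel(val | CTRL_BIT31_OK, reg);
        }
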
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 691be788e38b..81a8faedbba6 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -32,7 +32,7 @@
#define VIU_VERSION "0.5.1"
/* Allow building this driver with COMPILE_TEST */
-#ifndef CONFIG_PPC
+#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
#define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
#define in_be32(a) ioread32be((void __iomem *)a)
#endif
@@ -214,7 +214,7 @@ enum status_config {
FIELD_NO = 0x01 << 28, /* Field number */
DITHER_ON = 0x01 << 29, /* Dithering is on */
ROUND_ON = 0x01 << 30, /* Round is on */
- MODE_32BIT = 0x01 << 31, /* Data in RGBa888,
+ MODE_32BIT = 1UL << 31, /* Data in RGBa888,
* 0 in RGB565
*/
};
@@ -563,11 +563,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "viu", sizeof(cap->driver));
strscpy(cap->card, "viu", sizeof(cap->card));
strscpy(cap->bus_info, "platform:viu", sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1380,6 +1375,8 @@ static const struct video_device viu_template = {
.release = video_device_release,
.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_READWRITE,
};
static int viu_of_probe(struct platform_device *op)
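
Note (illustrative, not part of the patch): capability reporting moves from the querycap handler into struct video_device; the V4L2 core copies vdev->device_caps into the ioctl reply and ORs in V4L2_CAP_DEVICE_CAPS itself, so the handler only fills the identity strings. Sketch of the resulting split (driver, card and bus names are placeholders):

        static int querycap_sketch(struct file *file, void *priv,
                                   struct v4l2_capability *cap)
        {
                strscpy(cap->driver, "example", sizeof(cap->driver));
                strscpy(cap->card, "example", sizeof(cap->card));
                strscpy(cap->bus_info, "platform:example", sizeof(cap->bus_info));
                return 0;       /* device_caps/capabilities are filled by the v4l2 core */
        }

        static const struct video_device example_template = {
                /* ... fops, ioctl_ops, release ... */
                .device_caps    = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
                                  V4L2_CAP_READWRITE,
        };
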
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 8e7ef23b9a7e..38d942322302 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1661,10 +1661,8 @@ static int pxp_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, pxp_irq_handler,
IRQF_ONESHOT, dev_name(&pdev->dev), dev);
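
Note (illustrative, not part of the patch): platform_get_irq() already logs a device-prefixed error when it fails, so callers drop their own dev_err() and just propagate the code. The resulting probe fragment looks roughly like this sketch (example_irq_handler and priv are hypothetical):

        static int example_get_irq(struct platform_device *pdev, void *priv)
        {
                int irq, ret;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;     /* platform_get_irq() already logged the failure */

                ret = devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                                       dev_name(&pdev->dev), priv);
                return ret;
        }
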
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index beb7fd7442fb..9ad24c86c5ab 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -37,7 +37,6 @@ module_param(debug, bool, 0644);
v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
struct deinterlace_fmt {
- char *name;
u32 fourcc;
/* Types the format can be used for */
u32 types;
@@ -45,12 +44,10 @@ struct deinterlace_fmt {
static struct deinterlace_fmt formats[] = {
{
- .name = "YUV 4:2:0 Planar",
.fourcc = V4L2_PIX_FMT_YUV420,
.types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
},
{
- .name = "YUYV 4:2:2",
.fourcc = V4L2_PIX_FMT_YUYV,
.types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
},
@@ -135,13 +132,13 @@ struct deinterlace_dev {
};
struct deinterlace_ctx {
+ struct v4l2_fh fh;
struct deinterlace_dev *dev;
/* Abort requested by m2m */
int aborting;
enum v4l2_colorspace colorspace;
dma_cookie_t cookie;
- struct v4l2_m2m_ctx *m2m_ctx;
struct dma_interleaved_template *xt;
};
@@ -153,9 +150,9 @@ static int deinterlace_job_ready(void *priv)
struct deinterlace_ctx *ctx = priv;
struct deinterlace_dev *pcdev = ctx->dev;
- if ((v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0)
- && (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0)
- && (atomic_read(&ctx->dev->busy) == 0)) {
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0 &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0 &&
+ !atomic_read(&ctx->dev->busy)) {
dprintk(pcdev, "Task ready\n");
return 1;
}
@@ -174,7 +171,7 @@ static void deinterlace_job_abort(void *priv)
dprintk(pcdev, "Aborting task\n");
- v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->fh.m2m_ctx);
}
static void dma_callback(void *data)
@@ -185,8 +182,8 @@ static void dma_callback(void *data)
atomic_set(&pcdev->busy, 0);
- src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
@@ -197,7 +194,7 @@ static void dma_callback(void *data)
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
- v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->fh.m2m_ctx);
dprintk(pcdev, "dma transfers completed.\n");
}
@@ -216,8 +213,8 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
dma_addr_t p_in, p_out;
enum dma_ctrl_flags flags;
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
s_width = s_q_data->width;
@@ -436,16 +433,7 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
- strscpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->card));
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
+ strscpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
return 0;
}
@@ -470,7 +458,6 @@ static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
if (i < NUM_FORMATS) {
/* Format found */
fmt = &formats[i];
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
}
@@ -496,7 +483,7 @@ static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
struct deinterlace_q_data *q_data;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -593,7 +580,7 @@ static int vidioc_s_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
struct deinterlace_q_data *q_data;
struct vb2_queue *vq;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -666,36 +653,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct deinterlace_ctx *ctx = priv;
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct deinterlace_ctx *ctx = priv;
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct deinterlace_ctx *ctx = priv;
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct deinterlace_ctx *ctx = priv;
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
@@ -736,15 +693,7 @@ static int vidioc_streamon(struct file *file, void *priv,
return -EINVAL;
}
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct deinterlace_ctx *ctx = priv;
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
}
static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
@@ -760,14 +709,15 @@ static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
-
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
};
@@ -831,7 +781,7 @@ static void deinterlace_buf_queue(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static const struct vb2_ops deinterlace_qops = {
@@ -849,7 +799,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &deinterlace_qops;
@@ -868,7 +818,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &deinterlace_qops;
@@ -897,12 +847,13 @@ static int deinterlace_open(struct file *file)
if (!ctx)
return -ENOMEM;
- file->private_data = ctx;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
ctx->dev = pcdev;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- int ret = PTR_ERR(ctx->m2m_ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ int ret = PTR_ERR(ctx->fh.m2m_ctx);
kfree(ctx);
return ret;
@@ -916,8 +867,10 @@ static int deinterlace_open(struct file *file)
}
ctx->colorspace = V4L2_COLORSPACE_REC709;
+ v4l2_fh_add(&ctx->fh);
- dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
return 0;
}
@@ -929,40 +882,22 @@ static int deinterlace_release(struct file *file)
dprintk(pcdev, "Releasing instance %p\n", ctx);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
kfree(ctx->xt);
kfree(ctx);
return 0;
}
-static __poll_t deinterlace_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct deinterlace_ctx *ctx = file->private_data;
- __poll_t ret;
-
- mutex_lock(&ctx->dev->dev_mutex);
- ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&ctx->dev->dev_mutex);
-
- return ret;
-}
-
-static int deinterlace_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct deinterlace_ctx *ctx = file->private_data;
-
- return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-}
-
static const struct v4l2_file_operations deinterlace_fops = {
.owner = THIS_MODULE,
.open = deinterlace_open,
.release = deinterlace_release,
- .poll = deinterlace_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = deinterlace_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static const struct video_device deinterlace_videodev = {
@@ -972,6 +907,7 @@ static const struct video_device deinterlace_videodev = {
.minor = -1,
.release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
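
Note (illustrative, not part of the patch): the hand-rolled reqbufs/qbuf/poll/mmap wrappers can go because the generic v4l2_m2m_ioctl_*() and v4l2_m2m_fop_*() helpers find the mem2mem context through the struct v4l2_fh stored in file->private_data. The open() path therefore embeds and registers a v4l2_fh, roughly like this sketch (example_* names and queue_init are placeholders for the driver's own):

        struct example_ctx {
                struct v4l2_fh fh;      /* first member, so &ctx->fh in private_data can double as ctx */
                struct example_dev *dev;
        };

        static int example_open(struct file *file)
        {
                struct example_dev *dev = video_drvdata(file);
                struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

                if (!ctx)
                        return -ENOMEM;

                v4l2_fh_init(&ctx->fh, video_devdata(file));
                file->private_data = &ctx->fh;
                ctx->dev = dev;

                ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
                if (IS_ERR(ctx->fh.m2m_ctx)) {
                        int ret = PTR_ERR(ctx->fh.m2m_ctx);

                        v4l2_fh_exit(&ctx->fh);
                        kfree(ctx);
                        return ret;
                }

                v4l2_fh_add(&ctx->fh);
                return 0;
        }
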
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index dc30c48d4671..803baf97f06e 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -98,56 +98,48 @@ MODULE_PARM_DESC(buffer_mode,
container_of(notifier, struct mcam_camera, notifier)
static struct mcam_format_struct {
- __u8 *desc;
__u32 pixelformat;
int bpp; /* Bytes per pixel */
bool planar;
u32 mbus_code;
} mcam_formats[] = {
{
- .desc = "YUYV 4:2:2",
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
- .desc = "YVYU 4:2:2",
.pixelformat = V4L2_PIX_FMT_YVYU,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
- .desc = "YUV 4:2:0 PLANAR",
.pixelformat = V4L2_PIX_FMT_YUV420,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 1,
.planar = true,
},
{
- .desc = "YVU 4:2:0 PLANAR",
.pixelformat = V4L2_PIX_FMT_YVU420,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 1,
.planar = true,
},
{
- .desc = "XRGB 444",
.pixelformat = V4L2_PIX_FMT_XRGB444,
.mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 2,
.planar = false,
},
{
- .desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.bpp = 2,
.planar = false,
},
{
- .desc = "Raw RGB Bayer",
.pixelformat = V4L2_PIX_FMT_SBGGR8,
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.bpp = 1,
@@ -1357,9 +1349,6 @@ static int mcam_vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, "marvell_ccic", sizeof(cap->driver));
strscpy(cap->card, "marvell_ccic", sizeof(cap->card));
strscpy(cap->bus_info, cam->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1369,8 +1358,6 @@ static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
{
if (fmt->index >= N_MCAM_FMTS)
return -EINVAL;
- strscpy(fmt->description, mcam_formats[fmt->index].desc,
- sizeof(fmt->description));
fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
return 0;
}
@@ -1698,6 +1685,8 @@ static const struct video_device mcam_v4l_template = {
.fops = &mcam_v4l_fops,
.ioctl_ops = &mcam_v4l_ioctl_ops,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
/* ---------------------------------------------------------------------- */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 10559492e09e..92b92255dac6 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -372,6 +372,7 @@ static const struct of_device_id mmpcam_of_match[] = {
{ .compatible = "marvell,mmp2-ccic", },
{},
};
+MODULE_DEVICE_TABLE(of, mmpcam_of_match);
static struct platform_driver mmpcam_driver = {
.probe = mmpcam_probe,
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index fb52e5dd044a..3b39e875292e 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -121,6 +121,9 @@
#define CECB_CTRL_TYPE_NEXT 2
#define CECB_CTRL2 0x01
+
+#define CECB_CTRL2_RISE_DEL_MAX GENMASK(4, 0)
+
#define CECB_INTR_MASK 0x02
#define CECB_LADD_LOW 0x05
#define CECB_LADD_HIGH 0x06
@@ -165,6 +168,11 @@
#define CECB_WAKEUPCTRL 0x31
+struct meson_ao_cec_g12a_data {
+ /* Setup the internal CECB_CTRL2 register */
+ bool ctrl2_setup;
+};
+
struct meson_ao_cec_g12a_device {
struct platform_device *pdev;
struct regmap *regmap;
@@ -175,6 +183,7 @@ struct meson_ao_cec_g12a_device {
struct cec_msg rx_msg;
struct clk *oscin;
struct clk *core;
+ const struct meson_ao_cec_g12a_data *data;
};
static const struct regmap_config meson_ao_cec_g12a_regmap_conf = {
@@ -605,6 +614,10 @@ static int meson_ao_cec_g12a_adap_enable(struct cec_adapter *adap, bool enable)
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_RESET, 0);
+ if (ao_cec->data->ctrl2_setup)
+ regmap_write(ao_cec->regmap_cec, CECB_CTRL2,
+ FIELD_PREP(CECB_CTRL2_RISE_DEL_MAX, 2));
+
meson_ao_cec_g12a_irq_setup(ao_cec, true);
return 0;
@@ -632,20 +645,28 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
if (!ao_cec)
return -ENOMEM;
+ ao_cec->data = of_device_get_match_data(&pdev->dev);
+ if (!ao_cec->data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
spin_lock_init(&ao_cec->cec_reg_lock);
ao_cec->pdev = pdev;
- ao_cec->notify = cec_notifier_get(hdmi_dev);
- if (!ao_cec->notify)
- return -ENOMEM;
-
ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_g12a_ops, ao_cec,
"meson_g12a_ao_cec",
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
- if (IS_ERR(ao_cec->adap)) {
- ret = PTR_ERR(ao_cec->adap);
- goto out_probe_notify;
+ if (IS_ERR(ao_cec->adap))
+ return PTR_ERR(ao_cec->adap);
+
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_adapter;
}
ao_cec->adap->owner = THIS_MODULE;
@@ -654,21 +675,21 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
irq = platform_get_irq(pdev, 0);
@@ -678,24 +699,24 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
- goto out_probe_adapter;
+ goto out_probe_notify;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_adapter;
+ goto out_probe_notify;
}
device_reset_optional(&pdev->dev);
@@ -703,27 +724,23 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ao_cec);
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
- if (ret < 0) {
- cec_notifier_put(ao_cec->notify);
+ if (ret < 0)
goto out_probe_core_clk;
- }
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
- cec_register_cec_notifier(ao_cec->adap, ao_cec->notify);
-
return 0;
out_probe_core_clk:
clk_disable_unprepare(ao_cec->core);
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
+
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
-out_probe_notify:
- cec_notifier_put(ao_cec->notify);
-
dev_err(&pdev->dev, "CEC controller registration failed\n");
return ret;
@@ -735,15 +752,30 @@ static int meson_ao_cec_g12a_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_unregister_adapter(ao_cec->adap);
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
- cec_notifier_put(ao_cec->notify);
+ cec_unregister_adapter(ao_cec->adap);
return 0;
}
+static const struct meson_ao_cec_g12a_data ao_cec_g12a_data = {
+ .ctrl2_setup = false,
+};
+
+static const struct meson_ao_cec_g12a_data ao_cec_sm1_data = {
+ .ctrl2_setup = true,
+};
+
static const struct of_device_id meson_ao_cec_g12a_of_match[] = {
- { .compatible = "amlogic,meson-g12a-ao-cec", },
+ {
+ .compatible = "amlogic,meson-g12a-ao-cec",
+ .data = &ao_cec_g12a_data,
+ },
+ {
+ .compatible = "amlogic,meson-sm1-ao-cec",
+ .data = &ao_cec_sm1_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_ao_cec_g12a_of_match);
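
Note (illustrative, not part of the patch): the CEC notifier is now tied to the adapter; cec_notifier_cec_adap_register() is called after cec_allocate_adapter() and replaces the old get/register/put trio, which also reorders the error unwinding. Condensed sketch of the new probe shape, keeping only the notifier-related calls (adap, notify, priv, hdmi_dev and example_cec_ops are placeholders):

        adap = cec_allocate_adapter(&example_cec_ops, priv, "example-cec",
                                    CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
                                    CEC_MAX_LOG_ADDRS);
        if (IS_ERR(adap))
                return PTR_ERR(adap);

        notify = cec_notifier_cec_adap_register(hdmi_dev, NULL, adap);
        if (!notify) {
                ret = -ENOMEM;
                goto out_delete_adapter;
        }

        ret = cec_register_adapter(adap, &pdev->dev);
        if (ret < 0)
                goto out_unregister_notifier;

        return 0;

out_unregister_notifier:
        cec_notifier_cec_adap_unregister(notify);
out_delete_adapter:
        cec_delete_adapter(adap);
        return ret;
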
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index facf9b029e79..64ed549bf012 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -616,20 +616,19 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
spin_lock_init(&ao_cec->cec_reg_lock);
- ao_cec->notify = cec_notifier_get(hdmi_dev);
- if (!ao_cec->notify)
- return -ENOMEM;
-
ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec,
"meson_ao_cec",
- CEC_CAP_LOG_ADDRS |
- CEC_CAP_TRANSMIT |
- CEC_CAP_RC |
- CEC_CAP_PASSTHROUGH,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
1); /* Use 1 for now */
- if (IS_ERR(ao_cec->adap)) {
- ret = PTR_ERR(ao_cec->adap);
- goto out_probe_notify;
+ if (IS_ERR(ao_cec->adap))
+ return PTR_ERR(ao_cec->adap);
+
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_adapter;
}
ao_cec->adap->owner = THIS_MODULE;
@@ -638,7 +637,7 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
irq = platform_get_irq(pdev, 0);
@@ -648,20 +647,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_adapter;
+ goto out_probe_notify;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
@@ -676,28 +675,24 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ao_cec);
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
- if (ret < 0) {
- cec_notifier_put(ao_cec->notify);
+ if (ret < 0)
goto out_probe_clk;
- }
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
ao_cec->base + CEC_GEN_CNTL_REG);
- cec_register_cec_notifier(ao_cec->adap, ao_cec->notify);
-
return 0;
out_probe_clk:
clk_disable_unprepare(ao_cec->core);
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
+
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
-out_probe_notify:
- cec_notifier_put(ao_cec->notify);
-
dev_err(&pdev->dev, "CEC controller registration failed\n");
return ret;
@@ -709,10 +704,9 @@ static int meson_ao_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
cec_unregister_adapter(ao_cec->adap);
- cec_notifier_put(ao_cec->notify);
-
return 0;
}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index fc9faec85edb..c1e29a46ae69 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -110,7 +110,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
mutex_init(&mdp->vpulock);
/* Old dts had the components as child nodes */
- if (of_get_next_child(dev->of_node, NULL)) {
+ node = of_get_next_child(dev->of_node, NULL);
+ if (node) {
+ of_node_put(node);
parent = dev->of_node;
dev_warn(dev, "device tree is out of date\n");
} else {
@@ -145,13 +147,16 @@ static int mtk_mdp_probe(struct platform_device *pdev)
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_comp;
}
mdp->comp[comp_id] = comp;
ret = mtk_mdp_comp_init(dev, node, comp, comp_id);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_comp;
+ }
}
mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 90d1a67db7e5..26a55c3e807e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -29,16 +29,19 @@ static const struct mtk_video_fmt mtk_video_formats[] = {
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_DEC,
.num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_VP8,
.type = MTK_FMT_DEC,
.num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_VP9,
.type = MTK_FMT_DEC,
.num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
{
.fourcc = V4L2_PIX_FMT_MT21C,
@@ -948,6 +951,7 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
fmt = &mtk_video_formats[i];
f->pixelformat = fmt->fourcc;
+ f->flags = fmt->flags;
return 0;
}
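
Note (illustrative, not part of the patch): V4L2_FMT_FLAG_DYN_RESOLUTION is reported through VIDIOC_ENUM_FMT so userspace knows the stateful decoder can detect mid-stream resolution changes on these coded formats; the driver simply copies a per-format flags word into the reply. Sketch of the plumbing, mirroring the hunks above with placeholder names:

        struct example_video_fmt {
                u32 fourcc;
                u32 flags;              /* V4L2_FMT_FLAG_* reported via ENUM_FMT */
        };

        static const struct example_video_fmt example_formats[] = {
                { .fourcc = V4L2_PIX_FMT_H264, .flags = V4L2_FMT_FLAG_DYN_RESOLUTION },
                { .fourcc = V4L2_PIX_FMT_VP9,  .flags = V4L2_FMT_FLAG_DYN_RESOLUTION },
        };

        static int example_enum_fmt(struct v4l2_fmtdesc *f)
        {
                if (f->index >= ARRAY_SIZE(example_formats))
                        return -EINVAL;
                f->pixelformat = example_formats[f->index].fourcc;
                f->flags = example_formats[f->index].flags;
                return 0;
        }
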
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c95de5d08dda..9fd56dee7fd1 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -99,6 +99,7 @@ struct mtk_video_fmt {
u32 fourcc;
enum mtk_fmt_type type;
u32 num_planes;
+ u32 flags;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index c5f8f1fca44c..49aa85a9bb5a 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -29,6 +29,9 @@
#define H264_MAX_FB_NUM 17
#define HDR_PARSING_BUF_SZ 1024
+#define DEC_ERR_RET(ret) ((ret) >> 16)
+#define H264_ERR_NOT_VALID 3
+
/**
* struct h264_fb - h264 decode frame buffer information
* @vdec_fb_va : virtual address of struct vdec_fb
@@ -357,8 +360,11 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
buf = (unsigned char *)bs->va;
buf_sz = bs->size;
nal_start_idx = find_start_code(buf, buf_sz);
- if (nal_start_idx < 0)
+ if (nal_start_idx < 0) {
+ mtk_vcodec_err(inst, "invalid nal start code");
+ err = -EIO;
goto err_free_fb_out;
+ }
nal_start = buf[nal_start_idx];
nal_type = NAL_TYPE(buf[nal_start_idx]);
@@ -382,8 +388,14 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
data[0] = buf_sz;
data[1] = nal_start;
err = vpu_dec_start(vpu, data, 2);
- if (err)
+ if (err) {
+ if (err > 0 && (DEC_ERR_RET(err) == H264_ERR_NOT_VALID)) {
+ mtk_vcodec_err(inst, "- error bitstream - err = %d -",
+ err);
+ err = -EIO;
+ }
goto err_free_fb_out;
+ }
*res_chg = inst->vsi->dec.resolution_changed;
if (*res_chg) {
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 333324c75027..27779b75df54 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -120,7 +120,7 @@ module_param(debug, bool, 0644);
#define PRP_CNTL_RZ_FIFO_LEVEL(x) ((x) << 27)
#define PRP_CNTL_CH2B1EN (1 << 29)
#define PRP_CNTL_CH2B2EN (1 << 30)
-#define PRP_CNTL_CH2FEN (1 << 31)
+#define PRP_CNTL_CH2FEN (1UL << 31)
#define PRP_SIZE_HEIGHT(x) (x)
#define PRP_SIZE_WIDTH(x) ((x) << 16)
@@ -145,7 +145,6 @@ module_param(debug, bool, 0644);
#define PRP_INTR_ST_CH2OVF (1 << 8)
struct emmaprp_fmt {
- char *name;
u32 fourcc;
/* Types the format can be used for */
u32 types;
@@ -153,12 +152,10 @@ struct emmaprp_fmt {
static struct emmaprp_fmt formats[] = {
{
- .name = "YUV 4:2:0 Planar",
.fourcc = V4L2_PIX_FMT_YUV420,
.types = MEM2MEM_CAPTURE,
},
{
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.types = MEM2MEM_OUTPUT,
},
@@ -210,11 +207,11 @@ struct emmaprp_dev {
};
struct emmaprp_ctx {
+ struct v4l2_fh fh;
struct emmaprp_dev *dev;
/* Abort requested by m2m */
int aborting;
struct emmaprp_q_data q_data[2];
- struct v4l2_m2m_ctx *m2m_ctx;
};
static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
@@ -243,7 +240,7 @@ static void emmaprp_job_abort(void *priv)
dprintk(pcdev, "Aborting task\n");
- v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->fh.m2m_ctx);
}
static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev)
@@ -278,8 +275,8 @@ static void emmaprp_device_run(void *priv)
dma_addr_t p_in, p_out;
u32 tmp;
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
s_width = s_q_data->width;
@@ -353,8 +350,8 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
pr_err("PrP bus error occurred, this transfer is probably corrupted\n");
writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
} else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
- src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
dst_vb->flags &=
@@ -371,7 +368,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
}
}
- v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx);
+ v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
@@ -383,8 +380,6 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver));
strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -409,7 +404,6 @@ static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
if (i < NUM_FORMATS) {
/* Format found */
fmt = &formats[i];
- strscpy(f->description, fmt->name, sizeof(f->description) - 1);
f->pixelformat = fmt->fourcc;
return 0;
}
@@ -435,7 +429,7 @@ static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
struct emmaprp_q_data *q_data;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -540,7 +534,7 @@ static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
int ret;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -596,52 +590,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return vidioc_s_fmt(priv, f);
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct emmaprp_ctx *ctx = priv;
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
@@ -655,14 +603,14 @@ static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
-
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
-
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
};
@@ -722,7 +670,7 @@ static void emmaprp_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static const struct vb2_ops emmaprp_qops = {
@@ -740,7 +688,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &emmaprp_qops;
@@ -754,7 +702,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &emmaprp_qops;
@@ -778,7 +726,8 @@ static int emmaprp_open(struct file *file)
if (!ctx)
return -ENOMEM;
- file->private_data = ctx;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
ctx->dev = pcdev;
if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
@@ -786,10 +735,10 @@ static int emmaprp_open(struct file *file)
return -ERESTARTSYS;
}
- ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- int ret = PTR_ERR(ctx->m2m_ctx);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ int ret = PTR_ERR(ctx->fh.m2m_ctx);
mutex_unlock(&pcdev->dev_mutex);
kfree(ctx);
@@ -800,9 +749,10 @@ static int emmaprp_open(struct file *file)
clk_prepare_enable(pcdev->clk_emma_ahb);
ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+ v4l2_fh_add(&ctx->fh);
mutex_unlock(&pcdev->dev_mutex);
- dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+ dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx);
return 0;
}
@@ -817,46 +767,22 @@ static int emmaprp_release(struct file *file)
mutex_lock(&pcdev->dev_mutex);
clk_disable_unprepare(pcdev->clk_emma_ahb);
clk_disable_unprepare(pcdev->clk_emma_ipg);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&pcdev->dev_mutex);
kfree(ctx);
return 0;
}
-static __poll_t emmaprp_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct emmaprp_dev *pcdev = video_drvdata(file);
- struct emmaprp_ctx *ctx = file->private_data;
- __poll_t res;
-
- mutex_lock(&pcdev->dev_mutex);
- res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&pcdev->dev_mutex);
- return res;
-}
-
-static int emmaprp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct emmaprp_dev *pcdev = video_drvdata(file);
- struct emmaprp_ctx *ctx = file->private_data;
- int ret;
-
- if (mutex_lock_interruptible(&pcdev->dev_mutex))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&pcdev->dev_mutex);
- return ret;
-}
-
static const struct v4l2_file_operations emmaprp_fops = {
.owner = THIS_MODULE,
.open = emmaprp_open,
.release = emmaprp_release,
- .poll = emmaprp_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = emmaprp_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static const struct video_device emmaprp_videodev = {
@@ -866,6 +792,7 @@ static const struct video_device emmaprp_videodev = {
.minor = -1,
.release = video_device_release,
.vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
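The conversion above can drop the per-ioctl wrappers because emmaprp_open() now stores the embedded struct v4l2_fh in file->private_data, which is where the stock v4l2_m2m_ioctl_*() and v4l2_m2m_fop_*() helpers look for the m2m context. A minimal sketch of that lookup, assuming only what the patch sets up (the function name is illustrative, not from the driver):

#include <linux/fs.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

/* Roughly what each v4l2_m2m_ioctl_*() wrapper does internally. */
static inline struct v4l2_m2m_ctx *m2m_ctx_from_file(struct file *file)
{
	struct v4l2_fh *fh = file->private_data;	/* set in emmaprp_open() */

	return fh->m2m_ctx;	/* assigned from v4l2_m2m_ctx_init() in open() */
}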
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 1a99dff21ca0..f73b5893220d 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -10,8 +10,7 @@ config VIDEO_OMAP2_VOUT
depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n)
depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST
depends on VIDEO_V4L2
- select VIDEOBUF_GEN
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FRAME_VECTOR
help
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index cb6a9e3946b6..513b99bf963b 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -40,9 +40,9 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <video/omapvrfb.h>
#include <video/omapfb_dss.h>
@@ -63,33 +63,12 @@ enum omap_vout_channels {
OMAP_VIDEO2,
};
-static struct videobuf_queue_ops video_vbq_ops;
/* Variables configurable through module params*/
-static u32 video1_numbuffers = 3;
-static u32 video2_numbuffers = 3;
-static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
-static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
static bool vid1_static_vrfb_alloc;
static bool vid2_static_vrfb_alloc;
static bool debug;
/* Module parameters */
-module_param(video1_numbuffers, uint, S_IRUGO);
-MODULE_PARM_DESC(video1_numbuffers,
- "Number of buffers to be allocated at init time for Video1 device.");
-
-module_param(video2_numbuffers, uint, S_IRUGO);
-MODULE_PARM_DESC(video2_numbuffers,
- "Number of buffers to be allocated at init time for Video2 device.");
-
-module_param(video1_bufsize, uint, S_IRUGO);
-MODULE_PARM_DESC(video1_bufsize,
- "Size of the buffer to be allocated for video1 device");
-
-module_param(video2_bufsize, uint, S_IRUGO);
-MODULE_PARM_DESC(video2_bufsize,
- "Size of the buffer to be allocated for video2 device");
-
module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
MODULE_PARM_DESC(vid1_static_vrfb_alloc,
"Static allocation of the VRFB buffer for video1 device");
@@ -114,14 +93,12 @@ static const struct v4l2_fmtdesc omap_formats[] = {
* Byte 0 Byte 1
* g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
*/
- .description = "RGB565, le",
.pixelformat = V4L2_PIX_FMT_RGB565,
},
{
/* Note: V4L2 defines RGB32 as: RGB-8-8-8-8 we use
* this for RGB24 unpack mode, the last 8 bits are ignored
* */
- .description = "RGB32, le",
.pixelformat = V4L2_PIX_FMT_RGB32,
},
{
@@ -129,15 +106,12 @@ static const struct v4l2_fmtdesc omap_formats[] = {
* this for RGB24 packed mode
*
*/
- .description = "RGB24, le",
.pixelformat = V4L2_PIX_FMT_RGB24,
},
{
- .description = "YUYV (YUV 4:2:2), packed",
.pixelformat = V4L2_PIX_FMT_YUYV,
},
{
- .description = "UYVY, packed",
.pixelformat = V4L2_PIX_FMT_UYVY,
},
};
@@ -164,13 +138,13 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
ifmt = 0;
pix->pixelformat = omap_formats[ifmt].pixelformat;
- pix->field = V4L2_FIELD_ANY;
+ pix->field = V4L2_FIELD_NONE;
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
- pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
bpp = YUYV_BPP;
break;
case V4L2_PIX_FMT_RGB565:
@@ -195,56 +169,6 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
}
/*
- * omap_vout_get_userptr: Convert user space virtual address to physical
- * address.
- */
-static int omap_vout_get_userptr(struct videobuf_buffer *vb, long virtp,
- u32 *physp)
-{
- struct frame_vector *vec;
- int ret;
-
- /* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET) {
- *physp = virt_to_phys((void *)virtp);
- return 0;
- }
-
- vec = frame_vector_create(1);
- if (!vec)
- return -ENOMEM;
-
- ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
- if (ret != 1) {
- frame_vector_destroy(vec);
- return -EINVAL;
- }
- *physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
- vb->priv = vec;
-
- return 0;
-}
-
-/*
- * Free the V4L2 buffers
- */
-void omap_vout_free_buffers(struct omap_vout_device *vout)
-{
- int i, numbuffers;
-
- /* Allocate memory for the buffers */
- numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
- vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
-
- for (i = 0; i < numbuffers; i++) {
- omap_vout_free_buffer(vout->buf_virt_addr[i],
- vout->buffer_size);
- vout->buf_phy_addr[i] = 0;
- vout->buf_virt_addr[i] = 0;
- }
-}
-
-/*
* Convert V4L2 rotation to DSS rotation
* V4L2 understand 0, 90, 180, 270.
* Convert to 0, 1, 2 and 3 respectively for DSS
@@ -537,9 +461,9 @@ static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
if (vout->cur_frm == vout->next_frm)
goto err;
- vout->cur_frm->ts = ts;
- vout->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm->vbuf.vb2_buf.timestamp = ts;
+ vout->cur_frm->vbuf.sequence = vout->sequence++;
+ vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
vout->cur_frm = vout->next_frm;
} else {
if (list_empty(&vout->dma_queue) ||
@@ -562,9 +486,6 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
struct omap_dss_device *cur_display;
struct omap_vout_device *vout = (struct omap_vout_device *)arg;
- if (!vout->streaming)
- return;
-
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
@@ -608,9 +529,9 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
}
if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
- vout->cur_frm->ts = ts;
- vout->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm->vbuf.vb2_buf.timestamp = ts;
+ vout->cur_frm->vbuf.sequence = vout->sequence++;
+ vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
vout->cur_frm = vout->next_frm;
}
@@ -619,12 +540,10 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
goto vout_isr_err;
vout->next_frm = list_entry(vout->dma_queue.next,
- struct videobuf_buffer, queue);
+ struct omap_vout_buffer, queue);
list_del(&vout->next_frm->queue);
- vout->next_frm->state = VIDEOBUF_ACTIVE;
-
- addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ addr = (unsigned long)vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
/* First save the configuration in ovelray structure */
@@ -644,394 +563,6 @@ vout_isr_err:
spin_unlock(&vout->vbq_lock);
}
-/* Video buffer call backs */
-
-/*
- * Buffer setup function is called by videobuf layer when REQBUF ioctl is
- * called. This is used to setup buffers and return size and count of
- * buffers allocated. After the call to this buffer, videobuf layer will
- * setup buffer queue depending on the size and count of buffers
- */
-static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
-{
- int startindex = 0, i, j;
- u32 phy_addr = 0, virt_addr = 0;
- struct omap_vout_device *vout = q->priv_data;
- struct omapvideo_info *ovid = &vout->vid_info;
- int vid_max_buf_size;
-
- if (!vout)
- return -EINVAL;
-
- vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
- video2_bufsize;
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
- return -EINVAL;
-
- startindex = (vout->vid == OMAP_VIDEO1) ?
- video1_numbuffers : video2_numbuffers;
- if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
- *count = startindex;
-
- if (ovid->rotation_type == VOUT_ROT_VRFB) {
- if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
- return -ENOMEM;
- }
-
- if (V4L2_MEMORY_MMAP != vout->memory)
- return 0;
-
- /* Now allocated the V4L2 buffers */
- *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
- startindex = (vout->vid == OMAP_VIDEO1) ?
- video1_numbuffers : video2_numbuffers;
-
- /* Check the size of the buffer */
- if (*size > vid_max_buf_size) {
- v4l2_err(&vout->vid_dev->v4l2_dev,
- "buffer allocation mismatch [%u] [%u]\n",
- *size, vout->buffer_size);
- return -ENOMEM;
- }
-
- for (i = startindex; i < *count; i++) {
- vout->buffer_size = *size;
-
- virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
- &phy_addr);
- if (!virt_addr) {
- if (ovid->rotation_type == VOUT_ROT_NONE)
- break;
-
- if (!is_rotation_enabled(vout))
- break;
-
- /* Free the VRFB buffers if no space for V4L2 buffers */
- for (j = i; j < *count; j++) {
- omap_vout_free_buffer(vout->smsshado_virt_addr[j],
- vout->smsshado_size);
- vout->smsshado_virt_addr[j] = 0;
- vout->smsshado_phy_addr[j] = 0;
- }
- }
- vout->buf_virt_addr[i] = virt_addr;
- vout->buf_phy_addr[i] = phy_addr;
- }
- *count = vout->buffer_allocated = i;
-
- return 0;
-}
-
-/*
- * Free the V4L2 buffers additionally allocated than default
- * number of buffers
- */
-static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
-{
- int num_buffers = 0, i;
-
- num_buffers = (vout->vid == OMAP_VIDEO1) ?
- video1_numbuffers : video2_numbuffers;
-
- for (i = num_buffers; i < vout->buffer_allocated; i++) {
- if (vout->buf_virt_addr[i])
- omap_vout_free_buffer(vout->buf_virt_addr[i],
- vout->buffer_size);
-
- vout->buf_virt_addr[i] = 0;
- vout->buf_phy_addr[i] = 0;
- }
- vout->buffer_allocated = num_buffers;
-}
-
-/*
- * This function will be called when VIDIOC_QBUF ioctl is called.
- * It prepare buffers before give out for the display. This function
- * converts user space virtual address into physical address if userptr memory
- * exchange mechanism is used. If rotation is enabled, it copies entire
- * buffer into VRFB memory space before giving it to the DSS.
- */
-static int omap_vout_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct omap_vout_device *vout = q->priv_data;
- struct omapvideo_info *ovid = &vout->vid_info;
-
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = vout->pix.width;
- vb->height = vout->pix.height;
- vb->size = vb->width * vb->height * vout->bpp;
- vb->field = field;
- }
- vb->state = VIDEOBUF_PREPARED;
- /* if user pointer memory mechanism is used, get the physical
- * address of the buffer
- */
- if (V4L2_MEMORY_USERPTR == vb->memory) {
- int ret;
-
- if (0 == vb->baddr)
- return -EINVAL;
- /* Physical address */
- ret = omap_vout_get_userptr(vb, vb->baddr,
- (u32 *)&vout->queued_buf_addr[vb->i]);
- if (ret < 0)
- return ret;
- } else {
- unsigned long addr, dma_addr;
- unsigned long size;
-
- addr = (unsigned long) vout->buf_virt_addr[vb->i];
- size = (unsigned long) vb->size;
-
- dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
- v4l2_err(&vout->vid_dev->v4l2_dev,
- "dma_map_single failed\n");
-
- vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
- }
-
- if (ovid->rotation_type == VOUT_ROT_VRFB)
- return omap_vout_prepare_vrfb(vout, vb);
- else
- return 0;
-}
-
-/*
- * Buffer queue function will be called from the videobuf layer when _QBUF
- * ioctl is called. It is used to enqueue buffer, which is ready to be
- * displayed.
- */
-static void omap_vout_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct omap_vout_device *vout = q->priv_data;
-
- /* Driver is also maintainig a queue. So enqueue buffer in the driver
- * queue */
- list_add_tail(&vb->queue, &vout->dma_queue);
-
- vb->state = VIDEOBUF_QUEUED;
-}
-
-/*
- * Buffer release function is called from videobuf layer to release buffer
- * which are already allocated
- */
-static void omap_vout_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- vb->state = VIDEOBUF_NEEDS_INIT;
- if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
- struct frame_vector *vec = vb->priv;
-
- put_vaddr_frames(vec);
- frame_vector_destroy(vec);
- }
-}
-
-/*
- * File operations
- */
-static __poll_t omap_vout_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct omap_vout_device *vout = file->private_data;
- struct videobuf_queue *q = &vout->vbq;
-
- return videobuf_poll_stream(file, q, wait);
-}
-
-static void omap_vout_vm_open(struct vm_area_struct *vma)
-{
- struct omap_vout_device *vout = vma->vm_private_data;
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
- "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
- vout->mmap_count++;
-}
-
-static void omap_vout_vm_close(struct vm_area_struct *vma)
-{
- struct omap_vout_device *vout = vma->vm_private_data;
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
- "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
- vout->mmap_count--;
-}
-
-static const struct vm_operations_struct omap_vout_vm_ops = {
- .open = omap_vout_vm_open,
- .close = omap_vout_vm_close,
-};
-
-static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int i;
- void *pos;
- unsigned long start = vma->vm_start;
- unsigned long size = (vma->vm_end - vma->vm_start);
- struct omap_vout_device *vout = file->private_data;
- struct videobuf_queue *q = &vout->vbq;
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
- " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
- vma->vm_pgoff, vma->vm_start, vma->vm_end);
-
- /* look for the buffer to map */
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (NULL == q->bufs[i])
- continue;
- if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
- continue;
- if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
- break;
- }
-
- if (VIDEO_MAX_FRAME == i) {
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
- "offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- return -EINVAL;
- }
- /* Check the size of the buffer */
- if (size > vout->buffer_size) {
- v4l2_err(&vout->vid_dev->v4l2_dev,
- "insufficient memory [%lu] [%u]\n",
- size, vout->buffer_size);
- return -ENOMEM;
- }
-
- q->bufs[i]->baddr = vma->vm_start;
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_ops = &omap_vout_vm_ops;
- vma->vm_private_data = (void *) vout;
- pos = (void *)vout->buf_virt_addr[i];
- vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
- while (size > 0) {
- unsigned long pfn;
- pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
- if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
- return -EAGAIN;
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- vout->mmap_count++;
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
-
- return 0;
-}
-
-static int omap_vout_release(struct file *file)
-{
- unsigned int ret, i;
- struct videobuf_queue *q;
- struct omapvideo_info *ovid;
- struct omap_vout_device *vout = file->private_data;
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
- ovid = &vout->vid_info;
-
- if (!vout)
- return 0;
-
- q = &vout->vbq;
- /* Disable all the overlay managers connected with this interface */
- for (i = 0; i < ovid->num_overlays; i++) {
- struct omap_overlay *ovl = ovid->overlays[i];
- struct omap_dss_device *dssdev = ovl->get_device(ovl);
-
- if (dssdev)
- ovl->disable(ovl);
- }
- /* Turn off the pipeline */
- ret = omapvid_apply_changes(vout);
- if (ret)
- v4l2_warn(&vout->vid_dev->v4l2_dev,
- "Unable to apply changes\n");
-
- /* Free all buffers */
- omap_vout_free_extra_buffers(vout);
-
- /* Free the VRFB buffers only if they are allocated
- * during reqbufs. Don't free if init time allocated
- */
- if (ovid->rotation_type == VOUT_ROT_VRFB) {
- if (!vout->vrfb_static_allocation)
- omap_vout_free_vrfb_buffers(vout);
- }
- videobuf_mmap_free(q);
-
- /* Even if apply changes fails we should continue
- freeing allocated memory */
- if (vout->streaming) {
- u32 mask = 0;
-
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
- DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
- omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
- vout->streaming = false;
-
- videobuf_streamoff(q);
- videobuf_queue_cancel(q);
- }
-
- if (vout->mmap_count != 0)
- vout->mmap_count = 0;
-
- vout->opened -= 1;
- file->private_data = NULL;
-
- if (vout->buffer_allocated)
- videobuf_mmap_free(q);
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
- return ret;
-}
-
-static int omap_vout_open(struct file *file)
-{
- struct videobuf_queue *q;
- struct omap_vout_device *vout = NULL;
-
- vout = video_drvdata(file);
-
- if (vout == NULL)
- return -ENODEV;
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
-
- /* for now, we only support single open */
- if (vout->opened)
- return -EBUSY;
-
- vout->opened += 1;
-
- file->private_data = vout;
- vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
- q = &vout->vbq;
- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
- video_vbq_ops.buf_release = omap_vout_buffer_release;
- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
- spin_lock_init(&vout->vbq_lock);
-
- videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
- &vout->vbq_lock, vout->type, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), vout, NULL);
-
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
- return 0;
-}
/*
* V4L2 ioctls
@@ -1039,15 +570,12 @@ static int omap_vout_open(struct file *file)
static int vidioc_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
strscpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
strscpy(cap->card, vout->vfd->name, sizeof(cap->card));
- cap->bus_info[0] = '\0';
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s.%d", VOUT_NAME, vout->vid);
return 0;
}
@@ -1060,8 +588,6 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
return -EINVAL;
fmt->flags = omap_formats[index].flags;
- strscpy(fmt->description, omap_formats[index].description,
- sizeof(fmt->description));
fmt->pixelformat = omap_formats[index].pixelformat;
return 0;
@@ -1070,7 +596,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
f->fmt.pix = vout->pix;
return 0;
@@ -1083,7 +609,7 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
struct omap_video_timings *timing;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omap_dss_device *dssdev;
ovid = &vout->vid_info;
@@ -1110,14 +636,12 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
struct omap_video_timings *timing;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omap_dss_device *dssdev;
- if (vout->streaming)
+ if (vb2_is_busy(&vout->vq))
return -EBUSY;
- mutex_lock(&vout->lock);
-
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
dssdev = ovl->get_device(ovl);
@@ -1168,7 +692,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
ret = 0;
s_fmt_vid_out_exit:
- mutex_unlock(&vout->lock);
return ret;
}
@@ -1176,7 +699,7 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_format *f)
{
int ret = 0;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
struct v4l2_window *win = &f->fmt.win;
@@ -1186,12 +709,8 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
ret = omap_vout_try_window(&vout->fbuf, win);
- if (!ret) {
- if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
- win->global_alpha = 255;
- else
- win->global_alpha = f->fmt.win.global_alpha;
- }
+ if (!ret && !(ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA))
+ win->global_alpha = 0;
return ret;
}
@@ -1202,35 +721,53 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
int ret = 0;
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct v4l2_window *win = &f->fmt.win;
- mutex_lock(&vout->lock);
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
if (!ret) {
+ enum omap_dss_trans_key_type key_type =
+ OMAP_DSS_COLOR_KEY_GFX_DST;
+ int enable;
+
/* Video1 plane does not support global alpha on OMAP3 */
- if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
- vout->win.global_alpha = 255;
+ if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA)
+ vout->win.global_alpha = win->global_alpha;
+ else
+ win->global_alpha = 0;
+ if (vout->fbuf.flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
else
- vout->win.global_alpha = f->fmt.win.global_alpha;
+ enable = 0;
+ if (vout->fbuf.flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ struct omap_overlay_manager_info info;
- vout->win.chromakey = f->fmt.win.chromakey;
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
}
- mutex_unlock(&vout->lock);
return ret;
}
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_format *f)
{
- u32 key_value = 0;
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
- struct omap_vout_device *vout = fh;
- struct omap_overlay_manager_info info;
+ struct omap_vout_device *vout = video_drvdata(file);
struct v4l2_window *win = &f->fmt.win;
ovid = &vout->vid_info;
@@ -1238,19 +775,20 @@ static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
win->w = vout->win.w;
win->field = vout->win.field;
- win->global_alpha = vout->win.global_alpha;
-
- if (ovl->manager && ovl->manager->get_manager_info) {
- ovl->manager->get_manager_info(ovl->manager, &info);
- key_value = info.trans_key;
- }
- win->chromakey = key_value;
+ win->chromakey = vout->win.chromakey;
+ if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA)
+ win->global_alpha = vout->win.global_alpha;
+ else
+ win->global_alpha = 0;
+ win->clips = NULL;
+ win->clipcount = 0;
+ win->bitmap = NULL;
return 0;
}
static int vidioc_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct v4l2_pix_format *pix = &vout->pix;
if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1277,7 +815,7 @@ static int vidioc_g_selection(struct file *file, void *fh, struct v4l2_selection
static int vidioc_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
int ret = -EINVAL;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omapvideo_info *ovid;
struct omap_overlay *ovl;
struct omap_video_timings *timing;
@@ -1289,10 +827,9 @@ static int vidioc_s_selection(struct file *file, void *fh, struct v4l2_selection
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (vout->streaming)
+ if (vb2_is_busy(&vout->vq))
return -EBUSY;
- mutex_lock(&vout->lock);
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
/* get the display device attached to the overlay */
@@ -1317,7 +854,6 @@ static int vidioc_s_selection(struct file *file, void *fh, struct v4l2_selection
&vout->fbuf, &sel->r);
s_crop_err:
- mutex_unlock(&vout->lock);
return ret;
}
@@ -1334,26 +870,21 @@ static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
ovid = &vout->vid_info;
- mutex_lock(&vout->lock);
if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
- mutex_unlock(&vout->lock);
ret = -ERANGE;
break;
}
if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
- mutex_unlock(&vout->lock);
ret = -EINVAL;
break;
}
if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
vout->mirror)) {
- mutex_unlock(&vout->lock);
ret = -EINVAL;
break;
}
- mutex_unlock(&vout->lock);
break;
}
case V4L2_CID_BG_COLOR:
@@ -1364,9 +895,7 @@ static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
ovl = vout->vid_info.overlays[0];
- mutex_lock(&vout->lock);
if (!ovl->manager || !ovl->manager->get_manager_info) {
- mutex_unlock(&vout->lock);
ret = -EINVAL;
break;
}
@@ -1374,11 +903,9 @@ static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
ovl->manager->get_manager_info(ovl->manager, &info);
info.default_color = color;
if (ovl->manager->set_manager_info(ovl->manager, &info)) {
- mutex_unlock(&vout->lock);
ret = -EINVAL;
break;
}
- mutex_unlock(&vout->lock);
break;
}
case V4L2_CID_VFLIP:
@@ -1388,20 +915,16 @@ static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
ovid = &vout->vid_info;
- mutex_lock(&vout->lock);
if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
- mutex_unlock(&vout->lock);
ret = -ERANGE;
break;
}
if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
- mutex_unlock(&vout->lock);
ret = -EINVAL;
break;
}
vout->mirror = mirror;
- mutex_unlock(&vout->lock);
break;
}
default:
@@ -1414,185 +937,94 @@ static const struct v4l2_ctrl_ops omap_vout_ctrl_ops = {
.s_ctrl = omap_vout_s_ctrl,
};
-static int vidioc_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *req)
+static int omap_vout_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
- int ret = 0;
- unsigned int i, num_buffers = 0;
- struct omap_vout_device *vout = fh;
- struct videobuf_queue *q = &vout->vbq;
-
- if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return -EINVAL;
- /* if memory is not mmp or userptr
- return error */
- if ((V4L2_MEMORY_MMAP != req->memory) &&
- (V4L2_MEMORY_USERPTR != req->memory))
- return -EINVAL;
-
- mutex_lock(&vout->lock);
- /* Cannot be requested when streaming is on */
- if (vout->streaming) {
- ret = -EBUSY;
- goto reqbuf_err;
- }
+ struct omap_vout_device *vout = vb2_get_drv_priv(vq);
+ int size = vout->pix.sizeimage;
- /* If buffers are already allocated free them */
- if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
- if (vout->mmap_count) {
- ret = -EBUSY;
- goto reqbuf_err;
- }
- num_buffers = (vout->vid == OMAP_VIDEO1) ?
- video1_numbuffers : video2_numbuffers;
- for (i = num_buffers; i < vout->buffer_allocated; i++) {
- omap_vout_free_buffer(vout->buf_virt_addr[i],
- vout->buffer_size);
- vout->buf_virt_addr[i] = 0;
- vout->buf_phy_addr[i] = 0;
- }
- vout->buffer_allocated = num_buffers;
- videobuf_mmap_free(q);
- } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
- if (vout->buffer_allocated) {
- videobuf_mmap_free(q);
- for (i = 0; i < vout->buffer_allocated; i++) {
- kfree(q->bufs[i]);
- q->bufs[i] = NULL;
- }
- vout->buffer_allocated = 0;
- }
+ if (is_rotation_enabled(vout) && vq->num_buffers + *nbufs > VRFB_NUM_BUFS) {
+ *nbufs = VRFB_NUM_BUFS - vq->num_buffers;
+ if (*nbufs == 0)
+ return -EINVAL;
}
- /*store the memory type in data structure */
- vout->memory = req->memory;
-
- INIT_LIST_HEAD(&vout->dma_queue);
-
- /* call videobuf_reqbufs api */
- ret = videobuf_reqbufs(q, req);
- if (ret < 0)
- goto reqbuf_err;
-
- vout->buffer_allocated = req->count;
-
-reqbuf_err:
- mutex_unlock(&vout->lock);
- return ret;
-}
-
-static int vidioc_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *b)
-{
- struct omap_vout_device *vout = fh;
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
- return videobuf_querybuf(&vout->vbq, b);
+ *num_planes = 1;
+ sizes[0] = size;
+ return 0;
}
-static int vidioc_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buffer)
+static int omap_vout_vb2_prepare(struct vb2_buffer *vb)
{
- struct omap_vout_device *vout = fh;
- struct videobuf_queue *q = &vout->vbq;
+ struct omap_vout_device *vout = vb2_get_drv_priv(vb->vb2_queue);
+ struct omapvideo_info *ovid = &vout->vid_info;
+ struct omap_vout_buffer *voutbuf = vb2_to_omap_vout_buffer(vb);
+ dma_addr_t buf_phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
- (buffer->index >= vout->buffer_allocated) ||
- (q->bufs[buffer->index]->memory != buffer->memory)) {
+ if (vb2_plane_size(vb, 0) < vout->pix.sizeimage) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), vout->pix.sizeimage);
return -EINVAL;
}
- if (V4L2_MEMORY_USERPTR == buffer->memory) {
- if ((buffer->length < vout->pix.sizeimage) ||
- (0 == buffer->m.userptr)) {
- return -EINVAL;
- }
- }
- if ((is_rotation_enabled(vout)) &&
- vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
- v4l2_warn(&vout->vid_dev->v4l2_dev,
- "DMA Channel not allocated for Rotation\n");
- return -EINVAL;
- }
+ vb2_set_plane_payload(vb, 0, vout->pix.sizeimage);
+ voutbuf->vbuf.field = V4L2_FIELD_NONE;
- return videobuf_qbuf(q, buffer);
+ vout->queued_buf_addr[vb->index] = (u8 *)buf_phy_addr;
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ return omap_vout_prepare_vrfb(vout, vb);
+ return 0;
}
-static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+static void omap_vout_vb2_queue(struct vb2_buffer *vb)
{
- struct omap_vout_device *vout = fh;
- struct videobuf_queue *q = &vout->vbq;
+ struct omap_vout_device *vout = vb2_get_drv_priv(vb->vb2_queue);
+ struct omap_vout_buffer *voutbuf = vb2_to_omap_vout_buffer(vb);
- int ret;
- u32 addr;
- unsigned long size;
- struct videobuf_buffer *vb;
-
- if (!vout->streaming)
- return -EINVAL;
-
- ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
- if (ret)
- return ret;
-
- vb = q->bufs[b->index];
-
- addr = (unsigned long) vout->buf_phy_addr[vb->i];
- size = (unsigned long) vb->size;
- dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
- size, DMA_TO_DEVICE);
- return 0;
+ list_add_tail(&voutbuf->queue, &vout->dma_queue);
}
-static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- int ret = 0, j;
- u32 addr = 0, mask = 0;
- struct omap_vout_device *vout = fh;
- struct videobuf_queue *q = &vout->vbq;
+ struct omap_vout_device *vout = vb2_get_drv_priv(vq);
struct omapvideo_info *ovid = &vout->vid_info;
-
- mutex_lock(&vout->lock);
-
- if (vout->streaming) {
- ret = -EBUSY;
- goto streamon_err;
- }
-
- ret = videobuf_streamon(q);
- if (ret)
- goto streamon_err;
-
- if (list_empty(&vout->dma_queue)) {
- ret = -EIO;
- goto streamon_err1;
- }
+ struct omap_vout_buffer *buf, *tmp;
+ u32 addr = 0, mask = 0;
+ int ret, j;
/* Get the next frame from the buffer queue */
vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
- struct videobuf_buffer, queue);
+ struct omap_vout_buffer, queue);
/* Remove buffer from the buffer queue */
list_del(&vout->cur_frm->queue);
- /* Mark state of the current frame to active */
- vout->cur_frm->state = VIDEOBUF_ACTIVE;
/* Initialize field_id and started member */
vout->field_id = 0;
-
- /* set flag here. Next QBUF will start DMA */
- vout->streaming = true;
-
vout->first_int = 1;
+ vout->sequence = 0;
if (omap_vout_calculate_offset(vout)) {
ret = -EINVAL;
- goto streamon_err1;
+ goto out;
}
- addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ if (omap_vout_vrfb_buffer_setup(vout, &count, 0)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ addr = (unsigned long)vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
| DISPC_IRQ_VSYNC2;
- /* First save the configuration in ovelray structure */
+ /* First save the configuration in the overlay structure */
ret = omapvid_init(vout, addr);
if (ret) {
v4l2_err(&vout->vid_dev->v4l2_dev,
@@ -1617,28 +1049,43 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
goto streamon_err1;
}
}
-
- ret = 0;
+ return 0;
streamon_err1:
- if (ret)
- ret = videobuf_streamoff(q);
-streamon_err:
- mutex_unlock(&vout->lock);
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+ struct omap_dss_device *dssdev = ovl->get_device(ovl);
+
+ if (dssdev)
+ ovl->disable(ovl);
+ }
+ /* Turn off the pipeline */
+ if (omapvid_apply_changes(vout))
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to change mode in streamoff\n");
+
+out:
+ vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
return ret;
}
-static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+static void omap_vout_vb2_stop_streaming(struct vb2_queue *vq)
{
- u32 mask = 0;
- int ret = 0, j;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = vb2_get_drv_priv(vq);
struct omapvideo_info *ovid = &vout->vid_info;
+ struct omap_vout_buffer *buf, *tmp;
+ u32 mask = 0;
+ int j;
- if (!vout->streaming)
- return -EINVAL;
-
- vout->streaming = false;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
| DISPC_IRQ_VSYNC2;
@@ -1651,17 +1098,18 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
if (dssdev)
ovl->disable(ovl);
}
-
/* Turn of the pipeline */
- ret = omapvid_apply_changes(vout);
- if (ret)
+ if (omapvid_apply_changes(vout))
v4l2_err(&vout->vid_dev->v4l2_dev,
"failed to change mode in streamoff\n");
- INIT_LIST_HEAD(&vout->dma_queue);
- ret = videobuf_streamoff(&vout->vbq);
-
- return ret;
+ if (vout->next_frm != vout->cur_frm)
+ vb2_buffer_done(&vout->next_frm->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
}
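Both paths above hand every pending buffer back to vb2, as the framework requires: VB2_BUF_STATE_QUEUED when start_streaming() fails, VB2_BUF_STATE_ERROR on stop_streaming(). A hedged sketch of a helper the driver could factor this into (name hypothetical, using the driver's own dma_queue):

static void omap_vout_return_buffers(struct omap_vout_device *vout,
				     enum vb2_buffer_state state)
{
	struct omap_vout_buffer *buf, *tmp;

	/* Return every buffer still on the driver's queue to vb2. */
	list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vbuf.vb2_buf, state);
	}
}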
static int vidioc_s_fbuf(struct file *file, void *fh,
@@ -1670,7 +1118,7 @@ static int vidioc_s_fbuf(struct file *file, void *fh,
int enable = 0;
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omap_overlay_manager_info info;
enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
@@ -1741,17 +1189,36 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
{
struct omap_overlay *ovl;
struct omapvideo_info *ovid;
- struct omap_vout_device *vout = fh;
+ struct omap_vout_device *vout = video_drvdata(file);
struct omap_overlay_manager_info info;
+ struct omap_video_timings *timing;
+ struct omap_dss_device *dssdev;
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ dssdev = ovl->get_device(ovl);
+
+ if (!dssdev)
+ return -EINVAL;
+
+ timing = &dssdev->panel.timings;
- /* The video overlay must stay within the framebuffer and can't be
- positioned independently. */
- a->flags = V4L2_FBUF_FLAG_OVERLAY;
- a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
- | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ a->fmt.field = V4L2_FIELD_NONE;
+ a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ a->fmt.pixelformat = V4L2_PIX_FMT_RGBA32;
+ a->fmt.height = vout->fbuf.fmt.height;
+ a->fmt.width = vout->fbuf.fmt.width;
+ a->fmt.bytesperline = vout->fbuf.fmt.width * 4;
+ a->fmt.sizeimage = a->fmt.height * a->fmt.bytesperline;
+ a->base = vout->fbuf.base;
+
+ a->flags = vout->fbuf.flags;
+ a->capability = vout->fbuf.capability;
+ a->flags &= ~(V4L2_FBUF_FLAG_SRC_CHROMAKEY | V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_LOCAL_ALPHA);
if (ovl->manager && ovl->manager->get_manager_info) {
ovl->manager->get_manager_info(ovl->manager, &info);
@@ -1759,9 +1226,6 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
- }
- if (ovl->manager && ovl->manager->get_manager_info) {
- ovl->manager->get_manager_info(ovl->manager, &info);
if (info.partial_alpha_enabled)
a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
}
@@ -1769,6 +1233,27 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
return 0;
}
+static int vidioc_enum_output(struct file *file, void *priv_fh,
+ struct v4l2_output *out)
+{
+ if (out->index)
+ return -EINVAL;
+ snprintf(out->name, sizeof(out->name), "Overlay");
+ out->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
+ return 0;
+}
+
+static int vidioc_g_output(struct file *file, void *priv_fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_output(struct file *file, void *priv_fh, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
@@ -1782,21 +1267,38 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_enum_output = vidioc_enum_output,
+ .vidioc_g_output = vidioc_g_output,
+ .vidioc_s_output = vidioc_s_output,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct v4l2_file_operations omap_vout_fops = {
.owner = THIS_MODULE,
- .poll = omap_vout_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = omap_vout_mmap,
- .open = omap_vout_open,
- .release = omap_vout_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+};
+
+static const struct vb2_ops omap_vout_vb2_ops = {
+ .queue_setup = omap_vout_vb2_queue_setup,
+ .buf_queue = omap_vout_vb2_queue,
+ .buf_prepare = omap_vout_vb2_prepare,
+ .start_streaming = omap_vout_vb2_start_streaming,
+ .stop_streaming = omap_vout_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* Init functions used during driver initialization */
@@ -1808,6 +1310,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
struct omap_overlay *ovl = vout->vid_info.overlays[0];
struct omap_dss_device *display = ovl->get_device(ovl);
struct v4l2_ctrl_handler *hdl;
+ struct vb2_queue *vq;
+ int ret;
/* set the default pix */
pix = &vout->pix;
@@ -1818,37 +1322,48 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
/* Default pixel format is RGB 5-6-5 */
pix->pixelformat = V4L2_PIX_FMT_RGB565;
- pix->field = V4L2_FIELD_ANY;
+ pix->field = V4L2_FIELD_NONE;
pix->bytesperline = pix->width * 2;
pix->sizeimage = pix->bytesperline * pix->height;
- pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
vout->bpp = RGB565_BPP;
vout->fbuf.fmt.width = display->panel.timings.x_res;
vout->fbuf.fmt.height = display->panel.timings.y_res;
+ vout->cropped_offset = 0;
/* Set the data structures for the overlay parameters*/
- vout->win.global_alpha = 255;
- vout->fbuf.flags = 0;
+ vout->fbuf.flags = V4L2_FBUF_FLAG_OVERLAY;
vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
- V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
- vout->win.chromakey = 0;
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY |
+ V4L2_FBUF_CAP_EXTERNOVERLAY;
+ if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) {
+ vout->win.global_alpha = 255;
+ vout->fbuf.capability |= V4L2_FBUF_CAP_GLOBAL_ALPHA;
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA;
+ } else {
+ vout->win.global_alpha = 0;
+ }
+ vout->win.field = V4L2_FIELD_NONE;
omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
hdl = &vout->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 3);
- v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
- V4L2_CID_ROTATE, 0, 270, 90, 0);
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB) {
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
V4L2_CID_BG_COLOR, 0, 0xffffff, 1, 0);
- v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
if (hdl->error)
return hdl->error;
vout->rotation = 0;
vout->mirror = false;
+ INIT_LIST_HEAD(&vout->dma_queue);
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
vout->vrfb_bpp = 2;
@@ -1870,63 +1385,54 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vfd->fops = &omap_vout_fops;
vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_TX;
- mutex_init(&vout->lock);
-
vfd->minor = -1;
- return 0;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ mutex_init(&vout->lock);
+ vq = &vout->vq;
+ vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = vout;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->buf_struct_size = sizeof(struct omap_vout_buffer);
+ vq->dev = vfd->v4l2_dev->dev;
+
+ vq->ops = &omap_vout_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->lock = &vout->lock;
+ vq->min_buffers_needed = 1;
+ vfd->queue = vq;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ v4l2_ctrl_handler_free(hdl);
+ video_device_release(vfd);
+ }
+ return ret;
}
/* Setup video buffers */
static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
int vid_num)
{
- u32 numbuffers;
- int ret = 0, i;
struct omapvideo_info *ovid;
struct omap_vout_device *vout;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev =
container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+ int ret = 0;
vout = vid_dev->vouts[vid_num];
ovid = &vout->vid_info;
- numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
- vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
- dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
-
- for (i = 0; i < numbuffers; i++) {
- vout->buf_virt_addr[i] =
- omap_vout_alloc_buffer(vout->buffer_size,
- (u32 *) &vout->buf_phy_addr[i]);
- if (!vout->buf_virt_addr[i]) {
- numbuffers = i;
- ret = -ENOMEM;
- goto free_buffers;
- }
- }
-
- vout->cropped_offset = 0;
-
if (ovid->rotation_type == VOUT_ROT_VRFB) {
bool static_vrfb_allocation = (vid_num == 0) ?
vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
static_vrfb_allocation);
}
-
- return ret;
-
-free_buffers:
- for (i = 0; i < numbuffers; i++) {
- omap_vout_free_buffer(vout->buf_virt_addr[i],
- vout->buffer_size);
- vout->buf_virt_addr[i] = 0;
- vout->buf_phy_addr[i] = 0;
- }
return ret;
-
}
/* Create video out devices */
@@ -1938,6 +1444,10 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev = container_of(v4l2_dev,
struct omap2video_device, v4l2_dev);
+ struct omap_overlay *ovl = vid_dev->overlays[0];
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
for (k = 0; k < pdev->num_resources; k++) {
@@ -1958,6 +1468,15 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
vout->vid_info.num_overlays = 1;
vout->vid_info.id = k + 1;
+ spin_lock_init(&vout->vbq_lock);
+ /*
+ * Set the framebuffer base, this allows applications to find
+ * the fb corresponding to this overlay.
+ *
+ * To be precise: fbuf.base should match smem_start of
+ * struct fb_fix_screeninfo.
+ */
+ vout->fbuf.base = (void *)info.paddr;
/* Set VRFB as rotation_type for omap2 and omap3 */
if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
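The fbuf.base comment above describes a contract visible to userspace; a hedged sketch of how an application might pair this overlay with its framebuffer by comparing the two addresses (descriptors and error handling are illustrative only):

#include <sys/ioctl.h>
#include <linux/fb.h>
#include <linux/videodev2.h>

/* Returns 1 if the V4L2 overlay's framebuffer is the fb behind fb_fd. */
static int overlay_matches_fb(int v4l2_fd, int fb_fd)
{
	struct v4l2_framebuffer fbuf;
	struct fb_fix_screeninfo fix;

	if (ioctl(v4l2_fd, VIDIOC_G_FBUF, &fbuf) < 0 ||
	    ioctl(fb_fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return 0;

	/* fbuf.base is expected to equal the framebuffer's smem_start. */
	return (unsigned long)fbuf.base == fix.smem_start;
}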
@@ -2000,7 +1519,6 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
error2:
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
omap_vout_release_vrfb(vout);
- omap_vout_free_buffers(vout);
error1:
video_device_release(vfd);
error:
@@ -2045,7 +1563,6 @@ static void omap_vout_cleanup_device(struct omap_vout_device *vout)
if (vout->vrfb_static_allocation)
omap_vout_free_vrfb_buffers(vout);
}
- omap_vout_free_buffers(vout);
kfree(vout);
}
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..6bd672cbdb62 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -14,7 +14,6 @@
#include <linux/videodev2.h>
#include <linux/slab.h>
-#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
#include <video/omapvrfb.h>
@@ -40,7 +39,7 @@ static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
&vout->smsshado_phy_addr[i]);
}
if (!vout->smsshado_virt_addr[i] && startindex != -1) {
- if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ if (vout->vq.memory == V4L2_MEMORY_MMAP && i >= startindex)
break;
}
if (!vout->smsshado_virt_addr[i]) {
@@ -109,8 +108,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
dev_info(&pdev->dev, ": VRFB allocation failed\n");
for (j = 0; j < i; j++)
omap_vrfb_release_ctx(&vout->vrfb_context[j]);
- ret = -ENOMEM;
- goto free_buffers;
+ return -ENOMEM;
}
}
@@ -155,8 +153,10 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
- /* statically allocated the VRFB buffer is done through
- commands line aruments */
+ /*
+ * Static allocation of the VRFB buffers is done through
+ * command line arguments.
+ */
if (static_vrfb_allocation) {
if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
ret = -ENOMEM;
@@ -169,9 +169,6 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
release_vrfb_ctx:
for (j = 0; j < VRFB_NUM_BUFS; j++)
omap_vrfb_release_ctx(&vout->vrfb_context[j]);
-free_buffers:
- omap_vout_free_buffers(vout);
-
return ret;
}
@@ -231,13 +228,14 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
}
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
- struct videobuf_buffer *vb)
+ struct vb2_buffer *vb)
{
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_chan *chan = vout->vrfb_dma_tx.chan;
struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
dma_cookie_t cookie;
+ dma_addr_t buf_phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
enum dma_status status;
enum dss_rotation rotation;
size_t dst_icg;
@@ -253,11 +251,10 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
*/
pixsize = vout->bpp * vout->vrfb_bpp;
- dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
+ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
- xt->src_start = vout->buf_phy_addr[vb->i];
- xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+ xt->src_start = buf_phy_addr;
+ xt->dst_start = vout->vrfb_context[vb->index].paddr[0];
xt->numf = vout->pix.height;
xt->frame_size = 1;
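A worked example of the corrected destination inter-chunk gap, with values assumed purely for illustration (MAX_PIXELS_PER_LINE taken as 2048):

/*
 * For a 640-pixel-wide RGB565 frame (bpp = 2) with vrfb_bpp = 2,
 * pixsize = 2 * 2 = 4, so
 *
 *	dst_icg = 2048 * 4 - 640 * 2 = 8192 - 1280 = 6912 bytes
 *
 * which is exactly the unused tail of each VRFB destination line.
 * The old "+ 1" made the gap one byte too large, shifting every
 * subsequent line of the rotated image by one more byte.
 */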
@@ -308,8 +305,8 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
rotation = calc_rotation(vout);
- vout->queued_buf_addr[vb->i] = (u8 *)
- vout->vrfb_context[vb->i].paddr[rotation];
+ vout->queued_buf_addr[vb->index] = (u8 *)
+ vout->vrfb_context[vb->index].paddr[rotation];
return 0;
}
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
index c976975024df..40bc9e54ecc6 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.h
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -20,7 +20,7 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout);
int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
unsigned int *count, unsigned int startindex);
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
- struct videobuf_buffer *vb);
+ struct vb2_buffer *vb);
void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
#else
static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { };
@@ -32,7 +32,7 @@ static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
unsigned int *count, unsigned int startindex)
{ return 0; };
static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
- struct videobuf_buffer *vb)
+ struct vb2_buffer *vb)
{ return 0; };
static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { };
#endif
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index c740393c8509..1cff6dea1879 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -11,6 +11,7 @@
#ifndef OMAP_VOUTDEF_H
#define OMAP_VOUTDEF_H
+#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-ctrls.h>
#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
@@ -113,6 +114,20 @@ struct omap2video_device {
struct omap_overlay_manager *managers[MAX_MANAGERS];
};
+/* buffer for one video frame */
+struct omap_vout_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head queue;
+};
+
+static inline struct omap_vout_buffer *vb2_to_omap_vout_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct omap_vout_buffer, vbuf);
+}
+
/* per-device data structure */
struct omap_vout_device {
@@ -121,29 +136,12 @@ struct omap_vout_device {
struct omap2video_device *vid_dev;
struct v4l2_ctrl_handler ctrl_handler;
int vid;
- int opened;
- /* we don't allow to change image fmt/size once buffer has
- * been allocated
- */
- int buffer_allocated;
/* allow to reuse previously allocated buffer which is big enough */
int buffer_size;
- /* keep buffer info across opens */
- unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
- unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
enum omap_color_mode dss_mode;
- /* we don't allow to request new buffer when old buffers are
- * still mmapped
- */
- int mmap_count;
-
- spinlock_t vbq_lock; /* spinlock for videobuf queues */
- unsigned long field_count; /* field counter for videobuf_buffer */
-
- /* non-NULL means streaming is in progress. */
- bool streaming;
+ u32 sequence;
struct v4l2_pix_format pix;
struct v4l2_rect crop;
@@ -169,19 +167,14 @@ struct omap_vout_device {
unsigned char pos;
int ps, vr_ps, line_length, first_int, field_id;
- enum v4l2_memory memory;
- struct videobuf_buffer *cur_frm, *next_frm;
+ struct omap_vout_buffer *cur_frm, *next_frm;
+ spinlock_t vbq_lock; /* spinlock for dma_queue */
struct list_head dma_queue;
u8 *queued_buf_addr[VIDEO_MAX_FRAME];
u32 cropped_offset;
s32 tv_field1_offset;
void *isr_handle;
-
- /* Buffer queue variables */
- struct omap_vout_device *vout;
- enum v4l2_buf_type type;
- struct videobuf_queue vbq;
- int io_allowed;
+ struct vb2_queue vq;
};
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 58a25fdf0cce..480a7e95533d 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -95,7 +95,11 @@ int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
/* We now have a valid preview window, so go with it */
new_win->w = try_win;
- new_win->field = V4L2_FIELD_ANY;
+ new_win->field = V4L2_FIELD_NONE;
+ new_win->clips = NULL;
+ new_win->clipcount = 0;
+ new_win->bitmap = NULL;
+
return 0;
}
EXPORT_SYMBOL_GPL(omap_vout_try_window);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 83216fc7156b..327c5716922a 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -719,6 +719,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
s_stream, mode);
pipe->do_propagation = true;
}
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
}
return 0;
@@ -833,6 +837,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
&subdev->entity);
failure = -ETIMEDOUT;
}
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
}
return failure;
@@ -2014,136 +2022,6 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_fwnode_parse(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd)
-{
- struct isp_async_subdev *isd =
- container_of(asd, struct isp_async_subdev, asd);
- struct isp_bus_cfg *buscfg = &isd->bus;
- bool csi1 = false;
- unsigned int i;
-
- dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
- to_of_node(vep->base.local_fwnode), vep->base.port);
-
- switch (vep->base.port) {
- case ISP_OF_PHY_PARALLEL:
- buscfg->interface = ISP_INTERFACE_PARALLEL;
- buscfg->bus.parallel.data_lane_shift =
- vep->bus.parallel.data_shift;
- buscfg->bus.parallel.clk_pol =
- !!(vep->bus.parallel.flags
- & V4L2_MBUS_PCLK_SAMPLE_FALLING);
- buscfg->bus.parallel.hs_pol =
- !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
- buscfg->bus.parallel.vs_pol =
- !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
- buscfg->bus.parallel.fld_pol =
- !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
- buscfg->bus.parallel.data_pol =
- !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
- buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
- break;
-
- case ISP_OF_PHY_CSIPHY1:
- case ISP_OF_PHY_CSIPHY2:
- switch (vep->bus_type) {
- case V4L2_MBUS_CCP2:
- case V4L2_MBUS_CSI1:
- dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
- csi1 = true;
- break;
- case V4L2_MBUS_CSI2_DPHY:
- dev_dbg(dev, "CSI-2 configuration\n");
- csi1 = false;
- break;
- default:
- dev_err(dev, "unsupported bus type %u\n",
- vep->bus_type);
- return -EINVAL;
- }
-
- switch (vep->base.port) {
- case ISP_OF_PHY_CSIPHY1:
- if (csi1)
- buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
- else
- buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
- break;
- case ISP_OF_PHY_CSIPHY2:
- if (csi1)
- buscfg->interface = ISP_INTERFACE_CCP2B_PHY2;
- else
- buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
- break;
- }
- if (csi1) {
- buscfg->bus.ccp2.lanecfg.clk.pos =
- vep->bus.mipi_csi1.clock_lane;
- buscfg->bus.ccp2.lanecfg.clk.pol =
- vep->bus.mipi_csi1.lane_polarity[0];
- dev_dbg(dev, "clock lane polarity %u, pos %u\n",
- buscfg->bus.ccp2.lanecfg.clk.pol,
- buscfg->bus.ccp2.lanecfg.clk.pos);
-
- buscfg->bus.ccp2.lanecfg.data[0].pos =
- vep->bus.mipi_csi1.data_lane;
- buscfg->bus.ccp2.lanecfg.data[0].pol =
- vep->bus.mipi_csi1.lane_polarity[1];
-
- dev_dbg(dev, "data lane polarity %u, pos %u\n",
- buscfg->bus.ccp2.lanecfg.data[0].pol,
- buscfg->bus.ccp2.lanecfg.data[0].pos);
-
- buscfg->bus.ccp2.strobe_clk_pol =
- vep->bus.mipi_csi1.clock_inv;
- buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
- buscfg->bus.ccp2.ccp2_mode =
- vep->bus_type == V4L2_MBUS_CCP2;
- buscfg->bus.ccp2.vp_clk_pol = 1;
-
- buscfg->bus.ccp2.crc = 1;
- } else {
- buscfg->bus.csi2.lanecfg.clk.pos =
- vep->bus.mipi_csi2.clock_lane;
- buscfg->bus.csi2.lanecfg.clk.pol =
- vep->bus.mipi_csi2.lane_polarities[0];
- dev_dbg(dev, "clock lane polarity %u, pos %u\n",
- buscfg->bus.csi2.lanecfg.clk.pol,
- buscfg->bus.csi2.lanecfg.clk.pos);
-
- buscfg->bus.csi2.num_data_lanes =
- vep->bus.mipi_csi2.num_data_lanes;
-
- for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
- buscfg->bus.csi2.lanecfg.data[i].pos =
- vep->bus.mipi_csi2.data_lanes[i];
- buscfg->bus.csi2.lanecfg.data[i].pol =
- vep->bus.mipi_csi2.lane_polarities[i + 1];
- dev_dbg(dev,
- "data lane %u polarity %u, pos %u\n", i,
- buscfg->bus.csi2.lanecfg.data[i].pol,
- buscfg->bus.csi2.lanecfg.data[i].pos);
- }
- /*
- * FIXME: now we assume the CRC is always there.
- * Implement a way to obtain this information from the
- * sensor. Frame descriptors, perhaps?
- */
- buscfg->bus.csi2.crc = 1;
- }
- break;
-
- default:
- dev_warn(dev, "%pOF: invalid interface %u\n",
- to_of_node(vep->base.local_fwnode), vep->base.port);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct isp_device *isp = container_of(async, struct isp_device,
@@ -2173,6 +2051,201 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&isp->media_dev);
}
+static void isp_parse_of_parallel_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct isp_bus_cfg *buscfg)
+{
+ buscfg->interface = ISP_INTERFACE_PARALLEL;
+ buscfg->bus.parallel.data_lane_shift = vep->bus.parallel.data_shift;
+ buscfg->bus.parallel.clk_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ buscfg->bus.parallel.hs_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.vs_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ buscfg->bus.parallel.fld_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ buscfg->bus.parallel.data_pol =
+ !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
+}
+
+static void isp_parse_of_csi2_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct isp_bus_cfg *buscfg)
+{
+ unsigned int i;
+
+ buscfg->bus.csi2.lanecfg.clk.pos = vep->bus.mipi_csi2.clock_lane;
+ buscfg->bus.csi2.lanecfg.clk.pol =
+ vep->bus.mipi_csi2.lane_polarities[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.csi2.lanecfg.clk.pol,
+ buscfg->bus.csi2.lanecfg.clk.pos);
+
+ buscfg->bus.csi2.num_data_lanes = vep->bus.mipi_csi2.num_data_lanes;
+
+ for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
+ buscfg->bus.csi2.lanecfg.data[i].pos =
+ vep->bus.mipi_csi2.data_lanes[i];
+ buscfg->bus.csi2.lanecfg.data[i].pol =
+ vep->bus.mipi_csi2.lane_polarities[i + 1];
+ dev_dbg(dev,
+ "data lane %u polarity %u, pos %u\n", i,
+ buscfg->bus.csi2.lanecfg.data[i].pol,
+ buscfg->bus.csi2.lanecfg.data[i].pos);
+ }
+ /*
+ * FIXME: now we assume the CRC is always there. Implement a way to
+ * obtain this information from the sensor. Frame descriptors, perhaps?
+ */
+ buscfg->bus.csi2.crc = 1;
+}
+
+static void isp_parse_of_csi1_endpoint(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct isp_bus_cfg *buscfg)
+{
+ buscfg->bus.ccp2.lanecfg.clk.pos = vep->bus.mipi_csi1.clock_lane;
+ buscfg->bus.ccp2.lanecfg.clk.pol = vep->bus.mipi_csi1.lane_polarity[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.clk.pol,
+ buscfg->bus.ccp2.lanecfg.clk.pos);
+
+ buscfg->bus.ccp2.lanecfg.data[0].pos = vep->bus.mipi_csi1.data_lane;
+ buscfg->bus.ccp2.lanecfg.data[0].pol =
+ vep->bus.mipi_csi1.lane_polarity[1];
+
+ dev_dbg(dev, "data lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.data[0].pol,
+ buscfg->bus.ccp2.lanecfg.data[0].pos);
+
+ buscfg->bus.ccp2.strobe_clk_pol = vep->bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
+ buscfg->bus.ccp2.ccp2_mode = vep->bus_type == V4L2_MBUS_CCP2;
+ buscfg->bus.ccp2.vp_clk_pol = 1;
+
+ buscfg->bus.ccp2.crc = 1;
+}
+
+static int isp_alloc_isd(struct isp_async_subdev **isd,
+ struct isp_bus_cfg **buscfg)
+{
+ struct isp_async_subdev *__isd;
+
+ __isd = kzalloc(sizeof(*__isd), GFP_KERNEL);
+ if (!__isd)
+ return -ENOMEM;
+
+ *isd = __isd;
+ *buscfg = &__isd->bus;
+
+ return 0;
+}
+
+static struct {
+ u32 phy;
+ u32 csi2_if;
+ u32 csi1_if;
+} isp_bus_interfaces[2] = {
+ { ISP_OF_PHY_CSIPHY1,
+ ISP_INTERFACE_CSI2C_PHY1, ISP_INTERFACE_CCP2B_PHY1 },
+ { ISP_OF_PHY_CSIPHY2,
+ ISP_INTERFACE_CSI2A_PHY2, ISP_INTERFACE_CCP2B_PHY2 },
+};
+
+static int isp_parse_of_endpoints(struct isp_device *isp)
+{
+ struct fwnode_handle *ep;
+ struct isp_async_subdev *isd = NULL;
+ struct isp_bus_cfg *buscfg;
+ unsigned int i;
+
+ ep = fwnode_graph_get_endpoint_by_id(
+ dev_fwnode(isp->dev), ISP_OF_PHY_PARALLEL, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+
+ if (ep) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
+ int ret;
+
+ dev_dbg(isp->dev, "parsing parallel interface\n");
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+
+ if (!ret) {
+ ret = isp_alloc_isd(&isd, &buscfg);
+ if (ret)
+ return ret;
+ }
+
+ if (!ret) {
+ isp_parse_of_parallel_endpoint(isp->dev, &vep, buscfg);
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &isp->notifier, ep, &isd->asd);
+ }
+
+ fwnode_handle_put(ep);
+ if (ret)
+ kfree(isd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) {
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(
+ dev_fwnode(isp->dev), isp_bus_interfaces[i].phy, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+
+ if (!ep)
+ continue;
+
+ dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i,
+ to_of_node(ep));
+
+ ret = isp_alloc_isd(&isd, &buscfg);
+ if (ret)
+ return ret;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (!ret) {
+ buscfg->interface = isp_bus_interfaces[i].csi2_if;
+ isp_parse_of_csi2_endpoint(isp->dev, &vep, buscfg);
+ } else if (ret == -ENXIO) {
+ vep = (struct v4l2_fwnode_endpoint)
+ { .bus_type = V4L2_MBUS_CSI1 };
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+
+ if (ret == -ENXIO) {
+ vep = (struct v4l2_fwnode_endpoint)
+ { .bus_type = V4L2_MBUS_CCP2 };
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ }
+ if (!ret) {
+ buscfg->interface =
+ isp_bus_interfaces[i].csi1_if;
+ isp_parse_of_csi1_endpoint(isp->dev, &vep,
+ buscfg);
+ }
+ }
+
+ if (!ret)
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(
+ &isp->notifier, ep, &isd->asd);
+
+ fwnode_handle_put(ep);
+ if (ret)
+ kfree(isd);
+ }
+
+ return 0;
+}
+
static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
.complete = isp_subdev_notifier_complete,
};
@@ -2223,14 +2296,12 @@ static int isp_probe(struct platform_device *pdev)
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
v4l2_async_notifier_init(&isp->notifier);
+ isp->dev = &pdev->dev;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(
- &pdev->dev, &isp->notifier, sizeof(struct isp_async_subdev),
- isp_fwnode_parse);
+ ret = isp_parse_of_endpoints(isp);
if (ret < 0)
goto error;
- isp->dev = &pdev->dev;
isp->ref_count = 0;
ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
@@ -2324,7 +2395,6 @@ static int isp_probe(struct platform_device *pdev)
/* Interrupt */
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
- dev_err(isp->dev, "No IRQ resource\n");
ret = -ENODEV;
goto error_iommu;
}
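
The isp.c refactor above replaces the v4l2_async_notifier_parse_fwnode_endpoints() callback with an explicit isp_parse_of_endpoints() that probes each serial port with one bus type and retries on -ENXIO. The retry idea in isolation, as a hedged sketch (the "sketch_" helper is illustrative; the real driver also copies the parsed endpoint into its isp_bus_cfg, as the helpers above do):

	#include <linux/kernel.h>
	#include <media/v4l2-fwnode.h>

	/* Try CSI-2 D-PHY first, then fall back to CSI-1 and CCP2; -ENXIO from
	 * v4l2_fwnode_endpoint_parse() means "wrong bus type, try the next one". */
	static int sketch_parse_serial_ep(struct fwnode_handle *ep)
	{
		static const enum v4l2_mbus_type types[] = {
			V4L2_MBUS_CSI2_DPHY, V4L2_MBUS_CSI1, V4L2_MBUS_CCP2,
		};
		int ret = -ENXIO;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(types) && ret == -ENXIO; i++) {
			struct v4l2_fwnode_endpoint vep = { .bus_type = types[i] };

			ret = v4l2_fwnode_endpoint_parse(ep, &vep);
			/* on success a real driver would copy vep.bus.* out here */
		}

		return ret;
	}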
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 1ba8a5ba343f..e2f336c715a4 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -2602,6 +2602,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
int ret;
/* Register the subdev and video node. */
+ ccdc->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index efca45bb02c8..d0a49cdfd22d 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1031,6 +1031,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
int ret;
/* Register the subdev and video nodes. */
+ ccp2->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index e85917f4a50c..fd493c5e4e24 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1198,6 +1198,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
int ret;
/* Register the subdev and video nodes. */
+ csi2->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 40e22400cf5e..97d660606d98 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2225,6 +2225,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
int ret;
/* Register the subdev and video nodes. */
+ prev->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &prev->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index 38e2b99b3f10..86b6ebb0438d 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -45,7 +45,7 @@
#define ISPCCP2_REVISION (0x000)
#define ISPCCP2_SYSCONFIG (0x004)
-#define ISPCCP2_SYSCONFIG_SOFT_RESET (1 << 1)
+#define ISPCCP2_SYSCONFIG_SOFT_RESET BIT(1)
#define ISPCCP2_SYSCONFIG_AUTO_IDLE 0x1
#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT 12
#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_FORCE \
@@ -55,44 +55,44 @@
#define ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART \
(0x2 << ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
#define ISPCCP2_SYSSTATUS (0x008)
-#define ISPCCP2_SYSSTATUS_RESET_DONE (1 << 0)
+#define ISPCCP2_SYSSTATUS_RESET_DONE BIT(0)
#define ISPCCP2_LC01_IRQENABLE (0x00C)
#define ISPCCP2_LC01_IRQSTATUS (0x010)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ (1 << 11)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_LE_IRQ (1 << 10)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_LS_IRQ (1 << 9)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FE_IRQ (1 << 8)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_COUNT_IRQ (1 << 7)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ (1 << 5)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ (1 << 4)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ (1 << 3)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ (1 << 2)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ (1 << 1)
-#define ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ (1 << 0)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ BIT(11)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LE_IRQ BIT(10)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_LS_IRQ BIT(9)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FE_IRQ BIT(8)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_COUNT_IRQ BIT(7)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ BIT(5)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ BIT(4)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ BIT(3)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ BIT(2)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ BIT(1)
+#define ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ BIT(0)
#define ISPCCP2_LC23_IRQENABLE (0x014)
#define ISPCCP2_LC23_IRQSTATUS (0x018)
#define ISPCCP2_LCM_IRQENABLE (0x02C)
-#define ISPCCP2_LCM_IRQSTATUS_EOF_IRQ (1 << 0)
-#define ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ (1 << 1)
+#define ISPCCP2_LCM_IRQSTATUS_EOF_IRQ BIT(0)
+#define ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ BIT(1)
#define ISPCCP2_LCM_IRQSTATUS (0x030)
#define ISPCCP2_CTRL (0x040)
-#define ISPCCP2_CTRL_IF_EN (1 << 0)
-#define ISPCCP2_CTRL_PHY_SEL (1 << 1)
+#define ISPCCP2_CTRL_IF_EN BIT(0)
+#define ISPCCP2_CTRL_PHY_SEL BIT(1)
#define ISPCCP2_CTRL_PHY_SEL_CLOCK (0 << 1)
#define ISPCCP2_CTRL_PHY_SEL_STROBE (1 << 1)
#define ISPCCP2_CTRL_PHY_SEL_MASK 0x1
#define ISPCCP2_CTRL_PHY_SEL_SHIFT 1
-#define ISPCCP2_CTRL_IO_OUT_SEL (1 << 2)
+#define ISPCCP2_CTRL_IO_OUT_SEL BIT(2)
#define ISPCCP2_CTRL_IO_OUT_SEL_MASK 0x1
#define ISPCCP2_CTRL_IO_OUT_SEL_SHIFT 2
-#define ISPCCP2_CTRL_MODE (1 << 4)
-#define ISPCCP2_CTRL_VP_CLK_FORCE_ON (1 << 9)
-#define ISPCCP2_CTRL_INV (1 << 10)
+#define ISPCCP2_CTRL_MODE BIT(4)
+#define ISPCCP2_CTRL_VP_CLK_FORCE_ON BIT(9)
+#define ISPCCP2_CTRL_INV BIT(10)
#define ISPCCP2_CTRL_INV_MASK 0x1
#define ISPCCP2_CTRL_INV_SHIFT 10
-#define ISPCCP2_CTRL_VP_ONLY_EN (1 << 11)
-#define ISPCCP2_CTRL_VP_CLK_POL (1 << 12)
+#define ISPCCP2_CTRL_VP_ONLY_EN BIT(11)
+#define ISPCCP2_CTRL_VP_CLK_POL BIT(12)
#define ISPCCP2_CTRL_VP_CLK_POL_MASK 0x1
#define ISPCCP2_CTRL_VP_CLK_POL_SHIFT 12
#define ISPCCP2_CTRL_VPCLK_DIV_SHIFT 15
@@ -102,12 +102,12 @@
#define ISPCCP2_DBG (0x044)
#define ISPCCP2_GNQ (0x048)
#define ISPCCP2_LCx_CTRL(x) ((0x050)+0x30*(x))
-#define ISPCCP2_LCx_CTRL_CHAN_EN (1 << 0)
-#define ISPCCP2_LCx_CTRL_CRC_EN (1 << 19)
+#define ISPCCP2_LCx_CTRL_CHAN_EN BIT(0)
+#define ISPCCP2_LCx_CTRL_CRC_EN BIT(19)
#define ISPCCP2_LCx_CTRL_CRC_MASK 0x1
#define ISPCCP2_LCx_CTRL_CRC_SHIFT 2
#define ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0 19
-#define ISPCCP2_LCx_CTRL_REGION_EN (1 << 1)
+#define ISPCCP2_LCx_CTRL_REGION_EN BIT(1)
#define ISPCCP2_LCx_CTRL_REGION_MASK 0x1
#define ISPCCP2_LCx_CTRL_REGION_SHIFT 1
#define ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0 0x3f
@@ -127,8 +127,8 @@
#define ISPCCP2_LCx_DAT_PONG_ADDR(x) ((0x074)+0x30*(x))
#define ISPCCP2_LCx_DAT_OFST(x) ((0x078)+0x30*(x))
#define ISPCCP2_LCM_CTRL (0x1D0)
-#define ISPCCP2_LCM_CTRL_CHAN_EN (1 << 0)
-#define ISPCCP2_LCM_CTRL_DST_PORT (1 << 2)
+#define ISPCCP2_LCM_CTRL_CHAN_EN BIT(0)
+#define ISPCCP2_LCM_CTRL_DST_PORT BIT(2)
#define ISPCCP2_LCM_CTRL_DST_PORT_SHIFT 2
#define ISPCCP2_LCM_CTRL_READ_THROTTLE_SHIFT 3
#define ISPCCP2_LCM_CTRL_READ_THROTTLE_MASK 0x11
@@ -138,8 +138,8 @@
#define ISPCCP2_LCM_CTRL_SRC_FORMAT_MASK 0x7
#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT 20
#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_MASK 0x3
-#define ISPCCP2_LCM_CTRL_SRC_DPCM_PRED (1 << 22)
-#define ISPCCP2_LCM_CTRL_SRC_PACK (1 << 23)
+#define ISPCCP2_LCM_CTRL_SRC_DPCM_PRED BIT(22)
+#define ISPCCP2_LCM_CTRL_SRC_PACK BIT(23)
#define ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT 24
#define ISPCCP2_LCM_CTRL_DST_FORMAT_MASK 0x7
#define ISPCCP2_LCM_VSIZE (0x1D4)
@@ -201,19 +201,19 @@
/* SBL */
#define ISPSBL_PCR 0x4
-#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF (1 << 16)
-#define ISPSBL_PCR_H3A_AF_WBL_OVF (1 << 17)
-#define ISPSBL_PCR_RSZ4_WBL_OVF (1 << 18)
-#define ISPSBL_PCR_RSZ3_WBL_OVF (1 << 19)
-#define ISPSBL_PCR_RSZ2_WBL_OVF (1 << 20)
-#define ISPSBL_PCR_RSZ1_WBL_OVF (1 << 21)
-#define ISPSBL_PCR_PRV_WBL_OVF (1 << 22)
-#define ISPSBL_PCR_CCDC_WBL_OVF (1 << 23)
-#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF (1 << 24)
-#define ISPSBL_PCR_CSIA_WBL_OVF (1 << 25)
-#define ISPSBL_PCR_CSIB_WBL_OVF (1 << 26)
+#define ISPSBL_PCR_H3A_AEAWB_WBL_OVF BIT(16)
+#define ISPSBL_PCR_H3A_AF_WBL_OVF BIT(17)
+#define ISPSBL_PCR_RSZ4_WBL_OVF BIT(18)
+#define ISPSBL_PCR_RSZ3_WBL_OVF BIT(19)
+#define ISPSBL_PCR_RSZ2_WBL_OVF BIT(20)
+#define ISPSBL_PCR_RSZ1_WBL_OVF BIT(21)
+#define ISPSBL_PCR_PRV_WBL_OVF BIT(22)
+#define ISPSBL_PCR_CCDC_WBL_OVF BIT(23)
+#define ISPSBL_PCR_CCDCPRV_2_RSZ_OVF BIT(24)
+#define ISPSBL_PCR_CSIA_WBL_OVF BIT(25)
+#define ISPSBL_PCR_CSIB_WBL_OVF BIT(26)
#define ISPSBL_CCDC_WR_0 (0x028)
-#define ISPSBL_CCDC_WR_0_DATA_READY (1 << 21)
+#define ISPSBL_CCDC_WR_0_DATA_READY BIT(21)
#define ISPSBL_CCDC_WR_1 (0x02C)
#define ISPSBL_CCDC_WR_2 (0x030)
#define ISPSBL_CCDC_WR_3 (0x034)
@@ -366,16 +366,16 @@
#define ISP_INT_CLR 0xFF113F11
#define ISPPRV_PCR_EN 1
-#define ISPPRV_PCR_BUSY (1 << 1)
-#define ISPPRV_PCR_SOURCE (1 << 2)
-#define ISPPRV_PCR_ONESHOT (1 << 3)
-#define ISPPRV_PCR_WIDTH (1 << 4)
-#define ISPPRV_PCR_INVALAW (1 << 5)
-#define ISPPRV_PCR_DRKFEN (1 << 6)
-#define ISPPRV_PCR_DRKFCAP (1 << 7)
-#define ISPPRV_PCR_HMEDEN (1 << 8)
-#define ISPPRV_PCR_NFEN (1 << 9)
-#define ISPPRV_PCR_CFAEN (1 << 10)
+#define ISPPRV_PCR_BUSY BIT(1)
+#define ISPPRV_PCR_SOURCE BIT(2)
+#define ISPPRV_PCR_ONESHOT BIT(3)
+#define ISPPRV_PCR_WIDTH BIT(4)
+#define ISPPRV_PCR_INVALAW BIT(5)
+#define ISPPRV_PCR_DRKFEN BIT(6)
+#define ISPPRV_PCR_DRKFCAP BIT(7)
+#define ISPPRV_PCR_HMEDEN BIT(8)
+#define ISPPRV_PCR_NFEN BIT(9)
+#define ISPPRV_PCR_CFAEN BIT(10)
#define ISPPRV_PCR_CFAFMT_SHIFT 11
#define ISPPRV_PCR_CFAFMT_MASK 0x7800
#define ISPPRV_PCR_CFAFMT_BAYER (0 << 11)
@@ -384,22 +384,22 @@
#define ISPPRV_PCR_CFAFMT_DNSPL (3 << 11)
#define ISPPRV_PCR_CFAFMT_HONEYCOMB (4 << 11)
#define ISPPRV_PCR_CFAFMT_RRGGBBFOVEON (5 << 11)
-#define ISPPRV_PCR_YNENHEN (1 << 15)
-#define ISPPRV_PCR_SUPEN (1 << 16)
+#define ISPPRV_PCR_YNENHEN BIT(15)
+#define ISPPRV_PCR_SUPEN BIT(16)
#define ISPPRV_PCR_YCPOS_SHIFT 17
#define ISPPRV_PCR_YCPOS_YCrYCb (0 << 17)
#define ISPPRV_PCR_YCPOS_YCbYCr (1 << 17)
#define ISPPRV_PCR_YCPOS_CbYCrY (2 << 17)
#define ISPPRV_PCR_YCPOS_CrYCbY (3 << 17)
-#define ISPPRV_PCR_RSZPORT (1 << 19)
-#define ISPPRV_PCR_SDRPORT (1 << 20)
-#define ISPPRV_PCR_SCOMP_EN (1 << 21)
+#define ISPPRV_PCR_RSZPORT BIT(19)
+#define ISPPRV_PCR_SDRPORT BIT(20)
+#define ISPPRV_PCR_SCOMP_EN BIT(21)
#define ISPPRV_PCR_SCOMP_SFT_SHIFT (22)
#define ISPPRV_PCR_SCOMP_SFT_MASK (7 << 22)
-#define ISPPRV_PCR_GAMMA_BYPASS (1 << 26)
-#define ISPPRV_PCR_DCOREN (1 << 27)
-#define ISPPRV_PCR_DCCOUP (1 << 28)
-#define ISPPRV_PCR_DRK_FAIL (1 << 31)
+#define ISPPRV_PCR_GAMMA_BYPASS BIT(26)
+#define ISPPRV_PCR_DCOREN BIT(27)
+#define ISPPRV_PCR_DCCOUP BIT(28)
+#define ISPPRV_PCR_DRK_FAIL BIT(31)
#define ISPPRV_HORZ_INFO_EPH_SHIFT 0
#define ISPPRV_HORZ_INFO_EPH_MASK 0x3fff
@@ -423,8 +423,8 @@
#define ISPPRV_AVE_ODDDIST_4 0x3
#define ISPPRV_HMED_THRESHOLD_SHIFT 0
-#define ISPPRV_HMED_EVENDIST (1 << 8)
-#define ISPPRV_HMED_ODDDIST (1 << 9)
+#define ISPPRV_HMED_EVENDIST BIT(8)
+#define ISPPRV_HMED_ODDDIST BIT(9)
#define ISPPRV_WBGAIN_COEF0_SHIFT 0
#define ISPPRV_WBGAIN_COEF1_SHIFT 8
@@ -517,8 +517,8 @@
/* Define bit fields within selected registers */
#define ISP_REVISION_SHIFT 0
-#define ISP_SYSCONFIG_AUTOIDLE (1 << 0)
-#define ISP_SYSCONFIG_SOFTRESET (1 << 1)
+#define ISP_SYSCONFIG_AUTOIDLE BIT(0)
+#define ISP_SYSCONFIG_SOFTRESET BIT(1)
#define ISP_SYSCONFIG_MIDLEMODE_SHIFT 12
#define ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY 0x0
#define ISP_SYSCONFIG_MIDLEMODE_NOSTANBY 0x1
@@ -526,68 +526,68 @@
#define ISP_SYSSTATUS_RESETDONE 0
-#define IRQ0ENABLE_CSIA_IRQ (1 << 0)
-#define IRQ0ENABLE_CSIC_IRQ (1 << 1)
-#define IRQ0ENABLE_CCP2_LCM_IRQ (1 << 3)
-#define IRQ0ENABLE_CCP2_LC0_IRQ (1 << 4)
-#define IRQ0ENABLE_CCP2_LC1_IRQ (1 << 5)
-#define IRQ0ENABLE_CCP2_LC2_IRQ (1 << 6)
-#define IRQ0ENABLE_CCP2_LC3_IRQ (1 << 7)
+#define IRQ0ENABLE_CSIA_IRQ BIT(0)
+#define IRQ0ENABLE_CSIC_IRQ BIT(1)
+#define IRQ0ENABLE_CCP2_LCM_IRQ BIT(3)
+#define IRQ0ENABLE_CCP2_LC0_IRQ BIT(4)
+#define IRQ0ENABLE_CCP2_LC1_IRQ BIT(5)
+#define IRQ0ENABLE_CCP2_LC2_IRQ BIT(6)
+#define IRQ0ENABLE_CCP2_LC3_IRQ BIT(7)
#define IRQ0ENABLE_CSIB_IRQ (IRQ0ENABLE_CCP2_LCM_IRQ | \
IRQ0ENABLE_CCP2_LC0_IRQ | \
IRQ0ENABLE_CCP2_LC1_IRQ | \
IRQ0ENABLE_CCP2_LC2_IRQ | \
IRQ0ENABLE_CCP2_LC3_IRQ)
-#define IRQ0ENABLE_CCDC_VD0_IRQ (1 << 8)
-#define IRQ0ENABLE_CCDC_VD1_IRQ (1 << 9)
-#define IRQ0ENABLE_CCDC_VD2_IRQ (1 << 10)
-#define IRQ0ENABLE_CCDC_ERR_IRQ (1 << 11)
-#define IRQ0ENABLE_H3A_AF_DONE_IRQ (1 << 12)
-#define IRQ0ENABLE_H3A_AWB_DONE_IRQ (1 << 13)
-#define IRQ0ENABLE_HIST_DONE_IRQ (1 << 16)
-#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ (1 << 17)
-#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
-#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
-#define IRQ0ENABLE_PRV_DONE_IRQ (1 << 20)
-#define IRQ0ENABLE_RSZ_DONE_IRQ (1 << 24)
-#define IRQ0ENABLE_OVF_IRQ (1 << 25)
-#define IRQ0ENABLE_PING_IRQ (1 << 26)
-#define IRQ0ENABLE_PONG_IRQ (1 << 27)
-#define IRQ0ENABLE_MMU_ERR_IRQ (1 << 28)
-#define IRQ0ENABLE_OCP_ERR_IRQ (1 << 29)
-#define IRQ0ENABLE_SEC_ERR_IRQ (1 << 30)
-#define IRQ0ENABLE_HS_VS_IRQ (1 << 31)
-
-#define IRQ0STATUS_CSIA_IRQ (1 << 0)
-#define IRQ0STATUS_CSI2C_IRQ (1 << 1)
-#define IRQ0STATUS_CCP2_LCM_IRQ (1 << 3)
-#define IRQ0STATUS_CCP2_LC0_IRQ (1 << 4)
+#define IRQ0ENABLE_CCDC_VD0_IRQ BIT(8)
+#define IRQ0ENABLE_CCDC_VD1_IRQ BIT(9)
+#define IRQ0ENABLE_CCDC_VD2_IRQ BIT(10)
+#define IRQ0ENABLE_CCDC_ERR_IRQ BIT(11)
+#define IRQ0ENABLE_H3A_AF_DONE_IRQ BIT(12)
+#define IRQ0ENABLE_H3A_AWB_DONE_IRQ BIT(13)
+#define IRQ0ENABLE_HIST_DONE_IRQ BIT(16)
+#define IRQ0ENABLE_CCDC_LSC_DONE_IRQ BIT(17)
+#define IRQ0ENABLE_CCDC_LSC_PREF_COMP_IRQ BIT(18)
+#define IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ BIT(19)
+#define IRQ0ENABLE_PRV_DONE_IRQ BIT(20)
+#define IRQ0ENABLE_RSZ_DONE_IRQ BIT(24)
+#define IRQ0ENABLE_OVF_IRQ BIT(25)
+#define IRQ0ENABLE_PING_IRQ BIT(26)
+#define IRQ0ENABLE_PONG_IRQ BIT(27)
+#define IRQ0ENABLE_MMU_ERR_IRQ BIT(28)
+#define IRQ0ENABLE_OCP_ERR_IRQ BIT(29)
+#define IRQ0ENABLE_SEC_ERR_IRQ BIT(30)
+#define IRQ0ENABLE_HS_VS_IRQ BIT(31)
+
+#define IRQ0STATUS_CSIA_IRQ BIT(0)
+#define IRQ0STATUS_CSI2C_IRQ BIT(1)
+#define IRQ0STATUS_CCP2_LCM_IRQ BIT(3)
+#define IRQ0STATUS_CCP2_LC0_IRQ BIT(4)
#define IRQ0STATUS_CSIB_IRQ (IRQ0STATUS_CCP2_LCM_IRQ | \
IRQ0STATUS_CCP2_LC0_IRQ)
-#define IRQ0STATUS_CSIB_LC1_IRQ (1 << 5)
-#define IRQ0STATUS_CSIB_LC2_IRQ (1 << 6)
-#define IRQ0STATUS_CSIB_LC3_IRQ (1 << 7)
-#define IRQ0STATUS_CCDC_VD0_IRQ (1 << 8)
-#define IRQ0STATUS_CCDC_VD1_IRQ (1 << 9)
-#define IRQ0STATUS_CCDC_VD2_IRQ (1 << 10)
-#define IRQ0STATUS_CCDC_ERR_IRQ (1 << 11)
-#define IRQ0STATUS_H3A_AF_DONE_IRQ (1 << 12)
-#define IRQ0STATUS_H3A_AWB_DONE_IRQ (1 << 13)
-#define IRQ0STATUS_HIST_DONE_IRQ (1 << 16)
-#define IRQ0STATUS_CCDC_LSC_DONE_IRQ (1 << 17)
-#define IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ (1 << 18)
-#define IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ (1 << 19)
-#define IRQ0STATUS_PRV_DONE_IRQ (1 << 20)
-#define IRQ0STATUS_RSZ_DONE_IRQ (1 << 24)
-#define IRQ0STATUS_OVF_IRQ (1 << 25)
-#define IRQ0STATUS_PING_IRQ (1 << 26)
-#define IRQ0STATUS_PONG_IRQ (1 << 27)
-#define IRQ0STATUS_MMU_ERR_IRQ (1 << 28)
-#define IRQ0STATUS_OCP_ERR_IRQ (1 << 29)
-#define IRQ0STATUS_SEC_ERR_IRQ (1 << 30)
-#define IRQ0STATUS_HS_VS_IRQ (1 << 31)
+#define IRQ0STATUS_CSIB_LC1_IRQ BIT(5)
+#define IRQ0STATUS_CSIB_LC2_IRQ BIT(6)
+#define IRQ0STATUS_CSIB_LC3_IRQ BIT(7)
+#define IRQ0STATUS_CCDC_VD0_IRQ BIT(8)
+#define IRQ0STATUS_CCDC_VD1_IRQ BIT(9)
+#define IRQ0STATUS_CCDC_VD2_IRQ BIT(10)
+#define IRQ0STATUS_CCDC_ERR_IRQ BIT(11)
+#define IRQ0STATUS_H3A_AF_DONE_IRQ BIT(12)
+#define IRQ0STATUS_H3A_AWB_DONE_IRQ BIT(13)
+#define IRQ0STATUS_HIST_DONE_IRQ BIT(16)
+#define IRQ0STATUS_CCDC_LSC_DONE_IRQ BIT(17)
+#define IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ BIT(18)
+#define IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ BIT(19)
+#define IRQ0STATUS_PRV_DONE_IRQ BIT(20)
+#define IRQ0STATUS_RSZ_DONE_IRQ BIT(24)
+#define IRQ0STATUS_OVF_IRQ BIT(25)
+#define IRQ0STATUS_PING_IRQ BIT(26)
+#define IRQ0STATUS_PONG_IRQ BIT(27)
+#define IRQ0STATUS_MMU_ERR_IRQ BIT(28)
+#define IRQ0STATUS_OCP_ERR_IRQ BIT(29)
+#define IRQ0STATUS_SEC_ERR_IRQ BIT(30)
+#define IRQ0STATUS_HS_VS_IRQ BIT(31)
#define TCTRL_GRESET_LEN 0
@@ -607,20 +607,20 @@
#define ISPCTRL_PAR_BRIDGE_MASK (0x3 << 2)
#define ISPCTRL_PAR_CLK_POL_SHIFT 4
-#define ISPCTRL_PAR_CLK_POL_INV (1 << 4)
-#define ISPCTRL_PING_PONG_EN (1 << 5)
+#define ISPCTRL_PAR_CLK_POL_INV BIT(4)
+#define ISPCTRL_PING_PONG_EN BIT(5)
#define ISPCTRL_SHIFT_SHIFT 6
#define ISPCTRL_SHIFT_0 (0x0 << 6)
#define ISPCTRL_SHIFT_2 (0x1 << 6)
#define ISPCTRL_SHIFT_4 (0x2 << 6)
#define ISPCTRL_SHIFT_MASK (0x3 << 6)
-#define ISPCTRL_CCDC_CLK_EN (1 << 8)
-#define ISPCTRL_SCMP_CLK_EN (1 << 9)
-#define ISPCTRL_H3A_CLK_EN (1 << 10)
-#define ISPCTRL_HIST_CLK_EN (1 << 11)
-#define ISPCTRL_PREV_CLK_EN (1 << 12)
-#define ISPCTRL_RSZ_CLK_EN (1 << 13)
+#define ISPCTRL_CCDC_CLK_EN BIT(8)
+#define ISPCTRL_SCMP_CLK_EN BIT(9)
+#define ISPCTRL_H3A_CLK_EN BIT(10)
+#define ISPCTRL_HIST_CLK_EN BIT(11)
+#define ISPCTRL_PREV_CLK_EN BIT(12)
+#define ISPCTRL_RSZ_CLK_EN BIT(13)
#define ISPCTRL_SYNC_DETECT_SHIFT 14
#define ISPCTRL_SYNC_DETECT_HSFALL (0x0 << ISPCTRL_SYNC_DETECT_SHIFT)
#define ISPCTRL_SYNC_DETECT_HSRISE (0x1 << ISPCTRL_SYNC_DETECT_SHIFT)
@@ -628,17 +628,17 @@
#define ISPCTRL_SYNC_DETECT_VSRISE (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
#define ISPCTRL_SYNC_DETECT_MASK (0x3 << ISPCTRL_SYNC_DETECT_SHIFT)
-#define ISPCTRL_CCDC_RAM_EN (1 << 16)
-#define ISPCTRL_PREV_RAM_EN (1 << 17)
-#define ISPCTRL_SBL_RD_RAM_EN (1 << 18)
-#define ISPCTRL_SBL_WR1_RAM_EN (1 << 19)
-#define ISPCTRL_SBL_WR0_RAM_EN (1 << 20)
-#define ISPCTRL_SBL_AUTOIDLE (1 << 21)
-#define ISPCTRL_SBL_SHARED_WPORTC (1 << 26)
-#define ISPCTRL_SBL_SHARED_RPORTA (1 << 27)
-#define ISPCTRL_SBL_SHARED_RPORTB (1 << 28)
-#define ISPCTRL_JPEG_FLUSH (1 << 30)
-#define ISPCTRL_CCDC_FLUSH (1 << 31)
+#define ISPCTRL_CCDC_RAM_EN BIT(16)
+#define ISPCTRL_PREV_RAM_EN BIT(17)
+#define ISPCTRL_SBL_RD_RAM_EN BIT(18)
+#define ISPCTRL_SBL_WR1_RAM_EN BIT(19)
+#define ISPCTRL_SBL_WR0_RAM_EN BIT(20)
+#define ISPCTRL_SBL_AUTOIDLE BIT(21)
+#define ISPCTRL_SBL_SHARED_WPORTC BIT(26)
+#define ISPCTRL_SBL_SHARED_RPORTA BIT(27)
+#define ISPCTRL_SBL_SHARED_RPORTB BIT(28)
+#define ISPCTRL_JPEG_FLUSH BIT(30)
+#define ISPCTRL_CCDC_FLUSH BIT(31)
#define ISPSECURE_SECUREMODE 0
@@ -655,20 +655,20 @@
#define ISPTCTRL_CTRL_DIVC_SHIFT 10
#define ISPTCTRL_CTRL_DIVC_NOCLOCK (0x0 << 10)
-#define ISPTCTRL_CTRL_SHUTEN (1 << 21)
-#define ISPTCTRL_CTRL_PSTRBEN (1 << 22)
-#define ISPTCTRL_CTRL_STRBEN (1 << 23)
-#define ISPTCTRL_CTRL_SHUTPOL (1 << 24)
-#define ISPTCTRL_CTRL_STRBPSTRBPOL (1 << 26)
+#define ISPTCTRL_CTRL_SHUTEN BIT(21)
+#define ISPTCTRL_CTRL_PSTRBEN BIT(22)
+#define ISPTCTRL_CTRL_STRBEN BIT(23)
+#define ISPTCTRL_CTRL_SHUTPOL BIT(24)
+#define ISPTCTRL_CTRL_STRBPSTRBPOL BIT(26)
#define ISPTCTRL_CTRL_INSEL_SHIFT 27
#define ISPTCTRL_CTRL_INSEL_PARALLEL (0x0 << 27)
#define ISPTCTRL_CTRL_INSEL_CSIA (0x1 << 27)
#define ISPTCTRL_CTRL_INSEL_CSIB (0x2 << 27)
-#define ISPTCTRL_CTRL_GRESETEn (1 << 29)
-#define ISPTCTRL_CTRL_GRESETPOL (1 << 30)
-#define ISPTCTRL_CTRL_GRESETDIR (1 << 31)
+#define ISPTCTRL_CTRL_GRESETEn BIT(29)
+#define ISPTCTRL_CTRL_GRESETPOL BIT(30)
+#define ISPTCTRL_CTRL_GRESETDIR BIT(31)
#define ISPTCTRL_FRAME_SHUT_SHIFT 0
#define ISPTCTRL_FRAME_PSTRB_SHIFT 6
@@ -679,33 +679,33 @@
#define ISPCCDC_PID_TID_SHIFT 16
#define ISPCCDC_PCR_EN 1
-#define ISPCCDC_PCR_BUSY (1 << 1)
+#define ISPCCDC_PCR_BUSY BIT(1)
#define ISPCCDC_SYN_MODE_VDHDOUT 0x1
-#define ISPCCDC_SYN_MODE_FLDOUT (1 << 1)
-#define ISPCCDC_SYN_MODE_VDPOL (1 << 2)
-#define ISPCCDC_SYN_MODE_HDPOL (1 << 3)
-#define ISPCCDC_SYN_MODE_FLDPOL (1 << 4)
-#define ISPCCDC_SYN_MODE_EXWEN (1 << 5)
-#define ISPCCDC_SYN_MODE_DATAPOL (1 << 6)
-#define ISPCCDC_SYN_MODE_FLDMODE (1 << 7)
+#define ISPCCDC_SYN_MODE_FLDOUT BIT(1)
+#define ISPCCDC_SYN_MODE_VDPOL BIT(2)
+#define ISPCCDC_SYN_MODE_HDPOL BIT(3)
+#define ISPCCDC_SYN_MODE_FLDPOL BIT(4)
+#define ISPCCDC_SYN_MODE_EXWEN BIT(5)
+#define ISPCCDC_SYN_MODE_DATAPOL BIT(6)
+#define ISPCCDC_SYN_MODE_FLDMODE BIT(7)
#define ISPCCDC_SYN_MODE_DATSIZ_MASK (0x7 << 8)
#define ISPCCDC_SYN_MODE_DATSIZ_8_16 (0x0 << 8)
#define ISPCCDC_SYN_MODE_DATSIZ_12 (0x4 << 8)
#define ISPCCDC_SYN_MODE_DATSIZ_11 (0x5 << 8)
#define ISPCCDC_SYN_MODE_DATSIZ_10 (0x6 << 8)
#define ISPCCDC_SYN_MODE_DATSIZ_8 (0x7 << 8)
-#define ISPCCDC_SYN_MODE_PACK8 (1 << 11)
+#define ISPCCDC_SYN_MODE_PACK8 BIT(11)
#define ISPCCDC_SYN_MODE_INPMOD_MASK (3 << 12)
#define ISPCCDC_SYN_MODE_INPMOD_RAW (0 << 12)
#define ISPCCDC_SYN_MODE_INPMOD_YCBCR16 (1 << 12)
#define ISPCCDC_SYN_MODE_INPMOD_YCBCR8 (2 << 12)
-#define ISPCCDC_SYN_MODE_LPF (1 << 14)
-#define ISPCCDC_SYN_MODE_FLDSTAT (1 << 15)
-#define ISPCCDC_SYN_MODE_VDHDEN (1 << 16)
-#define ISPCCDC_SYN_MODE_WEN (1 << 17)
-#define ISPCCDC_SYN_MODE_VP2SDR (1 << 18)
-#define ISPCCDC_SYN_MODE_SDR2RSZ (1 << 19)
+#define ISPCCDC_SYN_MODE_LPF BIT(14)
+#define ISPCCDC_SYN_MODE_FLDSTAT BIT(15)
+#define ISPCCDC_SYN_MODE_VDHDEN BIT(16)
+#define ISPCCDC_SYN_MODE_WEN BIT(17)
+#define ISPCCDC_SYN_MODE_VP2SDR BIT(18)
+#define ISPCCDC_SYN_MODE_SDR2RSZ BIT(19)
#define ISPCCDC_HD_VD_WID_VDW_SHIFT 0
#define ISPCCDC_HD_VD_WID_HDW_SHIFT 16
@@ -731,7 +731,7 @@
#define ISPCCDC_HSIZE_OFF_SHIFT 0
-#define ISPCCDC_SDOFST_FIINV (1 << 14)
+#define ISPCCDC_SDOFST_FIINV BIT(14)
#define ISPCCDC_SDOFST_FOFST_SHIFT 12
#define ISPCCDC_SDOFST_FOFST_MASK (3 << 12)
#define ISPCCDC_SDOFST_LOFST3_SHIFT 0
@@ -743,7 +743,7 @@
#define ISPCCDC_CLAMP_OBST_SHIFT 10
#define ISPCCDC_CLAMP_OBSLN_SHIFT 25
#define ISPCCDC_CLAMP_OBSLEN_SHIFT 28
-#define ISPCCDC_CLAMP_CLAMPEN (1 << 31)
+#define ISPCCDC_CLAMP_CLAMPEN BIT(31)
#define ISPCCDC_COLPTN_R_Ye 0x0
#define ISPCCDC_COLPTN_Gr_Cy 0x1
@@ -772,8 +772,8 @@
#define ISPCCDC_BLKCMP_R_YE_SHIFT 24
#define ISPCCDC_FPC_FPNUM_SHIFT 0
-#define ISPCCDC_FPC_FPCEN (1 << 15)
-#define ISPCCDC_FPC_FPERR (1 << 16)
+#define ISPCCDC_FPC_FPCEN BIT(15)
+#define ISPCCDC_FPC_FPERR BIT(16)
#define ISPCCDC_VDINT_1_SHIFT 0
#define ISPCCDC_VDINT_1_MASK 0x00007fff
@@ -784,23 +784,23 @@
#define ISPCCDC_ALAW_GWDI_11_2 (0x4 << 0)
#define ISPCCDC_ALAW_GWDI_10_1 (0x5 << 0)
#define ISPCCDC_ALAW_GWDI_9_0 (0x6 << 0)
-#define ISPCCDC_ALAW_CCDTBL (1 << 3)
+#define ISPCCDC_ALAW_CCDTBL BIT(3)
#define ISPCCDC_REC656IF_R656ON 1
-#define ISPCCDC_REC656IF_ECCFVH (1 << 1)
+#define ISPCCDC_REC656IF_ECCFVH BIT(1)
-#define ISPCCDC_CFG_BW656 (1 << 5)
+#define ISPCCDC_CFG_BW656 BIT(5)
#define ISPCCDC_CFG_FIDMD_SHIFT 6
-#define ISPCCDC_CFG_WENLOG (1 << 8)
+#define ISPCCDC_CFG_WENLOG BIT(8)
#define ISPCCDC_CFG_WENLOG_AND (0 << 8)
#define ISPCCDC_CFG_WENLOG_OR (1 << 8)
-#define ISPCCDC_CFG_Y8POS (1 << 11)
-#define ISPCCDC_CFG_BSWD (1 << 12)
-#define ISPCCDC_CFG_MSBINVI (1 << 13)
-#define ISPCCDC_CFG_VDLC (1 << 15)
+#define ISPCCDC_CFG_Y8POS BIT(11)
+#define ISPCCDC_CFG_BSWD BIT(12)
+#define ISPCCDC_CFG_MSBINVI BIT(13)
+#define ISPCCDC_CFG_VDLC BIT(15)
#define ISPCCDC_FMTCFG_FMTEN 0x1
-#define ISPCCDC_FMTCFG_LNALT (1 << 1)
+#define ISPCCDC_FMTCFG_LNALT BIT(1)
#define ISPCCDC_FMTCFG_LNUM_SHIFT 2
#define ISPCCDC_FMTCFG_PLEN_ODD_SHIFT 4
#define ISPCCDC_FMTCFG_PLEN_EVEN_SHIFT 8
@@ -809,7 +809,7 @@
#define ISPCCDC_FMTCFG_VPIN_11_2 (0x4 << 12)
#define ISPCCDC_FMTCFG_VPIN_10_1 (0x5 << 12)
#define ISPCCDC_FMTCFG_VPIN_9_0 (0x6 << 12)
-#define ISPCCDC_FMTCFG_VPEN (1 << 15)
+#define ISPCCDC_FMTCFG_VPEN BIT(15)
#define ISPCCDC_FMTCFG_VPIF_FRQ_MASK 0x003f0000
#define ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT 16
@@ -839,9 +839,9 @@
#define ISPRSZ_PID_CID_SHIFT 8
#define ISPRSZ_PID_TID_SHIFT 16
-#define ISPRSZ_PCR_ENABLE (1 << 0)
-#define ISPRSZ_PCR_BUSY (1 << 1)
-#define ISPRSZ_PCR_ONESHOT (1 << 2)
+#define ISPRSZ_PCR_ENABLE BIT(0)
+#define ISPRSZ_PCR_BUSY BIT(1)
+#define ISPRSZ_PCR_ONESHOT BIT(2)
#define ISPRSZ_CNT_HRSZ_SHIFT 0
#define ISPRSZ_CNT_HRSZ_MASK \
@@ -853,10 +853,10 @@
#define ISPRSZ_CNT_HSTPH_MASK (0x7 << ISPRSZ_CNT_HSTPH_SHIFT)
#define ISPRSZ_CNT_VSTPH_SHIFT 23
#define ISPRSZ_CNT_VSTPH_MASK (0x7 << ISPRSZ_CNT_VSTPH_SHIFT)
-#define ISPRSZ_CNT_YCPOS (1 << 26)
-#define ISPRSZ_CNT_INPTYP (1 << 27)
-#define ISPRSZ_CNT_INPSRC (1 << 28)
-#define ISPRSZ_CNT_CBILIN (1 << 29)
+#define ISPRSZ_CNT_YCPOS BIT(26)
+#define ISPRSZ_CNT_INPTYP BIT(27)
+#define ISPRSZ_CNT_INPSRC BIT(28)
+#define ISPRSZ_CNT_CBILIN BIT(29)
#define ISPRSZ_OUT_SIZE_HORZ_SHIFT 0
#define ISPRSZ_OUT_SIZE_HORZ_MASK \
@@ -1081,8 +1081,8 @@
#define ISPH3A_PCR_AF_RGBPOS_SHIFT 11
#define ISPH3A_PCR_AEW_AVE2LMT_SHIFT 22
#define ISPH3A_PCR_AEW_AVE2LMT_MASK 0xFFC00000
-#define ISPH3A_PCR_BUSYAF (1 << 15)
-#define ISPH3A_PCR_BUSYAEAWB (1 << 18)
+#define ISPH3A_PCR_BUSYAF BIT(15)
+#define ISPH3A_PCR_BUSYAEAWB BIT(18)
#define ISPH3A_AEWWIN1_WINHC_SHIFT 0
#define ISPH3A_AEWWIN1_WINHC_MASK 0x3F
@@ -1166,15 +1166,15 @@
#define ISPHIST_HV_INFO_MASK 0x3FFF3FFF
-#define ISPCCDC_LSC_ENABLE 1
-#define ISPCCDC_LSC_BUSY (1 << 7)
+#define ISPCCDC_LSC_ENABLE BIT(0)
+#define ISPCCDC_LSC_BUSY BIT(7)
#define ISPCCDC_LSC_GAIN_MODE_N_MASK 0x700
#define ISPCCDC_LSC_GAIN_MODE_N_SHIFT 8
#define ISPCCDC_LSC_GAIN_MODE_M_MASK 0x3800
#define ISPCCDC_LSC_GAIN_MODE_M_SHIFT 12
#define ISPCCDC_LSC_GAIN_FORMAT_MASK 0xE
#define ISPCCDC_LSC_GAIN_FORMAT_SHIFT 1
-#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK (1<<6)
+#define ISPCCDC_LSC_AFTER_REFORMATTER_MASK BIT(6)
#define ISPCCDC_LSC_INITIAL_X_MASK 0x3F
#define ISPCCDC_LSC_INITIAL_X_SHIFT 0
@@ -1196,43 +1196,43 @@
(0x1 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
#define ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART \
(0x2 << ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SHIFT)
-#define ISPCSI2_SYSCONFIG_SOFT_RESET (1 << 1)
-#define ISPCSI2_SYSCONFIG_AUTO_IDLE (1 << 0)
+#define ISPCSI2_SYSCONFIG_SOFT_RESET BIT(1)
+#define ISPCSI2_SYSCONFIG_AUTO_IDLE BIT(0)
#define ISPCSI2_SYSSTATUS (0x014)
-#define ISPCSI2_SYSSTATUS_RESET_DONE (1 << 0)
+#define ISPCSI2_SYSSTATUS_RESET_DONE BIT(0)
#define ISPCSI2_IRQSTATUS (0x018)
-#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ (1 << 14)
-#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ (1 << 13)
-#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 12)
-#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ (1 << 11)
-#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ (1 << 10)
-#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ (1 << 9)
-#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ (1 << 8)
-#define ISPCSI2_IRQSTATUS_CONTEXT(n) (1 << (n))
+#define ISPCSI2_IRQSTATUS_OCP_ERR_IRQ BIT(14)
+#define ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ BIT(13)
+#define ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ BIT(12)
+#define ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ BIT(11)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ BIT(10)
+#define ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ BIT(9)
+#define ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ BIT(8)
+#define ISPCSI2_IRQSTATUS_CONTEXT(n) BIT(n)
#define ISPCSI2_IRQENABLE (0x01c)
#define ISPCSI2_CTRL (0x040)
-#define ISPCSI2_CTRL_VP_CLK_EN (1 << 15)
-#define ISPCSI2_CTRL_VP_ONLY_EN (1 << 11)
+#define ISPCSI2_CTRL_VP_CLK_EN BIT(15)
+#define ISPCSI2_CTRL_VP_ONLY_EN BIT(11)
#define ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT 8
#define ISPCSI2_CTRL_VP_OUT_CTRL_MASK \
(3 << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT)
-#define ISPCSI2_CTRL_DBG_EN (1 << 7)
+#define ISPCSI2_CTRL_DBG_EN BIT(7)
#define ISPCSI2_CTRL_BURST_SIZE_SHIFT 5
#define ISPCSI2_CTRL_BURST_SIZE_MASK \
(3 << ISPCSI2_CTRL_BURST_SIZE_SHIFT)
-#define ISPCSI2_CTRL_FRAME (1 << 3)
-#define ISPCSI2_CTRL_ECC_EN (1 << 2)
-#define ISPCSI2_CTRL_SECURE (1 << 1)
-#define ISPCSI2_CTRL_IF_EN (1 << 0)
+#define ISPCSI2_CTRL_FRAME BIT(3)
+#define ISPCSI2_CTRL_ECC_EN BIT(2)
+#define ISPCSI2_CTRL_SECURE BIT(1)
+#define ISPCSI2_CTRL_IF_EN BIT(0)
#define ISPCSI2_DBG_H (0x044)
#define ISPCSI2_GNQ (0x048)
#define ISPCSI2_PHY_CFG (0x050)
-#define ISPCSI2_PHY_CFG_RESET_CTRL (1 << 30)
-#define ISPCSI2_PHY_CFG_RESET_DONE (1 << 29)
+#define ISPCSI2_PHY_CFG_RESET_CTRL BIT(30)
+#define ISPCSI2_PHY_CFG_RESET_DONE BIT(29)
#define ISPCSI2_PHY_CFG_PWR_CMD_SHIFT 27
#define ISPCSI2_PHY_CFG_PWR_CMD_MASK \
(0x3 << ISPCSI2_PHY_CFG_PWR_CMD_SHIFT)
@@ -1251,7 +1251,7 @@
(0x1 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
#define ISPCSI2_PHY_CFG_PWR_STATUS_ULPW \
(0x2 << ISPCSI2_PHY_CFG_PWR_STATUS_SHIFT)
-#define ISPCSI2_PHY_CFG_PWR_AUTO (1 << 24)
+#define ISPCSI2_PHY_CFG_PWR_AUTO BIT(24)
#define ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n) (3 + ((n) * 4))
#define ISPCSI2_PHY_CFG_DATA_POL_MASK(n) \
@@ -1300,63 +1300,63 @@
(0x5 << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT)
#define ISPCSI2_PHY_IRQSTATUS (0x054)
-#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMEXIT (1 << 26)
-#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMENTER (1 << 25)
-#define ISPCSI2_PHY_IRQSTATUS_STATEULPM5 (1 << 24)
-#define ISPCSI2_PHY_IRQSTATUS_STATEULPM4 (1 << 23)
-#define ISPCSI2_PHY_IRQSTATUS_STATEULPM3 (1 << 22)
-#define ISPCSI2_PHY_IRQSTATUS_STATEULPM2 (1 << 21)
-#define ISPCSI2_PHY_IRQSTATUS_STATEULPM1 (1 << 20)
-#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL5 (1 << 19)
-#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL4 (1 << 18)
-#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL3 (1 << 17)
-#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL2 (1 << 16)
-#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL1 (1 << 15)
-#define ISPCSI2_PHY_IRQSTATUS_ERRESC5 (1 << 14)
-#define ISPCSI2_PHY_IRQSTATUS_ERRESC4 (1 << 13)
-#define ISPCSI2_PHY_IRQSTATUS_ERRESC3 (1 << 12)
-#define ISPCSI2_PHY_IRQSTATUS_ERRESC2 (1 << 11)
-#define ISPCSI2_PHY_IRQSTATUS_ERRESC1 (1 << 10)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS5 (1 << 9)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS4 (1 << 8)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS3 (1 << 7)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS2 (1 << 6)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS1 (1 << 5)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS5 (1 << 4)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS4 (1 << 3)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS3 (1 << 2)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS2 (1 << 1)
-#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS1 1
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMEXIT BIT(26)
+#define ISPCSI2_PHY_IRQSTATUS_STATEALLULPMENTER BIT(25)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM5 BIT(24)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM4 BIT(23)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM3 BIT(22)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM2 BIT(21)
+#define ISPCSI2_PHY_IRQSTATUS_STATEULPM1 BIT(20)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL5 BIT(19)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL4 BIT(18)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL3 BIT(17)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL2 BIT(16)
+#define ISPCSI2_PHY_IRQSTATUS_ERRCONTROL1 BIT(15)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC5 BIT(14)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC4 BIT(13)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC3 BIT(12)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC2 BIT(11)
+#define ISPCSI2_PHY_IRQSTATUS_ERRESC1 BIT(10)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS5 BIT(9)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS4 BIT(8)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS3 BIT(7)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS2 BIT(6)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTSYNCHS1 BIT(5)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS5 BIT(4)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS4 BIT(3)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS3 BIT(2)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS2 BIT(1)
+#define ISPCSI2_PHY_IRQSTATUS_ERRSOTHS1 BIT(0)
#define ISPCSI2_SHORT_PACKET (0x05c)
#define ISPCSI2_PHY_IRQENABLE (0x060)
-#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT (1 << 26)
-#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER (1 << 25)
-#define ISPCSI2_PHY_IRQENABLE_STATEULPM5 (1 << 24)
-#define ISPCSI2_PHY_IRQENABLE_STATEULPM4 (1 << 23)
-#define ISPCSI2_PHY_IRQENABLE_STATEULPM3 (1 << 22)
-#define ISPCSI2_PHY_IRQENABLE_STATEULPM2 (1 << 21)
-#define ISPCSI2_PHY_IRQENABLE_STATEULPM1 (1 << 20)
-#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 (1 << 19)
-#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 (1 << 18)
-#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 (1 << 17)
-#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 (1 << 16)
-#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 (1 << 15)
-#define ISPCSI2_PHY_IRQENABLE_ERRESC5 (1 << 14)
-#define ISPCSI2_PHY_IRQENABLE_ERRESC4 (1 << 13)
-#define ISPCSI2_PHY_IRQENABLE_ERRESC3 (1 << 12)
-#define ISPCSI2_PHY_IRQENABLE_ERRESC2 (1 << 11)
-#define ISPCSI2_PHY_IRQENABLE_ERRESC1 (1 << 10)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 (1 << 9)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 (1 << 8)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 (1 << 7)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 (1 << 6)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 (1 << 5)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 (1 << 4)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 (1 << 3)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 (1 << 2)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 (1 << 1)
-#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS1 (1 << 0)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT BIT(26)
+#define ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER BIT(25)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM5 BIT(24)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM4 BIT(23)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM3 BIT(22)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM2 BIT(21)
+#define ISPCSI2_PHY_IRQENABLE_STATEULPM1 BIT(20)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 BIT(19)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 BIT(18)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 BIT(17)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 BIT(16)
+#define ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 BIT(15)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC5 BIT(14)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC4 BIT(13)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC3 BIT(12)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC2 BIT(11)
+#define ISPCSI2_PHY_IRQENABLE_ERRESC1 BIT(10)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 BIT(9)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 BIT(8)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 BIT(7)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 BIT(6)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 BIT(5)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 BIT(4)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 BIT(3)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 BIT(2)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 BIT(1)
+#define ISPCSI2_PHY_IRQENABLE_ERRSOTHS1 BIT(0)
#define ISPCSI2_DBG_P (0x068)
#define ISPCSI2_TIMING (0x06c)
@@ -1371,12 +1371,12 @@
#define ISPCSI2_CTX_CTRL1_COUNT_SHIFT 8
#define ISPCSI2_CTX_CTRL1_COUNT_MASK \
(0xff << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
-#define ISPCSI2_CTX_CTRL1_EOF_EN (1 << 7)
-#define ISPCSI2_CTX_CTRL1_EOL_EN (1 << 6)
-#define ISPCSI2_CTX_CTRL1_CS_EN (1 << 5)
-#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK (1 << 4)
-#define ISPCSI2_CTX_CTRL1_PING_PONG (1 << 3)
-#define ISPCSI2_CTX_CTRL1_CTX_EN (1 << 0)
+#define ISPCSI2_CTX_CTRL1_EOF_EN BIT(7)
+#define ISPCSI2_CTX_CTRL1_EOL_EN BIT(6)
+#define ISPCSI2_CTX_CTRL1_CS_EN BIT(5)
+#define ISPCSI2_CTX_CTRL1_COUNT_UNLOCK BIT(4)
+#define ISPCSI2_CTX_CTRL1_PING_PONG BIT(3)
+#define ISPCSI2_CTX_CTRL1_CTX_EN BIT(0)
#define ISPCSI2_CTX_CTRL2(n) ((0x074) + 0x20 * (n))
#define ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT 13
@@ -1385,7 +1385,7 @@
#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT 11
#define ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK \
(0x3 << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT)
-#define ISPCSI2_CTX_CTRL2_DPCM_PRED (1 << 10)
+#define ISPCSI2_CTX_CTRL2_DPCM_PRED BIT(10)
#define ISPCSI2_CTX_CTRL2_FORMAT_SHIFT 0
#define ISPCSI2_CTX_CTRL2_FORMAT_MASK \
(0x3ff << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT)
@@ -1401,24 +1401,24 @@
#define ISPCSI2_CTX_DAT_PING_ADDR(n) ((0x07c) + 0x20 * (n))
#define ISPCSI2_CTX_DAT_PONG_ADDR(n) ((0x080) + 0x20 * (n))
#define ISPCSI2_CTX_IRQENABLE(n) ((0x084) + 0x20 * (n))
-#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ (1 << 8)
-#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ (1 << 7)
-#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ (1 << 6)
-#define ISPCSI2_CTX_IRQENABLE_CS_IRQ (1 << 5)
-#define ISPCSI2_CTX_IRQENABLE_LE_IRQ (1 << 3)
-#define ISPCSI2_CTX_IRQENABLE_LS_IRQ (1 << 2)
-#define ISPCSI2_CTX_IRQENABLE_FE_IRQ (1 << 1)
-#define ISPCSI2_CTX_IRQENABLE_FS_IRQ (1 << 0)
+#define ISPCSI2_CTX_IRQENABLE_ECC_CORRECTION_IRQ BIT(8)
+#define ISPCSI2_CTX_IRQENABLE_LINE_NUMBER_IRQ BIT(7)
+#define ISPCSI2_CTX_IRQENABLE_FRAME_NUMBER_IRQ BIT(6)
+#define ISPCSI2_CTX_IRQENABLE_CS_IRQ BIT(5)
+#define ISPCSI2_CTX_IRQENABLE_LE_IRQ BIT(3)
+#define ISPCSI2_CTX_IRQENABLE_LS_IRQ BIT(2)
+#define ISPCSI2_CTX_IRQENABLE_FE_IRQ BIT(1)
+#define ISPCSI2_CTX_IRQENABLE_FS_IRQ BIT(0)
#define ISPCSI2_CTX_IRQSTATUS(n) ((0x088) + 0x20 * (n))
-#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ (1 << 8)
-#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ (1 << 7)
-#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ (1 << 6)
-#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ (1 << 5)
-#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ (1 << 3)
-#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ (1 << 2)
-#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ (1 << 1)
-#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ (1 << 0)
+#define ISPCSI2_CTX_IRQSTATUS_ECC_CORRECTION_IRQ BIT(8)
+#define ISPCSI2_CTX_IRQSTATUS_LINE_NUMBER_IRQ BIT(7)
+#define ISPCSI2_CTX_IRQSTATUS_FRAME_NUMBER_IRQ BIT(6)
+#define ISPCSI2_CTX_IRQSTATUS_CS_IRQ BIT(5)
+#define ISPCSI2_CTX_IRQSTATUS_LE_IRQ BIT(3)
+#define ISPCSI2_CTX_IRQSTATUS_LS_IRQ BIT(2)
+#define ISPCSI2_CTX_IRQSTATUS_FE_IRQ BIT(1)
+#define ISPCSI2_CTX_IRQSTATUS_FS_IRQ BIT(0)
#define ISPCSI2_CTX_CTRL3(n) ((0x08c) + 0x20 * (n))
#define ISPCSI2_CTX_CTRL3_ALPHA_SHIFT 5
@@ -1454,9 +1454,9 @@
(0xff << ISPCSIPHY_REG0_THS_SETTLE_SHIFT)
#define ISPCSIPHY_REG1 (0x004)
-#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK (1 << 29)
+#define ISPCSIPHY_REG1_RESET_DONE_CTRLCLK BIT(29)
/* This field is for OMAP3630 only */
-#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS (1 << 25)
+#define ISPCSIPHY_REG1_CLOCK_MISS_DETECTOR_STATUS BIT(25)
#define ISPCSIPHY_REG1_TCLK_TERM_SHIFT 18
#define ISPCSIPHY_REG1_TCLK_TERM_MASK \
(0x7f << ISPCSIPHY_REG1_TCLK_TERM_SHIFT)
@@ -1498,11 +1498,11 @@
*/
/* OMAP343X_CONTROL_CSIRXFE */
-#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV (1 << 7)
-#define OMAP343X_CONTROL_CSIRXFE_RESENABLE (1 << 8)
-#define OMAP343X_CONTROL_CSIRXFE_SELFORM (1 << 10)
-#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ (1 << 12)
-#define OMAP343X_CONTROL_CSIRXFE_RESET (1 << 13)
+#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV BIT(7)
+#define OMAP343X_CONTROL_CSIRXFE_RESENABLE BIT(8)
+#define OMAP343X_CONTROL_CSIRXFE_SELFORM BIT(10)
+#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ BIT(12)
+#define OMAP343X_CONTROL_CSIRXFE_RESET BIT(13)
/* OMAP3630_CONTROL_CAMERA_PHY_CTRL */
#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT 2
@@ -1513,6 +1513,6 @@
#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_GPI 0x3
#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK 0x3
/* CCP2B: set to receive data from PHY2 instead of PHY1 */
-#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 (1 << 4)
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 BIT(4)
#endif /* OMAP3_ISP_REG_H */
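
The ispreg.h hunks are a mechanical conversion of single-bit masks from open-coded (1 << n) to the BIT() helper from <linux/bits.h>. BIT() shifts an unsigned long, so BIT(31) also avoids shifting into the sign bit of a signed int. For example (the mask names below are hypothetical, not taken from the header):

	#include <linux/bits.h>

	#define EXAMPLE_STATUS_DONE	BIT(0)	/* == 1UL << 0 */
	#define EXAMPLE_STATUS_ERROR	BIT(31)	/* well defined, unlike (1 << 31) */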
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 21ca6954df72..78d9dd7ea2da 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1681,6 +1681,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
int ret;
/* Register the subdev and video nodes. */
+ res->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &res->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 62b2eacb96fd..5b9b57f4d9bf 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1026,6 +1026,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
int omap3isp_stat_register_entities(struct ispstat *stat,
struct v4l2_device *vdev)
{
+ stat->subdev.dev = vdev->mdev->dev;
+
return v4l2_device_register_subdev(vdev, &stat->subdev);
}
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 499a7284c5a8..ee183c35ff3b 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -658,10 +658,6 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
@@ -1024,8 +1020,8 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
ctrls.count = 1;
ctrls.controls = &ctrl;
-
- ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, NULL, &ctrls);
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
+ NULL, &ctrls);
if (ret < 0) {
dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
pipe->external->name);
@@ -1460,6 +1456,13 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
video->video.vfl_type = VFL_TYPE_GRABBER;
video->video.release = video_device_release_empty;
video->video.ioctl_ops = &isp_video_ioctl_ops;
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_STREAMING;
+ else
+ video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+
video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
video_set_drvdata(&video->video, video);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 1c9bfaabc54c..8d47ea0c33f8 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -64,7 +64,7 @@
#define CIBR1 0x0030
#define CIBR2 0x0038
-#define CICR0_DMAEN (1 << 31) /* DMA request enable */
+#define CICR0_DMAEN (1UL << 31) /* DMA request enable */
#define CICR0_PAR_EN (1 << 30) /* Parity enable */
#define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */
#define CICR0_ENB (1 << 28) /* Camera interface enable */
@@ -81,7 +81,7 @@
#define CICR0_EOFM (1 << 1) /* End-of-frame mask */
#define CICR0_FOM (1 << 0) /* FIFO-overrun mask */
-#define CICR1_TBIT (1 << 31) /* Transparency bit */
+#define CICR1_TBIT (1UL << 31) /* Transparency bit */
#define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */
#define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */
#define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */
@@ -1992,9 +1992,6 @@ static int pxac_vidioc_querycap(struct file *file, void *priv,
strscpy(cap->bus_info, "platform:pxa-camera", sizeof(cap->bus_info));
strscpy(cap->driver, PXA_CAM_DRV_NAME, sizeof(cap->driver));
strscpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 63da18773d24..3fdc9f964a3c 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -486,9 +486,9 @@ static int camss_of_parse_ports(struct camss *camss)
asd = v4l2_async_notifier_add_fwnode_subdev(
&camss->notifier, of_fwnode_handle(remote),
sizeof(*csd));
+ of_node_put(remote);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
- of_node_put(remote);
goto err_cleanup;
}
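
The camss.c hunk moves of_node_put() before the IS_ERR() check: the async framework keeps its own reference to the fwnode it is handed, so the caller's reference from the graph walk can be dropped on the success path as well, not only on error. The general pattern, sketched with hypothetical "sketch_" names:

	#include <linux/of.h>
	#include <linux/of_graph.h>

	struct sketch_ctx;
	int sketch_add_subdev(struct sketch_ctx *ctx, struct device_node *node);

	static int sketch_register_remote(struct sketch_ctx *ctx,
					  struct device_node *ep)
	{
		struct device_node *remote = of_graph_get_remote_port_parent(ep);
		int ret;

		if (!remote)
			return -EINVAL;

		ret = sketch_add_subdev(ctx, remote);	/* callee takes its own ref */
		of_node_put(remote);			/* drop ours unconditionally */

		return ret;
	}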
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 0acc7576cc58..e6eff512a8a1 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -198,7 +198,7 @@ static int venus_enumerate_codecs(struct venus_core *core, u32 type)
goto err;
for (i = 0; i < MAX_CODEC_NUM; i++) {
- codec = (1 << i) & codecs;
+ codec = (1UL << i) & codecs;
if (!codec)
continue;
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 9ab95fd57760..922cb7e64bfa 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -46,6 +46,7 @@ struct venus_format {
u32 pixfmt;
unsigned int num_planes;
u32 type;
+ u32 flags;
};
#define MAX_PLANES 4
@@ -209,6 +210,25 @@ struct venus_buffer {
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
+enum venus_dec_state {
+ VENUS_DEC_STATE_DEINIT = 0,
+ VENUS_DEC_STATE_INIT = 1,
+ VENUS_DEC_STATE_CAPTURE_SETUP = 2,
+ VENUS_DEC_STATE_STOPPED = 3,
+ VENUS_DEC_STATE_SEEK = 4,
+ VENUS_DEC_STATE_DRAIN = 5,
+ VENUS_DEC_STATE_DECODING = 6,
+ VENUS_DEC_STATE_DRC = 7
+};
+
+struct venus_ts_metadata {
+ bool used;
+ u64 ts_ns;
+ u64 ts_us;
+ u32 flags;
+ struct v4l2_timecode tc;
+};
+
/**
* struct venus_inst - holds per instance parameters
*
@@ -232,6 +252,10 @@ struct venus_buffer {
* @colorspace: current color space
* @quantization: current quantization
* @xfer_func: current xfer function
+ * @codec_state: current codec API state (see DEC/ENC_STATE_)
+ * @reconf_wait: wait queue for resolution change event
+ * @subscriptions: used to hold current event subscriptions
+ * @buf_count: used to count the number of buffers (reqbuf(0))
* @fps: holds current FPS
* @timeperframe: holds current time per frame structure
* @fmt_out: a reference to output format structure
@@ -246,8 +270,6 @@ struct venus_buffer {
* @opb_buftype: output picture buffer type
* @opb_fmt: output picture buffer raw format
* @reconfig: a flag raised by decoder when the stream resolution changed
- * @reconfig_width: holds the new width
- * @reconfig_height: holds the new height
* @hfi_codec: current codec for this instance in HFI space
* @sequence_cap: a sequence counter for capture queue
* @sequence_out: a sequence counter for output queue
@@ -287,6 +309,11 @@ struct venus_inst {
u8 ycbcr_enc;
u8 quantization;
u8 xfer_func;
+ enum venus_dec_state codec_state;
+ wait_queue_head_t reconf_wait;
+ unsigned int subscriptions;
+ int buf_count;
+ struct venus_ts_metadata tss[VIDEO_MAX_FRAME];
u64 fps;
struct v4l2_fract timeperframe;
const struct venus_format *fmt_out;
@@ -301,8 +328,6 @@ struct venus_inst {
u32 opb_buftype;
u32 opb_fmt;
bool reconfig;
- u32 reconfig_width;
- u32 reconfig_height;
u32 hfi_codec;
u32 sequence_cap;
u32 sequence_out;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 71b06dfc6dc4..1ad96c25ab09 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -79,7 +79,7 @@ bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);
-static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
+int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
{
struct intbuf *buf;
int ret = 0;
@@ -100,6 +100,7 @@ static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
fail:
return ret;
}
+EXPORT_SYMBOL_GPL(venus_helper_queue_dpb_bufs);
int venus_helper_free_dpb_bufs(struct venus_inst *inst)
{
@@ -278,7 +279,7 @@ static const unsigned int intbuf_types_4xx[] = {
HFI_BUFFER_INTERNAL_PERSIST_1,
};
-static int intbufs_alloc(struct venus_inst *inst)
+int venus_helper_intbufs_alloc(struct venus_inst *inst)
{
const unsigned int *intbuf;
size_t arr_sz, i;
@@ -304,11 +305,59 @@ error:
intbufs_unset_buffers(inst);
return ret;
}
+EXPORT_SYMBOL_GPL(venus_helper_intbufs_alloc);
-static int intbufs_free(struct venus_inst *inst)
+int venus_helper_intbufs_free(struct venus_inst *inst)
{
return intbufs_unset_buffers(inst);
}
+EXPORT_SYMBOL_GPL(venus_helper_intbufs_free);
+
+int venus_helper_intbufs_realloc(struct venus_inst *inst)
+{
+ enum hfi_version ver = inst->core->res->hfi_version;
+ struct hfi_buffer_desc bd;
+ struct intbuf *buf, *n;
+ int ret;
+
+ list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
+ if (buf->type == HFI_BUFFER_INTERNAL_PERSIST ||
+ buf->type == HFI_BUFFER_INTERNAL_PERSIST_1)
+ continue;
+
+ memset(&bd, 0, sizeof(bd));
+ bd.buffer_size = buf->size;
+ bd.buffer_type = buf->type;
+ bd.num_buffers = 1;
+ bd.device_addr = buf->da;
+ bd.response_required = true;
+
+ ret = hfi_session_unset_buffers(inst, &bd);
+
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+
+ list_del_init(&buf->list);
+ kfree(buf);
+ }
+
+ ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH(ver));
+ if (ret)
+ goto err;
+
+ ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_1(ver));
+ if (ret)
+ goto err;
+
+ ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_2(ver));
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc);
static u32 load_per_instance(struct venus_inst *inst)
{
@@ -339,7 +388,7 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
return mbs_per_sec;
}
-static int load_scale_clocks(struct venus_core *core)
+int venus_helper_load_scale_clocks(struct venus_core *core)
{
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
@@ -388,6 +437,7 @@ err:
dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
return ret;
}
+EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
static void fill_buffer_desc(const struct venus_buffer *buf,
struct hfi_buffer_desc *bd, bool response)
@@ -413,6 +463,57 @@ static void return_buf_error(struct venus_inst *inst,
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
+static void
+put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ unsigned int i;
+ int slot = -1;
+ u64 ts_us = vb->timestamp;
+
+ for (i = 0; i < ARRAY_SIZE(inst->tss); i++) {
+ if (!inst->tss[i].used) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ dev_dbg(inst->core->dev, "%s: no free slot\n", __func__);
+ return;
+ }
+
+ do_div(ts_us, NSEC_PER_USEC);
+
+ inst->tss[slot].used = true;
+ inst->tss[slot].flags = vbuf->flags;
+ inst->tss[slot].tc = vbuf->timecode;
+ inst->tss[slot].ts_us = ts_us;
+ inst->tss[slot].ts_ns = vb->timestamp;
+}
+
+void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
+ if (!inst->tss[i].used)
+ continue;
+
+ if (inst->tss[i].ts_us != timestamp_us)
+ continue;
+
+ inst->tss[i].used = false;
+ vbuf->flags |= inst->tss[i].flags;
+ vbuf->timecode = inst->tss[i].tc;
+ vb->timestamp = inst->tss[i].ts_ns;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_ts_metadata);
+
static int
session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
@@ -437,6 +538,9 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
fdata.flags |= HFI_BUFFERFLAG_EOS;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC)
+ put_ts_metadata(inst, vbuf);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -472,7 +576,7 @@ static bool is_dynamic_bufmode(struct venus_inst *inst)
return caps->cap_bufs_mode_dynamic;
}
-static int session_unregister_bufs(struct venus_inst *inst)
+int venus_helper_unregister_bufs(struct venus_inst *inst)
{
struct venus_buffer *buf, *n;
struct hfi_buffer_desc bd;
@@ -489,6 +593,7 @@ static int session_unregister_bufs(struct venus_inst *inst)
return ret;
}
+EXPORT_SYMBOL_GPL(venus_helper_unregister_bufs);
static int session_register_bufs(struct venus_inst *inst)
{
@@ -947,6 +1052,17 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
unsigned int out_buf_size = venus_helper_get_opb_size(inst);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_err(inst->core->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
vb2_plane_size(vb, 0) < out_buf_size)
@@ -970,16 +1086,19 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
- if (!(inst->streamon_out & inst->streamon_cap))
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
+ !(inst->streamon_out && inst->streamon_cap))
goto unlock;
- ret = is_buf_refed(inst, vbuf);
- if (ret)
- goto unlock;
+ if (vb2_start_streaming_called(vb->vb2_queue)) {
+ ret = is_buf_refed(inst, vbuf);
+ if (ret)
+ goto unlock;
- ret = session_process_buf(inst, vbuf);
- if (ret)
- return_buf_error(inst, vbuf);
+ ret = session_process_buf(inst, vbuf);
+ if (ret)
+ return_buf_error(inst, vbuf);
+ }
unlock:
mutex_unlock(&inst->lock);
@@ -1009,8 +1128,8 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
if (inst->streamon_out & inst->streamon_cap) {
ret = hfi_session_stop(inst);
ret |= hfi_session_unload_res(inst);
- ret |= session_unregister_bufs(inst);
- ret |= intbufs_free(inst);
+ ret |= venus_helper_unregister_bufs(inst);
+ ret |= venus_helper_intbufs_free(inst);
ret |= hfi_session_deinit(inst);
if (inst->session_error || core->sys_error)
@@ -1021,7 +1140,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- load_scale_clocks(core);
+ venus_helper_load_scale_clocks(core);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1036,12 +1155,48 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret) {
+ return_buf_error(inst, &buf->vb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_process_initial_cap_bufs);
+
+int venus_helper_process_initial_out_bufs(struct venus_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret) {
+ return_buf_error(inst, &buf->vb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
+
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
int ret;
- ret = intbufs_alloc(inst);
+ ret = venus_helper_intbufs_alloc(inst);
if (ret)
return ret;
@@ -1049,7 +1204,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- load_scale_clocks(core);
+ venus_helper_load_scale_clocks(core);
ret = hfi_session_load_res(inst);
if (ret)
@@ -1059,20 +1214,14 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_unload_res;
- ret = venus_helper_queue_dpb_bufs(inst);
- if (ret)
- goto err_session_stop;
-
return 0;
-err_session_stop:
- hfi_session_stop(inst);
err_unload_res:
hfi_session_unload_res(inst);
err_unreg_bufs:
- session_unregister_bufs(inst);
+ venus_helper_unregister_bufs(inst);
err_bufs_free:
- intbufs_free(inst);
+ venus_helper_intbufs_free(inst);
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 153783687a0c..01f411b12f81 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -9,6 +9,7 @@
#include <media/videobuf2-v4l2.h>
struct venus_inst;
+struct venus_core;
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
@@ -53,4 +54,14 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst);
int venus_helper_free_dpb_bufs(struct venus_inst *inst);
int venus_helper_power_enable(struct venus_core *core, u32 session_type,
bool enable);
+int venus_helper_intbufs_alloc(struct venus_inst *inst);
+int venus_helper_intbufs_free(struct venus_inst *inst);
+int venus_helper_intbufs_realloc(struct venus_inst *inst);
+int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
+int venus_helper_unregister_bufs(struct venus_inst *inst);
+int venus_helper_load_scale_clocks(struct venus_core *core);
+int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
+int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
+void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
+ struct vb2_v4l2_buffer *vbuf);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 6ad0c1772ea7..3d8b1284d1f3 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -198,6 +198,9 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
const struct hfi_ops *ops = core->ops;
int ret;
+ if (inst->state != INST_UNINIT)
+ return -EINVAL;
+
inst->hfi_codec = to_codec_type(pixfmt);
reinit_completion(&inst->done);
@@ -276,6 +279,7 @@ int hfi_session_start(struct venus_inst *inst)
return 0;
}
+EXPORT_SYMBOL_GPL(hfi_session_start);
int hfi_session_stop(struct venus_inst *inst)
{
@@ -299,6 +303,7 @@ int hfi_session_stop(struct venus_inst *inst)
return 0;
}
+EXPORT_SYMBOL_GPL(hfi_session_stop);
int hfi_session_continue(struct venus_inst *inst)
{
@@ -328,6 +333,7 @@ int hfi_session_abort(struct venus_inst *inst)
return 0;
}
+EXPORT_SYMBOL_GPL(hfi_session_abort);
int hfi_session_load_res(struct venus_inst *inst)
{
@@ -374,15 +380,16 @@ int hfi_session_unload_res(struct venus_inst *inst)
return 0;
}
+EXPORT_SYMBOL_GPL(hfi_session_unload_res);
-int hfi_session_flush(struct venus_inst *inst)
+int hfi_session_flush(struct venus_inst *inst, u32 type)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
reinit_completion(&inst->done);
- ret = ops->session_flush(inst, HFI_FLUSH_ALL);
+ ret = ops->session_flush(inst, type);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index b121cb1427ac..855822c9f39b 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -161,7 +161,7 @@ int hfi_session_continue(struct venus_inst *inst);
int hfi_session_abort(struct venus_inst *inst);
int hfi_session_load_res(struct venus_inst *inst);
int hfi_session_unload_res(struct venus_inst *inst);
-int hfi_session_flush(struct venus_inst *inst);
+int hfi_session_flush(struct venus_inst *inst, u32 type);
int hfi_session_set_buffers(struct venus_inst *inst,
struct hfi_buffer_desc *bd);
int hfi_session_unset_buffers(struct venus_inst *inst,
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index e1f998656c07..7f4660555ddb 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -37,42 +37,52 @@ static const struct venus_format vdec_formats[] = {
.pixfmt = V4L2_PIX_FMT_MPEG4,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H263,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_H264,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_VP9,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_XVID,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
}, {
.pixfmt = V4L2_PIX_FMT_HEVC,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
};
@@ -133,6 +143,7 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
+ u32 szimage;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -161,14 +172,17 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
- pixmp->width,
- pixmp->height);
+ szimage = venus_helper_get_framesz(pixmp->pixelformat, pixmp->width,
+ pixmp->height);
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pfmt[0].sizeimage = szimage;
pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
- else
+ } else {
+ pfmt[0].sizeimage = clamp_t(u32, pfmt[0].sizeimage, 0, SZ_8M);
+ pfmt[0].sizeimage = max(pfmt[0].sizeimage, szimage);
pfmt[0].bytesperline = 0;
+ }
return fmt;
}
@@ -182,33 +196,56 @@ static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
+static int vdec_check_src_change(struct venus_inst *inst)
+{
+ int ret;
+
+ if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE &&
+ inst->codec_state == VENUS_DEC_STATE_INIT &&
+ !inst->reconfig)
+ return -EINVAL;
+
+ if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE)
+ return 0;
+
+ /*
+	 * with applications which don't support V4L2 events. It will be
+	 * dropped in the future once those applications are fixed.
+ * dropped in future once those applications are fixed.
+ */
+
+ if (inst->codec_state != VENUS_DEC_STATE_INIT)
+ goto done;
+
+ ret = wait_event_timeout(inst->reconf_wait, inst->reconfig,
+ msecs_to_jiffies(100));
+ if (!ret)
+ return -EINVAL;
+
+	if (inst->codec_state != VENUS_DEC_STATE_CAPTURE_SETUP ||
+ !inst->reconfig)
+ dev_dbg(inst->core->dev, "%s: wrong state\n", __func__);
+
+done:
+ return 0;
+}
+
static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt = NULL;
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ int ret;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmt_cap;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fmt = inst->fmt_out;
- if (inst->reconfig) {
- struct v4l2_format format = {};
-
- inst->out_width = inst->reconfig_width;
- inst->out_height = inst->reconfig_height;
- inst->reconfig = false;
-
- format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt;
- format.fmt.pix_mp.width = inst->out_width;
- format.fmt.pix_mp.height = inst->out_height;
-
- vdec_try_fmt_common(inst, &format);
-
- inst->width = format.fmt.pix_mp.width;
- inst->height = format.fmt.pix_mp.height;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = vdec_check_src_change(inst);
+ if (ret)
+ return ret;
}
pixmp->pixelformat = fmt->pixfmt;
@@ -266,6 +303,7 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
inst->ycbcr_enc = pixmp->ycbcr_enc;
inst->quantization = pixmp->quantization;
inst->xfer_func = pixmp->xfer_func;
+ inst->input_buf_size = pixmp->plane_fmt[0].sizeimage;
}
memset(&format, 0, sizeof(format));
@@ -351,6 +389,7 @@ static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return -EINVAL;
f->pixelformat = fmt->pixfmt;
+ f->flags = fmt->flags;
return 0;
}
@@ -422,11 +461,18 @@ static int vdec_enum_framesizes(struct file *file, void *fh,
static int vdec_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct venus_inst *inst = container_of(fh, struct venus_inst, fh);
+ int ret;
+
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 2, NULL);
case V4L2_EVENT_SOURCE_CHANGE:
- return v4l2_src_change_event_subscribe(fh, sub);
+ ret = v4l2_src_change_event_subscribe(fh, sub);
+ if (ret)
+ return ret;
+ inst->subscriptions |= V4L2_EVENT_SOURCE_CHANGE;
+ return 0;
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_event(fh, sub);
default:
@@ -435,45 +481,35 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
}
static int
-vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
-{
- switch (cmd->cmd) {
- case V4L2_DEC_CMD_STOP:
- if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
struct hfi_frame_data fdata = {0};
int ret;
- ret = vdec_try_decoder_cmd(file, fh, cmd);
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
if (ret)
return ret;
mutex_lock(&inst->lock);
- /*
- * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder
- * input to signal EOS.
- */
- if (!(inst->streamon_out & inst->streamon_cap))
- goto unlock;
+ if (cmd->cmd == V4L2_DEC_CMD_STOP) {
+ /*
+		 * Implement V4L2_DEC_CMD_STOP by enqueuing an empty buffer on
+ * decoder input to signal EOS.
+ */
+ if (!(inst->streamon_out && inst->streamon_cap))
+ goto unlock;
- fdata.buffer_type = HFI_BUFFER_INPUT;
- fdata.flags |= HFI_BUFFERFLAG_EOS;
- fdata.device_addr = 0xdeadbeef;
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ fdata.device_addr = 0xdeadb000;
- ret = hfi_session_process_buf(inst, &fdata);
+ ret = hfi_session_process_buf(inst, &fdata);
+
+ if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING)
+ inst->codec_state = VENUS_DEC_STATE_DRAIN;
+ }
unlock:
mutex_unlock(&inst->lock);
@@ -504,7 +540,7 @@ static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
.vidioc_enum_framesizes = vdec_enum_framesizes,
.vidioc_subscribe_event = vdec_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
- .vidioc_try_decoder_cmd = vdec_try_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
.vidioc_decoder_cmd = vdec_decoder_cmd,
};
@@ -634,20 +670,18 @@ static int vdec_output_conf(struct venus_inst *inst)
return 0;
}
-static int vdec_init_session(struct venus_inst *inst)
+static int vdec_session_init(struct venus_inst *inst)
{
int ret;
ret = hfi_session_init(inst, inst->fmt_out->pixfmt);
- if (ret)
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
return ret;
- ret = venus_helper_set_input_resolution(inst, inst->out_width,
- inst->out_height);
- if (ret)
- goto deinit;
-
- ret = venus_helper_set_color_format(inst, inst->fmt_cap->pixfmt);
+ ret = venus_helper_set_input_resolution(inst, frame_width_min(inst),
+ frame_height_min(inst));
if (ret)
goto deinit;
@@ -666,26 +700,19 @@ static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
*in_num = *out_num = 0;
- ret = vdec_init_session(inst);
- if (ret)
- return ret;
-
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
- goto deinit;
+ return ret;
*in_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
- goto deinit;
+ return ret;
*out_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
-deinit:
- hfi_session_deinit(inst);
-
- return ret;
+ return 0;
}
static int vdec_queue_setup(struct vb2_queue *q,
@@ -718,6 +745,10 @@ static int vdec_queue_setup(struct vb2_queue *q,
return 0;
}
+ ret = vdec_session_init(inst);
+ if (ret)
+ return ret;
+
ret = vdec_num_buffers(inst, &in_num, &out_num);
if (ret)
return ret;
@@ -728,6 +759,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
inst->out_width,
inst->out_height);
+ sizes[0] = max(sizes[0], inst->input_buf_size);
inst->input_buf_size = sizes[0];
*num_buffers = max(*num_buffers, in_num);
inst->num_input_bufs = *num_buffers;
@@ -741,6 +773,11 @@ static int vdec_queue_setup(struct vb2_queue *q,
inst->output_buf_size = sizes[0];
*num_buffers = max(*num_buffers, out_num);
inst->num_output_bufs = *num_buffers;
+
+ mutex_lock(&inst->lock);
+ if (inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP)
+ inst->codec_state = VENUS_DEC_STATE_STOPPED;
+ mutex_unlock(&inst->lock);
break;
default:
ret = -EINVAL;
@@ -777,80 +814,295 @@ static int vdec_verify_conf(struct venus_inst *inst)
return 0;
}
-static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+static int vdec_start_capture(struct venus_inst *inst)
{
- struct venus_inst *inst = vb2_get_drv_priv(q);
int ret;
- mutex_lock(&inst->lock);
+ if (!inst->streamon_out)
+ return 0;
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- inst->streamon_out = 1;
- else
- inst->streamon_cap = 1;
+ if (inst->codec_state == VENUS_DEC_STATE_DECODING) {
+ if (inst->reconfig)
+ goto reconfigure;
- if (!(inst->streamon_out & inst->streamon_cap)) {
- mutex_unlock(&inst->lock);
+ venus_helper_queue_dpb_bufs(inst);
+ venus_helper_process_initial_cap_bufs(inst);
+ inst->streamon_cap = 1;
return 0;
}
- venus_helper_init_instance(inst);
+ if (inst->codec_state != VENUS_DEC_STATE_STOPPED)
+ return 0;
- inst->reconfig = false;
- inst->sequence_cap = 0;
- inst->sequence_out = 0;
+reconfigure:
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT);
+ if (ret)
+ return ret;
- ret = vdec_init_session(inst);
+ ret = vdec_output_conf(inst);
if (ret)
- goto bufs_done;
+ return ret;
+
+ ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
+ VB2_MAX_FRAME, VB2_MAX_FRAME);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_intbufs_realloc(inst);
+ if (ret)
+ goto err;
+
+ ret = venus_helper_alloc_dpb_bufs(inst);
+ if (ret)
+ goto err;
+
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ goto free_dpb_bufs;
+
+ ret = venus_helper_process_initial_cap_bufs(inst);
+ if (ret)
+ goto free_dpb_bufs;
+
+ venus_helper_load_scale_clocks(inst->core);
+
+ ret = hfi_session_continue(inst);
+ if (ret)
+ goto free_dpb_bufs;
+
+ inst->codec_state = VENUS_DEC_STATE_DECODING;
+
+ inst->streamon_cap = 1;
+ inst->sequence_cap = 0;
+ inst->reconfig = false;
+
+ return 0;
+
+free_dpb_bufs:
+ venus_helper_free_dpb_bufs(inst);
+err:
+ return ret;
+}
+
+static int vdec_start_output(struct venus_inst *inst)
+{
+ int ret;
+
+ if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
+ ret = venus_helper_process_initial_out_bufs(inst);
+ inst->codec_state = VENUS_DEC_STATE_DECODING;
+ goto done;
+ }
+
+ if (inst->codec_state == VENUS_DEC_STATE_INIT ||
+ inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) {
+ ret = venus_helper_process_initial_out_bufs(inst);
+ goto done;
+ }
+
+ if (inst->codec_state != VENUS_DEC_STATE_DEINIT)
+ return -EINVAL;
+
+ venus_helper_init_instance(inst);
+ inst->sequence_out = 0;
+ inst->reconfig = false;
ret = vdec_set_properties(inst);
if (ret)
- goto deinit_sess;
+ return ret;
ret = vdec_output_conf(inst);
if (ret)
- goto deinit_sess;
+ return ret;
ret = vdec_verify_conf(inst);
if (ret)
- goto deinit_sess;
+ return ret;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
VB2_MAX_FRAME, VB2_MAX_FRAME);
if (ret)
- goto deinit_sess;
+ return ret;
- ret = venus_helper_alloc_dpb_bufs(inst);
+ ret = venus_helper_vb2_start_streaming(inst);
if (ret)
- goto deinit_sess;
+ return ret;
- ret = venus_helper_vb2_start_streaming(inst);
+ ret = venus_helper_process_initial_out_bufs(inst);
if (ret)
- goto deinit_sess;
+ return ret;
- mutex_unlock(&inst->lock);
+ inst->codec_state = VENUS_DEC_STATE_INIT;
+
+done:
+ inst->streamon_out = 1;
+ return ret;
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ret = vdec_start_capture(inst);
+ else
+ ret = vdec_start_output(inst);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&inst->lock);
return 0;
-deinit_sess:
- hfi_session_deinit(inst);
-bufs_done:
+error:
venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static void vdec_cancel_dst_buffers(struct venus_inst *inst)
+{
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+}
+
+static int vdec_stop_capture(struct venus_inst *inst)
+{
+ int ret = 0;
+
+ switch (inst->codec_state) {
+ case VENUS_DEC_STATE_DECODING:
+ ret = hfi_session_flush(inst, HFI_FLUSH_ALL);
+ /* fallthrough */
+ case VENUS_DEC_STATE_DRAIN:
+ vdec_cancel_dst_buffers(inst);
+ inst->codec_state = VENUS_DEC_STATE_STOPPED;
+ break;
+ case VENUS_DEC_STATE_DRC:
+ ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT);
+ vdec_cancel_dst_buffers(inst);
+ inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ venus_helper_free_dpb_bufs(inst);
+ break;
+ default:
+ return 0;
+ }
+
+ return ret;
+}
+
+static int vdec_stop_output(struct venus_inst *inst)
+{
+ int ret = 0;
+
+ switch (inst->codec_state) {
+ case VENUS_DEC_STATE_DECODING:
+ case VENUS_DEC_STATE_DRAIN:
+ case VENUS_DEC_STATE_STOPPED:
+ ret = hfi_session_flush(inst, HFI_FLUSH_ALL);
+ inst->codec_state = VENUS_DEC_STATE_SEEK;
+ break;
+ case VENUS_DEC_STATE_INIT:
+ case VENUS_DEC_STATE_CAPTURE_SETUP:
+ ret = hfi_session_flush(inst, HFI_FLUSH_INPUT);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void vdec_stop_streaming(struct vb2_queue *q)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ int ret = -EINVAL;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ret = vdec_stop_capture(inst);
+ else
+ ret = vdec_stop_output(inst);
+
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
+
+ if (ret)
+ goto unlock;
+
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
else
inst->streamon_cap = 0;
+
+unlock:
mutex_unlock(&inst->lock);
- return ret;
+}
+
+static void vdec_session_release(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ int ret, abort = 0;
+
+ mutex_lock(&inst->lock);
+
+ inst->codec_state = VENUS_DEC_STATE_DEINIT;
+
+ ret = hfi_session_stop(inst);
+ abort = (ret && ret != -EINVAL) ? 1 : 0;
+	ret = hfi_session_unload_res(inst);
+	abort |= (ret && ret != -EINVAL) ? 1 : 0;
+	ret = venus_helper_unregister_bufs(inst);
+	abort |= (ret && ret != -EINVAL) ? 1 : 0;
+	ret = venus_helper_intbufs_free(inst);
+	abort |= (ret && ret != -EINVAL) ? 1 : 0;
+	ret = hfi_session_deinit(inst);
+	abort |= (ret && ret != -EINVAL) ? 1 : 0;
+
+ if (inst->session_error || core->sys_error)
+ abort = 1;
+
+ if (abort)
+ hfi_session_abort(inst);
+
+ venus_helper_free_dpb_bufs(inst);
+ venus_helper_load_scale_clocks(core);
+ INIT_LIST_HEAD(&inst->registeredbufs);
+
+ mutex_unlock(&inst->lock);
+}
+
+static int vdec_buf_init(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ inst->buf_count++;
+
+ return venus_helper_vb2_buf_init(vb);
+}
+
+static void vdec_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ inst->buf_count--;
+ if (!inst->buf_count)
+ vdec_session_release(inst);
}
static const struct vb2_ops vdec_vb2_ops = {
.queue_setup = vdec_queue_setup,
- .buf_init = venus_helper_vb2_buf_init,
+ .buf_init = vdec_buf_init,
+ .buf_cleanup = vdec_buf_cleanup,
.buf_prepare = venus_helper_vb2_buf_prepare,
.start_streaming = vdec_start_streaming,
- .stop_streaming = venus_helper_vb2_stop_streaming,
+ .stop_streaming = vdec_stop_streaming,
.buf_queue = venus_helper_vb2_buf_queue,
};
@@ -874,9 +1126,9 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vbuf->flags = flags;
vbuf->field = V4L2_FIELD_NONE;
+ vb = &vbuf->vb2_buf;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- vb = &vbuf->vb2_buf;
vb2_set_plane_payload(vb, 0, bytesused);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
@@ -886,28 +1138,85 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
v4l2_event_queue_fh(&inst->fh, &ev);
+
+ if (inst->codec_state == VENUS_DEC_STATE_DRAIN)
+ inst->codec_state = VENUS_DEC_STATE_STOPPED;
}
} else {
vbuf->sequence = inst->sequence_out++;
}
+ venus_helper_get_ts_metadata(inst, timestamp_us, vbuf);
+
if (hfi_flags & HFI_BUFFERFLAG_READONLY)
venus_helper_acquire_buf_ref(vbuf);
if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT)
state = VB2_BUF_STATE_ERROR;
+ if (hfi_flags & HFI_BUFFERFLAG_DROP_FRAME) {
+ state = VB2_BUF_STATE_ERROR;
+ vb2_set_plane_payload(vb, 0, 0);
+ vb->timestamp = 0;
+ }
+
v4l2_m2m_buf_done(vbuf, state);
}
+static void vdec_event_change(struct venus_inst *inst,
+ struct hfi_event_data *ev_data, bool sufficient)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+ struct device *dev = inst->core->dev_dec;
+ struct v4l2_format format = {};
+
+ mutex_lock(&inst->lock);
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt;
+ format.fmt.pix_mp.width = ev_data->width;
+ format.fmt.pix_mp.height = ev_data->height;
+
+ vdec_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+
+ inst->out_width = ev_data->width;
+ inst->out_height = ev_data->height;
+
+	dev_dbg(dev, "event %ssufficient resources (%ux%u)\n",
+		sufficient ? "" : "not ", ev_data->width, ev_data->height);
+
+ if (sufficient) {
+ hfi_session_continue(inst);
+ } else {
+ switch (inst->codec_state) {
+ case VENUS_DEC_STATE_INIT:
+ inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
+ break;
+ case VENUS_DEC_STATE_DECODING:
+ inst->codec_state = VENUS_DEC_STATE_DRC;
+ break;
+ default:
+ break;
+ }
+ }
+
+ inst->reconfig = true;
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ wake_up(&inst->reconf_wait);
+
+ mutex_unlock(&inst->lock);
+}
+
static void vdec_event_notify(struct venus_inst *inst, u32 event,
struct hfi_event_data *data)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
- static const struct v4l2_event ev = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
switch (event) {
case EVT_SESSION_ERROR:
@@ -917,18 +1226,10 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
case EVT_SYS_EVENT_CHANGE:
switch (data->event_type) {
case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
- hfi_session_continue(inst);
- dev_dbg(dev, "event sufficient resources\n");
+ vdec_event_change(inst, data, true);
break;
case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
- inst->reconfig_height = data->height;
- inst->reconfig_width = data->width;
- inst->reconfig = true;
-
- v4l2_event_queue_fh(&inst->fh, &ev);
-
- dev_dbg(dev, "event not sufficient resources (%ux%u)\n",
- data->width, data->height);
+ vdec_event_change(inst, data, false);
break;
case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
venus_helper_release_buf_ref(inst, data->tag);
@@ -949,20 +1250,25 @@ static const struct hfi_inst_ops vdec_hfi_ops = {
static void vdec_inst_init(struct venus_inst *inst)
{
+ inst->hfi_codec = HFI_VIDEO_CODEC_H264;
inst->fmt_out = &vdec_formats[6];
inst->fmt_cap = &vdec_formats[0];
- inst->width = 1280;
- inst->height = ALIGN(720, 32);
- inst->out_width = 1280;
- inst->out_height = 720;
+ inst->width = frame_width_min(inst);
+ inst->height = ALIGN(frame_height_min(inst), 32);
+ inst->out_width = frame_width_min(inst);
+ inst->out_height = frame_height_min(inst);
inst->fps = 30;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 30;
- inst->hfi_codec = HFI_VIDEO_CODEC_H264;
+ inst->opb_buftype = HFI_BUFFER_OUTPUT;
+}
+
+static void vdec_m2m_device_run(void *priv)
+{
}
static const struct v4l2_m2m_ops vdec_m2m_ops = {
- .device_run = venus_helper_m2m_device_run,
+ .device_run = vdec_m2m_device_run,
.job_abort = venus_helper_m2m_job_abort,
};
@@ -980,7 +1286,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
- src_vq->min_buffers_needed = 1;
+ src_vq->min_buffers_needed = 0;
src_vq->dev = inst->core->dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -994,7 +1300,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
- dst_vq->min_buffers_needed = 1;
+ dst_vq->min_buffers_needed = 0;
dst_vq->dev = inst->core->dev;
ret = vb2_queue_init(dst_vq);
if (ret) {
@@ -1024,7 +1330,9 @@ static int vdec_open(struct file *file)
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_DEC;
inst->num_output_bufs = 1;
-
+ inst->codec_state = VENUS_DEC_STATE_DEINIT;
+ inst->buf_count = 0;
+ init_waitqueue_head(&inst->reconf_wait);
venus_helper_init_instance(inst);
ret = pm_runtime_get_sync(core->dev_dec);
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index 300350bfe8bd..3a963cbd342a 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -7,6 +7,7 @@
#include <media/v4l2-ctrls.h>
#include "core.h"
+#include "helpers.h"
#include "vdec.h"
static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -38,7 +39,9 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct vdec_controls *ctr = &inst->controls.dec;
+ struct hfi_buffer_requirements bufreq;
union hfi_get_property hprop;
+ enum hfi_version ver = inst->core->res->hfi_version;
u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
int ret;
@@ -62,7 +65,9 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
ctrl->val = ctr->post_loop_deb_mode;
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- ctrl->val = inst->num_output_bufs;
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (!ret)
+ ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index a5f3d2c46bea..1b7fb2d5887c 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -294,6 +294,7 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
+ u32 sizeimage;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
@@ -325,9 +326,10 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
- pfmt[0].sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
- pixmp->width,
- pixmp->height);
+ sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
+ pixmp->width,
+ pixmp->height);
+ pfmt[0].sizeimage = max(ALIGN(pfmt[0].sizeimage, SZ_4K), sizeimage);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
@@ -399,8 +401,10 @@ static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
+ inst->output_buf_size = pixmp->plane_fmt[0].sizeimage;
+ }
return 0;
}
@@ -918,6 +922,7 @@ static int venc_queue_setup(struct vb2_queue *q,
sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
inst->width,
inst->height);
+ sizes[0] = max(sizes[0], inst->output_buf_size);
inst->output_buf_size = sizes[0];
break;
default:
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 64f9cf790445..6993484ff0f3 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -390,6 +390,28 @@ out:
}
/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int rvin_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rvin_dev *vin =
+ container_of(ctrl->handler, struct rvin_dev, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ rvin_set_alpha(vin, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rvin_ctrl_ops = {
+ .s_ctrl = rvin_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
* Async notifier
*/
@@ -478,6 +500,15 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
if (ret < 0)
return ret;
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ return ret;
+ }
+
ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
NULL, true);
if (ret < 0) {
@@ -633,7 +664,7 @@ static int rvin_parallel_init(struct rvin_dev *vin)
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
@@ -870,6 +901,21 @@ static int rvin_mc_init(struct rvin_dev *vin)
if (ret)
rvin_group_put(vin);
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 1);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ return ret;
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
return ret;
}
@@ -1245,6 +1291,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
vin->dev = &pdev->dev;
vin->info = of_device_get_match_data(&pdev->dev);
+ vin->alpha = 0xff;
/*
* Special care is needed on r8a7795 ES1.x since it
@@ -1288,6 +1335,8 @@ static int rcar_vin_probe(struct platform_device *pdev)
return 0;
error_group_unregister:
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+
if (vin->info->use_mc) {
mutex_lock(&vin->group->lock);
if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
@@ -1323,10 +1372,10 @@ static int rcar_vin_remove(struct platform_device *pdev)
}
mutex_unlock(&vin->group->lock);
rvin_group_put(vin);
- } else {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
}
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+
rvin_dma_unregister(vin);
return 0;
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 91ab064404a1..3cb29b2e0b2b 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -111,10 +111,13 @@
#define VNIE_EFE (1 << 1)
/* Video n Data Mode Register bits */
+#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
+#define VNDMR_A8BIT_MASK (0xff << 24)
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
+#define VNDMR_ABIT (1 << 2)
#define VNDMR_DTMD_YCSEP (1 << 1)
-#define VNDMR_DTMD_ARGB1555 (1 << 0)
+#define VNDMR_DTMD_ARGB (1 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -574,6 +577,9 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
void rvin_crop_scale_comp(struct rvin_dev *vin)
{
+ const struct rvin_video_format *fmt;
+ u32 stride;
+
/* Set Start/End Pixel/Line Pre-Clip */
rvin_write(vin, vin->crop.left, VNSPPRC_REG);
rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
@@ -597,10 +603,9 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
if (vin->info->model != RCAR_GEN3)
rvin_crop_scale_comp_gen2(vin);
- if (vin->format.pixelformat == V4L2_PIX_FMT_NV16)
- rvin_write(vin, ALIGN(vin->format.width, 0x20), VNIS_REG);
- else
- rvin_write(vin, ALIGN(vin->format.width, 0x10), VNIS_REG);
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
+ stride = vin->format.bytesperline / fmt->bpp;
+ rvin_write(vin, stride, VNIS_REG);
}
/* -----------------------------------------------------------------------------
@@ -721,7 +726,7 @@ static int rvin_setup(struct rvin_dev *vin)
output_is_yuv = true;
break;
case V4L2_PIX_FMT_XRGB555:
- dmr = VNDMR_DTMD_ARGB1555;
+ dmr = VNDMR_DTMD_ARGB;
break;
case V4L2_PIX_FMT_RGB565:
dmr = 0;
@@ -730,6 +735,12 @@ static int rvin_setup(struct rvin_dev *vin)
/* Note: not supported on M1 */
dmr = VNDMR_EXRGB;
break;
+ case V4L2_PIX_FMT_ARGB555:
+ dmr = (vin->alpha ? VNDMR_ABIT : 0) | VNDMR_DTMD_ARGB;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ dmr = VNDMR_A8BIT(vin->alpha) | VNDMR_EXRGB | VNDMR_DTMD_ARGB;
+ break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
vin->format.pixelformat);
@@ -794,7 +805,7 @@ static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
int offsetx, offsety;
dma_addr_t offset;
- fmt = rvin_format_from_pixel(vin->format.pixelformat);
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
/*
	 * There is no HW support for composition, do the best we can
@@ -1343,3 +1354,34 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
return 0;
}
+
+void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha)
+{
+ unsigned long flags;
+ u32 dmr;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->alpha = alpha;
+
+ if (vin->state == STOPPED)
+ goto out;
+
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_ARGB555:
+ dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_ABIT;
+ if (vin->alpha)
+ dmr |= VNDMR_ABIT;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_A8BIT_MASK;
+ dmr |= VNDMR_A8BIT(vin->alpha);
+ break;
+ default:
+ goto out;
+ }
+
+ rvin_write(vin, dmr, VNDMR_REG);
+out:
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0936bcd98df1..cbc1c07f0a96 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -54,12 +54,24 @@ static const struct rvin_video_format rvin_formats[] = {
.fourcc = V4L2_PIX_FMT_XBGR32,
.bpp = 4,
},
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .bpp = 4,
+ },
};
-const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat)
+const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
+ u32 pixelformat)
{
int i;
+ if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
return rvin_formats + i;
@@ -67,16 +79,20 @@ const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat)
return NULL;
}
-static u32 rvin_format_bytesperline(struct v4l2_pix_format *pix)
+static u32 rvin_format_bytesperline(struct rvin_dev *vin,
+ struct v4l2_pix_format *pix)
{
const struct rvin_video_format *fmt;
+ u32 align;
- fmt = rvin_format_from_pixel(pix->pixelformat);
+ fmt = rvin_format_from_pixel(vin, pix->pixelformat);
if (WARN_ON(!fmt))
return -EINVAL;
- return pix->width * fmt->bpp;
+ align = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 0x20 : 0x10;
+
+ return ALIGN(pix->width, align) * fmt->bpp;
}
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
@@ -91,9 +107,7 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
{
u32 walign;
- if (!rvin_format_from_pixel(pix->pixelformat) ||
- (vin->info->model == RCAR_M1 &&
- pix->pixelformat == V4L2_PIX_FMT_XBGR32))
+ if (!rvin_format_from_pixel(vin, pix->pixelformat))
pix->pixelformat = RVIN_DEFAULT_FORMAT;
switch (pix->field) {
@@ -125,7 +139,7 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
&pix->height, 4, vin->info->max_height, 2, 0);
- pix->bytesperline = rvin_format_bytesperline(pix);
+ pix->bytesperline = rvin_format_bytesperline(vin, pix);
pix->sizeimage = rvin_format_sizeimage(pix);
vin_dbg(vin, "Format %ux%u bpl: %u size: %u\n",
@@ -181,9 +195,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
if (pad_cfg == NULL)
return -ENOMEM;
- if (!rvin_format_from_pixel(pix->pixelformat) ||
- (vin->info->model == RCAR_M1 &&
- pix->pixelformat == V4L2_PIX_FMT_XBGR32))
+ if (!rvin_format_from_pixel(vin, pix->pixelformat))
pix->pixelformat = RVIN_DEFAULT_FORMAT;
v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);
@@ -384,7 +396,7 @@ static int rvin_s_selection(struct file *file, void *fh,
while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
r.top--;
- fmt = rvin_format_from_pixel(vin->format.pixelformat);
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
r.left--;
@@ -781,26 +793,26 @@ static int rvin_open(struct file *file)
if (ret)
goto err_unlock;
- if (vin->info->use_mc) {
+ if (vin->info->use_mc)
ret = v4l2_pipeline_pm_use(&vin->vdev.entity, 1);
- if (ret < 0)
- goto err_open;
- } else {
- if (v4l2_fh_is_singular_file(file)) {
- ret = rvin_power_parallel(vin, true);
- if (ret < 0)
- goto err_open;
-
- ret = v4l2_ctrl_handler_setup(&vin->ctrl_handler);
- if (ret)
- goto err_parallel;
- }
- }
+ else if (v4l2_fh_is_singular_file(file))
+ ret = rvin_power_parallel(vin, true);
+
+ if (ret < 0)
+ goto err_open;
+
+ ret = v4l2_ctrl_handler_setup(&vin->ctrl_handler);
+ if (ret)
+ goto err_power;
+
mutex_unlock(&vin->lock);
return 0;
-err_parallel:
- rvin_power_parallel(vin, false);
+err_power:
+ if (vin->info->use_mc)
+ v4l2_pipeline_pm_use(&vin->vdev.entity, 0);
+ else if (v4l2_fh_is_singular_file(file))
+ rvin_power_parallel(vin, false);
err_open:
v4l2_fh_release(file);
err_unlock:
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 0b13b34d03e3..e562c2ff21ec 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -178,6 +178,8 @@ struct rvin_info {
* @compose: active composing
* @source: active size of the video source
* @std: active video standard of the video source
+ *
+ * @alpha: Alpha component to fill in for supported pixel formats
*/
struct rvin_dev {
struct device *dev;
@@ -215,6 +217,8 @@ struct rvin_dev {
struct v4l2_rect compose;
struct v4l2_rect source;
v4l2_std_id std;
+
+ unsigned int alpha;
};
#define vin_to_source(vin) ((vin)->parallel->subdev)
@@ -260,11 +264,14 @@ void rvin_dma_unregister(struct rvin_dev *vin);
int rvin_v4l2_register(struct rvin_dev *vin);
void rvin_v4l2_unregister(struct rvin_dev *vin);
-const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat);
+const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
+ u32 pixelformat);
+
/* Cropping, composing and scaling */
void rvin_crop_scale_comp(struct rvin_dev *vin);
int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel);
+void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha);
#endif
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 43aae9b6bb20..cb93a13e1777 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2122,6 +2122,7 @@ static int fdp1_open(struct file *file)
if (ctx->hdl.error) {
ret = ctx->hdl.error;
v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
goto done;
}
@@ -2306,7 +2307,7 @@ static int fdp1_probe(struct platform_device *pdev)
fdp1->fcp = rcar_fcp_get(fcp_node);
of_node_put(fcp_node);
if (IS_ERR(fdp1->fcp)) {
- dev_err(&pdev->dev, "FCP not found (%ld)\n",
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
PTR_ERR(fdp1->fcp));
return PTR_ERR(fdp1->fcp);
}
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index 57d0c0f9fa4b..197b3991330d 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1659,10 +1659,8 @@ static int ceu_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to get irq: %d\n", ret);
+ if (ret < 0)
goto error_free_ceudev;
- }
irq = ret;
ret = devm_request_irq(dev, irq, ceu_irq,
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 5283d4533fa0..e9ff12b6b5bb 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -831,7 +831,6 @@ static int rga_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(rga->dev, "failed to get irq\n");
ret = irq;
goto err_put_clk;
}
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index a876d0873ebc..2fb45db8e4ba 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -641,10 +641,6 @@ static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
strscpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
dev_name(vp->camif->dev), vp->id);
-
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -685,10 +681,7 @@ static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
-
- pr_debug("fmt(%d): %s\n", f->index, f->description);
return 0;
}
@@ -802,10 +795,10 @@ static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
if (vp->owner == NULL)
vp->owner = priv;
- pr_debug("%ux%u. payload: %u. fmt: %s. %d %d. sizeimage: %d. bpl: %d\n",
- out_frame->f_width, out_frame->f_height, vp->payload, fmt->name,
- pix->width * pix->height * fmt->depth, fmt->depth,
- pix->sizeimage, pix->bytesperline);
+ pr_debug("%ux%u. payload: %u. fmt: 0x%08x. %d %d. sizeimage: %d. bpl: %d\n",
+ out_frame->f_width, out_frame->f_height, vp->payload,
+ fmt->fourcc, pix->width * pix->height * fmt->depth,
+ fmt->depth, pix->sizeimage, pix->bytesperline);
return 0;
}
@@ -1163,6 +1156,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
goto err_me_cleanup;
vfd->ctrl_handler = &vp->ctrl_handler;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index b05ce0149ca1..c6fbcd7036d6 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -42,7 +42,6 @@ static char *camif_clocks[CLK_MAX_NUM] = {
static const struct camif_fmt camif_formats[] = {
{
- .name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16,
.ybpp = 1,
@@ -51,7 +50,6 @@ static const struct camif_fmt camif_formats[] = {
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
- .name = "YUV 4:2:0 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.ybpp = 1,
@@ -60,7 +58,6 @@ static const struct camif_fmt camif_formats[] = {
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
- .name = "YVU 4:2:0 planar, Y/Cr/Cb",
.fourcc = V4L2_PIX_FMT_YVU420,
.depth = 12,
.ybpp = 1,
@@ -69,7 +66,6 @@ static const struct camif_fmt camif_formats[] = {
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
- .name = "RGB565, 16 bpp",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.ybpp = 2,
@@ -78,7 +74,6 @@ static const struct camif_fmt camif_formats[] = {
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
- .name = "XRGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.ybpp = 4,
@@ -87,7 +82,6 @@ static const struct camif_fmt camif_formats[] = {
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
- .name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = 32,
.ybpp = 4,
@@ -386,10 +380,8 @@ static int camif_request_irqs(struct platform_device *pdev,
init_waitqueue_head(&vp->irq_queue);
irq = platform_get_irq(pdev, i);
- if (irq <= 0) {
- dev_err(&pdev->dev, "failed to get IRQ %d\n", i);
+ if (irq <= 0)
return -ENXIO;
- }
ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
0, dev_name(&pdev->dev), vp);
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index efdc00b4ec6f..f937e638490f 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -89,7 +89,6 @@ enum img_fmt {
* @ybpp: number of luminance bytes per pixel
*/
struct camif_fmt {
- char *name;
u32 fourcc;
u32 color;
u16 colplanes;
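
The removal of the format name fields above (and from the other format tables throughout this diff) relies on the V4L2 core filling struct v4l2_fmtdesc's description from the fourcc, so ENUM_FMT handlers shrink to an index check plus the pixelformat. A hedged sketch of that shape; xxx_enum_fmt and the formats[] table are illustrative names only:

	static int xxx_enum_fmt(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
	{
		if (f->index >= ARRAY_SIZE(formats))
			return -EINVAL;
		f->pixelformat = formats[f->index].fourcc;
		return 0;
	}
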
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h
index 29f839cdb486..052948a7b669 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.h
+++ b/drivers/media/platform/s3c-camif/camif-regs.h
@@ -9,6 +9,8 @@
#ifndef CAMIF_REGS_H_
#define CAMIF_REGS_H_
+#include <linux/bitops.h>
+
#include "camif-core.h"
#include <media/drv-intf/s3c_camif.h>
@@ -19,7 +21,7 @@
/* Camera input format */
#define S3C_CAMIF_REG_CISRCFMT 0x00
-#define CISRCFMT_ITU601_8BIT (1 << 31)
+#define CISRCFMT_ITU601_8BIT BIT(31)
#define CISRCFMT_ITU656_8BIT (0 << 31)
#define CISRCFMT_ORDER422_YCBYCR (0 << 14)
#define CISRCFMT_ORDER422_YCRYCB (1 << 14)
@@ -30,14 +32,14 @@
/* Window offset */
#define S3C_CAMIF_REG_CIWDOFST 0x04
-#define CIWDOFST_WINOFSEN (1 << 31)
-#define CIWDOFST_CLROVCOFIY (1 << 30)
-#define CIWDOFST_CLROVRLB_PR (1 << 28)
-/* #define CIWDOFST_CLROVPRFIY (1 << 27) */
-#define CIWDOFST_CLROVCOFICB (1 << 15)
-#define CIWDOFST_CLROVCOFICR (1 << 14)
-#define CIWDOFST_CLROVPRFICB (1 << 13)
-#define CIWDOFST_CLROVPRFICR (1 << 12)
+#define CIWDOFST_WINOFSEN BIT(31)
+#define CIWDOFST_CLROVCOFIY BIT(30)
+#define CIWDOFST_CLROVRLB_PR BIT(28)
+/* #define CIWDOFST_CLROVPRFIY BIT(27) */
+#define CIWDOFST_CLROVCOFICB BIT(15)
+#define CIWDOFST_CLROVCOFICR BIT(14)
+#define CIWDOFST_CLROVPRFICB BIT(13)
+#define CIWDOFST_CLROVPRFICR BIT(12)
#define CIWDOFST_OFST_MASK (0x7ff << 16 | 0x7ff)
/* Window offset 2 */
@@ -46,24 +48,24 @@
/* Global control */
#define S3C_CAMIF_REG_CIGCTRL 0x08
-#define CIGCTRL_SWRST (1 << 31)
-#define CIGCTRL_CAMRST (1 << 30)
+#define CIGCTRL_SWRST BIT(31)
+#define CIGCTRL_CAMRST BIT(30)
#define CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
#define CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
#define CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
#define CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
#define CIGCTRL_TESTPATTERN_MASK (3 << 27)
-#define CIGCTRL_INVPOLPCLK (1 << 26)
-#define CIGCTRL_INVPOLVSYNC (1 << 25)
-#define CIGCTRL_INVPOLHREF (1 << 24)
-#define CIGCTRL_IRQ_OVFEN (1 << 22)
-#define CIGCTRL_HREF_MASK (1 << 21)
-#define CIGCTRL_IRQ_LEVEL (1 << 20)
+#define CIGCTRL_INVPOLPCLK BIT(26)
+#define CIGCTRL_INVPOLVSYNC BIT(25)
+#define CIGCTRL_INVPOLHREF BIT(24)
+#define CIGCTRL_IRQ_OVFEN BIT(22)
+#define CIGCTRL_HREF_MASK BIT(21)
+#define CIGCTRL_IRQ_LEVEL BIT(20)
/* IRQ_CLR_C, IRQ_CLR_P */
-#define CIGCTRL_IRQ_CLR(id) (1 << (19 - (id)))
-#define CIGCTRL_FIELDMODE (1 << 2)
-#define CIGCTRL_INVPOLFIELD (1 << 1)
-#define CIGCTRL_CAM_INTERLACE (1 << 0)
+#define CIGCTRL_IRQ_CLR(id) BIT(19 - (id))
+#define CIGCTRL_FIELDMODE BIT(2)
+#define CIGCTRL_INVPOLFIELD BIT(1)
+#define CIGCTRL_CAM_INTERLACE BIT(0)
/* Y DMA output frame start address. n = 0..3. */
#define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4)
@@ -74,8 +76,8 @@
/* CICOTRGFMT, CIPRTRGFMT - Target format */
#define S3C_CAMIF_REG_CITRGFMT(id, _offs) (0x48 + (id) * (0x34 + (_offs)))
-#define CITRGFMT_IN422 (1 << 31) /* only for s3c24xx */
-#define CITRGFMT_OUT422 (1 << 30) /* only for s3c24xx */
+#define CITRGFMT_IN422 BIT(31) /* only for s3c24xx */
+#define CITRGFMT_OUT422 BIT(30) /* only for s3c24xx */
#define CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29) /* only for s3c6410 */
#define CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29) /* only for s3c6410 */
#define CITRGFMT_OUTFORMAT_YCBCR422I (2 << 29) /* only for s3c6410 */
@@ -88,7 +90,7 @@
#define CITRGFMT_FLIP_180 (3 << 14)
#define CITRGFMT_FLIP_MASK (3 << 14)
/* Preview path only */
-#define CITRGFMT_ROT90_PR (1 << 13)
+#define CITRGFMT_ROT90_PR BIT(13)
#define CITRGFMT_TARGETVSIZE(x) ((x) << 0)
#define CITRGFMT_TARGETSIZE_MASK ((0x1fff << 16) | 0x1fff)
@@ -102,7 +104,7 @@
#define CICTRL_RGBBURST2(x) ((x) << 14)
#define CICTRL_CBURST1(x) ((x) << 9)
#define CICTRL_CBURST2(x) ((x) << 4)
-#define CICTRL_LASTIRQ_ENABLE (1 << 2)
+#define CICTRL_LASTIRQ_ENABLE BIT(2)
#define CICTRL_ORDER422_MASK (3 << 0)
/* CICOSCPRERATIO, CIPRSCPRERATIO. Pre-scaler control 1. */
@@ -113,22 +115,22 @@
/* CICOSCCTRL, CIPRSCCTRL. Main scaler control. */
#define S3C_CAMIF_REG_CISCCTRL(id, _offs) (0x58 + (id) * (0x34 + (_offs)))
-#define CISCCTRL_SCALERBYPASS (1 << 31)
+#define CISCCTRL_SCALERBYPASS BIT(31)
/* s3c244x preview path only, s3c64xx both */
-#define CIPRSCCTRL_SAMPLE (1 << 31)
+#define CIPRSCCTRL_SAMPLE BIT(31)
/* 0 - 16-bit RGB, 1 - 24-bit RGB */
-#define CIPRSCCTRL_RGB_FORMAT_24BIT (1 << 30) /* only for s3c244x */
-#define CIPRSCCTRL_SCALEUP_H (1 << 29) /* only for s3c244x */
-#define CIPRSCCTRL_SCALEUP_V (1 << 28) /* only for s3c244x */
+#define CIPRSCCTRL_RGB_FORMAT_24BIT BIT(30) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_H BIT(29) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_V BIT(28) /* only for s3c244x */
/* s3c64xx */
-#define CISCCTRL_SCALEUP_H (1 << 30)
-#define CISCCTRL_SCALEUP_V (1 << 29)
+#define CISCCTRL_SCALEUP_H BIT(30)
+#define CISCCTRL_SCALEUP_V BIT(29)
#define CISCCTRL_SCALEUP_MASK (0x3 << 29)
-#define CISCCTRL_CSCR2Y_WIDE (1 << 28)
-#define CISCCTRL_CSCY2R_WIDE (1 << 27)
-#define CISCCTRL_LCDPATHEN_FIFO (1 << 26)
-#define CISCCTRL_INTERLACE (1 << 25)
-#define CISCCTRL_SCALERSTART (1 << 15)
+#define CISCCTRL_CSCR2Y_WIDE BIT(28)
+#define CISCCTRL_CSCY2R_WIDE BIT(27)
+#define CISCCTRL_LCDPATHEN_FIFO BIT(26)
+#define CISCCTRL_INTERLACE BIT(25)
+#define CISCCTRL_SCALERSTART BIT(15)
#define CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
#define CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
#define CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
@@ -137,8 +139,8 @@
#define CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
#define CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
#define CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
-#define CISCCTRL_EXTRGB_EXTENSION (1 << 10)
-#define CISCCTRL_ONE2ONE (1 << 9)
+#define CISCCTRL_EXTRGB_EXTENSION BIT(10)
+#define CISCCTRL_ONE2ONE BIT(9)
#define CISCCTRL_MAIN_RATIO_MASK (0x1ff << 16 | 0x1ff)
/* CICOTAREA, CIPRTAREA. Target area for DMA (Hsize x Vsize). */
@@ -147,38 +149,38 @@
/* Codec (id = 0) or preview (id = 1) path status. */
#define S3C_CAMIF_REG_CISTATUS(id, _offs) (0x64 + (id) * (0x34 + (_offs)))
-#define CISTATUS_OVFIY_STATUS (1 << 31)
-#define CISTATUS_OVFICB_STATUS (1 << 30)
-#define CISTATUS_OVFICR_STATUS (1 << 29)
+#define CISTATUS_OVFIY_STATUS BIT(31)
+#define CISTATUS_OVFICB_STATUS BIT(30)
+#define CISTATUS_OVFICR_STATUS BIT(29)
#define CISTATUS_OVF_MASK (0x7 << 29)
#define CIPRSTATUS_OVF_MASK (0x3 << 30)
-#define CISTATUS_VSYNC_STATUS (1 << 28)
+#define CISTATUS_VSYNC_STATUS BIT(28)
#define CISTATUS_FRAMECNT_MASK (3 << 26)
#define CISTATUS_FRAMECNT(__reg) (((__reg) >> 26) & 0x3)
-#define CISTATUS_WINOFSTEN_STATUS (1 << 25)
-#define CISTATUS_IMGCPTEN_STATUS (1 << 22)
-#define CISTATUS_IMGCPTENSC_STATUS (1 << 21)
-#define CISTATUS_VSYNC_A_STATUS (1 << 20)
-#define CISTATUS_FRAMEEND_STATUS (1 << 19) /* 17 on s3c64xx */
+#define CISTATUS_WINOFSTEN_STATUS BIT(25)
+#define CISTATUS_IMGCPTEN_STATUS BIT(22)
+#define CISTATUS_IMGCPTENSC_STATUS BIT(21)
+#define CISTATUS_VSYNC_A_STATUS BIT(20)
+#define CISTATUS_FRAMEEND_STATUS BIT(19) /* 17 on s3c64xx */
/* Image capture enable */
#define S3C_CAMIF_REG_CIIMGCPT(_offs) (0xa0 + (_offs))
-#define CIIMGCPT_IMGCPTEN (1 << 31)
-#define CIIMGCPT_IMGCPTEN_SC(id) (1 << (30 - (id)))
+#define CIIMGCPT_IMGCPTEN BIT(31)
+#define CIIMGCPT_IMGCPTEN_SC(id) BIT(30 - (id))
/* Frame control: 1 - one-shot, 0 - free run */
-#define CIIMGCPT_CPT_FREN_ENABLE(id) (1 << (25 - (id)))
+#define CIIMGCPT_CPT_FREN_ENABLE(id) BIT(25 - (id))
#define CIIMGCPT_CPT_FRMOD_ENABLE (0 << 18)
-#define CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+#define CIIMGCPT_CPT_FRMOD_CNT BIT(18)
/* Capture sequence */
#define S3C_CAMIF_REG_CICPTSEQ 0xc4
/* Image effects */
#define S3C_CAMIF_REG_CIIMGEFF(_offs) (0xb0 + (_offs))
-#define CIIMGEFF_IE_ENABLE(id) (1 << (30 + (id)))
+#define CIIMGEFF_IE_ENABLE(id) BIT(30 + (id))
#define CIIMGEFF_IE_ENABLE_MASK (3 << 30)
/* Image effect: 1 - after scaler, 0 - before scaler */
-#define CIIMGEFF_IE_AFTER_SC (1 << 29)
+#define CIIMGEFF_IE_AFTER_SC BIT(29)
#define CIIMGEFF_FIN_MASK (7 << 26)
#define CIIMGEFF_FIN_BYPASS (0 << 26)
#define CIIMGEFF_FIN_ARBITRARY (1 << 26)
@@ -207,8 +209,8 @@
/* Real input DMA data size. n = 0 - codec, 1 - preview. */
#define S3C_CAMIF_REG_MSWIDTH(id) (0xf8 + (id) * 0x2c)
-#define AUTOLOAD_ENABLE (1 << 31)
-#define ADDR_CH_DIS (1 << 30)
+#define AUTOLOAD_ENABLE BIT(31)
+#define ADDR_CH_DIS BIT(30)
#define MSHEIGHT(x) (((x) & 0x3ff) << 16)
#define MSWIDTH(x) ((x) & 0x3ff)
@@ -219,12 +221,12 @@
#define MSCTRL_ORDER422_M_CBYCRY (2 << 4)
#define MSCTRL_ORDER422_M_CRYCBY (3 << 4)
/* 0 - camera, 1 - DMA */
-#define MSCTRL_SEL_DMA_CAM (1 << 3)
+#define MSCTRL_SEL_DMA_CAM BIT(3)
#define MSCTRL_INFORMAT_M_YCBCR420 (0 << 1)
#define MSCTRL_INFORMAT_M_YCBCR422 (1 << 1)
#define MSCTRL_INFORMAT_M_YCBCR422I (2 << 1)
#define MSCTRL_INFORMAT_M_RGB (3 << 1)
-#define MSCTRL_ENVID_M (1 << 0)
+#define MSCTRL_ENVID_M BIT(0)
/* CICOSCOSY, CIPRSCOSY. Scan line Y/Cb/Cr offset. */
#define S3C_CAMIF_REG_CISSY(id) (0x12c + (id) * 0x0c)
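
The register-mask conversions above swap open-coded (1 << n) for the BIT() helper from <linux/bitops.h> (hence the added include). The practical difference for bit 31: BIT() shifts an unsigned long, while 1 << 31 shifts a signed int into its sign bit, which is undefined behaviour in C. The later 0x1UL << 31 and 1UL << 31 changes in the s5p-jpeg and s5p-mfc hunks address the same signed-shift issue without the helper. A small sketch of the two forms; OLD_FLAG/NEW_FLAG are illustrative names:

	#include <linux/bitops.h>

	#define OLD_FLAG	(1 << 31)	/* signed-int shift, undefined for bit 31 */
	#define NEW_FLAG	BIT(31)		/* expands to (1UL << 31), well defined */
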
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index ea6231b387ed..6ddcc35b0bbd 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -214,21 +214,23 @@ static int s5p_cec_probe(struct platform_device *pdev)
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
- cec->notifier = cec_notifier_get(hdmi_dev);
- if (cec->notifier == NULL)
- return -ENOMEM;
-
cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME,
- CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0), 1);
+ CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0) |
+ CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret)
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
goto err_delete_adapter;
+ }
- cec_register_cec_notifier(cec->adap, cec->notifier);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto err_notifier;
platform_set_drvdata(pdev, cec);
pm_runtime_enable(dev);
@@ -236,6 +238,9 @@ static int s5p_cec_probe(struct platform_device *pdev)
dev_dbg(dev, "successfully probed\n");
return 0;
+err_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier);
+
err_delete_adapter:
cec_delete_adapter(cec->adap);
return ret;
@@ -245,8 +250,8 @@ static int s5p_cec_remove(struct platform_device *pdev)
{
struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier);
cec_unregister_adapter(cec->adap);
- cec_notifier_put(cec->notifier);
pm_runtime_disable(&pdev->dev);
return 0;
}
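
The s5p-cec conversion above (repeated later in this diff for seco-cec and stih-cec) moves to cec_notifier_cec_adap_register(): the notifier is bound to the adapter after cec_allocate_adapter() and before cec_register_adapter(), and is undone with cec_notifier_cec_adap_unregister() on both the error path and remove. A condensed probe-order sketch using only the calls visible in these hunks; ops, priv, hdmi_dev, dev and the error labels are placeholders:

	adap = cec_allocate_adapter(&ops, priv, "name",
				    CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO, 1);
	ret = PTR_ERR_OR_ZERO(adap);
	if (ret)
		return ret;

	notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, adap);
	if (!notifier) {
		ret = -ENOMEM;
		goto err_delete_adapter;
	}

	ret = cec_register_adapter(adap, dev);
	if (ret)
		goto err_notifier;
	return 0;

	err_notifier:
		cec_notifier_cec_adap_unregister(notifier);
	err_delete_adapter:
		cec_delete_adapter(adap);
		return ret;
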
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 152d192d5c3f..f5f05ea9f521 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -29,31 +29,26 @@
static struct g2d_fmt formats[] = {
{
- .name = "XRGB_8888",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
},
{
- .name = "RGB_565",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
},
{
- .name = "XRGB_1555",
.fourcc = V4L2_PIX_FMT_RGB555X,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
},
{
- .name = "XRGB_4444",
.fourcc = V4L2_PIX_FMT_RGB444,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
},
{
- .name = "PACKED_RGB_888",
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 24,
.hw = COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
@@ -296,19 +291,14 @@ static int vidioc_querycap(struct file *file, void *priv,
strscpy(cap->driver, G2D_NAME, sizeof(cap->driver));
strscpy(cap->card, G2D_NAME, sizeof(cap->card));
cap->bus_info[0] = 0;
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
{
- struct g2d_fmt *fmt;
if (f->index >= NUM_FORMATS)
return -EINVAL;
- fmt = &formats[f->index];
- f->pixelformat = fmt->fourcc;
- strscpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = formats[f->index].fourcc;
return 0;
}
@@ -704,6 +694,7 @@ static int g2d_probe(struct platform_device *pdev)
set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
vfd->lock = &dev->mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index def0ec0dabeb..c2309c1370da 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -61,7 +61,6 @@ struct g2d_ctx {
};
struct g2d_fmt {
- char *name;
u32 fourcc;
int depth;
u32 hw;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index a3bc884b7df1..8dbbd5f2a40a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -35,7 +35,6 @@
static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
- .name = "JPEG JFIF",
.fourcc = V4L2_PIX_FMT_JPEG,
.flags = SJPEG_FMT_FLAG_ENC_CAPTURE |
SJPEG_FMT_FLAG_DEC_OUTPUT |
@@ -44,7 +43,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
SJPEG_FMT_FLAG_EXYNOS4,
},
{
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
@@ -57,7 +55,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
@@ -70,7 +67,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
@@ -83,7 +79,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.colplanes = 1,
@@ -96,7 +91,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.colplanes = 1,
@@ -109,7 +103,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.colplanes = 1,
@@ -122,7 +115,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = 16,
.colplanes = 1,
@@ -135,7 +127,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
@@ -148,7 +139,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
@@ -161,7 +151,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "RGB565X",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.colplanes = 1,
@@ -174,7 +163,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
@@ -186,7 +174,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "ARGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.colplanes = 1,
@@ -199,7 +186,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "ARGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.colplanes = 1,
@@ -212,7 +198,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "YUV 4:4:4 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV24,
.depth = 24,
.colplanes = 2,
@@ -225,7 +210,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "YUV 4:4:4 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV42,
.depth = 24,
.colplanes = 2,
@@ -238,7 +222,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
- .name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = 16,
.colplanes = 2,
@@ -251,7 +234,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = 16,
.colplanes = 2,
@@ -264,7 +246,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
- .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
@@ -277,7 +258,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
@@ -290,7 +270,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
@@ -303,7 +282,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV21,
.depth = 12,
.colplanes = 2,
@@ -316,7 +294,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV21,
.depth = 12,
.colplanes = 2,
@@ -330,7 +307,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.colplanes = 3,
@@ -343,7 +319,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.colplanes = 3,
@@ -356,7 +331,6 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
- .name = "Gray",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8,
.colplanes = 1,
@@ -1285,8 +1259,6 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
}
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(ctx->jpeg->dev));
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1314,7 +1286,6 @@ static int enum_fmt(struct s5p_jpeg_ctx *ctx,
if (i >= n)
return -EINVAL;
- strscpy(f->description, sjpeg_formats[i].name, sizeof(f->description));
f->pixelformat = sjpeg_formats[i].fourcc;
return 0;
@@ -2974,6 +2945,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_encoder->lock = &jpeg->lock;
jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
jpeg->vfd_encoder->vfl_dir = VFL_DIR_M2M;
+ jpeg->vfd_encoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -3003,6 +2975,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_decoder->lock = &jpeg->lock;
jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
+ jpeg->vfd_decoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 34f87f6c02f2..3bc52f83f5bc 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -150,7 +150,6 @@ struct s5p_jpeg_variant {
/**
* struct jpeg_fmt - driver's internal color format data
- * @name: format description
* @fourcc: the fourcc code, 0 if not applicable
* @depth: number of bits per pixel
* @colplanes: number of color planes (1 for packed formats)
@@ -159,7 +158,6 @@ struct s5p_jpeg_variant {
* @flags: flags describing format applicability
*/
struct s5p_jpeg_fmt {
- char *name;
u32 fourcc;
int depth;
int colplanes;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
index bab7fa46b89a..86f376b50581 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h
@@ -121,14 +121,14 @@
/* JPEG timer setting register */
#define S5P_JPG_TIMER_SE 0x7c
-#define S5P_TIMER_INT_EN_MASK (0x1 << 31)
-#define S5P_TIMER_INT_EN (0x1 << 31)
+#define S5P_TIMER_INT_EN_MASK (0x1UL << 31)
+#define S5P_TIMER_INT_EN (0x1UL << 31)
#define S5P_TIMER_INIT_MASK 0x7fffffff
/* JPEG timer status register */
#define S5P_JPG_TIMER_ST 0x80
#define S5P_TIMER_INT_STAT_SHIFT 31
-#define S5P_TIMER_INT_STAT_MASK (0x1 << S5P_TIMER_INT_STAT_SHIFT)
+#define S5P_TIMER_INT_STAT_MASK (0x1UL << S5P_TIMER_INT_STAT_SHIFT)
#define S5P_TIMER_CNT_SHIFT 0
#define S5P_TIMER_CNT_MASK 0x7fffffff
@@ -562,13 +562,13 @@
/* JPEG timer setting register */
#define EXYNOS3250_TIMER_SE 0x148
#define EXYNOS3250_TIMER_INT_EN_SHIFT 31
-#define EXYNOS3250_TIMER_INT_EN (1 << EXYNOS3250_TIMER_INT_EN_SHIFT)
+#define EXYNOS3250_TIMER_INT_EN (1UL << EXYNOS3250_TIMER_INT_EN_SHIFT)
#define EXYNOS3250_TIMER_INIT_MASK 0x7fffffff
/* JPEG timer status register */
#define EXYNOS3250_TIMER_ST 0x14c
#define EXYNOS3250_TIMER_INT_STAT_SHIFT 31
-#define EXYNOS3250_TIMER_INT_STAT (1 << EXYNOS3250_TIMER_INT_STAT_SHIFT)
+#define EXYNOS3250_TIMER_INT_STAT (1UL << EXYNOS3250_TIMER_INT_STAT_SHIFT)
#define EXYNOS3250_TIMER_CNT_SHIFT 0
#define EXYNOS3250_TIMER_CNT_MASK 0x7fffffff
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 5dc086516360..96d1ecd1521b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -718,7 +718,6 @@ struct s5p_mfc_ctx {
* used by the MFC
*/
struct s5p_mfc_fmt {
- char *name;
u32 fourcc;
u32 codec_mode;
enum s5p_mfc_fmt_type type;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 4017c8b471f4..61e144a35201 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -29,7 +29,6 @@
static struct s5p_mfc_fmt formats[] = {
{
- .name = "4:2:0 2 Planes 16x16 Tiles",
.fourcc = V4L2_PIX_FMT_NV12MT_16X16,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -37,7 +36,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6_BIT | MFC_V7_BIT,
},
{
- .name = "4:2:0 2 Planes 64x32 Tiles",
.fourcc = V4L2_PIX_FMT_NV12MT,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -45,7 +43,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5_BIT,
},
{
- .name = "4:2:0 2 Planes Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -53,7 +50,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
- .name = "4:2:0 2 Planes Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV21M,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -61,7 +57,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
- .name = "H264 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H264,
.codec_mode = S5P_MFC_CODEC_H264_DEC,
.type = MFC_FMT_DEC,
@@ -69,7 +64,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "H264/MVC Encoded Stream",
.fourcc = V4L2_PIX_FMT_H264_MVC,
.codec_mode = S5P_MFC_CODEC_H264_MVC_DEC,
.type = MFC_FMT_DEC,
@@ -77,7 +71,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
- .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_MFC_CODEC_H263_DEC,
.type = MFC_FMT_DEC,
@@ -85,7 +78,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "MPEG1 Encoded Stream",
.fourcc = V4L2_PIX_FMT_MPEG1,
.codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
.type = MFC_FMT_DEC,
@@ -93,7 +85,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "MPEG2 Encoded Stream",
.fourcc = V4L2_PIX_FMT_MPEG2,
.codec_mode = S5P_MFC_CODEC_MPEG2_DEC,
.type = MFC_FMT_DEC,
@@ -101,7 +92,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "MPEG4 Encoded Stream",
.fourcc = V4L2_PIX_FMT_MPEG4,
.codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
.type = MFC_FMT_DEC,
@@ -109,7 +99,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "XviD Encoded Stream",
.fourcc = V4L2_PIX_FMT_XVID,
.codec_mode = S5P_MFC_CODEC_MPEG4_DEC,
.type = MFC_FMT_DEC,
@@ -117,7 +106,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "VC1 Encoded Stream",
.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
.codec_mode = S5P_MFC_CODEC_VC1_DEC,
.type = MFC_FMT_DEC,
@@ -125,7 +113,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "VC1 RCV Encoded Stream",
.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
.codec_mode = S5P_MFC_CODEC_VC1RCV_DEC,
.type = MFC_FMT_DEC,
@@ -133,7 +120,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "VP8 Encoded Stream",
.fourcc = V4L2_PIX_FMT_VP8,
.codec_mode = S5P_MFC_CODEC_VP8_DEC,
.type = MFC_FMT_DEC,
@@ -279,7 +265,6 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
bool out)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_fmt *fmt;
int i, j = 0;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
@@ -296,9 +281,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
}
if (i == ARRAY_SIZE(formats))
return -EINVAL;
- fmt = &formats[i];
- strscpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
+ f->pixelformat = formats[i].fourcc;
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 97e76480e942..912fe0c5ab18 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -32,7 +32,6 @@
static struct s5p_mfc_fmt formats[] = {
{
- .name = "4:2:0 2 Planes 16x16 Tiles",
.fourcc = V4L2_PIX_FMT_NV12MT_16X16,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -40,7 +39,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6_BIT | MFC_V7_BIT,
},
{
- .name = "4:2:0 2 Planes 64x32 Tiles",
.fourcc = V4L2_PIX_FMT_NV12MT,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -48,7 +46,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5_BIT,
},
{
- .name = "4:2:0 2 Planes Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -56,7 +53,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "4:2:0 2 Planes Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV21M,
.codec_mode = S5P_MFC_CODEC_NONE,
.type = MFC_FMT_RAW,
@@ -64,7 +60,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
- .name = "H264 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H264,
.codec_mode = S5P_MFC_CODEC_H264_ENC,
.type = MFC_FMT_ENC,
@@ -72,7 +67,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "MPEG4 Encoded Stream",
.fourcc = V4L2_PIX_FMT_MPEG4,
.codec_mode = S5P_MFC_CODEC_MPEG4_ENC,
.type = MFC_FMT_ENC,
@@ -80,7 +74,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_MFC_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
@@ -88,7 +81,6 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V5PLUS_BITS,
},
{
- .name = "VP8 Encoded Stream",
.fourcc = V4L2_PIX_FMT_VP8,
.codec_mode = S5P_MFC_CODEC_VP8_ENC,
.type = MFC_FMT_ENC,
@@ -1320,7 +1312,6 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
bool out)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_fmt *fmt;
int i, j = 0;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
@@ -1332,10 +1323,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
continue;
if (j == f->index) {
- fmt = &formats[i];
- strscpy(f->description, fmt->name,
- sizeof(f->description));
- f->pixelformat = fmt->fourcc;
+ f->pixelformat = formats[i].fourcc;
return 0;
}
++j;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index f76a07400966..49503c20d320 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -711,7 +711,7 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
if (p->pad) {
/** enable */
- reg |= (1 << 31);
+ reg |= (1UL << 31);
/** cr value */
reg &= ~(0xFF << 16);
reg |= (p->pad_cr << 16);
@@ -955,7 +955,7 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
S5P_FIMV_ENC_RC_FRAME_RATE);
shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
shm &= ~(0xFFFFFFFF);
- shm |= (1 << 31);
+ shm |= (1UL << 31);
shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
shm |= (p->rc_framerate_denom & 0xFFFF);
s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index f7621a9051cb..a1453053e31a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -840,7 +840,7 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
if (p->pad) {
reg = 0;
/** enable */
- reg |= (1 << 31);
+ reg |= (1UL << 31);
/** cr value */
reg |= ((p->pad_cr & 0xFF) << 16);
/** cb value */
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index 1d0133f01e00..9cd60fe1867c 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -507,10 +507,10 @@ err:
}
struct cec_dmi_match {
- char *sys_vendor;
- char *product_name;
- char *devname;
- char *conn;
+ const char *sys_vendor;
+ const char *product_name;
+ const char *devname;
+ const char *conn;
};
static const struct cec_dmi_match secocec_dmi_match_table[] = {
@@ -518,7 +518,8 @@ static const struct cec_dmi_match secocec_dmi_match_table[] = {
{ "SECO", "UDOO x86", "0000:00:02.0", "Port B" },
};
-static int secocec_cec_get_notifier(struct cec_notifier **notify)
+static struct device *secocec_cec_find_hdmi_dev(struct device *dev,
+ const char **conn)
{
int i;
@@ -533,16 +534,15 @@ static int secocec_cec_get_notifier(struct cec_notifier **notify)
d = bus_find_device_by_name(&pci_bus_type, NULL,
m->devname);
if (!d)
- return -EPROBE_DEFER;
+ return ERR_PTR(-EPROBE_DEFER);
- *notify = cec_notifier_get_conn(d, m->conn);
put_device(d);
-
- return 0;
+ *conn = m->conn;
+ return d;
}
}
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
static int secocec_acpi_probe(struct secocec_data *sdev)
@@ -573,9 +573,15 @@ static int secocec_probe(struct platform_device *pdev)
{
struct secocec_data *secocec;
struct device *dev = &pdev->dev;
+ struct device *hdmi_dev;
+ const char *conn = NULL;
int ret;
u16 val;
+ hdmi_dev = secocec_cec_find_hdmi_dev(&pdev->dev, &conn);
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
+
secocec = devm_kzalloc(dev, sizeof(*secocec), GFP_KERNEL);
if (!secocec)
return -ENOMEM;
@@ -617,12 +623,6 @@ static int secocec_probe(struct platform_device *pdev)
goto err;
}
- ret = secocec_cec_get_notifier(&secocec->notifier);
- if (ret) {
- dev_err(dev, "no CEC notifier available\n");
- goto err;
- }
-
ret = devm_request_threaded_irq(dev,
secocec->irq,
NULL,
@@ -640,7 +640,8 @@ static int secocec_probe(struct platform_device *pdev)
secocec->cec_adap = cec_allocate_adapter(&secocec_cec_adap_ops,
secocec,
dev_name(dev),
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
SECOCEC_MAX_ADDRS);
if (IS_ERR(secocec->cec_adap)) {
@@ -648,16 +649,20 @@ static int secocec_probe(struct platform_device *pdev)
goto err;
}
- ret = cec_register_adapter(secocec->cec_adap, dev);
- if (ret)
+ secocec->notifier = cec_notifier_cec_adap_register(hdmi_dev, conn,
+ secocec->cec_adap);
+ if (!secocec->notifier) {
+ ret = -ENOMEM;
goto err_delete_adapter;
+ }
- if (secocec->notifier)
- cec_register_cec_notifier(secocec->cec_adap, secocec->notifier);
+ ret = cec_register_adapter(secocec->cec_adap, dev);
+ if (ret)
+ goto err_notifier;
ret = secocec_ir_probe(secocec);
if (ret)
- goto err_delete_adapter;
+ goto err_notifier;
platform_set_drvdata(pdev, secocec);
@@ -665,6 +670,8 @@ static int secocec_probe(struct platform_device *pdev)
return ret;
+err_notifier:
+ cec_notifier_cec_adap_unregister(secocec->notifier);
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
@@ -685,11 +692,9 @@ static int secocec_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "IR disabled");
}
+ cec_notifier_cec_adap_unregister(secocec->notifier);
cec_unregister_adapter(secocec->cec_adap);
- if (secocec->notifier)
- cec_notifier_put(secocec->notifier);
-
release_region(BRA_SMB_BASE_ADDR, 7);
dev_dbg(&pdev->dev, "CEC device removed");
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 5a9ba05c996e..2b4c0d9d6928 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -81,12 +81,12 @@
struct sh_veu_dev;
struct sh_veu_file {
+ struct v4l2_fh fh;
struct sh_veu_dev *veu_dev;
bool cfg_needed;
};
struct sh_veu_format {
- char *name;
u32 fourcc;
unsigned int depth;
unsigned int ydepth;
@@ -144,14 +144,14 @@ enum sh_veu_fmt_idx {
* aligned for NV24.
*/
static const struct sh_veu_format sh_veu_fmt[] = {
- [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
- [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
- [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
- [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
- [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
- [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
- [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
- [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
+ [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .fourcc = V4L2_PIX_FMT_NV12 },
+ [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .fourcc = V4L2_PIX_FMT_NV16 },
+ [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .fourcc = V4L2_PIX_FMT_NV24 },
+ [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .fourcc = V4L2_PIX_FMT_RGB332 },
+ [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB444 },
+ [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB565 },
+ [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .fourcc = V4L2_PIX_FMT_BGR666 },
+ [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .fourcc = V4L2_PIX_FMT_RGB24 },
};
#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
@@ -348,9 +348,6 @@ static int sh_veu_querycap(struct file *file, void *priv,
strscpy(cap->driver, "sh-veu", sizeof(cap->driver));
strscpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
strscpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -359,8 +356,6 @@ static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
if (f->index >= fmt_num)
return -EINVAL;
- strscpy(f->description, sh_veu_fmt[fmt[f->index]].name,
- sizeof(f->description));
f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
return 0;
}
@@ -967,12 +962,14 @@ static int sh_veu_open(struct file *file)
if (!veu_file)
return -ENOMEM;
+ v4l2_fh_init(&veu_file->fh, video_devdata(file));
veu_file->veu_dev = veu;
veu_file->cfg_needed = true;
file->private_data = veu_file;
pm_runtime_get_sync(veu->dev);
+ v4l2_fh_add(&veu_file->fh);
dev_dbg(veu->dev, "Created instance %p\n", veu_file);
@@ -1002,6 +999,8 @@ static int sh_veu_release(struct file *file)
}
pm_runtime_put(veu->dev);
+ v4l2_fh_del(&veu_file->fh);
+ v4l2_fh_exit(&veu_file->fh);
kfree(veu_file);
@@ -1039,6 +1038,7 @@ static const struct video_device sh_veu_videodev = {
.minor = -1,
.release = video_device_release_empty,
.vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 5799aa4b9323..2236702c21b4 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -138,7 +138,6 @@ static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
struct sh_vou_fmt {
u32 pfmt;
- char *desc;
unsigned char bpp;
unsigned char bpl;
unsigned char rgb;
@@ -152,7 +151,6 @@ static struct sh_vou_fmt vou_fmt[] = {
.pfmt = V4L2_PIX_FMT_NV12,
.bpp = 12,
.bpl = 1,
- .desc = "YVU420 planar",
.yf = 0,
.rgb = 0,
},
@@ -160,7 +158,6 @@ static struct sh_vou_fmt vou_fmt[] = {
.pfmt = V4L2_PIX_FMT_NV16,
.bpp = 16,
.bpl = 1,
- .desc = "YVYU planar",
.yf = 1,
.rgb = 0,
},
@@ -168,7 +165,6 @@ static struct sh_vou_fmt vou_fmt[] = {
.pfmt = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.bpl = 3,
- .desc = "RGB24",
.pkf = 2,
.rgb = 1,
},
@@ -176,7 +172,6 @@ static struct sh_vou_fmt vou_fmt[] = {
.pfmt = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.bpl = 2,
- .desc = "RGB565",
.pkf = 3,
.rgb = 1,
},
@@ -184,7 +179,6 @@ static struct sh_vou_fmt vou_fmt[] = {
.pfmt = V4L2_PIX_FMT_RGB565X,
.bpp = 16,
.bpl = 2,
- .desc = "RGB565 byteswapped",
.pkf = 3,
.rgb = 1,
},
@@ -381,9 +375,6 @@ static int sh_vou_querycap(struct file *file, void *priv,
strscpy(cap->card, "SuperH VOU", sizeof(cap->card));
strscpy(cap->driver, "sh-vou", sizeof(cap->driver));
strscpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -398,9 +389,6 @@ static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- strscpy(fmt->description, vou_fmt[fmt->index].desc,
- sizeof(fmt->description));
fmt->pixelformat = vou_fmt[fmt->index].pfmt;
return 0;
@@ -494,7 +482,8 @@ static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
if (h_idx)
vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
- dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+ dev_dbg(vou_dev->v4l2_dev.dev, "0x%08x: scaling 0x%x\n",
+ fmt->pfmt, vouvcr);
/* To produce a colour bar for testing set bit 23 of VOUVCR */
sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
@@ -1218,6 +1207,8 @@ static const struct video_device sh_vou_video_template = {
.ioctl_ops = &sh_vou_ioctl_ops,
.tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
.vfl_dir = VFL_DIR_TX,
+ .device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
static int sh_vou_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 79f7db1a9d18..e90f1ba30574 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -692,11 +692,6 @@ static int bdisp_querycap(struct file *file, void *fh,
strscpy(cap->card, bdisp->pdev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d",
BDISP_NAME, bdisp->id);
-
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
-
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
return 0;
}
@@ -1059,6 +1054,7 @@ static int bdisp_register_device(struct bdisp_dev *bdisp)
bdisp->vdev.lock = &bdisp->lock;
bdisp->vdev.vfl_dir = VFL_DIR_M2M;
bdisp->vdev.v4l2_dev = &bdisp->v4l2_dev;
+ bdisp->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
snprintf(bdisp->vdev.name, sizeof(bdisp->vdev.name), "%s.%d",
BDISP_NAME, bdisp->id);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 3c05b3dc49ec..5baada4f65e5 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -693,16 +693,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
fei->sram_size = resource_size(res);
fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
- if (fei->idle_irq < 0) {
- dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
+ if (fei->idle_irq < 0)
return fei->idle_irq;
- }
fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
- if (fei->error_irq < 0) {
- dev_err(dev, "Can't get c8sectpfe-error-irq\n");
+ if (fei->error_irq < 0)
return fei->error_irq;
- }
platform_set_drvdata(pdev, fei);
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index fc37efe1d554..8118c7365d3f 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -313,10 +313,6 @@ static int stih_cec_probe(struct platform_device *pdev)
if (!cec)
return -ENOMEM;
- cec->notifier = cec_notifier_get(hdmi_dev);
- if (!cec->notifier)
- return -ENOMEM;
-
cec->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -340,30 +336,42 @@ static int stih_cec_probe(struct platform_device *pdev)
return PTR_ERR(cec->clk);
}
- cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
- CEC_NAME, CEC_CAP_DEFAULTS, CEC_MAX_LOG_ADDRS);
+ cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec, CEC_NAME,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
+ CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto err_delete_adapter;
}
- cec_register_cec_notifier(cec->adap, cec->notifier);
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto err_notifier;
platform_set_drvdata(pdev, cec);
return 0;
+
+err_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier);
+
+err_delete_adapter:
+ cec_delete_adapter(cec->adap);
+ return ret;
}
static int stih_cec_remove(struct platform_device *pdev)
{
struct stih_cec *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier);
cec_unregister_adapter(cec->adap);
- cec_notifier_put(cec->notifier);
return 0;
}
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 7917fd2c4bd4..401aaafa1710 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -341,10 +341,8 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
/* get status interruption resource */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "%s failed to get status IRQ\n", HVA_PREFIX);
+ if (ret < 0)
goto err_clk;
- }
hva->irq_its = ret;
ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
@@ -360,10 +358,8 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
/* get error interruption resource */
ret = platform_get_irq(pdev, 1);
- if (ret < 0) {
- dev_err(dev, "%s failed to get error IRQ\n", HVA_PREFIX);
+ if (ret < 0)
goto err_clk;
- }
hva->irq_err = ret;
ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d855e9c09c08..9392e3409fba 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -100,10 +100,10 @@ enum state {
#define OVERRUN_ERROR_THRESHOLD 3
struct dcmi_graph_entity {
- struct device_node *node;
-
struct v4l2_async_subdev asd;
- struct v4l2_subdev *subdev;
+
+ struct device_node *remote_node;
+ struct v4l2_subdev *source;
};
struct dcmi_format {
@@ -169,6 +169,10 @@ struct stm32_dcmi {
/* Ensure DMA operations atomicity */
struct mutex dma_lock;
+
+ struct media_device mdev;
+ struct media_pad vid_cap_pad;
+ struct media_pipeline pipeline;
};
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
@@ -580,6 +584,144 @@ static void dcmi_buf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&dcmi->irqlock);
}
+static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
+{
+ struct media_entity *entity = &dcmi->vdev->entity;
+ struct media_pad *pad;
+
+ /* Walk searching for entity having no sink */
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ }
+
+ return entity;
+}
+
+static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
+ struct v4l2_subdev_pad_config *pad_cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct media_entity *entity = &dcmi->entity.source->entity;
+ struct v4l2_subdev *subdev;
+ struct media_pad *sink_pad = NULL;
+ struct media_pad *src_pad = NULL;
+ struct media_pad *pad = NULL;
+ struct v4l2_subdev_format fmt = *format;
+ bool found = false;
+ int ret;
+
+ /*
+ * Starting from sensor subdevice, walk within
+ * pipeline and set format on each subdevice
+ */
+ while (1) {
+ unsigned int i;
+
+ /* Search if current entity has a source pad */
+ for (i = 0; i < entity->num_pads; i++) {
+ pad = &entity->pads[i];
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ src_pad = pad;
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ break;
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* Propagate format on sink pad if any, otherwise source pad */
+ if (sink_pad)
+ pad = sink_pad;
+
+ dev_dbg(dcmi->dev, "\"%s\":%d pad format set to 0x%x %ux%u\n",
+ subdev->name, pad->index, format->format.code,
+ format->format.width, format->format.height);
+
+ fmt.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to set format 0x%x %ux%u on \"%s\":%d pad (%d)\n",
+ __func__, format->format.code,
+ format->format.width, format->format.height,
+ subdev->name, pad->index, ret);
+ return ret;
+ }
+
+ if (fmt.format.code != format->format.code ||
+ fmt.format.width != format->format.width ||
+ fmt.format.height != format->format.height) {
+ dev_dbg(dcmi->dev, "\"%s\":%d pad format has been changed to 0x%x %ux%u\n",
+ subdev->name, pad->index, fmt.format.code,
+ fmt.format.width, fmt.format.height);
+ }
+
+ /* Walk to next entity */
+ sink_pad = media_entity_remote_pad(src_pad);
+ if (!sink_pad || !is_media_entity_v4l2_subdev(sink_pad->entity))
+ break;
+
+ entity = sink_pad->entity;
+ }
+ *format = fmt;
+
+ return 0;
+}
+
+static int dcmi_pipeline_s_stream(struct stm32_dcmi *dcmi, int state)
+{
+ struct media_entity *entity = &dcmi->vdev->entity;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ int ret;
+
+ /* Start/stop all entities within pipeline */
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, state);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(dcmi->dev, "%s: \"%s\" failed to %s streaming (%d)\n",
+ __func__, subdev->name,
+ state ? "start" : "stop", ret);
+ return ret;
+ }
+
+ dev_dbg(dcmi->dev, "\"%s\" is %s\n",
+ subdev->name, state ? "started" : "stopped");
+ }
+
+ return 0;
+}
+
+static int dcmi_pipeline_start(struct stm32_dcmi *dcmi)
+{
+ return dcmi_pipeline_s_stream(dcmi, 1);
+}
+
+static void dcmi_pipeline_stop(struct stm32_dcmi *dcmi)
+{
+ dcmi_pipeline_s_stream(dcmi, 0);
+}
+
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
@@ -594,14 +736,17 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_release_buffers;
}
- /* Enable stream on the sub device */
- ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
- if (ret && ret != -ENOIOCTLCMD) {
- dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
- __func__);
+ ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
+ __func__, ret);
goto err_pm_put;
}
+ ret = dcmi_pipeline_start(dcmi);
+ if (ret)
+ goto err_media_pipeline_stop;
+
spin_lock_irq(&dcmi->irqlock);
/* Set bus width */
@@ -673,7 +818,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret) {
dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
__func__);
- goto err_subdev_streamoff;
+ goto err_pipeline_stop;
}
/* Enable interruptions */
@@ -684,8 +829,11 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
-err_subdev_streamoff:
- v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
+err_pipeline_stop:
+ dcmi_pipeline_stop(dcmi);
+
+err_media_pipeline_stop:
+ media_pipeline_stop(&dcmi->vdev->entity);
err_pm_put:
pm_runtime_put(dcmi->dev);
@@ -710,13 +858,10 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
struct dcmi_buf *buf, *node;
- int ret;
- /* Disable stream on the sub device */
- ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
- if (ret && ret != -ENOIOCTLCMD)
- dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
- __func__, ret);
+ dcmi_pipeline_stop(dcmi);
+
+ media_pipeline_stop(&dcmi->vdev->entity);
spin_lock_irq(&dcmi->irqlock);
@@ -857,7 +1002,7 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
+ ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
return ret;
@@ -934,8 +1079,7 @@ static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
mf->width = sd_framesize.width;
mf->height = sd_framesize.height;
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
- set_fmt, NULL, &format);
+ ret = dcmi_pipeline_s_fmt(dcmi, NULL, &format);
if (ret < 0)
return ret;
@@ -991,7 +1135,7 @@ static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
};
int ret;
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
+ ret = v4l2_subdev_call(dcmi->entity.source, pad, get_fmt, NULL, &fmt);
if (ret)
return ret;
@@ -1020,7 +1164,7 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
}
v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
+ ret = v4l2_subdev_call(dcmi->entity.source, pad, set_fmt,
&pad_cfg, &format);
if (ret < 0)
return ret;
@@ -1043,7 +1187,7 @@ static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
/*
* Get sensor bounds first
*/
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
+ ret = v4l2_subdev_call(dcmi->entity.source, pad, get_selection,
NULL, &bounds);
if (!ret)
*r = bounds.r;
@@ -1224,7 +1368,7 @@ static int dcmi_enum_framesizes(struct file *file, void *fh,
fse.code = sd_fmt->mbus_code;
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
+ ret = v4l2_subdev_call(dcmi->entity.source, pad, enum_frame_size,
NULL, &fse);
if (ret)
return ret;
@@ -1241,7 +1385,7 @@ static int dcmi_g_parm(struct file *file, void *priv,
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+ return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.source, p);
}
static int dcmi_s_parm(struct file *file, void *priv,
@@ -1249,7 +1393,7 @@ static int dcmi_s_parm(struct file *file, void *priv,
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
+ return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.source, p);
}
static int dcmi_enum_frameintervals(struct file *file, void *fh,
@@ -1271,7 +1415,7 @@ static int dcmi_enum_frameintervals(struct file *file, void *fh,
fie.code = sd_fmt->mbus_code;
- ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
+ ret = v4l2_subdev_call(dcmi->entity.source, pad,
enum_frame_interval, NULL, &fie);
if (ret)
return ret;
@@ -1291,7 +1435,7 @@ MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
static int dcmi_open(struct file *file)
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- struct v4l2_subdev *sd = dcmi->entity.subdev;
+ struct v4l2_subdev *sd = dcmi->entity.source;
int ret;
if (mutex_lock_interruptible(&dcmi->lock))
@@ -1322,7 +1466,7 @@ unlock:
static int dcmi_release(struct file *file)
{
struct stm32_dcmi *dcmi = video_drvdata(file);
- struct v4l2_subdev *sd = dcmi->entity.subdev;
+ struct v4l2_subdev *sd = dcmi->entity.source;
bool fh_singular;
int ret;
@@ -1409,6 +1553,12 @@ static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
return 0;
}
+/*
+ * FIXME: For the time being we only support subdevices
+ * which expose RGB & YUV "parallel form" mbus code (_2X8).
+ * Nevertheless, this allows to support serial source subdevices
+ * and serial to parallel bridges which conform to this.
+ */
static const struct dcmi_format dcmi_formats[] = {
{
.fourcc = V4L2_PIX_FMT_RGB565,
@@ -1433,7 +1583,7 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
{
const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
unsigned int num_fmts = 0, i, j;
- struct v4l2_subdev *subdev = dcmi->entity.subdev;
+ struct v4l2_subdev *subdev = dcmi->entity.source;
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
@@ -1447,12 +1597,20 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
/* Code supported, have we got this fourcc yet? */
for (j = 0; j < num_fmts; j++)
if (sd_fmts[j]->fourcc ==
- dcmi_formats[i].fourcc)
+ dcmi_formats[i].fourcc) {
/* Already available */
+ dev_dbg(dcmi->dev, "Skipping fourcc/code: %4.4s/0x%x\n",
+ (char *)&sd_fmts[j]->fourcc,
+ mbus_code.code);
break;
- if (j == num_fmts)
+ }
+ if (j == num_fmts) {
/* New */
sd_fmts[num_fmts++] = dcmi_formats + i;
+ dev_dbg(dcmi->dev, "Supported fourcc/code: %4.4s/0x%x\n",
+ (char *)&sd_fmts[num_fmts - 1]->fourcc,
+ sd_fmts[num_fmts - 1]->mbus_code);
+ }
}
mbus_code.index++;
}
@@ -1479,7 +1637,7 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
{
unsigned int num_fsize = 0;
- struct v4l2_subdev *subdev = dcmi->entity.subdev;
+ struct v4l2_subdev *subdev = dcmi->entity.source;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = dcmi->sd_format->mbus_code,
@@ -1526,7 +1684,20 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
int ret;
- dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
+ /*
+ * Now that the graph is complete,
+ * we search for the source subdevice
+ * in order to expose it through V4L2 interface
+ */
+ dcmi->entity.source =
+ media_entity_to_v4l2_subdev(dcmi_find_source(dcmi));
+ if (!dcmi->entity.source) {
+ dev_err(dcmi->dev, "Source subdevice not found\n");
+ return -ENODEV;
+ }
+
+ dcmi->vdev->ctrl_handler = dcmi->entity.source->ctrl_handler;
+
ret = dcmi_formats_init(dcmi);
if (ret) {
dev_err(dcmi->dev, "No supported mediabus format found\n");
@@ -1551,14 +1722,6 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
- if (ret) {
- dev_err(dcmi->dev, "Failed to register video device\n");
- return ret;
- }
-
- dev_dbg(dcmi->dev, "Device registered as %s\n",
- video_device_node_name(dcmi->vdev));
return 0;
}
@@ -1579,12 +1742,31 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+ int ret;
+ int src_pad;
- dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);
+ dev_dbg(dcmi->dev, "Subdev \"%s\" bound\n", subdev->name);
- dcmi->entity.subdev = subdev;
+ /*
+ * Link this sub-device to DCMI, it could be
+ * a parallel camera sensor or a bridge
+ */
+ src_pad = media_entity_get_fwnode_pad(&subdev->entity,
+ subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+
+ ret = media_create_pad_link(&subdev->entity, src_pad,
+ &dcmi->vdev->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ dev_err(dcmi->dev, "Failed to create media pad link with subdev \"%s\"\n",
+ subdev->name);
+ else
+ dev_dbg(dcmi->dev, "DCMI is now linked to \"%s\"\n",
+ subdev->name);
- return 0;
+ return ret;
}
static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
@@ -1608,7 +1790,7 @@ static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
return -EINVAL;
/* Remote node to connect */
- dcmi->entity.node = remote;
+ dcmi->entity.remote_node = remote;
dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
return 0;
@@ -1631,7 +1813,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
&dcmi->entity.asd);
if (ret) {
dev_err(dcmi->dev, "Failed to add subdev notifier\n");
- of_node_put(dcmi->entity.node);
+ of_node_put(dcmi->entity.remote_node);
return ret;
}
@@ -1679,7 +1861,6 @@ static int dcmi_probe(struct platform_device *pdev)
np = of_graph_get_next_endpoint(np, NULL);
if (!np) {
dev_err(&pdev->dev, "Could not find the endpoint\n");
- of_node_put(np);
return -ENODEV;
}
@@ -1699,11 +1880,8 @@ static int dcmi_probe(struct platform_device *pdev)
dcmi->bus.data_shift = ep.bus.parallel.data_shift;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get irq\n");
+ if (irq <= 0)
return irq ? irq : -ENXIO;
- }
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!dcmi->res) {
@@ -1751,10 +1929,19 @@ static int dcmi_probe(struct platform_device *pdev)
q = &dcmi->queue;
+ dcmi->v4l2_dev.mdev = &dcmi->mdev;
+
+ /* Initialize media device */
+ strscpy(dcmi->mdev.model, DRV_NAME, sizeof(dcmi->mdev.model));
+ snprintf(dcmi->mdev.bus_info, sizeof(dcmi->mdev.bus_info),
+ "platform:%s", DRV_NAME);
+ dcmi->mdev.dev = &pdev->dev;
+ media_device_init(&dcmi->mdev);
+
/* Initialize the top-level structure */
ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
if (ret)
- goto err_dma_release;
+ goto err_media_device_cleanup;
dcmi->vdev = video_device_alloc();
if (!dcmi->vdev) {
@@ -1774,6 +1961,25 @@ static int dcmi_probe(struct platform_device *pdev)
V4L2_CAP_READWRITE;
video_set_drvdata(dcmi->vdev, dcmi);
+ /* Media entity pads */
+ dcmi->vid_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&dcmi->vdev->entity,
+ 1, &dcmi->vid_cap_pad);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to init media entity pad\n");
+ goto err_device_release;
+ }
+ dcmi->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
+
+ ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to register video device\n");
+ goto err_media_entity_cleanup;
+ }
+
+ dev_dbg(dcmi->dev, "Device registered as %s\n",
+ video_device_node_name(dcmi->vdev));
+
/* Buffer queue */
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
@@ -1789,12 +1995,12 @@ static int dcmi_probe(struct platform_device *pdev)
ret = vb2_queue_init(q);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
- goto err_device_release;
+ goto err_media_entity_cleanup;
}
ret = dcmi_graph_init(dcmi);
if (ret < 0)
- goto err_device_release;
+ goto err_media_entity_cleanup;
/* Reset device */
ret = reset_control_assert(dcmi->rstc);
@@ -1821,11 +2027,14 @@ static int dcmi_probe(struct platform_device *pdev)
err_cleanup:
v4l2_async_notifier_cleanup(&dcmi->notifier);
+err_media_entity_cleanup:
+ media_entity_cleanup(&dcmi->vdev->entity);
err_device_release:
video_device_release(dcmi->vdev);
err_device_unregister:
v4l2_device_unregister(&dcmi->v4l2_dev);
-err_dma_release:
+err_media_device_cleanup:
+ media_device_cleanup(&dcmi->mdev);
dma_release_channel(dcmi->dma_chan);
return ret;
@@ -1839,7 +2048,9 @@ static int dcmi_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&dcmi->notifier);
v4l2_async_notifier_cleanup(&dcmi->notifier);
+ media_entity_cleanup(&dcmi->vdev->entity);
v4l2_device_unregister(&dcmi->v4l2_dev);
+ media_device_cleanup(&dcmi->mdev);
dma_release_channel(dcmi->dma_chan);
diff --git a/drivers/media/platform/sunxi/Kconfig b/drivers/media/platform/sunxi/Kconfig
new file mode 100644
index 000000000000..71808e93ac2e
--- /dev/null
+++ b/drivers/media/platform/sunxi/Kconfig
@@ -0,0 +1,2 @@
+source "drivers/media/platform/sunxi/sun4i-csi/Kconfig"
+source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
new file mode 100644
index 000000000000..a05127529006
--- /dev/null
+++ b/drivers/media/platform/sunxi/Makefile
@@ -0,0 +1,2 @@
+obj-y += sun4i-csi/
+obj-y += sun6i-csi/
diff --git a/drivers/media/platform/sunxi/sun4i-csi/Kconfig b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
new file mode 100644
index 000000000000..e86e29b6a603
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_SUN4I_CSI
+ tristate "Allwinner A10 CMOS Sensor Interface Support"
+ depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ This is a V4L2 driver for the Allwinner A10 CSI.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sun4i_csi.
diff --git a/drivers/media/platform/sunxi/sun4i-csi/Makefile b/drivers/media/platform/sunxi/sun4i-csi/Makefile
new file mode 100644
index 000000000000..7c790a57f5ee
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/Makefile
@@ -0,0 +1,5 @@
+sun4i-csi-y += sun4i_csi.o
+sun4i-csi-y += sun4i_dma.o
+sun4i-csi-y += sun4i_v4l2.o
+
+obj-$(CONFIG_VIDEO_SUN4I_CSI) += sun4i-csi.o
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
new file mode 100644
index 000000000000..f36dc6258900
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 NextThing Co
+ * Copyright (C) 2016-2019 Bootlin
+ *
+ * Author: Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "sun4i_csi.h"
+
+static const struct media_entity_operations sun4i_csi_video_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int sun4i_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct sun4i_csi *csi = container_of(notifier, struct sun4i_csi,
+ notifier);
+
+ csi->src_subdev = subdev;
+ csi->src_pad = media_entity_get_fwnode_pad(&subdev->entity,
+ subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (csi->src_pad < 0) {
+ dev_err(csi->dev, "Couldn't find output pad for subdev %s\n",
+ subdev->name);
+ return csi->src_pad;
+ }
+
+ dev_dbg(csi->dev, "Bound %s pad: %d\n", subdev->name, csi->src_pad);
+ return 0;
+}
+
+static int sun4i_csi_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct sun4i_csi *csi = container_of(notifier, struct sun4i_csi,
+ notifier);
+ struct v4l2_subdev *subdev = &csi->subdev;
+ struct video_device *vdev = &csi->vdev;
+ int ret;
+
+ ret = v4l2_device_register_subdev(&csi->v4l, subdev);
+ if (ret < 0)
+ return ret;
+
+ ret = sun4i_csi_v4l2_register(csi);
+ if (ret < 0)
+ return ret;
+
+ ret = media_device_register(&csi->mdev);
+ if (ret)
+ return ret;
+
+ /* Create link from subdev to main device */
+ ret = media_create_pad_link(&subdev->entity, CSI_SUBDEV_SOURCE,
+ &vdev->entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret)
+ goto err_clean_media;
+
+ ret = media_create_pad_link(&csi->src_subdev->entity, csi->src_pad,
+ &subdev->entity, CSI_SUBDEV_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret)
+ goto err_clean_media;
+
+ ret = v4l2_device_register_subdev_nodes(&csi->v4l);
+ if (ret < 0)
+ goto err_clean_media;
+
+ return 0;
+
+err_clean_media:
+ media_device_unregister(&csi->mdev);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations sun4i_csi_notify_ops = {
+ .bound = sun4i_csi_notify_bound,
+ .complete = sun4i_csi_notify_complete,
+};
+
+static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_PARALLEL,
+ };
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_notifier_init(&csi->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto out;
+
+ csi->bus = vep.bus.parallel;
+
+ ret = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier,
+ ep, &csi->asd);
+ if (ret)
+ goto out;
+
+ csi->notifier.ops = &sun4i_csi_notify_ops;
+
+out:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static int sun4i_csi_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct video_device *vdev;
+ struct sun4i_csi *csi;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, csi);
+ csi->dev = &pdev->dev;
+ subdev = &csi->subdev;
+ vdev = &csi->vdev;
+
+ csi->mdev.dev = csi->dev;
+ strscpy(csi->mdev.model, "Allwinner Video Capture Device",
+ sizeof(csi->mdev.model));
+ csi->mdev.hw_revision = 0;
+ media_device_init(&csi->mdev);
+ csi->v4l.mdev = &csi->mdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csi->regs))
+ return PTR_ERR(csi->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ csi->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(csi->bus_clk)) {
+ dev_err(&pdev->dev, "Couldn't get our bus clock\n");
+ return PTR_ERR(csi->bus_clk);
+ }
+
+ csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
+ if (IS_ERR(csi->isp_clk)) {
+ dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
+ return PTR_ERR(csi->isp_clk);
+ }
+
+ csi->ram_clk = devm_clk_get(&pdev->dev, "ram");
+ if (IS_ERR(csi->ram_clk)) {
+ dev_err(&pdev->dev, "Couldn't get our ram clock\n");
+ return PTR_ERR(csi->ram_clk);
+ }
+
+ csi->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(csi->rst)) {
+ dev_err(&pdev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(csi->rst);
+ }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(subdev, &sun4i_csi_subdev_ops);
+ subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ subdev->owner = THIS_MODULE;
+ snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0");
+ v4l2_set_subdevdata(subdev, csi);
+
+ csi->subdev_pads[CSI_SUBDEV_SINK].flags =
+ MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ csi->subdev_pads[CSI_SUBDEV_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&subdev->entity, CSI_SUBDEV_PADS,
+ csi->subdev_pads);
+ if (ret < 0)
+ return ret;
+
+ csi->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ vdev->entity.ops = &sun4i_csi_video_entity_ops;
+ ret = media_entity_pads_init(&vdev->entity, 1, &csi->vdev_pad);
+ if (ret < 0)
+ return ret;
+
+ ret = sun4i_csi_dma_register(csi, irq);
+ if (ret)
+ goto err_clean_pad;
+
+ ret = sun4i_csi_notifier_init(csi);
+ if (ret)
+ goto err_unregister_media;
+
+ ret = v4l2_async_notifier_register(&csi->v4l, &csi->notifier);
+ if (ret) {
+ dev_err(csi->dev, "Couldn't register our notifier.\n");
+ goto err_unregister_media;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+err_unregister_media:
+ media_device_unregister(&csi->mdev);
+ sun4i_csi_dma_unregister(csi);
+
+err_clean_pad:
+ media_device_cleanup(&csi->mdev);
+
+ return ret;
+}
+
+static int sun4i_csi_remove(struct platform_device *pdev)
+{
+ struct sun4i_csi *csi = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&csi->notifier);
+ v4l2_async_notifier_cleanup(&csi->notifier);
+ media_device_unregister(&csi->mdev);
+ sun4i_csi_dma_unregister(csi);
+ media_device_cleanup(&csi->mdev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_csi_of_match[] = {
+ { .compatible = "allwinner,sun7i-a20-csi0" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun4i_csi_of_match);
+
+static int __maybe_unused sun4i_csi_runtime_resume(struct device *dev)
+{
+ struct sun4i_csi *csi = dev_get_drvdata(dev);
+
+ reset_control_deassert(csi->rst);
+ clk_prepare_enable(csi->bus_clk);
+ clk_prepare_enable(csi->ram_clk);
+ clk_set_rate(csi->isp_clk, 80000000);
+ clk_prepare_enable(csi->isp_clk);
+
+ writel(1, csi->regs + CSI_EN_REG);
+
+ return 0;
+}
+
+static int __maybe_unused sun4i_csi_runtime_suspend(struct device *dev)
+{
+ struct sun4i_csi *csi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(csi->isp_clk);
+ clk_disable_unprepare(csi->ram_clk);
+ clk_disable_unprepare(csi->bus_clk);
+
+ reset_control_assert(csi->rst);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sun4i_csi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sun4i_csi_runtime_suspend,
+ sun4i_csi_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver sun4i_csi_driver = {
+ .probe = sun4i_csi_probe,
+ .remove = sun4i_csi_remove,
+ .driver = {
+ .name = "sun4i-csi",
+ .of_match_table = sun4i_csi_of_match,
+ .pm = &sun4i_csi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_csi_driver);
+
+MODULE_DESCRIPTION("Allwinner A10 Camera Sensor Interface driver");
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
new file mode 100644
index 000000000000..001c8bde006c
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 NextThing Co
+ * Copyright (C) 2016-2019 Bootlin
+ *
+ * Author: Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#ifndef _SUN4I_CSI_H_
+#define _SUN4I_CSI_H_
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/videobuf2-core.h>
+
+#define CSI_EN_REG 0x00
+
+#define CSI_CFG_REG 0x04
+#define CSI_CFG_INPUT_FMT(fmt) ((fmt) << 20)
+#define CSI_CFG_OUTPUT_FMT(fmt) ((fmt) << 16)
+#define CSI_CFG_YUV_DATA_SEQ(seq) ((seq) << 8)
+#define CSI_CFG_VSYNC_POL(pol) ((pol) << 2)
+#define CSI_CFG_HSYNC_POL(pol) ((pol) << 1)
+#define CSI_CFG_PCLK_POL(pol) ((pol) << 0)
+
+#define CSI_CPT_CTRL_REG 0x08
+#define CSI_CPT_CTRL_VIDEO_START BIT(1)
+#define CSI_CPT_CTRL_IMAGE_START BIT(0)
+
+#define CSI_BUF_ADDR_REG(fifo, buf) (0x10 + (0x8 * (fifo)) + (0x4 * (buf)))
+
+#define CSI_BUF_CTRL_REG 0x28
+#define CSI_BUF_CTRL_DBN BIT(2)
+#define CSI_BUF_CTRL_DBS BIT(1)
+#define CSI_BUF_CTRL_DBE BIT(0)
+
+#define CSI_INT_EN_REG 0x30
+#define CSI_INT_FRM_DONE BIT(1)
+#define CSI_INT_CPT_DONE BIT(0)
+
+#define CSI_INT_STA_REG 0x34
+
+#define CSI_WIN_CTRL_W_REG 0x40
+#define CSI_WIN_CTRL_W_ACTIVE(w) ((w) << 16)
+
+#define CSI_WIN_CTRL_H_REG 0x44
+#define CSI_WIN_CTRL_H_ACTIVE(h) ((h) << 16)
+
+#define CSI_BUF_LEN_REG 0x48
+
+#define CSI_MAX_BUFFER 2
+#define CSI_MAX_HEIGHT 8192U
+#define CSI_MAX_WIDTH 8192U
+
+enum csi_input {
+ CSI_INPUT_RAW = 0,
+ CSI_INPUT_BT656 = 2,
+ CSI_INPUT_YUV = 3,
+};
+
+enum csi_output_raw {
+ CSI_OUTPUT_RAW_PASSTHROUGH = 0,
+};
+
+enum csi_output_yuv {
+ CSI_OUTPUT_YUV_422_PLANAR = 0,
+ CSI_OUTPUT_YUV_420_PLANAR = 1,
+ CSI_OUTPUT_YUV_422_UV = 4,
+ CSI_OUTPUT_YUV_420_UV = 5,
+ CSI_OUTPUT_YUV_422_MACRO = 8,
+ CSI_OUTPUT_YUV_420_MACRO = 9,
+};
+
+enum csi_yuv_data_seq {
+ CSI_YUV_DATA_SEQ_YUYV = 0,
+ CSI_YUV_DATA_SEQ_YVYU = 1,
+ CSI_YUV_DATA_SEQ_UYVY = 2,
+ CSI_YUV_DATA_SEQ_VYUY = 3,
+};
+
+enum csi_subdev_pads {
+ CSI_SUBDEV_SINK,
+ CSI_SUBDEV_SOURCE,
+
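+ /* Keep last: also used as the number of pads */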
+ CSI_SUBDEV_PADS,
+};
+
+extern const struct v4l2_subdev_ops sun4i_csi_subdev_ops;
+
+struct sun4i_csi_format {
+ u32 mbus;
+ u32 fourcc;
+ enum csi_input input;
+ u32 output;
+ unsigned int num_planes;
+ u8 bpp[3];
+ unsigned int hsub;
+ unsigned int vsub;
+};
+
+const struct sun4i_csi_format *sun4i_csi_find_format(const u32 *fourcc,
+ const u32 *mbus);
+
+struct sun4i_csi {
+ /* Device resources */
+ struct device *dev;
+
+ void __iomem *regs;
+ struct clk *bus_clk;
+ struct clk *isp_clk;
+ struct clk *ram_clk;
+ struct reset_control *rst;
+
+ struct vb2_v4l2_buffer *current_buf[CSI_MAX_BUFFER];
+
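+ /* Scratch DMA buffer used when no capture buffer is queued */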
+ struct {
+ size_t size;
+ void *vaddr;
+ dma_addr_t paddr;
+ } scratch;
+
+ struct v4l2_fwnode_bus_parallel bus;
+
+ /* Main Device */
+ struct v4l2_device v4l;
+ struct media_device mdev;
+ struct video_device vdev;
+ struct media_pad vdev_pad;
+ struct v4l2_pix_format_mplane fmt;
+
+ /* Local subdev */
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[CSI_SUBDEV_PADS];
+ struct v4l2_mbus_framefmt subdev_fmt;
+
+ /* V4L2 Async variables */
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *src_subdev;
+ int src_pad;
+
+ /* V4L2 variables */
+ struct mutex lock;
+
+ /* Videobuf2 */
+ struct vb2_queue queue;
+ struct list_head buf_list;
+ spinlock_t qlock;
+ unsigned int sequence;
+};
+
+int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq);
+void sun4i_csi_dma_unregister(struct sun4i_csi *csi);
+
+int sun4i_csi_v4l2_register(struct sun4i_csi *csi);
+
+#endif /* _SUN4I_CSI_H_ */
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
new file mode 100644
index 000000000000..d6979e11a67b
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 NextThing Co
+ * Copyright (C) 2016-2019 Bootlin
+ *
+ * Author: Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun4i_csi.h"
+
+struct sun4i_csi_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct sun4i_csi_buffer *
+vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
+{
+ return container_of(p, struct sun4i_csi_buffer, vb);
+}
+
+static inline struct sun4i_csi_buffer *
+vb2_to_csi_buffer(const struct vb2_buffer *p)
+{
+ return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
+}
+
+static void sun4i_csi_capture_start(struct sun4i_csi *csi)
+{
+ writel(CSI_CPT_CTRL_VIDEO_START, csi->regs + CSI_CPT_CTRL_REG);
+}
+
+static void sun4i_csi_capture_stop(struct sun4i_csi *csi)
+{
+ writel(0, csi->regs + CSI_CPT_CTRL_REG);
+}
+
+static int sun4i_csi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct sun4i_csi *csi = vb2_get_drv_priv(vq);
+ unsigned int num_planes = csi->fmt.num_planes;
+ unsigned int i;
+
+ if (*nplanes) {
+ if (*nplanes != num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < num_planes; i++)
+ if (sizes[i] < csi->fmt.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *nplanes = num_planes;
+ for (i = 0; i < num_planes; i++)
+ sizes[i] = csi->fmt.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int sun4i_csi_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int i;
+
+ for (i = 0; i < csi->fmt.num_planes; i++) {
+ unsigned long size = csi->fmt.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(csi->dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static int sun4i_csi_setup_scratch_buffer(struct sun4i_csi *csi,
+ unsigned int slot)
+{
+ dma_addr_t addr = csi->scratch.paddr;
+ unsigned int plane;
+
+ dev_dbg(csi->dev,
+ "No more available buffer, using the scratch buffer\n");
+
+ for (plane = 0; plane < csi->fmt.num_planes; plane++) {
+ writel(addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
+ addr += csi->fmt.plane_fmt[plane].sizeimage;
+ }
+
+ csi->current_buf[slot] = NULL;
+ return 0;
+}
+
+static int sun4i_csi_buffer_fill_slot(struct sun4i_csi *csi, unsigned int slot)
+{
+ struct sun4i_csi_buffer *c_buf;
+ struct vb2_v4l2_buffer *v_buf;
+ unsigned int plane;
+
+ /*
+ * We should never end up in a situation where we overwrite an
+ * already filled slot.
+ */
+ if (WARN_ON(csi->current_buf[slot]))
+ return -EINVAL;
+
+ if (list_empty(&csi->buf_list))
+ return sun4i_csi_setup_scratch_buffer(csi, slot);
+
+ c_buf = list_first_entry(&csi->buf_list, struct sun4i_csi_buffer, list);
+ list_del_init(&c_buf->list);
+
+ v_buf = &c_buf->vb;
+ csi->current_buf[slot] = v_buf;
+
+ for (plane = 0; plane < csi->fmt.num_planes; plane++) {
+ dma_addr_t buf_addr;
+
+ buf_addr = vb2_dma_contig_plane_dma_addr(&v_buf->vb2_buf,
+ plane);
+ writel(buf_addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
+ }
+
+ return 0;
+}
+
+static int sun4i_csi_buffer_fill_all(struct sun4i_csi *csi)
+{
+ unsigned int slot;
+ int ret;
+
+ for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
+ ret = sun4i_csi_buffer_fill_slot(csi, slot);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sun4i_csi_buffer_mark_done(struct sun4i_csi *csi,
+ unsigned int slot,
+ unsigned int sequence)
+{
+ struct vb2_v4l2_buffer *v_buf;
+
+ if (!csi->current_buf[slot]) {
+ dev_dbg(csi->dev, "Scratch buffer was used, ignoring..\n");
+ return;
+ }
+
+ v_buf = csi->current_buf[slot];
+ v_buf->field = csi->fmt.field;
+ v_buf->sequence = sequence;
+ v_buf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&v_buf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ csi->current_buf[slot] = NULL;
+}
+
+static int sun4i_csi_buffer_flip(struct sun4i_csi *csi, unsigned int sequence)
+{
+ u32 reg = readl(csi->regs + CSI_BUF_CTRL_REG);
+ unsigned int next;
+
+ /* Our next buffer is not the current buffer */
+ next = !(reg & CSI_BUF_CTRL_DBS);
+
+ /* Report the previous buffer as done */
+ sun4i_csi_buffer_mark_done(csi, next, sequence);
+
+ /* Put a new buffer in there */
+ return sun4i_csi_buffer_fill_slot(csi, next);
+}
+
+static void sun4i_csi_buffer_queue(struct vb2_buffer *vb)
+{
+ struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct sun4i_csi_buffer *buf = vb2_to_csi_buffer(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->qlock, flags);
+ list_add_tail(&buf->list, &csi->buf_list);
+ spin_unlock_irqrestore(&csi->qlock, flags);
+}
+
+static void return_all_buffers(struct sun4i_csi *csi,
+ enum vb2_buffer_state state)
+{
+ struct sun4i_csi_buffer *buf, *node;
+ unsigned int slot;
+
+ list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
+ struct vb2_v4l2_buffer *v_buf = csi->current_buf[slot];
+
+ if (!v_buf)
+ continue;
+
+ vb2_buffer_done(&v_buf->vb2_buf, state);
+ csi->current_buf[slot] = NULL;
+ }
+}
+
+static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sun4i_csi *csi = vb2_get_drv_priv(vq);
+ struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
+ const struct sun4i_csi_format *csi_fmt;
+ unsigned long hsync_pol, pclk_pol, vsync_pol;
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ csi_fmt = sun4i_csi_find_format(&csi->fmt.pixelformat, NULL);
+ if (!csi_fmt)
+ return -EINVAL;
+
+ dev_dbg(csi->dev, "Starting capture\n");
+
+ csi->sequence = 0;
+
+ /*
+ * We need a scratch buffer in case we have no more buffers
+ * queued, so that we don't error out. One such case is the last
+ * frame of a capture: there is no buffer queued any more, and
+ * yet it doesn't really matter since we'll never reach the next
+ * buffer.
+ *
+ * Since we support the multi-planar API, we need to have a
+ * buffer for each plane. Allocating a single one large enough
+ * to hold all the buffers is simpler, so let's go for that.
+ */
+ csi->scratch.size = 0;
+ for (i = 0; i < csi->fmt.num_planes; i++)
+ csi->scratch.size += csi->fmt.plane_fmt[i].sizeimage;
+
+ csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
+ csi->scratch.size,
+ &csi->scratch.paddr,
+ GFP_KERNEL);
+ if (!csi->scratch.vaddr) {
+ dev_err(csi->dev, "Failed to allocate scratch buffer\n");
+ ret = -ENOMEM;
+ goto err_clear_dma_queue;
+ }
+
+ ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
+ if (ret < 0)
+ goto err_free_scratch_buffer;
+
+ spin_lock_irqsave(&csi->qlock, flags);
+
+ /* Setup timings */
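+ /*
+ * The active width is programmed in bus samples: the only
+ * supported bus format (YUYV8_2X8) transfers each pixel as two
+ * 8-bit samples, hence the doubling below.
+ */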
+ writel(CSI_WIN_CTRL_W_ACTIVE(csi->fmt.width * 2),
+ csi->regs + CSI_WIN_CTRL_W_REG);
+ writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
+ csi->regs + CSI_WIN_CTRL_H_REG);
+
+ hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
+ pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
+ vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
+ writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
+ CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
+ CSI_CFG_VSYNC_POL(vsync_pol) |
+ CSI_CFG_HSYNC_POL(hsync_pol) |
+ CSI_CFG_PCLK_POL(pclk_pol),
+ csi->regs + CSI_CFG_REG);
+
+ /* Setup buffer length */
+ writel(csi->fmt.plane_fmt[0].bytesperline,
+ csi->regs + CSI_BUF_LEN_REG);
+
+ /* Prepare our buffers in hardware */
+ ret = sun4i_csi_buffer_fill_all(csi);
+ if (ret) {
+ spin_unlock_irqrestore(&csi->qlock, flags);
+ goto err_disable_pipeline;
+ }
+
+ /* Enable double buffering */
+ writel(CSI_BUF_CTRL_DBE, csi->regs + CSI_BUF_CTRL_REG);
+
+ /* Clear the pending interrupts */
+ writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_STA_REG);
+
+ /* Enable frame done interrupt */
+ writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_EN_REG);
+
+ sun4i_csi_capture_start(csi);
+
+ spin_unlock_irqrestore(&csi->qlock, flags);
+
+ ret = v4l2_subdev_call(csi->src_subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto err_disable_device;
+
+ return 0;
+
+err_disable_device:
+ sun4i_csi_capture_stop(csi);
+
+err_disable_pipeline:
+ media_pipeline_stop(&csi->vdev.entity);
+
+err_free_scratch_buffer:
+ dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
+ csi->scratch.paddr);
+
+err_clear_dma_queue:
+ spin_lock_irqsave(&csi->qlock, flags);
+ return_all_buffers(csi, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&csi->qlock, flags);
+
+ return ret;
+}
+
+static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
+{
+ struct sun4i_csi *csi = vb2_get_drv_priv(vq);
+ unsigned long flags;
+
+ dev_dbg(csi->dev, "Stopping capture\n");
+
+ v4l2_subdev_call(csi->src_subdev, video, s_stream, 0);
+ sun4i_csi_capture_stop(csi);
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&csi->qlock, flags);
+ return_all_buffers(csi, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&csi->qlock, flags);
+
+ media_pipeline_stop(&csi->vdev.entity);
+
+ dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
+ csi->scratch.paddr);
+}
+
+static const struct vb2_ops sun4i_csi_qops = {
+ .queue_setup = sun4i_csi_queue_setup,
+ .buf_prepare = sun4i_csi_buffer_prepare,
+ .buf_queue = sun4i_csi_buffer_queue,
+ .start_streaming = sun4i_csi_start_streaming,
+ .stop_streaming = sun4i_csi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static irqreturn_t sun4i_csi_irq(int irq, void *data)
+{
+ struct sun4i_csi *csi = data;
+ u32 reg;
+
+ reg = readl(csi->regs + CSI_INT_STA_REG);
+
+ /* Acknowledge the interrupts */
+ writel(reg, csi->regs + CSI_INT_STA_REG);
+
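+ /* All pending bits were acknowledged above; only frame-done needs further handling */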
+ if (!(reg & CSI_INT_FRM_DONE))
+ return IRQ_HANDLED;
+
+ spin_lock(&csi->qlock);
+ if (sun4i_csi_buffer_flip(csi, csi->sequence++)) {
+ dev_warn(csi->dev, "%s: Flip failed\n", __func__);
+ sun4i_csi_capture_stop(csi);
+ }
+ spin_unlock(&csi->qlock);
+
+ return IRQ_HANDLED;
+}
+
+int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
+{
+ struct vb2_queue *q = &csi->queue;
+ int ret;
+ int i;
+
+ spin_lock_init(&csi->qlock);
+ mutex_init(&csi->lock);
+
+ INIT_LIST_HEAD(&csi->buf_list);
+ for (i = 0; i < CSI_MAX_BUFFER; i++)
+ csi->current_buf[i] = NULL;
+
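+ /* At least three buffers: presumably two to keep both hardware slots filled plus one being processed */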
+ q->min_buffers_needed = 3;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP;
+ q->lock = &csi->lock;
+ q->drv_priv = csi;
+ q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
+ q->ops = &sun4i_csi_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = csi->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to initialize VB2 queue\n");
+ goto err_free_mutex;
+ }
+
+ ret = v4l2_device_register(csi->dev, &csi->v4l);
+ if (ret) {
+ dev_err(csi->dev, "Couldn't register the v4l2 device\n");
+ goto err_free_queue;
+ }
+
+ ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
+ dev_name(csi->dev), csi);
+ if (ret) {
+ dev_err(csi->dev, "Couldn't register our interrupt\n");
+ goto err_unregister_device;
+ }
+
+ return 0;
+
+err_unregister_device:
+ v4l2_device_unregister(&csi->v4l);
+
+err_free_queue:
+ vb2_queue_release(q);
+
+err_free_mutex:
+ mutex_destroy(&csi->lock);
+ return ret;
+}
+
+void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
+{
+ v4l2_device_unregister(&csi->v4l);
+ vb2_queue_release(&csi->queue);
+ mutex_destroy(&csi->lock);
+}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
new file mode 100644
index 000000000000..83a3a0257c7b
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 NextThing Co
+ * Copyright (C) 2016-2019 Bootlin
+ *
+ * Author: Maxime Ripard <maxime.ripard@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun4i_csi.h"
+
+#define CSI_DEFAULT_WIDTH 640
+#define CSI_DEFAULT_HEIGHT 480
+
+static const struct sun4i_csi_format sun4i_csi_formats[] = {
+ /* YUV422 inputs */
+ {
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .input = CSI_INPUT_YUV,
+ .output = CSI_OUTPUT_YUV_420_PLANAR,
+ .num_planes = 3,
+ .bpp = { 8, 8, 8 },
+ .hsub = 2,
+ .vsub = 2,
+ },
+};
+
+const struct sun4i_csi_format *sun4i_csi_find_format(const u32 *fourcc,
+ const u32 *mbus)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_csi_formats); i++) {
+ if (fourcc && *fourcc != sun4i_csi_formats[i].fourcc)
+ continue;
+
+ if (mbus && *mbus != sun4i_csi_formats[i].mbus)
+ continue;
+
+ return &sun4i_csi_formats[i];
+ }
+
+ return NULL;
+}
+
+static int sun4i_csi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "sun4i-csi", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(csi->dev));
+
+ return 0;
+}
+
+static int sun4i_csi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strscpy(inp->name, "Camera", sizeof(inp->name));
+
+ return 0;
+}
+
+static int sun4i_csi_g_input(struct file *file, void *fh,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int sun4i_csi_s_input(struct file *file, void *fh,
+ unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void _sun4i_csi_try_fmt(struct sun4i_csi *csi,
+ struct v4l2_pix_format_mplane *pix)
+{
+ const struct sun4i_csi_format *_fmt;
+ unsigned int height, width;
+ unsigned int i;
+
+ _fmt = sun4i_csi_find_format(&pix->pixelformat, NULL);
+ if (!_fmt)
+ _fmt = &sun4i_csi_formats[0];
+
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
+ pix->ycbcr_enc);
+
+ pix->num_planes = _fmt->num_planes;
+ pix->pixelformat = _fmt->fourcc;
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ /* Align the width and height on the subsampling */
+ width = ALIGN(pix->width, _fmt->hsub);
+ height = ALIGN(pix->height, _fmt->vsub);
+
+ /* Clamp the width and height to our capabilities */
+ pix->width = clamp(width, _fmt->hsub, CSI_MAX_WIDTH);
+ pix->height = clamp(height, _fmt->vsub, CSI_MAX_HEIGHT);
+
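+ /* Plane 0 is full-resolution luma; chroma planes are subsampled by hsub/vsub */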
+ for (i = 0; i < _fmt->num_planes; i++) {
+ unsigned int hsub = i > 0 ? _fmt->hsub : 1;
+ unsigned int vsub = i > 0 ? _fmt->vsub : 1;
+ unsigned int bpl;
+
+ bpl = pix->width / hsub * _fmt->bpp[i] / 8;
+ pix->plane_fmt[i].bytesperline = bpl;
+ pix->plane_fmt[i].sizeimage = bpl * pix->height / vsub;
+ memset(pix->plane_fmt[i].reserved, 0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+}
+
+static int sun4i_csi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+
+ _sun4i_csi_try_fmt(csi, &f->fmt.pix_mp);
+
+ return 0;
+}
+
+static int sun4i_csi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+
+ _sun4i_csi_try_fmt(csi, &f->fmt.pix_mp);
+ csi->fmt = f->fmt.pix_mp;
+
+ return 0;
+}
+
+static int sun4i_csi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+
+ f->fmt.pix_mp = csi->fmt;
+
+ return 0;
+}
+
+static int sun4i_csi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(sun4i_csi_formats))
+ return -EINVAL;
+
+ f->pixelformat = sun4i_csi_formats[f->index].fourcc;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops sun4i_csi_ioctl_ops = {
+ .vidioc_querycap = sun4i_csi_querycap,
+
+ .vidioc_enum_fmt_vid_cap = sun4i_csi_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = sun4i_csi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = sun4i_csi_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = sun4i_csi_try_fmt_vid_cap,
+
+ .vidioc_enum_input = sun4i_csi_enum_input,
+ .vidioc_g_input = sun4i_csi_g_input,
+ .vidioc_s_input = sun4i_csi_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static int sun4i_csi_open(struct file *file)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+ int ret;
+
+ ret = mutex_lock_interruptible(&csi->lock);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(csi->dev);
+ if (ret < 0)
+ goto err_pm_put;
+
+ ret = v4l2_pipeline_pm_use(&csi->vdev.entity, 1);
+ if (ret)
+ goto err_pm_put;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto err_pipeline_pm_put;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+
+err_pipeline_pm_put:
+ v4l2_pipeline_pm_use(&csi->vdev.entity, 0);
+
+err_pm_put:
+ pm_runtime_put(csi->dev);
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int sun4i_csi_release(struct file *file)
+{
+ struct sun4i_csi *csi = video_drvdata(file);
+
+ mutex_lock(&csi->lock);
+
+ v4l2_fh_release(file);
+ v4l2_pipeline_pm_use(&csi->vdev.entity, 0);
+ pm_runtime_put(csi->dev);
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations sun4i_csi_fops = {
+ .owner = THIS_MODULE,
+ .open = sun4i_csi_open,
+ .release = sun4i_csi_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .write = vb2_fop_write,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_mbus_framefmt sun4i_csi_pad_fmt_default = {
+ .width = CSI_DEFAULT_WIDTH,
+ .height = CSI_DEFAULT_HEIGHT,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
+static int sun4i_csi_subdev_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_get_try_format(subdev, cfg, CSI_SUBDEV_SINK);
+ *fmt = sun4i_csi_pad_fmt_default;
+
+ return 0;
+}
+
+static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
+ struct v4l2_mbus_framefmt *subdev_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ else
+ subdev_fmt = &csi->subdev_fmt;
+
+ fmt->format = *subdev_fmt;
+
+ return 0;
+}
+
+static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
+ struct v4l2_mbus_framefmt *subdev_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ subdev_fmt = v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+ else
+ subdev_fmt = &csi->subdev_fmt;
+
+ /* We can only set the format on the sink pad */
+ if (fmt->pad == CSI_SUBDEV_SINK) {
+ /* It's the sink, allow changing the frame size and media bus code */
+ subdev_fmt->width = fmt->format.width;
+ subdev_fmt->height = fmt->format.height;
+ subdev_fmt->code = fmt->format.code;
+ }
+
+ fmt->format = *subdev_fmt;
+
+ return 0;
+}
+
+static int
+sun4i_csi_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *mbus)
+{
+ if (mbus->index >= ARRAY_SIZE(sun4i_csi_formats))
+ return -EINVAL;
+
+ mbus->code = sun4i_csi_formats[mbus->index].mbus;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops sun4i_csi_subdev_pad_ops = {
+ .link_validate = v4l2_subdev_link_validate_default,
+ .init_cfg = sun4i_csi_subdev_init_cfg,
+ .get_fmt = sun4i_csi_subdev_get_fmt,
+ .set_fmt = sun4i_csi_subdev_set_fmt,
+ .enum_mbus_code = sun4i_csi_subdev_enum_mbus_code,
+};
+
+const struct v4l2_subdev_ops sun4i_csi_subdev_ops = {
+ .pad = &sun4i_csi_subdev_pad_ops,
+};
+
+int sun4i_csi_v4l2_register(struct sun4i_csi *csi)
+{
+ struct video_device *vdev = &csi->vdev;
+ int ret;
+
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
+ vdev->v4l2_dev = &csi->v4l;
+ vdev->queue = &csi->queue;
+ strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->lock = &csi->lock;
+
+ /* Set a default format */
+ csi->fmt.pixelformat = sun4i_csi_formats[0].fourcc;
+ csi->fmt.width = CSI_DEFAULT_WIDTH;
+ csi->fmt.height = CSI_DEFAULT_HEIGHT;
+ _sun4i_csi_try_fmt(csi, &csi->fmt);
+ csi->subdev_fmt = sun4i_csi_pad_fmt_default;
+
+ vdev->fops = &sun4i_csi_fops;
+ vdev->ioctl_ops = &sun4i_csi_ioctl_ops;
+ video_set_drvdata(vdev, csi);
+
+ ret = video_register_device(&csi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ return ret;
+
+ dev_info(csi->dev, "Device registered as %s\n",
+ video_device_node_name(vdev));
+
+ return 0;
+}
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 6e0e894154f4..055eb0b8e396 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -866,11 +866,8 @@ static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "No csi IRQ specified\n");
- ret = -ENXIO;
- return ret;
- }
+ if (irq < 0)
+ return -ENXIO;
ret = devm_request_irq(&pdev->dev, irq, sun6i_csi_isr, 0, MODULE_NAME,
sdev);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 6498b2d0492e..a632602131f2 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -380,38 +380,39 @@ static int tegra_cec_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device\n");
- goto clk_error;
- }
-
- cec->notifier = cec_notifier_get(hdmi_dev);
- if (!cec->notifier) {
- ret = -ENOMEM;
- goto clk_error;
+ goto err_clk;
}
cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
- CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(cec->adap)) {
ret = -ENOMEM;
dev_err(&pdev->dev, "Couldn't create cec adapter\n");
- goto cec_error;
+ goto err_clk;
+ }
+
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto err_adapter;
}
+
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't register device\n");
- goto cec_error;
+ goto err_notifier;
}
- cec_register_cec_notifier(cec->adap, cec->notifier);
-
return 0;
-cec_error:
- if (cec->notifier)
- cec_notifier_put(cec->notifier);
+err_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier);
+err_adapter:
cec_delete_adapter(cec->adap);
-clk_error:
+err_clk:
clk_disable_unprepare(cec->clk);
return ret;
}
@@ -422,8 +423,8 @@ static int tegra_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(cec->clk);
+ cec_notifier_cec_adap_unregister(cec->notifier);
cec_unregister_adapter(cec->adap);
- cec_notifier_put(cec->notifier);
return 0;
}
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/platform/tegra-cec/tegra_cec.h
index 32d7d69f9491..8c370be38e1e 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.h
+++ b/drivers/media/platform/tegra-cec/tegra_cec.h
@@ -34,24 +34,24 @@
#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff
#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \
((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK)
-#define TEGRA_CEC_HWCTRL_RX_SNOOP (1 << 15)
-#define TEGRA_CEC_HWCTRL_RX_NAK_MODE (1 << 16)
-#define TEGRA_CEC_HWCTRL_TX_NAK_MODE (1 << 24)
-#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE (1 << 30)
-#define TEGRA_CEC_HWCTRL_TX_RX_MODE (1 << 31)
+#define TEGRA_CEC_HWCTRL_RX_SNOOP BIT(15)
+#define TEGRA_CEC_HWCTRL_RX_NAK_MODE BIT(16)
+#define TEGRA_CEC_HWCTRL_TX_NAK_MODE BIT(24)
+#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE BIT(30)
+#define TEGRA_CEC_HWCTRL_TX_RX_MODE BIT(31)
-#define TEGRA_CEC_INPUT_FILTER_MODE (1 << 31)
+#define TEGRA_CEC_INPUT_FILTER_MODE BIT(31)
#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0
#define TEGRA_CEC_TX_REG_DATA_SHIFT 0
-#define TEGRA_CEC_TX_REG_EOM (1 << 8)
-#define TEGRA_CEC_TX_REG_BCAST (1 << 12)
-#define TEGRA_CEC_TX_REG_START_BIT (1 << 16)
-#define TEGRA_CEC_TX_REG_RETRY (1 << 17)
+#define TEGRA_CEC_TX_REG_EOM BIT(8)
+#define TEGRA_CEC_TX_REG_BCAST BIT(12)
+#define TEGRA_CEC_TX_REG_START_BIT BIT(16)
+#define TEGRA_CEC_TX_REG_RETRY BIT(17)
#define TEGRA_CEC_RX_REGISTER_SHIFT 0
-#define TEGRA_CEC_RX_REGISTER_EOM (1 << 8)
-#define TEGRA_CEC_RX_REGISTER_ACK (1 << 9)
+#define TEGRA_CEC_RX_REGISTER_EOM BIT(8)
+#define TEGRA_CEC_RX_REGISTER_ACK BIT(9)
#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0
#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8
@@ -79,38 +79,38 @@
#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4
#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8
-#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY (1 << 0)
-#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN (1 << 1)
-#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
-#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED (1 << 3)
-#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED (1 << 4)
-#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED (1 << 5)
-#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL (1 << 8)
-#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN (1 << 9)
-#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED (1 << 10)
-#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED (1 << 11)
-#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED (1 << 12)
-#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
-#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
-
-#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY (1 << 0)
-#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN (1 << 1)
-#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
-#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED (1 << 3)
-#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED (1 << 4)
-#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED (1 << 5)
-#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL (1 << 8)
-#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN (1 << 9)
-#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED (1 << 10)
-#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED (1 << 11)
-#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED (1 << 12)
-#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
-#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY BIT(0)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN BIT(1)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD BIT(2)
+#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED BIT(3)
+#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED BIT(4)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED BIT(5)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL BIT(8)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN BIT(9)
+#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED BIT(10)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED BIT(11)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED BIT(12)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L BIT(13)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H BIT(14)
+
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY BIT(0)
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN BIT(1)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD BIT(2)
+#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED BIT(3)
+#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED BIT(4)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED BIT(5)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL BIT(8)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN BIT(9)
+#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED BIT(10)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED BIT(11)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED BIT(12)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L BIT(13)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H BIT(14)
#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0
#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17
#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21
-#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT (1 << 25)
-#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER (1 << 26)
+#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT BIT(25)
+#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER BIT(26)
#endif /* TEGRA_CEC_H */
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 9e86d761546b..223161f9c403 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -913,9 +913,6 @@ static int cal_querycap(struct file *file, void *priv,
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", ctx->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1419,6 +1416,8 @@ static const struct video_device cal_videodev = {
.ioctl_ops = &cal_ioctl_ops,
.minor = -1,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE,
};
/* -----------------------------------------------------------------
@@ -1613,6 +1612,7 @@ of_get_next_port(const struct device_node *parent,
}
prev = port;
} while (!of_node_name_eq(port, "port"));
+ of_node_put(ports);
}
return port;
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index fd37d79e1619..53d27cd6e10a 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -445,23 +445,25 @@ int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
if (ret)
- return ret;
+ goto free_desc;
ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
if (ret)
- return ret;
+ goto unmap_desc;
while (vpdma_list_busy(vpdma, list_num) && --timeout)
;
if (timeout == 0) {
dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
- return -EBUSY;
+ ret = -EBUSY;
}
+unmap_desc:
vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+free_desc:
vpdma_free_desc_buf(&abort_list.buf);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index dda04498ac56..60b575bb44c4 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -224,7 +224,6 @@ static const struct vpe_port_data port_data[11] = {
/* driver info for each of the supported video formats */
struct vpe_fmt {
- char *name; /* human-readable name */
u32 fourcc; /* standard format identifier */
u8 types; /* CAPTURE and/or OUTPUT */
u8 coplanar; /* set for unpacked Luma and Chroma */
@@ -234,7 +233,6 @@ struct vpe_fmt {
static struct vpe_fmt vpe_formats[] = {
{
- .name = "NV16 YUV 422 co-planar",
.fourcc = V4L2_PIX_FMT_NV16,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -243,7 +241,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "NV12 YUV 420 co-planar",
.fourcc = V4L2_PIX_FMT_NV12,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 1,
@@ -252,7 +249,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "YUYV 422 packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -260,7 +256,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "UYVY 422 packed",
.fourcc = V4L2_PIX_FMT_UYVY,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -268,7 +263,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "RGB888 packed",
.fourcc = V4L2_PIX_FMT_RGB24,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -276,7 +270,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "ARGB32",
.fourcc = V4L2_PIX_FMT_RGB32,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -284,7 +277,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "BGR888 packed",
.fourcc = V4L2_PIX_FMT_BGR24,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -292,7 +284,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "ABGR32",
.fourcc = V4L2_PIX_FMT_BGR32,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -300,7 +291,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -308,7 +298,6 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
- .name = "RGB5551",
.fourcc = V4L2_PIX_FMT_RGB555,
.types = VPE_FMT_TYPE_CAPTURE,
.coplanar = 0,
@@ -1514,7 +1503,6 @@ static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
if (!fmt)
return -EINVAL;
- strscpy(f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
return 0;
}
diff --git a/drivers/media/platform/ti-vpe/vpe_regs.h b/drivers/media/platform/ti-vpe/vpe_regs.h
index 9969bea0dded..1a1ad5ae1228 100644
--- a/drivers/media/platform/ti-vpe/vpe_regs.h
+++ b/drivers/media/platform/ti-vpe/vpe_regs.h
@@ -48,24 +48,24 @@
#define VPE_INT0_ENABLE0_SET 0x0030
#define VPE_INT0_ENABLE0 VPE_INT0_ENABLE0_SET
#define VPE_INT0_ENABLE0_CLR 0x0038
-#define VPE_INT0_LIST0_COMPLETE (1 << 0)
-#define VPE_INT0_LIST0_NOTIFY (1 << 1)
-#define VPE_INT0_LIST1_COMPLETE (1 << 2)
-#define VPE_INT0_LIST1_NOTIFY (1 << 3)
-#define VPE_INT0_LIST2_COMPLETE (1 << 4)
-#define VPE_INT0_LIST2_NOTIFY (1 << 5)
-#define VPE_INT0_LIST3_COMPLETE (1 << 6)
-#define VPE_INT0_LIST3_NOTIFY (1 << 7)
-#define VPE_INT0_LIST4_COMPLETE (1 << 8)
-#define VPE_INT0_LIST4_NOTIFY (1 << 9)
-#define VPE_INT0_LIST5_COMPLETE (1 << 10)
-#define VPE_INT0_LIST5_NOTIFY (1 << 11)
-#define VPE_INT0_LIST6_COMPLETE (1 << 12)
-#define VPE_INT0_LIST6_NOTIFY (1 << 13)
-#define VPE_INT0_LIST7_COMPLETE (1 << 14)
-#define VPE_INT0_LIST7_NOTIFY (1 << 15)
-#define VPE_INT0_DESCRIPTOR (1 << 16)
-#define VPE_DEI_FMD_INT (1 << 18)
+#define VPE_INT0_LIST0_COMPLETE BIT(0)
+#define VPE_INT0_LIST0_NOTIFY BIT(1)
+#define VPE_INT0_LIST1_COMPLETE BIT(2)
+#define VPE_INT0_LIST1_NOTIFY BIT(3)
+#define VPE_INT0_LIST2_COMPLETE BIT(4)
+#define VPE_INT0_LIST2_NOTIFY BIT(5)
+#define VPE_INT0_LIST3_COMPLETE BIT(6)
+#define VPE_INT0_LIST3_NOTIFY BIT(7)
+#define VPE_INT0_LIST4_COMPLETE BIT(8)
+#define VPE_INT0_LIST4_NOTIFY BIT(9)
+#define VPE_INT0_LIST5_COMPLETE BIT(10)
+#define VPE_INT0_LIST5_NOTIFY BIT(11)
+#define VPE_INT0_LIST6_COMPLETE BIT(12)
+#define VPE_INT0_LIST6_NOTIFY BIT(13)
+#define VPE_INT0_LIST7_COMPLETE BIT(14)
+#define VPE_INT0_LIST7_NOTIFY BIT(15)
+#define VPE_INT0_DESCRIPTOR BIT(16)
+#define VPE_DEI_FMD_INT BIT(18)
#define VPE_INT0_STATUS1_RAW_SET 0x0024
#define VPE_INT0_STATUS1_RAW VPE_INT0_STATUS1_RAW_SET
@@ -74,21 +74,21 @@
#define VPE_INT0_ENABLE1_SET 0x0034
#define VPE_INT0_ENABLE1 VPE_INT0_ENABLE1_SET
#define VPE_INT0_ENABLE1_CLR 0x003c
-#define VPE_INT0_CHANNEL_GROUP0 (1 << 0)
-#define VPE_INT0_CHANNEL_GROUP1 (1 << 1)
-#define VPE_INT0_CHANNEL_GROUP2 (1 << 2)
-#define VPE_INT0_CHANNEL_GROUP3 (1 << 3)
-#define VPE_INT0_CHANNEL_GROUP4 (1 << 4)
-#define VPE_INT0_CHANNEL_GROUP5 (1 << 5)
-#define VPE_INT0_CLIENT (1 << 7)
-#define VPE_DEI_ERROR_INT (1 << 16)
-#define VPE_DS1_UV_ERROR_INT (1 << 22)
+#define VPE_INT0_CHANNEL_GROUP0 BIT(0)
+#define VPE_INT0_CHANNEL_GROUP1 BIT(1)
+#define VPE_INT0_CHANNEL_GROUP2 BIT(2)
+#define VPE_INT0_CHANNEL_GROUP3 BIT(3)
+#define VPE_INT0_CHANNEL_GROUP4 BIT(4)
+#define VPE_INT0_CHANNEL_GROUP5 BIT(5)
+#define VPE_INT0_CLIENT BIT(7)
+#define VPE_DEI_ERROR_INT BIT(16)
+#define VPE_DS1_UV_ERROR_INT BIT(22)
#define VPE_INTC_EOI 0x00a0
#define VPE_CLK_ENABLE 0x0100
-#define VPE_VPEDMA_CLK_ENABLE (1 << 0)
-#define VPE_DATA_PATH_CLK_ENABLE (1 << 1)
+#define VPE_VPEDMA_CLK_ENABLE BIT(0)
+#define VPE_DATA_PATH_CLK_ENABLE BIT(1)
#define VPE_CLK_RESET 0x0104
#define VPE_VPDMA_CLK_RESET_MASK 0x1
@@ -101,11 +101,11 @@
#define VPE_CLK_FORMAT_SELECT 0x010c
#define VPE_CSC_SRC_SELECT_MASK 0x03
#define VPE_CSC_SRC_SELECT_SHIFT 0
-#define VPE_RGB_OUT_SELECT (1 << 8)
+#define VPE_RGB_OUT_SELECT BIT(8)
#define VPE_DS_SRC_SELECT_MASK 0x07
#define VPE_DS_SRC_SELECT_SHIFT 9
-#define VPE_DS_BYPASS (1 << 16)
-#define VPE_COLOR_SEPARATE_422 (1 << 18)
+#define VPE_DS_BYPASS BIT(16)
+#define VPE_COLOR_SEPARATE_422 BIT(18)
#define VPE_DS_SRC_DEI_SCALER (5 << VPE_DS_SRC_SELECT_SHIFT)
#define VPE_CSC_SRC_DEI_SCALER (3 << VPE_CSC_SRC_SELECT_SHIFT)
@@ -115,8 +115,8 @@
#define VPE_RANGE_RANGE_MAP_Y_SHIFT 0
#define VPE_RANGE_RANGE_MAP_UV_MASK 0x07
#define VPE_RANGE_RANGE_MAP_UV_SHIFT 3
-#define VPE_RANGE_MAP_ON (1 << 6)
-#define VPE_RANGE_REDUCTION_ON (1 << 28)
+#define VPE_RANGE_MAP_ON BIT(6)
+#define VPE_RANGE_REDUCTION_ON BIT(28)
/* VPE chrominance upsampler regs */
#define VPE_US1_R0 0x0304
@@ -195,13 +195,13 @@
#define VPE_DEI_WIDTH_SHIFT 0
#define VPE_DEI_HEIGHT_MASK 0x07ff
#define VPE_DEI_HEIGHT_SHIFT 16
-#define VPE_DEI_INTERLACE_BYPASS (1 << 29)
-#define VPE_DEI_FIELD_FLUSH (1 << 30)
-#define VPE_DEI_PROGRESSIVE (1 << 31)
+#define VPE_DEI_INTERLACE_BYPASS BIT(29)
+#define VPE_DEI_FIELD_FLUSH BIT(30)
+#define VPE_DEI_PROGRESSIVE BIT(31)
#define VPE_MDT_BYPASS 0x0604
-#define VPE_MDT_TEMPMAX_BYPASS (1 << 0)
-#define VPE_MDT_SPATMAX_BYPASS (1 << 1)
+#define VPE_MDT_TEMPMAX_BYPASS BIT(0)
+#define VPE_MDT_SPATMAX_BYPASS BIT(1)
#define VPE_MDT_SF_THRESHOLD 0x0608
#define VPE_MDT_SF_SC_THR1_MASK 0xff
@@ -214,8 +214,8 @@
#define VPE_EDI_CONFIG 0x060c
#define VPE_EDI_INP_MODE_MASK 0x03
#define VPE_EDI_INP_MODE_SHIFT 0
-#define VPE_EDI_ENABLE_3D (1 << 2)
-#define VPE_EDI_ENABLE_CHROMA_3D (1 << 3)
+#define VPE_EDI_ENABLE_3D BIT(2)
+#define VPE_EDI_ENABLE_CHROMA_3D BIT(3)
#define VPE_EDI_CHROMA3D_COR_THR_MASK 0xff
#define VPE_EDI_CHROMA3D_COR_THR_SHIFT 8
#define VPE_EDI_DIR_COR_LOWER_THR_MASK 0xff
@@ -268,7 +268,7 @@
#define VPE_FMD_WINDOW_MINX_SHIFT 0
#define VPE_FMD_WINDOW_MAXX_MASK 0x07ff
#define VPE_FMD_WINDOW_MAXX_SHIFT 16
-#define VPE_FMD_WINDOW_ENABLE (1 << 31)
+#define VPE_FMD_WINDOW_ENABLE BIT(31)
#define VPE_DEI_FMD_WINDOW_R1 0x0624
#define VPE_FMD_WINDOW_MINY_MASK 0x07ff
@@ -277,10 +277,10 @@
#define VPE_FMD_WINDOW_MAXY_SHIFT 16
#define VPE_DEI_FMD_CONTROL_R0 0x0628
-#define VPE_FMD_ENABLE (1 << 0)
-#define VPE_FMD_LOCK (1 << 1)
-#define VPE_FMD_JAM_DIR (1 << 2)
-#define VPE_FMD_BED_ENABLE (1 << 3)
+#define VPE_FMD_ENABLE BIT(0)
+#define VPE_FMD_LOCK BIT(1)
+#define VPE_FMD_JAM_DIR BIT(2)
+#define VPE_FMD_BED_ENABLE BIT(3)
#define VPE_FMD_CAF_FIELD_THR_MASK 0xff
#define VPE_FMD_CAF_FIELD_THR_SHIFT 16
#define VPE_FMD_CAF_LINE_THR_MASK 0xff
@@ -293,7 +293,7 @@
#define VPE_DEI_FMD_STATUS_R0 0x0630
#define VPE_FMD_CAF_MASK 0x000fffff
#define VPE_FMD_CAF_SHIFT 0
-#define VPE_FMD_RESET (1 << 24)
+#define VPE_FMD_RESET BIT(24)
#define VPE_DEI_FMD_STATUS_R1 0x0634
#define VPE_FMD_FIELD_DIFF_MASK 0x0fffffff
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 038de7a2027a..78841b9015ce 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -18,9 +18,10 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/i2c/ov7670.h>
-#include <media/videobuf-dma-sg.h>
+#include <media/videobuf2-dma-sg.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm_qos.h>
@@ -84,16 +85,11 @@ struct via_camera {
* live in frame buffer memory, so we don't call them "DMA".
*/
unsigned int cb_offsets[3]; /* offsets into fb mem */
- u8 __iomem *cb_addrs[3]; /* Kernel-space addresses */
+ u8 __iomem *cb_addrs[3]; /* Kernel-space addresses */
int n_cap_bufs; /* How many are we using? */
- int next_buf;
- struct videobuf_queue vb_queue;
- struct list_head buffer_queue; /* prot. by reg_lock */
- /*
- * User tracking.
- */
- int users;
- struct file *owner;
+ struct vb2_queue vq;
+ struct list_head buffer_queue;
+ u32 sequence;
/*
* Video format information. sensor_format is kept in a form
* that we can use to pass to the sensor. We always run the
@@ -106,6 +102,13 @@ struct via_camera {
u32 mbus_code;
};
+/* buffer for one video frame */
+struct via_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head queue;
+};
+
/*
* Yes, this is a hack, but there's only going to be one of these
* on any system we know of.
@@ -142,13 +145,11 @@ static struct via_camera *via_cam_info;
* now this information must be managed at this level too.
*/
static struct via_format {
- __u8 *desc;
__u32 pixelformat;
int bpp; /* Bytes per pixel */
u32 mbus_code;
} via_formats[] = {
{
- .desc = "YUYV 4:2:2",
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
@@ -324,28 +325,15 @@ static irqreturn_t viacam_quick_irq(int irq, void *data)
}
/*
- * Find the next videobuf buffer which has somebody waiting on it.
+ * Find the next buffer which has somebody waiting on it.
*/
-static struct videobuf_buffer *viacam_next_buffer(struct via_camera *cam)
+static struct via_buffer *viacam_next_buffer(struct via_camera *cam)
{
- unsigned long flags;
- struct videobuf_buffer *buf = NULL;
-
- spin_lock_irqsave(&cam->viadev->reg_lock, flags);
if (cam->opstate != S_RUNNING)
- goto out;
+ return NULL;
if (list_empty(&cam->buffer_queue))
- goto out;
- buf = list_entry(cam->buffer_queue.next, struct videobuf_buffer, queue);
- if (!waitqueue_active(&buf->done)) {/* Nobody waiting */
- buf = NULL;
- goto out;
- }
- list_del(&buf->queue);
- buf->state = VIDEOBUF_ACTIVE;
-out:
- spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
- return buf;
+ return NULL;
+ return list_entry(cam->buffer_queue.next, struct via_buffer, queue);
}
/*
@@ -353,11 +341,12 @@ out:
*/
static irqreturn_t viacam_irq(int irq, void *data)
{
- int bufn;
- struct videobuf_buffer *vb;
struct via_camera *cam = data;
- struct videobuf_dmabuf *vdma;
+ struct via_buffer *vb;
+ int bufn;
+ struct sg_table *sgt;
+ mutex_lock(&cam->lock);
/*
* If there is no place to put the data frame, don't bother
* with anything else.
@@ -375,12 +364,15 @@ static irqreturn_t viacam_irq(int irq, void *data)
/*
* Copy over the data and let any waiters know.
*/
- vdma = videobuf_to_dma(vb);
- viafb_dma_copy_out_sg(cam->cb_offsets[bufn], vdma->sglist, vdma->sglen);
- vb->state = VIDEOBUF_DONE;
- vb->size = cam->user_format.sizeimage;
- wake_up(&vb->done);
+ sgt = vb2_dma_sg_plane_desc(&vb->vbuf.vb2_buf, 0);
+ vb->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ viafb_dma_copy_out_sg(cam->cb_offsets[bufn], sgt->sgl, sgt->nents);
+ vb->vbuf.sequence = cam->sequence++;
+ vb->vbuf.field = V4L2_FIELD_NONE;
+ list_del(&vb->queue);
+ vb2_buffer_done(&vb->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
done:
+ mutex_unlock(&cam->lock);
return IRQ_HANDLED;
}
@@ -556,7 +548,6 @@ static int viacam_config_controller(struct via_camera *cam)
static void viacam_start_engine(struct via_camera *cam)
{
spin_lock_irq(&cam->viadev->reg_lock);
- cam->next_buf = 0;
viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
viacam_int_enable(cam);
(void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
@@ -577,81 +568,117 @@ static void viacam_stop_engine(struct via_camera *cam)
/* --------------------------------------------------------------------------*/
-/* Videobuf callback ops */
+/* vb2 callback ops */
-/*
- * buffer_setup. The purpose of this one would appear to be to tell
- * videobuf how big a single image is. It's also evidently up to us
- * to put some sort of limit on the maximum number of buffers allowed.
- */
-static int viacam_vb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static struct via_buffer *vb2_to_via_buffer(struct vb2_buffer *vb)
{
- struct via_camera *cam = q->priv_data;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- *size = cam->user_format.sizeimage;
- if (*count == 0 || *count > 6) /* Arbitrary number */
- *count = 6;
- return 0;
+ return container_of(vbuf, struct via_buffer, vbuf);
}
-/*
- * Prepare a buffer.
- */
-static int viacam_vb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static void viacam_vb2_queue(struct vb2_buffer *vb)
{
- struct via_camera *cam = q->priv_data;
-
- vb->size = cam->user_format.sizeimage;
- vb->width = cam->user_format.width; /* bytesperline???? */
- vb->height = cam->user_format.height;
- vb->field = field;
- if (vb->state == VIDEOBUF_NEEDS_INIT) {
- int ret = videobuf_iolock(q, vb, NULL);
- if (ret)
- return ret;
+ struct via_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct via_buffer *via = vb2_to_via_buffer(vb);
+
+ list_add_tail(&via->queue, &cam->buffer_queue);
+}
+
+static int viacam_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct via_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb2_plane_size(vb, 0) < cam->user_format.sizeimage) {
+ cam_dbg(cam,
+ "Plane size too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0),
+ cam->user_format.sizeimage);
+ return -EINVAL;
}
- vb->state = VIDEOBUF_PREPARED;
+
+ vb2_set_plane_payload(vb, 0, cam->user_format.sizeimage);
+
return 0;
}
-/*
- * We've got a buffer to put data into.
- *
- * FIXME: check for a running engine and valid buffers?
- */
-static void viacam_vb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static int viacam_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
- struct via_camera *cam = q->priv_data;
+ struct via_camera *cam = vb2_get_drv_priv(vq);
+ int size = cam->user_format.sizeimage;
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ sizes[0] = size;
+ return 0;
+}
+
+static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct via_camera *cam = vb2_get_drv_priv(vq);
+ struct via_buffer *buf, *tmp;
+ int ret = 0;
+
+ if (cam->opstate != S_IDLE) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /*
+ * Configure things if need be.
+ */
+ if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
+ ret = viacam_configure_sensor(cam);
+ if (ret)
+ goto out;
+ ret = viacam_config_controller(cam);
+ if (ret)
+ goto out;
+ }
+ cam->sequence = 0;
/*
- * Note that videobuf holds the lock when it calls
- * us, so we need not (indeed, cannot) take it here.
+ * If the CPU goes into C3, the DMA transfer gets corrupted and
+ * users start filing unsightly bug reports. Put in a "latency"
+ * requirement which will keep the CPU out of the deeper sleep
+ * states.
*/
- vb->state = VIDEOBUF_QUEUED;
- list_add_tail(&vb->queue, &cam->buffer_queue);
+ pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ viacam_start_engine(cam);
+ return 0;
+out:
+ list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
}
-/*
- * Free a buffer.
- */
-static void viacam_vb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void viacam_vb2_stop_streaming(struct vb2_queue *vq)
{
- struct via_camera *cam = q->priv_data;
+ struct via_camera *cam = vb2_get_drv_priv(vq);
+ struct via_buffer *buf, *tmp;
- videobuf_dma_unmap(&cam->platdev->dev, videobuf_to_dma(vb));
- videobuf_dma_free(videobuf_to_dma(vb));
- vb->state = VIDEOBUF_NEEDS_INIT;
+ pm_qos_remove_request(&cam->qos_request);
+ viacam_stop_engine(cam);
+
+ list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
}
-static const struct videobuf_queue_ops viacam_vb_ops = {
- .buf_setup = viacam_vb_buf_setup,
- .buf_prepare = viacam_vb_buf_prepare,
- .buf_queue = viacam_vb_buf_queue,
- .buf_release = viacam_vb_buf_release,
+static const struct vb2_ops viacam_vb2_ops = {
+ .queue_setup = viacam_vb2_queue_setup,
+ .buf_queue = viacam_vb2_queue,
+ .buf_prepare = viacam_vb2_prepare,
+ .start_streaming = viacam_vb2_start_streaming,
+ .stop_streaming = viacam_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* --------------------------------------------------------------------------*/
@@ -660,62 +687,43 @@ static const struct videobuf_queue_ops viacam_vb_ops = {
static int viacam_open(struct file *filp)
{
struct via_camera *cam = video_drvdata(filp);
+ int ret;
- filp->private_data = cam;
/*
* Note the new user. If this is the first one, we'll also
* need to power up the sensor.
*/
mutex_lock(&cam->lock);
- if (cam->users == 0) {
- int ret = viafb_request_dma();
+ ret = v4l2_fh_open(filp);
+ if (ret)
+ goto out;
+ if (v4l2_fh_is_singular_file(filp)) {
+ ret = viafb_request_dma();
if (ret) {
- mutex_unlock(&cam->lock);
- return ret;
+ v4l2_fh_release(filp);
+ goto out;
}
via_sensor_power_up(cam);
set_bit(CF_CONFIG_NEEDED, &cam->flags);
- /*
- * Hook into videobuf. Evidently this cannot fail.
- */
- videobuf_queue_sg_init(&cam->vb_queue, &viacam_vb_ops,
- &cam->platdev->dev, &cam->viadev->reg_lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), cam, NULL);
}
- (cam->users)++;
+out:
mutex_unlock(&cam->lock);
- return 0;
+ return ret;
}
static int viacam_release(struct file *filp)
{
struct via_camera *cam = video_drvdata(filp);
+ bool last_open;
mutex_lock(&cam->lock);
- (cam->users)--;
- /*
- * If the "owner" is closing, shut down any ongoing
- * operations.
- */
- if (filp == cam->owner) {
- videobuf_stop(&cam->vb_queue);
- /*
- * We don't hold the spinlock here, but, if release()
- * is being called by the owner, nobody else will
- * be changing the state. And an extra stop would
- * not hurt anyway.
- */
- if (cam->opstate != S_IDLE)
- viacam_stop_engine(cam);
- cam->owner = NULL;
- }
+ last_open = v4l2_fh_is_singular_file(filp);
+ _vb2_fop_release(filp, NULL);
/*
* Last one out needs to turn out the lights.
*/
- if (cam->users == 0) {
- videobuf_mmap_free(&cam->vb_queue);
+ if (last_open) {
via_sensor_power_down(cam);
viafb_release_dma();
}
@@ -723,77 +731,14 @@ static int viacam_release(struct file *filp)
return 0;
}
-/*
- * Read a frame from the device.
- */
-static ssize_t viacam_read(struct file *filp, char __user *buffer,
- size_t len, loff_t *pos)
-{
- struct via_camera *cam = video_drvdata(filp);
- int ret;
-
- mutex_lock(&cam->lock);
- /*
- * Enforce the V4l2 "only one owner gets to read data" rule.
- */
- if (cam->owner && cam->owner != filp) {
- ret = -EBUSY;
- goto out_unlock;
- }
- cam->owner = filp;
- /*
- * Do we need to configure the hardware?
- */
- if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
- ret = viacam_configure_sensor(cam);
- if (!ret)
- ret = viacam_config_controller(cam);
- if (ret)
- goto out_unlock;
- }
- /*
- * Fire up the capture engine, then have videobuf do
- * the heavy lifting. Someday it would be good to avoid
- * stopping and restarting the engine each time.
- */
- INIT_LIST_HEAD(&cam->buffer_queue);
- viacam_start_engine(cam);
- ret = videobuf_read_stream(&cam->vb_queue, buffer, len, pos, 0,
- filp->f_flags & O_NONBLOCK);
- viacam_stop_engine(cam);
- /* videobuf_stop() ?? */
-
-out_unlock:
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-
-static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
-{
- struct via_camera *cam = video_drvdata(filp);
-
- return videobuf_poll_stream(filp, &cam->vb_queue, pt);
-}
-
-
-static int viacam_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct via_camera *cam = video_drvdata(filp);
-
- return videobuf_mmap_mapper(&cam->vb_queue, vma);
-}
-
-
-
static const struct v4l2_file_operations viacam_fops = {
.owner = THIS_MODULE,
.open = viacam_open,
.release = viacam_release,
- .read = viacam_read,
- .poll = viacam_poll,
- .mmap = viacam_mmap,
- .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
};
/*----------------------------------------------------------------------------*/
@@ -811,7 +756,6 @@ static int viacam_enum_input(struct file *filp, void *priv,
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
- input->std = V4L2_STD_ALL; /* Not sure what should go here */
strscpy(input->name, "Camera", sizeof(input->name));
return 0;
}
@@ -829,17 +773,6 @@ static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
return 0;
}
-static int viacam_s_std(struct file *filp, void *priv, v4l2_std_id std)
-{
- return 0;
-}
-
-static int viacam_g_std(struct file *filp, void *priv, v4l2_std_id *std)
-{
- *std = V4L2_STD_NTSC_M;
- return 0;
-}
-
/*
* Video format stuff. Here is our default format until
* user space messes with things.
@@ -851,6 +784,7 @@ static const struct v4l2_pix_format viacam_def_pix_format = {
.field = V4L2_FIELD_NONE,
.bytesperline = VGA_WIDTH * 2,
.sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
};
static const u32 via_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
@@ -860,8 +794,6 @@ static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
{
if (fmt->index >= N_VIA_FMTS)
return -EINVAL;
- strscpy(fmt->description, via_formats[fmt->index].desc,
- sizeof(fmt->description));
fmt->pixelformat = via_formats[fmt->index].pixelformat;
return 0;
}
@@ -897,6 +829,10 @@ static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
userfmt->field = sensorfmt->field;
userfmt->bytesperline = 2 * userfmt->width;
userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
+ userfmt->colorspace = sensorfmt->colorspace;
+ userfmt->ycbcr_enc = sensorfmt->ycbcr_enc;
+ userfmt->quantization = sensorfmt->quantization;
+ userfmt->xfer_func = sensorfmt->xfer_func;
}
@@ -927,32 +863,26 @@ static int viacam_do_try_fmt(struct via_camera *cam,
static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
- struct via_camera *cam = priv;
+ struct via_camera *cam = video_drvdata(filp);
struct v4l2_format sfmt;
- int ret;
- mutex_lock(&cam->lock);
- ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
- mutex_unlock(&cam->lock);
- return ret;
+ return viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
}
static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
- struct via_camera *cam = priv;
+ struct via_camera *cam = video_drvdata(filp);
- mutex_lock(&cam->lock);
fmt->fmt.pix = cam->user_format;
- mutex_unlock(&cam->lock);
return 0;
}
static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
- struct via_camera *cam = priv;
+ struct via_camera *cam = video_drvdata(filp);
int ret;
struct v4l2_format sfmt;
struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);
@@ -961,18 +891,15 @@ static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
* Camera must be idle or we can't mess with the
* video setup.
*/
- mutex_lock(&cam->lock);
- if (cam->opstate != S_IDLE) {
- ret = -EBUSY;
- goto out;
- }
+ if (cam->opstate != S_IDLE)
+ return -EBUSY;
/*
* Let the sensor code look over and tweak the
* requested formatting.
*/
ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
if (ret)
- goto out;
+ return ret;
/*
* OK, let's commit to the new format.
*/
@@ -982,8 +909,6 @@ static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
ret = viacam_configure_sensor(cam);
if (!ret)
ret = viacam_config_controller(cam);
-out:
- mutex_unlock(&cam->lock);
return ret;
}
@@ -992,155 +917,40 @@ static int viacam_querycap(struct file *filp, void *priv,
{
strscpy(cap->driver, "via-camera", sizeof(cap->driver));
strscpy(cap->card, "via-camera", sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ strscpy(cap->bus_info, "platform:via-camera", sizeof(cap->bus_info));
return 0;
}
-/*
- * Streaming operations - pure videobuf stuff.
- */
-static int viacam_reqbufs(struct file *filp, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct via_camera *cam = priv;
-
- return videobuf_reqbufs(&cam->vb_queue, rb);
-}
-
-static int viacam_querybuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct via_camera *cam = priv;
-
- return videobuf_querybuf(&cam->vb_queue, buf);
-}
-
-static int viacam_qbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
-{
- struct via_camera *cam = priv;
-
- return videobuf_qbuf(&cam->vb_queue, buf);
-}
-
-static int viacam_dqbuf(struct file *filp, void *priv, struct v4l2_buffer *buf)
-{
- struct via_camera *cam = priv;
-
- return videobuf_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
-}
-
-static int viacam_streamon(struct file *filp, void *priv, enum v4l2_buf_type t)
-{
- struct via_camera *cam = priv;
- int ret = 0;
-
- if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- mutex_lock(&cam->lock);
- if (cam->opstate != S_IDLE) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * Enforce the V4l2 "only one owner gets to read data" rule.
- */
- if (cam->owner && cam->owner != filp) {
- ret = -EBUSY;
- goto out;
- }
- cam->owner = filp;
- /*
- * Configure things if need be.
- */
- if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
- ret = viacam_configure_sensor(cam);
- if (ret)
- goto out;
- ret = viacam_config_controller(cam);
- if (ret)
- goto out;
- }
- /*
- * If the CPU goes into C3, the DMA transfer gets corrupted and
- * users start filing unsightly bug reports. Put in a "latency"
- * requirement which will keep the CPU out of the deeper sleep
- * states.
- */
- pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
- /*
- * Fire things up.
- */
- INIT_LIST_HEAD(&cam->buffer_queue);
- ret = videobuf_streamon(&cam->vb_queue);
- if (!ret)
- viacam_start_engine(cam);
-out:
- mutex_unlock(&cam->lock);
- return ret;
-}
-
-static int viacam_streamoff(struct file *filp, void *priv, enum v4l2_buf_type t)
-{
- struct via_camera *cam = priv;
- int ret;
-
- if (t != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- mutex_lock(&cam->lock);
- if (cam->opstate != S_RUNNING) {
- ret = -EINVAL;
- goto out;
- }
- pm_qos_remove_request(&cam->qos_request);
- viacam_stop_engine(cam);
- /*
- * Videobuf will recycle all of the outstanding buffers, but
- * we should be sure we don't retain any references to
- * any of them.
- */
- ret = videobuf_streamoff(&cam->vb_queue);
- INIT_LIST_HEAD(&cam->buffer_queue);
-out:
- mutex_unlock(&cam->lock);
- return ret;
-}
-
/* G/S_PARM */
static int viacam_g_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct via_camera *cam = priv;
- int ret;
+ struct via_camera *cam = video_drvdata(filp);
- mutex_lock(&cam->lock);
- ret = v4l2_g_parm_cap(video_devdata(filp), cam->sensor, parm);
- mutex_unlock(&cam->lock);
- parm->parm.capture.readbuffers = cam->n_cap_bufs;
- return ret;
+ return v4l2_g_parm_cap(video_devdata(filp), cam->sensor, parm);
}
static int viacam_s_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct via_camera *cam = priv;
- int ret;
+ struct via_camera *cam = video_drvdata(filp);
- mutex_lock(&cam->lock);
- ret = v4l2_s_parm_cap(video_devdata(filp), cam->sensor, parm);
- mutex_unlock(&cam->lock);
- parm->parm.capture.readbuffers = cam->n_cap_bufs;
- return ret;
+ return v4l2_s_parm_cap(video_devdata(filp), cam->sensor, parm);
}
static int viacam_enum_framesizes(struct file *filp, void *priv,
struct v4l2_frmsizeenum *sizes)
{
+ unsigned int i;
+
if (sizes->index != 0)
return -EINVAL;
+ for (i = 0; i < N_VIA_FMTS; i++)
+ if (sizes->pixel_format == via_formats[i].pixelformat)
+ break;
+ if (i >= N_VIA_FMTS)
+ return -EINVAL;
sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
sizes->stepwise.min_width = QCIF_WIDTH;
sizes->stepwise.min_height = QCIF_HEIGHT;
@@ -1153,7 +963,7 @@ static int viacam_enum_framesizes(struct file *filp, void *priv,
static int viacam_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *interval)
{
- struct via_camera *cam = priv;
+ struct via_camera *cam = video_drvdata(filp);
struct v4l2_subdev_frame_interval_enum fie = {
.index = interval->index,
.code = cam->mbus_code,
@@ -1161,11 +971,18 @@ static int viacam_enum_frameintervals(struct file *filp, void *priv,
.height = cam->sensor_format.height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ unsigned int i;
int ret;
- mutex_lock(&cam->lock);
+ for (i = 0; i < N_VIA_FMTS; i++)
+ if (interval->pixel_format == via_formats[i].pixelformat)
+ break;
+ if (i >= N_VIA_FMTS)
+ return -EINVAL;
+ if (interval->width < QCIF_WIDTH || interval->width > VGA_WIDTH ||
+ interval->height < QCIF_HEIGHT || interval->height > VGA_HEIGHT)
+ return -EINVAL;
ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
- mutex_unlock(&cam->lock);
if (ret)
return ret;
interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
@@ -1173,29 +990,30 @@ static int viacam_enum_frameintervals(struct file *filp, void *priv,
return 0;
}
-
-
static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
.vidioc_enum_input = viacam_enum_input,
.vidioc_g_input = viacam_g_input,
.vidioc_s_input = viacam_s_input,
- .vidioc_s_std = viacam_s_std,
- .vidioc_g_std = viacam_g_std,
.vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = viacam_try_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = viacam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = viacam_s_fmt_vid_cap,
.vidioc_querycap = viacam_querycap,
- .vidioc_reqbufs = viacam_reqbufs,
- .vidioc_querybuf = viacam_querybuf,
- .vidioc_qbuf = viacam_qbuf,
- .vidioc_dqbuf = viacam_dqbuf,
- .vidioc_streamon = viacam_streamon,
- .vidioc_streamoff = viacam_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_parm = viacam_g_parm,
.vidioc_s_parm = viacam_s_parm,
.vidioc_enum_framesizes = viacam_enum_framesizes,
.vidioc_enum_frameintervals = viacam_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*----------------------------------------------------------------------------*/
@@ -1233,7 +1051,7 @@ static int viacam_resume(void *priv)
/*
* Make sure the sensor's power state is correct
*/
- if (cam->users > 0)
+ if (!list_empty(&cam->vdev.fh_list))
via_sensor_power_up(cam);
else
via_sensor_power_down(cam);
@@ -1267,10 +1085,11 @@ static struct viafb_pm_hooks viacam_pm_hooks = {
static const struct video_device viacam_v4l_template = {
.name = "via-camera",
.minor = -1,
- .tvnorms = V4L2_STD_NTSC_M,
.fops = &viacam_fops,
.ioctl_ops = &viacam_ioctl_ops,
.release = video_device_release_empty, /* Check this */
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
};
/*
@@ -1317,6 +1136,7 @@ static int viacam_probe(struct platform_device *pdev)
int ret;
struct i2c_adapter *sensor_adapter;
struct viafb_dev *viadev = pdev->dev.platform_data;
+ struct vb2_queue *vq;
struct i2c_board_info ov7670_info = {
.type = "ov7670",
.addr = 0x42 >> 1,
@@ -1360,8 +1180,6 @@ static int viacam_probe(struct platform_device *pdev)
via_cam_info = cam;
cam->platdev = pdev;
cam->viadev = viadev;
- cam->users = 0;
- cam->owner = NULL;
cam->opstate = S_IDLE;
cam->user_format = cam->sensor_format = viacam_def_pix_format;
mutex_init(&cam->lock);
@@ -1422,15 +1240,31 @@ static int viacam_probe(struct platform_device *pdev)
viacam_irq, IRQF_SHARED, "via-camera", cam);
if (ret)
goto out_power_down;
+
+ vq = &cam->vq;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ vq->drv_priv = cam;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->buf_struct_size = sizeof(struct via_buffer);
+ vq->dev = cam->v4l2_dev.dev;
+
+ vq->ops = &viacam_vb2_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->lock = &cam->lock;
+
+ ret = vb2_queue_init(vq);
/*
* Tell V4l2 that we exist.
*/
cam->vdev = viacam_v4l_template;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ cam->vdev.lock = &cam->lock;
+ cam->vdev.queue = vq;
+ video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
if (ret)
goto out_irq;
- video_set_drvdata(&cam->vdev, cam);
#ifdef CONFIG_PM
/*
@@ -1464,6 +1298,9 @@ static int viacam_remove(struct platform_device *pdev)
video_unregister_device(&cam->vdev);
v4l2_device_unregister(&cam->v4l2_dev);
+#ifdef CONFIG_PM
+ viafb_pm_unregister(&viacam_pm_hooks);
+#endif
free_irq(viadev->pdev->irq, cam);
via_sensor_power_release(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 01e7f09efc4e..3c93d9232c3c 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -29,11 +29,15 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
{ V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
{ V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
{ V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
{ V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
@@ -193,6 +197,28 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
rf->luma++;
rf->alpha = rf->cr + 1;
break;
+ case V4L2_PIX_FMT_BGRX32:
+ rf->cb = rf->luma + 1;
+ rf->cr = rf->cb + 2;
+ rf->luma += 2;
+ break;
+ case V4L2_PIX_FMT_BGRA32:
+ rf->alpha = rf->luma;
+ rf->cb = rf->luma + 1;
+ rf->cr = rf->cb + 2;
+ rf->luma += 2;
+ break;
+ case V4L2_PIX_FMT_RGBX32:
+ rf->cr = rf->luma;
+ rf->cb = rf->cr + 2;
+ rf->luma++;
+ break;
+ case V4L2_PIX_FMT_RGBA32:
+ rf->alpha = rf->luma + 3;
+ rf->cr = rf->luma;
+ rf->cb = rf->cr + 2;
+ rf->luma++;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 7e7c1e80f29f..0ee143ae0f6b 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -742,6 +742,9 @@ static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx,
return -EINVAL;
f->pixelformat = ctx->is_stateless ?
V4L2_PIX_FMT_FWHT_STATELESS : V4L2_PIX_FMT_FWHT;
+ if (!ctx->is_enc && !ctx->is_stateless)
+ f->flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
+ V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM;
}
return 0;
}
@@ -1661,19 +1664,22 @@ static int vicodec_start_streaming(struct vb2_queue *q,
kvfree(state->compressed_frame);
state->compressed_frame = new_comp_frame;
- if (info->components_num >= 3) {
- state->ref_frame.cb = state->ref_frame.luma + size;
- state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
- } else {
+ if (info->components_num < 3) {
state->ref_frame.cb = NULL;
state->ref_frame.cr = NULL;
+ state->ref_frame.alpha = NULL;
+ return 0;
}
+ state->ref_frame.cb = state->ref_frame.luma + size;
+ state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
+
if (info->components_num == 4)
state->ref_frame.alpha =
state->ref_frame.cr + size / chroma_div;
else
state->ref_frame.alpha = NULL;
+
return 0;
}
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 664855708fdf..1d56b91830ba 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -18,32 +18,6 @@
#define VIMC_CAP_DRV_NAME "vimc-capture"
-static const u32 vimc_cap_supported_pixfmt[] = {
- V4L2_PIX_FMT_BGR24,
- V4L2_PIX_FMT_RGB24,
- V4L2_PIX_FMT_ARGB32,
- V4L2_PIX_FMT_SBGGR8,
- V4L2_PIX_FMT_SGBRG8,
- V4L2_PIX_FMT_SGRBG8,
- V4L2_PIX_FMT_SRGGB8,
- V4L2_PIX_FMT_SBGGR10,
- V4L2_PIX_FMT_SGBRG10,
- V4L2_PIX_FMT_SGRBG10,
- V4L2_PIX_FMT_SRGGB10,
- V4L2_PIX_FMT_SBGGR10ALAW8,
- V4L2_PIX_FMT_SGBRG10ALAW8,
- V4L2_PIX_FMT_SGRBG10ALAW8,
- V4L2_PIX_FMT_SRGGB10ALAW8,
- V4L2_PIX_FMT_SBGGR10DPCM8,
- V4L2_PIX_FMT_SGBRG10DPCM8,
- V4L2_PIX_FMT_SGRBG10DPCM8,
- V4L2_PIX_FMT_SRGGB10DPCM8,
- V4L2_PIX_FMT_SBGGR12,
- V4L2_PIX_FMT_SGBRG12,
- V4L2_PIX_FMT_SGRBG12,
- V4L2_PIX_FMT_SRGGB12,
-};
-
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
@@ -117,25 +91,29 @@ static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *format = &f->fmt.pix;
+ const struct vimc_pix_map *vpix;
format->width = clamp_t(u32, format->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
format->height = clamp_t(u32, format->height, VIMC_FRAME_MIN_HEIGHT,
VIMC_FRAME_MAX_HEIGHT) & ~1;
- vimc_colorimetry_clamp(format);
+ /* Don't accept a pixelformat that is not on the table */
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ if (!vpix) {
+ format->pixelformat = fmt_default.pixelformat;
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ }
+ /* TODO: Add support for custom bytesperline values */
+ format->bytesperline = format->width * vpix->bpp;
+ format->sizeimage = format->bytesperline * format->height;
if (format->field == V4L2_FIELD_ANY)
format->field = fmt_default.field;
- /* TODO: Add support for custom bytesperline values */
-
- /* Don't accept a pixelformat that is not on the table */
- if (!v4l2_format_info(format->pixelformat))
- format->pixelformat = fmt_default.pixelformat;
+ vimc_colorimetry_clamp(format);
- return v4l2_fill_pixfmt(format, format->pixelformat,
- format->width, format->height);
+ return 0;
}
static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
@@ -174,31 +152,27 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(vimc_cap_supported_pixfmt))
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(f->index);
+
+ if (!vpix)
return -EINVAL;
- f->pixelformat = vimc_cap_supported_pixfmt[f->index];
+ f->pixelformat = vpix->pixelformat;
return 0;
}
-static bool vimc_cap_is_pixfmt_supported(u32 pixelformat)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(vimc_cap_supported_pixfmt); i++)
- if (vimc_cap_supported_pixfmt[i] == pixelformat)
- return true;
- return false;
-}
-
static int vimc_cap_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
+ const struct vimc_pix_map *vpix;
+
if (fsize->index)
return -EINVAL;
- if (!vimc_cap_is_pixfmt_supported(fsize->pixel_format))
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fsize->pixel_format);
+ if (!vpix)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
@@ -272,7 +246,6 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
return ret;
}
- vcap->stream.producer_pixfmt = vcap->format.pixelformat;
ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
if (ret) {
media_pipeline_stop(entity);
@@ -423,6 +396,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
{
struct v4l2_device *v4l2_dev = master_data;
struct vimc_platform_data *pdata = comp->platform_data;
+ const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
struct vb2_queue *q;
@@ -477,8 +451,10 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Set default frame format */
vcap->format = fmt_default;
- v4l2_fill_pixfmt(&vcap->format, vcap->format.pixelformat,
- vcap->format.width, vcap->format.height);
+ vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
+ vcap->format.bytesperline = vcap->format.width * vpix->bpp;
+ vcap->format.sizeimage = vcap->format.bytesperline *
+ vcap->format.height;
/* Fill the vimc_ent_device struct */
vcap->ved.ent = &vcap->vdev.entity;
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 03016f204d05..7e1ae0b12f1e 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -10,139 +10,192 @@
#include "vimc-common.h"
-static const __u32 vimc_mbus_list[] = {
- MEDIA_BUS_FMT_FIXED,
- MEDIA_BUS_FMT_RGB444_1X12,
- MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
- MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
- MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- MEDIA_BUS_FMT_RGB565_1X16,
- MEDIA_BUS_FMT_BGR565_2X8_BE,
- MEDIA_BUS_FMT_BGR565_2X8_LE,
- MEDIA_BUS_FMT_RGB565_2X8_BE,
- MEDIA_BUS_FMT_RGB565_2X8_LE,
- MEDIA_BUS_FMT_RGB666_1X18,
- MEDIA_BUS_FMT_RBG888_1X24,
- MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
- MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
- MEDIA_BUS_FMT_BGR888_1X24,
- MEDIA_BUS_FMT_GBR888_1X24,
- MEDIA_BUS_FMT_RGB888_1X24,
- MEDIA_BUS_FMT_RGB888_2X12_BE,
- MEDIA_BUS_FMT_RGB888_2X12_LE,
- MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
- MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
- MEDIA_BUS_FMT_ARGB8888_1X32,
- MEDIA_BUS_FMT_RGB888_1X32_PADHI,
- MEDIA_BUS_FMT_RGB101010_1X30,
- MEDIA_BUS_FMT_RGB121212_1X36,
- MEDIA_BUS_FMT_RGB161616_1X48,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_UYVY8_1_5X8,
- MEDIA_BUS_FMT_VYUY8_1_5X8,
- MEDIA_BUS_FMT_YUYV8_1_5X8,
- MEDIA_BUS_FMT_YVYU8_1_5X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
- MEDIA_BUS_FMT_UYVY10_2X10,
- MEDIA_BUS_FMT_VYUY10_2X10,
- MEDIA_BUS_FMT_YUYV10_2X10,
- MEDIA_BUS_FMT_YVYU10_2X10,
- MEDIA_BUS_FMT_Y12_1X12,
- MEDIA_BUS_FMT_UYVY12_2X12,
- MEDIA_BUS_FMT_VYUY12_2X12,
- MEDIA_BUS_FMT_YUYV12_2X12,
- MEDIA_BUS_FMT_YVYU12_2X12,
- MEDIA_BUS_FMT_UYVY8_1X16,
- MEDIA_BUS_FMT_VYUY8_1X16,
- MEDIA_BUS_FMT_YUYV8_1X16,
- MEDIA_BUS_FMT_YVYU8_1X16,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_UYVY10_1X20,
- MEDIA_BUS_FMT_VYUY10_1X20,
- MEDIA_BUS_FMT_YUYV10_1X20,
- MEDIA_BUS_FMT_YVYU10_1X20,
- MEDIA_BUS_FMT_VUY8_1X24,
- MEDIA_BUS_FMT_YUV8_1X24,
- MEDIA_BUS_FMT_UYYVYY8_0_5X24,
- MEDIA_BUS_FMT_UYVY12_1X24,
- MEDIA_BUS_FMT_VYUY12_1X24,
- MEDIA_BUS_FMT_YUYV12_1X24,
- MEDIA_BUS_FMT_YVYU12_1X24,
- MEDIA_BUS_FMT_YUV10_1X30,
- MEDIA_BUS_FMT_UYYVYY10_0_5X30,
- MEDIA_BUS_FMT_AYUV8_1X32,
- MEDIA_BUS_FMT_UYYVYY12_0_5X36,
- MEDIA_BUS_FMT_YUV12_1X36,
- MEDIA_BUS_FMT_YUV16_1X48,
- MEDIA_BUS_FMT_UYYVYY16_0_5X48,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8,
- MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
- MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
- MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
- MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR14_1X14,
- MEDIA_BUS_FMT_SGBRG14_1X14,
- MEDIA_BUS_FMT_SGRBG14_1X14,
- MEDIA_BUS_FMT_SRGGB14_1X14,
- MEDIA_BUS_FMT_SBGGR16_1X16,
- MEDIA_BUS_FMT_SGBRG16_1X16,
- MEDIA_BUS_FMT_SGRBG16_1X16,
- MEDIA_BUS_FMT_SRGGB16_1X16,
- MEDIA_BUS_FMT_JPEG_1X8,
- MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8,
- MEDIA_BUS_FMT_AHSV8888_1X32,
+/*
+ * NOTE: non-bayer formats need to come first (necessary for enum_mbus_code
+ * in the scaler)
+ */
+static const struct vimc_pix_map vimc_pix_map_list[] = {
+ /* TODO: add all missing formats */
+
+ /* RGB formats */
+ {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .bpp = 4,
+ .bayer = false,
+ },
+
+ /* Bayer formats */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .bpp = 2,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer a-law compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer DPCM compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .bpp = 2,
+ .bayer = true,
+ },
};
-/* Helper function to check mbus codes */
-bool vimc_mbus_code_supported(__u32 code)
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
+{
+ if (i >= ARRAY_SIZE(vimc_pix_map_list))
+ return NULL;
+
+ return &vimc_pix_map_list[i];
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
+
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(vimc_mbus_list); i++)
- if (code == vimc_mbus_list[i])
- return true;
- return false;
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].code == code)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_mbus_code_supported);
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
-/* Helper function to enumerate mbus codes */
-int vimc_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
{
- if (code->index >= ARRAY_SIZE(vimc_mbus_list))
- return -EINVAL;
+ unsigned int i;
- code->code = vimc_mbus_list[code->index];
- return 0;
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].pixelformat == pixelformat)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_enum_mbus_code);
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
/* Helper function to allocate and initialize pads */
struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
@@ -214,13 +267,15 @@ static int vimc_get_mbus_format(struct media_pad *pad,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
+ const struct vimc_pix_map *vpix;
struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
ved->vdev_get_format(ved, &vdev_fmt);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, 0);
+ vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
+ v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
} else {
return -EINVAL;
}
@@ -260,12 +315,8 @@ int vimc_link_validate(struct media_link *link)
/* The width, height and code must match. */
if (source_fmt.format.width != sink_fmt.format.width
|| source_fmt.format.height != sink_fmt.format.height
- || (source_fmt.format.code && sink_fmt.format.code &&
- source_fmt.format.code != sink_fmt.format.code)) {
- pr_err("vimc: format doesn't match in link %s->%s\n",
- link->source->entity->name, link->sink->entity->name);
+ || source_fmt.format.code != sink_fmt.format.code)
return -EPIPE;
- }
/*
* The field order must match, or the sink field order must be NONE
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 7b4d988b208b..9c2e0e216c6b 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -12,8 +12,6 @@
#include <media/media-device.h>
#include <media/v4l2-device.h>
-#include "vimc-streamer.h"
-
#define VIMC_PDEV_NAME "vimc"
/* VIMC-specific controls */
@@ -70,6 +68,23 @@ struct vimc_platform_data {
};
/**
+ * struct vimc_pix_map - maps media bus code with v4l2 pixel format
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ * @bpp: number of bytes each pixel occupies
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ *
+ * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
+ * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
+ */
+struct vimc_pix_map {
+ unsigned int code;
+ unsigned int bpp;
+ u32 pixelformat;
+ bool bayer;
+};
+
+/**
* struct vimc_ent_device - core struct that represents a node in the topology
*
* @ent: the pointer to struct media_entity for the node
@@ -90,7 +105,6 @@ struct vimc_platform_data {
struct vimc_ent_device {
struct media_entity *ent;
struct media_pad *pads;
- struct vimc_stream *stream;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -98,23 +112,6 @@ struct vimc_ent_device {
};
/**
- * vimc_mbus_code_supported - helper to check supported mbus codes
- *
- * Helper function to check if mbus code is enumerated by vimc_enum_mbus_code()
- */
-bool vimc_mbus_code_supported(__u32 code);
-
-/**
- * vimc_enum_mbus_code - enumerate mbus codes
- *
- * Helper function to be pluged in .enum_mbus_code from
- * struct v4l2_subdev_pad_ops.
- */
-int vimc_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code);
-
-/**
* vimc_pads_init - initialize pads
*
* @num_pads: number of pads to initialize
@@ -149,6 +146,27 @@ static inline void vimc_pads_cleanup(struct media_pad *pads)
int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
/**
+ * vimc_pix_map_by_index - get vimc_pix_map struct by its index
+ *
+ * @i: index of the vimc_pix_map struct in vimc_pix_map_list
+ */
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i);
+
+/**
+ * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
+
+/**
+ * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
+ *
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
+
+/**
* vimc_ent_sd_register - initialize and register a subdev node
*
 * @ved: the vimc_ent_device struct to be initialized
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 00598fbf3cba..b72b8385067b 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -16,11 +16,6 @@
#include "vimc-common.h"
#define VIMC_DEB_DRV_NAME "vimc-debayer"
-/* This module only supports transforming a bayer format
- * to V4L2_PIX_FMT_RGB24
- */
-#define VIMC_DEB_SRC_PIXFMT V4L2_PIX_FMT_RGB24
-#define VIMC_DEB_SRC_MBUS_FMT_DEFAULT MEDIA_BUS_FMT_RGB888_1X24
static unsigned int deb_mean_win_size = 3;
module_param(deb_mean_win_size, uint, 0000);
@@ -39,7 +34,6 @@ enum vimc_deb_rgb_colors {
};
struct vimc_deb_pix_map {
- u32 pixelformat;
u32 code;
enum vimc_deb_rgb_colors order[2][2];
};
@@ -69,73 +63,61 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
{
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
},
{
- .pixelformat = V4L2_PIX_FMT_SBGGR10,
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGBRG10,
.code = MEDIA_BUS_FMT_SGBRG10_1X10,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGRBG10,
.code = MEDIA_BUS_FMT_SGRBG10_1X10,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SRGGB10,
.code = MEDIA_BUS_FMT_SRGGB10_1X10,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
},
{
- .pixelformat = V4L2_PIX_FMT_SBGGR12,
.code = MEDIA_BUS_FMT_SBGGR12_1X12,
.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_RED } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGBRG12,
.code = MEDIA_BUS_FMT_SGBRG12_1X12,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
{ VIMC_DEB_RED, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SGRBG12,
.code = MEDIA_BUS_FMT_SGRBG12_1X12,
.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
{ VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
},
{
- .pixelformat = V4L2_PIX_FMT_SRGGB12,
.code = MEDIA_BUS_FMT_SRGGB12_1X12,
.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
{ VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
@@ -176,32 +158,41 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* For the sink pad we only support codes in the map_list */
- if (IS_SINK(code->pad)) {
+ /* We only support one format for source pads */
+ if (IS_SRC(code->pad)) {
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = vdeb->src_code;
+ } else {
if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
return -EINVAL;
code->code = vimc_deb_pix_map_list[code->index].code;
- return 0;
}
- return vimc_enum_mbus_code(sd, cfg, code);
+ return 0;
}
static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
if (fse->index)
return -EINVAL;
- /* For the sink pad we only support codes in the map_list */
if (IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
if (!vpix)
return -EINVAL;
+ } else if (fse->code != vdeb->src_code) {
+ return -EINVAL;
}
fse->min_width = VIMC_FRAME_MIN_WIDTH;
@@ -257,12 +248,9 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
- if (!vimc_mbus_code_supported(fmt->format.code))
- fmt->format.code = sink_fmt_default.code;
-
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vdeb->ved.stream)
+ if (vdeb->src_frame)
return -EBUSY;
sink_fmt = &vdeb->sink_fmt;
@@ -272,11 +260,11 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/*
* Do not change the format of the source pad,
- * it is propagated from the sink (except for the code)
+ * it is propagated from the sink
*/
if (IS_SRC(fmt->pad)) {
- vdeb->src_code = fmt->format.code;
fmt->format = *sink_fmt;
+ /* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
} else {
/* Set the new format in the sink pad */
@@ -308,7 +296,7 @@ static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
.set_fmt = vimc_deb_set_fmt,
};
-static void vimc_deb_set_rgb_pix_rgb24(struct vimc_deb_device *vdeb,
+static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
unsigned int lin,
unsigned int col,
unsigned int rgb[3])
@@ -325,35 +313,25 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (enable) {
- u32 src_pixelformat = vdeb->ved.stream->producer_pixfmt;
- const struct v4l2_format_info *pix_info;
+ const struct vimc_pix_map *vpix;
unsigned int frame_size;
- /* We only support translating bayer to RGB24 */
- if (src_pixelformat != V4L2_PIX_FMT_RGB24) {
- dev_err(vdeb->dev,
- "translating to pixfmt (0x%08x) is not supported\n",
- src_pixelformat);
- return -EINVAL;
- }
+ if (vdeb->src_frame)
+ return 0;
+
+ /* Calculate the frame size of the source pad */
+ vpix = vimc_pix_map_by_code(vdeb->src_code);
+ frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
+ vpix->bpp;
+
+ /* Save the bytes per pixel of the sink */
+ vpix = vimc_pix_map_by_code(vdeb->sink_fmt.code);
+ vdeb->sink_bpp = vpix->bpp;
/* Get the corresponding pixel map from the table */
vdeb->sink_pix_map =
vimc_deb_pix_map_by_code(vdeb->sink_fmt.code);
- /* Request bayer format from the pipeline for the sink pad */
- vdeb->ved.stream->producer_pixfmt =
- vdeb->sink_pix_map->pixelformat;
-
- /* Calculate frame_size of the source */
- pix_info = v4l2_format_info(src_pixelformat);
- frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
- pix_info->bpp[0];
-
- /* Get bpp from the sink */
- pix_info = v4l2_format_info(vdeb->sink_pix_map->pixelformat);
- vdeb->sink_bpp = pix_info->bpp[0];
-
/*
* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
@@ -554,14 +532,14 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
- vdeb->src_code = VIMC_DEB_SRC_MBUS_FMT_DEFAULT;
/*
* TODO: Add support for more output formats, we only support
- * RGB24 for now.
+ * RGB888 for now
* NOTE: the src format is always the same as the sink, except
* for the code
*/
- vdeb->set_rgb_src = vimc_deb_set_rgb_pix_rgb24;
+ vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
+ vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
return 0;
}
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index c7123a45c55b..49ab8d9dd9c9 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -25,12 +25,6 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
-static const u32 vimc_sca_supported_pixfmt[] = {
- V4L2_PIX_FMT_BGR24,
- V4L2_PIX_FMT_RGB24,
- V4L2_PIX_FMT_ARGB32,
-};
-
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
@@ -53,16 +47,6 @@ static const struct v4l2_mbus_framefmt sink_fmt_default = {
.colorspace = V4L2_COLORSPACE_DEFAULT,
};
-static bool vimc_sca_is_pixfmt_supported(u32 pixelformat)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(vimc_sca_supported_pixfmt); i++)
- if (vimc_sca_supported_pixfmt[i] == pixelformat)
- return true;
- return false;
-}
-
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg)
{
@@ -82,13 +66,35 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
return 0;
}
+static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+
+ /* We don't support bayer format */
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
+ code->code = vpix->code;
+
+ return 0;
+}
+
static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ const struct vimc_pix_map *vpix;
+
if (fse->index)
return -EINVAL;
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
@@ -125,6 +131,13 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix || vpix->bayer)
+ fmt->code = sink_fmt_default.code;
+
fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
@@ -143,12 +156,9 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
- if (!vimc_mbus_code_supported(fmt->format.code))
- fmt->format.code = sink_fmt_default.code;
-
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vsca->ved.stream)
+ if (vsca->src_frame)
return -EBUSY;
sink_fmt = &vsca->sink_fmt;
@@ -188,7 +198,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
.init_cfg = vimc_sca_init_cfg,
- .enum_mbus_code = vimc_enum_mbus_code,
+ .enum_mbus_code = vimc_sca_enum_mbus_code,
.enum_frame_size = vimc_sca_enum_frame_size,
.get_fmt = vimc_sca_get_fmt,
.set_fmt = vimc_sca_set_fmt,
@@ -199,19 +209,15 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
if (enable) {
- u32 pixelformat = vsca->ved.stream->producer_pixfmt;
- const struct v4l2_format_info *pix_info;
+ const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (!vimc_sca_is_pixfmt_supported(pixelformat)) {
- dev_err(vsca->dev, "pixfmt (0x%08x) is not supported\n",
- pixelformat);
- return -EINVAL;
- }
+ if (vsca->src_frame)
+ return 0;
/* Save the bytes per pixel of the sink */
- pix_info = v4l2_format_info(pixelformat);
- vsca->bpp = pix_info->bpp[0];
+ vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vsca->bpp = vpix->bpp;
/* Calculate the width in bytes of the src frame */
vsca->src_line_size = vsca->sink_fmt.width *
@@ -324,7 +330,7 @@ static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
ved);
/* If the stream in this node is not active, just return */
- if (!ved->stream)
+ if (!vsca->src_frame)
return ERR_PTR(-EINVAL);
vimc_sca_fill_src_frame(vsca, sink_frame);
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 51359472eef2..6c53b9fc1617 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -55,13 +55,34 @@ static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
return 0;
}
+static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+
+ if (!vpix)
+ return -EINVAL;
+
+ code->code = vpix->code;
+
+ return 0;
+}
+
static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
+ const struct vimc_pix_map *vpix;
+
if (fse->index)
return -EINVAL;
+ /* Only accept codes from the pix map table */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix)
+ return -EINVAL;
+
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
@@ -86,17 +107,14 @@ static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
{
- u32 pixelformat = vsen->ved.stream->producer_pixfmt;
- const struct v4l2_format_info *pix_info;
-
- pix_info = v4l2_format_info(pixelformat);
+ const struct vimc_pix_map *vpix =
+ vimc_pix_map_by_code(vsen->mbus_format.code);
tpg_reset_source(&vsen->tpg, vsen->mbus_format.width,
vsen->mbus_format.height, vsen->mbus_format.field);
- tpg_s_bytesperline(&vsen->tpg, 0,
- vsen->mbus_format.width * pix_info->bpp[0]);
+ tpg_s_bytesperline(&vsen->tpg, 0, vsen->mbus_format.width * vpix->bpp);
tpg_s_buf_height(&vsen->tpg, vsen->mbus_format.height);
- tpg_s_fourcc(&vsen->tpg, pixelformat);
+ tpg_s_fourcc(&vsen->tpg, vpix->pixelformat);
/* TODO: add support for V4L2_FIELD_ALTERNATE */
tpg_s_field(&vsen->tpg, vsen->mbus_format.field, false);
tpg_s_colorspace(&vsen->tpg, vsen->mbus_format.colorspace);
@@ -107,6 +125,13 @@ static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept codes from the pix map table */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
VIMC_FRAME_MAX_WIDTH) & ~1;
fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
@@ -126,12 +151,9 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
- if (!vimc_mbus_code_supported(fmt->format.code))
- fmt->format.code = fmt_default.code;
-
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
/* Do not change the format while stream is on */
- if (vsen->ved.stream)
+ if (vsen->frame)
return -EBUSY;
mf = &vsen->mbus_format;
@@ -161,7 +183,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
.init_cfg = vimc_sen_init_cfg,
- .enum_mbus_code = vimc_enum_mbus_code,
+ .enum_mbus_code = vimc_sen_enum_mbus_code,
.enum_frame_size = vimc_sen_enum_frame_size,
.get_fmt = vimc_sen_get_fmt,
.set_fmt = vimc_sen_set_fmt,
@@ -183,13 +205,16 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
container_of(sd, struct vimc_sen_device, sd);
if (enable) {
- u32 pixelformat = vsen->ved.stream->producer_pixfmt;
- const struct v4l2_format_info *pix_info;
+ const struct vimc_pix_map *vpix;
unsigned int frame_size;
+ if (vsen->kthread_sen)
+ /* tpg is already executing */
+ return 0;
+
/* Calculate the frame size */
- pix_info = v4l2_format_info(pixelformat);
- frame_size = vsen->mbus_format.width * pix_info->bpp[0] *
+ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
+ frame_size = vsen->mbus_format.width * vpix->bpp *
vsen->mbus_format.height;
/*
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 3b3f36357a0e..048d770e498b 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -20,6 +20,8 @@
*
* Helper function that returns the media entity containing the source pad
* linked with the first sink pad from the given media entity pad list.
+ *
+ * Return: The entity that owns the source pad, or NULL if it wasn't found.
*/
static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
{
@@ -35,7 +37,7 @@ static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
return NULL;
}
-/*
+/**
* vimc_streamer_pipeline_terminate - Disable stream in all ved in stream
*
* @stream: the pointer to the stream structure with the pipeline to be
@@ -52,7 +54,6 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
while (stream->pipe_size) {
stream->pipe_size--;
ved = stream->ved_pipeline[stream->pipe_size];
- ved->stream = NULL;
stream->ved_pipeline[stream->pipe_size] = NULL;
if (!is_media_entity_v4l2_subdev(ved->ent))
@@ -63,15 +64,18 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
}
}
-/*
- * vimc_streamer_pipeline_init - initializes the stream structure
+/**
+ * vimc_streamer_pipeline_init - Initializes the stream structure
*
* @stream: the pointer to the stream structure to be initialized
* @ved: the pointer to the vimc entity initializing the stream
*
* Initializes the stream structure. Walks through the entity graph to
* construct the pipeline used later on the streamer thread.
- * Calls s_stream to enable stream in all entities of the pipeline.
+ * Calls vimc_streamer_s_stream() to enable stream in all entities of
+ * the pipeline.
+ *
+ * Return: 0 if success, error code otherwise.
*/
static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
struct vimc_ent_device *ved)
@@ -88,7 +92,6 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
return -EINVAL;
}
stream->ved_pipeline[stream->pipe_size++] = ved;
- ved->stream = stream;
if (is_media_entity_v4l2_subdev(ved->ent)) {
sd = media_entity_to_v4l2_subdev(ved->ent);
@@ -122,13 +125,17 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
return -EINVAL;
}
-/*
- * vimc_streamer_thread - process frames through the pipeline
+/**
+ * vimc_streamer_thread - Process frames through the pipeline
*
* @data: vimc_stream struct of the current stream
*
 * From the source to the sink, gets a frame from each subdevice and sends
 * it to the next one in the pipeline at a fixed framerate.
+ *
+ * Return:
+ * Always zero (created as ``int`` instead of ``void`` to comply with
+ * kthread API).
*/
static int vimc_streamer_thread(void *data)
{
@@ -157,19 +164,20 @@ static int vimc_streamer_thread(void *data)
return 0;
}
-/*
- * vimc_streamer_s_stream - start/stop the streaming on the media pipeline
+/**
+ * vimc_streamer_s_stream - Start/stop the streaming on the media pipeline
*
* @stream: the pointer to the stream structure of the current stream
* @ved: pointer to the vimc entity of the entity of the stream
* @enable: flag to determine if stream should start/stop
*
- * When starting, check if there is no stream->kthread allocated. This should
- * indicate that a stream is already running. Then, it initializes
- * the pipeline, creates and runs a kthread to consume buffers through the
- * pipeline.
- * When stopping, analogously check if there is a stream running, stop
- * the thread and terminates the pipeline.
+ * When starting, check that no ``stream->kthread`` is allocated yet; an
+ * existing kthread indicates a stream is already running. Then initialize
+ * the pipeline and create and run a kthread to consume buffers through
+ * the pipeline. When stopping, analogously check that a stream is running,
+ * stop the thread and terminate the pipeline.
+ *
+ * Return: 0 if success, error code otherwise.
*/
int vimc_streamer_s_stream(struct vimc_stream *stream,
struct vimc_ent_device *ved,
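
The kernel-doc above ties the start/stop behaviour to ``stream->kthread``: an already-allocated kthread means a stream is running, otherwise the pipeline is built and a consumer thread is started. The sketch below illustrates only that guard pattern under those assumptions; it is not the vimc implementation, and every demo_* name is invented.

/*
 * Rough sketch only (not the vimc code): the kthread pointer doubles as
 * the "stream already running" flag described in the kernel-doc above.
 */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

struct demo_stream {
	struct task_struct *kthread;
};

static int demo_thread(void *data)
{
	while (!kthread_should_stop())
		msleep(16);	/* per-frame processing elided, ~60 fps pacing */
	return 0;
}

static int demo_pipeline_init(struct demo_stream *stream)
{
	return 0;	/* walk the media graph and fill the pipeline; elided */
}

static void demo_pipeline_terminate(struct demo_stream *stream)
{
	/* disable every entity added by demo_pipeline_init(); elided */
}

static int demo_s_stream(struct demo_stream *stream, int enable)
{
	if (enable) {
		struct task_struct *task;
		int ret;

		if (stream->kthread)	/* a stream is already running */
			return 0;

		ret = demo_pipeline_init(stream);
		if (ret)
			return ret;

		task = kthread_run(demo_thread, stream, "demo-streamer");
		if (IS_ERR(task)) {
			demo_pipeline_terminate(stream);
			return PTR_ERR(task);
		}
		stream->kthread = task;
	} else {
		if (!stream->kthread)
			return 0;

		kthread_stop(stream->kthread);
		stream->kthread = NULL;
		demo_pipeline_terminate(stream);
	}

	return 0;
}
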
diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
index 2b3667408794..fe3c51f15fad 100644
--- a/drivers/media/platform/vimc/vimc-streamer.h
+++ b/drivers/media/platform/vimc/vimc-streamer.h
@@ -25,11 +25,6 @@
* processed in the pipeline.
* @pipe_size: size of @ved_pipeline
* @kthread: thread that generates the frames of the stream.
- * @producer_pixfmt: the pixel format requested from the pipeline. This must
- * be set just before calling vimc_streamer_s_stream(ent, 1). This value is
- * propagated up to the source of the base image (usually a sensor node) and
- * can be modified by entities during s_stream callback to request a different
- * format from rest of the pipeline.
*
 * When the user calls stream_on in a video device, struct vimc_stream is
 * used to keep track of all entities and subdevices that generate and
@@ -40,17 +35,8 @@ struct vimc_stream {
struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
unsigned int pipe_size;
struct task_struct *kthread;
- u32 producer_pixfmt;
};
-/**
- * vimc_streamer_s_streamer - start/stop the stream
- *
- * @stream: the pointer to the stream to start or stop
- * @ved: The last entity of the streamer pipeline
- * @enable: any non-zero number start the stream, zero stop
- *
- */
int vimc_streamer_s_stream(struct vimc_stream *stream,
struct vimc_ent_device *ved,
int enable);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index bc2a176937a4..53315c8dd2bb 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -792,7 +792,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (no_error_inj && ccs_cap == -1)
ccs_cap = 7;
- /* if ccs_cap == -1, then the use can select it using controls */
+ /* if ccs_cap == -1, then the user can select it using controls */
if (ccs_cap != -1) {
dev->has_crop_cap = ccs_cap & 1;
dev->has_compose_cap = ccs_cap & 2;
@@ -807,7 +807,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (no_error_inj && ccs_out == -1)
ccs_out = 7;
- /* if ccs_out == -1, then the use can select it using controls */
+ /* if ccs_out == -1, then the user can select it using controls */
if (ccs_out != -1) {
dev->has_crop_out = ccs_out & 1;
dev->has_compose_out = ccs_out & 2;
@@ -1099,6 +1099,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
+ snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
+ "vivid-%03d-vid-cap", inst);
/* initialize vid_cap queue */
q = &dev->vb_vid_cap_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
@@ -1122,6 +1124,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
if (dev->has_vid_out) {
+ snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
+ "vivid-%03d-vid-out", inst);
/* initialize vid_out queue */
q = &dev->vb_vid_out_q;
q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
@@ -1265,8 +1269,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
- snprintf(vfd->name, sizeof(vfd->name),
- "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1312,8 +1314,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vid_out) {
vfd = &dev->vid_out_dev;
- snprintf(vfd->name, sizeof(vfd->name),
- "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 3e916c8befb7..cb19a9a73092 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1473,7 +1473,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_handler_init(hdl_vid_cap, 55);
v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_vid_out, 26);
- if (!no_error_inj || dev->has_fb)
+ if (!no_error_inj || dev->has_fb || dev->num_hdmi_outputs)
v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_vbi_cap, 21);
v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL);
@@ -1613,6 +1613,8 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
}
if (dev->num_hdmi_inputs) {
+ s64 hdmi_input_mask = GENMASK(dev->num_hdmi_inputs - 1, 0);
+
dev->ctrl_dv_timings_signal_mode = v4l2_ctrl_new_custom(hdl_vid_cap,
&vivid_ctrl_dv_timings_signal_mode, NULL);
@@ -1633,12 +1635,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
dev->ctrl_rx_power_present = v4l2_ctrl_new_std(hdl_vid_cap,
- NULL, V4L2_CID_DV_RX_POWER_PRESENT, 0,
- (2 << (dev->num_hdmi_inputs - 1)) - 1, 0,
- (2 << (dev->num_hdmi_inputs - 1)) - 1);
+ NULL, V4L2_CID_DV_RX_POWER_PRESENT, 0, hdmi_input_mask,
+ 0, hdmi_input_mask);
}
if (dev->num_hdmi_outputs) {
+ s64 hdmi_output_mask = GENMASK(dev->num_hdmi_outputs - 1, 0);
+
/*
* We aren't doing anything with this at the moment, but
 * HDMI outputs typically have these controls.
@@ -1652,17 +1655,14 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->ctrl_display_present = v4l2_ctrl_new_custom(hdl_vid_out,
&vivid_ctrl_display_present, NULL);
dev->ctrl_tx_hotplug = v4l2_ctrl_new_std(hdl_vid_out,
- NULL, V4L2_CID_DV_TX_HOTPLUG, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1);
+ NULL, V4L2_CID_DV_TX_HOTPLUG, 0, hdmi_output_mask,
+ 0, hdmi_output_mask);
dev->ctrl_tx_rxsense = v4l2_ctrl_new_std(hdl_vid_out,
- NULL, V4L2_CID_DV_TX_RXSENSE, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1);
+ NULL, V4L2_CID_DV_TX_RXSENSE, 0, hdmi_output_mask,
+ 0, hdmi_output_mask);
dev->ctrl_tx_edid_present = v4l2_ctrl_new_std(hdl_vid_out,
- NULL, V4L2_CID_DV_TX_EDID_PRESENT, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1, 0,
- (2 << (dev->num_hdmi_outputs - 1)) - 1);
+ NULL, V4L2_CID_DV_TX_EDID_PRESENT, 0, hdmi_output_mask,
+ 0, hdmi_output_mask);
}
if ((dev->has_vid_cap && dev->has_vid_out) ||
(dev->has_vbi_cap && dev->has_vbi_out))
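
The two hunks above replace the open-coded ``(2 << (n - 1)) - 1`` expressions with ``hdmi_input_mask``/``hdmi_output_mask`` locals built by GENMASK(n - 1, 0). The stand-alone check below shows the two forms produce the same n-bit mask; it assumes a 64-bit unsigned long and uses a simplified GENMASK rather than the <linux/bits.h> definition, so it is only an illustration, not kernel code.

#include <stdio.h>

/* simplified GENMASK(h, l): bits h..l set, assuming 64-bit unsigned long */
#define DEMO_GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	for (unsigned int n = 1; n <= 4; n++) {
		unsigned long old_mask = (2UL << (n - 1)) - 1;
		unsigned long new_mask = DEMO_GENMASK(n - 1, 0);

		printf("n=%u old=%#lx new=%#lx\n", n, old_mask, new_mask);
	}
	return 0;
}
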
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 6cf495a7d5cc..003319d7816d 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -232,8 +232,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
return vbuf;
}
-static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
- struct vivid_buffer *vid_cap_buf)
+static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p,
+ u8 *vcapbuf, struct vivid_buffer *vid_cap_buf)
{
bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
struct tpg_data *tpg = &dev->tpg;
@@ -658,6 +658,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
u64 f_period;
f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
+ if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0))
+ dev->timeperframe_vid_cap.denominator = 1;
do_div(f_period, dev->timeperframe_vid_cap.denominator);
if (dev->field_cap == V4L2_FIELD_ALTERNATE)
f_period >>= 1;
@@ -670,7 +672,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
dev->cap_frame_period = f_period;
}
-static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
+static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
+ int dropped_bufs)
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
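
vivid_cap_update_frame_period() derives the capture frame period from the timeperframe fraction, and the hunk above guards the do_div() against a zero denominator. The sketch below reproduces that computation with example numbers and plain C division; it is an illustration, not the driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t numerator = 1, denominator = 30;	/* 30 frames per second */
	uint64_t f_period;

	if (denominator == 0)		/* mirrors the WARN_ON() fixup above */
		denominator = 1;

	f_period = (uint64_t)numerator * 1000000000ULL / denominator;
	printf("frame period: %llu ns\n", (unsigned long long)f_period);
	return 0;
}
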
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 1f33eb1a76b6..8665dfd25eb4 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -262,21 +262,66 @@ struct vivid_fmt vivid_formats[] = {
.can_do_overlay = true,
},
{
- .fourcc = V4L2_PIX_FMT_RGB444, /* xxxxrrrr ggggbbbb */
+ .fourcc = V4L2_PIX_FMT_RGB444, /* ggggbbbb xxxxrrrr */
.vdownsampling = { 1 },
.bit_depth = { 16 },
.planes = 1,
.buffers = 1,
},
{
- .fourcc = V4L2_PIX_FMT_XRGB444, /* xxxxrrrr ggggbbbb */
+ .fourcc = V4L2_PIX_FMT_XRGB444, /* ggggbbbb xxxxrrrr */
.vdownsampling = { 1 },
.bit_depth = { 16 },
.planes = 1,
.buffers = 1,
},
{
- .fourcc = V4L2_PIX_FMT_ARGB444, /* aaaarrrr ggggbbbb */
+ .fourcc = V4L2_PIX_FMT_ARGB444, /* ggggbbbb aaaarrrr */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x00f0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGBX444, /* bbbbxxxx rrrrgggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGBA444, /* bbbbaaaa rrrrgggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x00f0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR444, /* ggggrrrr xxxxbbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR444, /* ggggrrrr aaaabbbb */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x00f0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRX444, /* rrrrxxxx bbbbgggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRA444, /* rrrraaaa bbbbgggg */
.vdownsampling = { 1 },
.bit_depth = { 16 },
.planes = 1,
@@ -309,6 +354,57 @@ struct vivid_fmt vivid_formats[] = {
.alpha_mask = 0x8000,
},
{
+ .fourcc = V4L2_PIX_FMT_RGBX555, /* ggbbbbbx rrrrrggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGBA555, /* ggbbbbba rrrrrggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR555, /* gggrrrrr xbbbbbgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR555, /* gggrrrrr abbbbbgg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRX555, /* ggrrrrrx bbbbbggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRA555, /* ggrrrrra bbbbbggg */
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .planes = 1,
+ .buffers = 1,
+ .can_do_overlay = true,
+ .alpha_mask = 0x8000,
+ },
+ {
.fourcc = V4L2_PIX_FMT_RGB555X, /* xrrrrrgg gggbbbbb */
.vdownsampling = { 1 },
.bit_depth = { 16 },
@@ -396,6 +492,36 @@ struct vivid_fmt vivid_formats[] = {
.alpha_mask = 0xff000000,
},
{
+ .fourcc = V4L2_PIX_FMT_RGBX32, /* rgbx */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRX32, /* xbgr */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGBA32, /* rgba */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGRA32, /* abgr */
+ .vdownsampling = { 1 },
+ .bit_depth = { 32 },
+ .planes = 1,
+ .buffers = 1,
+ .alpha_mask = 0xff000000,
+ },
+ {
.fourcc = V4L2_PIX_FMT_SBGGR8, /* Bayer BG/GR */
.vdownsampling = { 1 },
.bit_depth = { 8 },
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 104b6f514536..d7b43037e500 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
/* Get a default body for our list. */
dl->body0 = vsp1_dl_body_get(dlm->pool);
- if (!dl->body0)
+ if (!dl->body0) {
+ kfree(dl);
return NULL;
+ }
header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
index 8b01e99acd20..30d751f2cccf 100644
--- a/drivers/media/platform/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -426,8 +426,6 @@ static int histo_v4l2_querycap(struct file *file, void *fh,
| V4L2_CAP_VIDEO_CAPTURE_MPLANE
| V4L2_CAP_VIDEO_OUTPUT_MPLANE
| V4L2_CAP_META_CAPTURE;
- cap->device_caps = V4L2_CAP_META_CAPTURE
- | V4L2_CAP_STREAMING;
strscpy(cap->driver, "vsp1", sizeof(cap->driver));
strscpy(cap->card, histo->video.name, sizeof(cap->card));
@@ -556,6 +554,7 @@ int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
histo->video.vfl_type = VFL_TYPE_GRABBER;
histo->video.release = video_device_release_empty;
histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
+ histo->video.device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
video_set_drvdata(&histo->video, histo);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 1bb1d39c60d9..5c67ff92d97a 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -15,8 +15,8 @@
*/
#define VI6_CMD(n) (0x0000 + (n) * 4)
-#define VI6_CMD_UPDHDR (1 << 4)
-#define VI6_CMD_STRCMD (1 << 0)
+#define VI6_CMD_UPDHDR BIT(4)
+#define VI6_CMD_STRCMD BIT(0)
#define VI6_CLK_DCSWT 0x0018
#define VI6_CLK_DCSWT_CSTPW_MASK (0xff << 8)
@@ -25,29 +25,29 @@
#define VI6_CLK_DCSWT_CSTRW_SHIFT 0
#define VI6_SRESET 0x0028
-#define VI6_SRESET_SRTS(n) (1 << (n))
+#define VI6_SRESET_SRTS(n) BIT(n)
#define VI6_STATUS 0x0038
-#define VI6_STATUS_FLD_STD(n) (1 << ((n) + 28))
-#define VI6_STATUS_SYS_ACT(n) (1 << ((n) + 8))
+#define VI6_STATUS_FLD_STD(n) BIT((n) + 28)
+#define VI6_STATUS_SYS_ACT(n) BIT((n) + 8)
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
-#define VI6_WFP_IRQ_ENB_DFEE (1 << 1)
-#define VI6_WFP_IRQ_ENB_FREE (1 << 0)
+#define VI6_WFP_IRQ_ENB_DFEE BIT(1)
+#define VI6_WFP_IRQ_ENB_FREE BIT(0)
#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
-#define VI6_WFP_IRQ_STA_DFE (1 << 1)
-#define VI6_WFP_IRQ_STA_FRE (1 << 0)
+#define VI6_WFP_IRQ_STA_DFE BIT(1)
+#define VI6_WFP_IRQ_STA_FRE BIT(0)
#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
-#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
-#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
-#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
+#define VI6_DISP_IRQ_ENB_DSTE BIT(8)
+#define VI6_DISP_IRQ_ENB_MAEE BIT(5)
+#define VI6_DISP_IRQ_ENB_LNEE(n) BIT(n)
#define VI6_DISP_IRQ_STA(n) (0x007c + (n) * 60)
-#define VI6_DISP_IRQ_STA_DST (1 << 8)
-#define VI6_DISP_IRQ_STA_MAE (1 << 5)
-#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
+#define VI6_DISP_IRQ_STA_DST BIT(8)
+#define VI6_DISP_IRQ_STA_MAE BIT(5)
+#define VI6_DISP_IRQ_STA_LNE(n) BIT(n)
#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
@@ -59,32 +59,32 @@
#define VI6_DL_CTRL 0x0100
#define VI6_DL_CTRL_AR_WAIT_MASK (0xffff << 16)
#define VI6_DL_CTRL_AR_WAIT_SHIFT 16
-#define VI6_DL_CTRL_DC2 (1 << 12)
-#define VI6_DL_CTRL_DC1 (1 << 8)
-#define VI6_DL_CTRL_DC0 (1 << 4)
-#define VI6_DL_CTRL_CFM0 (1 << 2)
-#define VI6_DL_CTRL_NH0 (1 << 1)
-#define VI6_DL_CTRL_DLE (1 << 0)
+#define VI6_DL_CTRL_DC2 BIT(12)
+#define VI6_DL_CTRL_DC1 BIT(8)
+#define VI6_DL_CTRL_DC0 BIT(4)
+#define VI6_DL_CTRL_CFM0 BIT(2)
+#define VI6_DL_CTRL_NH0 BIT(1)
+#define VI6_DL_CTRL_DLE BIT(0)
#define VI6_DL_HDR_ADDR(n) (0x0104 + (n) * 4)
#define VI6_DL_SWAP 0x0114
-#define VI6_DL_SWAP_LWS (1 << 2)
-#define VI6_DL_SWAP_WDS (1 << 1)
-#define VI6_DL_SWAP_BTS (1 << 0)
+#define VI6_DL_SWAP_LWS BIT(2)
+#define VI6_DL_SWAP_WDS BIT(1)
+#define VI6_DL_SWAP_BTS BIT(0)
#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)
-#define VI6_DL_EXT_CTRL_NWE (1 << 16)
+#define VI6_DL_EXT_CTRL_NWE BIT(16)
#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
-#define VI6_DL_EXT_CTRL_DLPRI (1 << 5)
-#define VI6_DL_EXT_CTRL_EXPRI (1 << 4)
-#define VI6_DL_EXT_CTRL_EXT (1 << 0)
+#define VI6_DL_EXT_CTRL_DLPRI BIT(5)
+#define VI6_DL_EXT_CTRL_EXPRI BIT(4)
+#define VI6_DL_EXT_CTRL_EXT BIT(0)
#define VI6_DL_EXT_AUTOFLD_INT BIT(0)
#define VI6_DL_BODY_SIZE 0x0120
-#define VI6_DL_BODY_SIZE_UPD (1 << 24)
+#define VI6_DL_BODY_SIZE_UPD BIT(24)
#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
#define VI6_DL_BODY_SIZE_BS_SHIFT 0
@@ -107,10 +107,10 @@
#define VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT 0
#define VI6_RPF_INFMT 0x0308
-#define VI6_RPF_INFMT_VIR (1 << 28)
-#define VI6_RPF_INFMT_CIPM (1 << 16)
-#define VI6_RPF_INFMT_SPYCS (1 << 15)
-#define VI6_RPF_INFMT_SPUVS (1 << 14)
+#define VI6_RPF_INFMT_VIR BIT(28)
+#define VI6_RPF_INFMT_CIPM BIT(16)
+#define VI6_RPF_INFMT_SPYCS BIT(15)
+#define VI6_RPF_INFMT_SPUVS BIT(14)
#define VI6_RPF_INFMT_CEXT_ZERO (0 << 12)
#define VI6_RPF_INFMT_CEXT_EXT (1 << 12)
#define VI6_RPF_INFMT_CEXT_ONE (2 << 12)
@@ -120,19 +120,19 @@
#define VI6_RPF_INFMT_RDTM_BT709 (2 << 9)
#define VI6_RPF_INFMT_RDTM_BT709_EXT (3 << 9)
#define VI6_RPF_INFMT_RDTM_MASK (7 << 9)
-#define VI6_RPF_INFMT_CSC (1 << 8)
+#define VI6_RPF_INFMT_CSC BIT(8)
#define VI6_RPF_INFMT_RDFMT_MASK (0x7f << 0)
#define VI6_RPF_INFMT_RDFMT_SHIFT 0
#define VI6_RPF_DSWAP 0x030c
-#define VI6_RPF_DSWAP_A_LLS (1 << 11)
-#define VI6_RPF_DSWAP_A_LWS (1 << 10)
-#define VI6_RPF_DSWAP_A_WDS (1 << 9)
-#define VI6_RPF_DSWAP_A_BTS (1 << 8)
-#define VI6_RPF_DSWAP_P_LLS (1 << 3)
-#define VI6_RPF_DSWAP_P_LWS (1 << 2)
-#define VI6_RPF_DSWAP_P_WDS (1 << 1)
-#define VI6_RPF_DSWAP_P_BTS (1 << 0)
+#define VI6_RPF_DSWAP_A_LLS BIT(11)
+#define VI6_RPF_DSWAP_A_LWS BIT(10)
+#define VI6_RPF_DSWAP_A_WDS BIT(9)
+#define VI6_RPF_DSWAP_A_BTS BIT(8)
+#define VI6_RPF_DSWAP_P_LLS BIT(3)
+#define VI6_RPF_DSWAP_P_LWS BIT(2)
+#define VI6_RPF_DSWAP_P_WDS BIT(1)
+#define VI6_RPF_DSWAP_P_BTS BIT(0)
#define VI6_RPF_LOC 0x0310
#define VI6_RPF_LOC_HCOORD_MASK (0x1fff << 16)
@@ -150,7 +150,7 @@
#define VI6_RPF_ALPH_SEL_ASEL_SHIFT 28
#define VI6_RPF_ALPH_SEL_IROP_MASK (0xf << 24)
#define VI6_RPF_ALPH_SEL_IROP_SHIFT 24
-#define VI6_RPF_ALPH_SEL_BSEL (1 << 23)
+#define VI6_RPF_ALPH_SEL_BSEL BIT(23)
#define VI6_RPF_ALPH_SEL_AEXT_ZERO (0 << 18)
#define VI6_RPF_ALPH_SEL_AEXT_EXT (1 << 18)
#define VI6_RPF_ALPH_SEL_AEXT_ONE (2 << 18)
@@ -171,7 +171,7 @@
#define VI6_RPF_VRTCOL_SET_LAYB_SHIFT 0
#define VI6_RPF_MSK_CTRL 0x031c
-#define VI6_RPF_MSK_CTRL_MSK_EN (1 << 24)
+#define VI6_RPF_MSK_CTRL_MSK_EN BIT(24)
#define VI6_RPF_MSK_CTRL_MGR_MASK (0xff << 16)
#define VI6_RPF_MSK_CTRL_MGR_SHIFT 16
#define VI6_RPF_MSK_CTRL_MGG_MASK (0xff << 8)
@@ -191,9 +191,9 @@
#define VI6_RPF_MSK_SET_MSB_SHIFT 0
#define VI6_RPF_CKEY_CTRL 0x0328
-#define VI6_RPF_CKEY_CTRL_CV (1 << 4)
-#define VI6_RPF_CKEY_CTRL_SAPE1 (1 << 1)
-#define VI6_RPF_CKEY_CTRL_SAPE0 (1 << 0)
+#define VI6_RPF_CKEY_CTRL_CV BIT(4)
+#define VI6_RPF_CKEY_CTRL_SAPE1 BIT(1)
+#define VI6_RPF_CKEY_CTRL_SAPE0 BIT(0)
#define VI6_RPF_CKEY_SET0 0x032c
#define VI6_RPF_CKEY_SET1 0x0330
@@ -250,7 +250,7 @@
#define VI6_WPF_HSZCLIP 0x1004
#define VI6_WPF_VSZCLIP 0x1008
-#define VI6_WPF_SZCLIP_EN (1 << 28)
+#define VI6_WPF_SZCLIP_EN BIT(28)
#define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
#define VI6_WPF_SZCLIP_OFST_SHIFT 16
#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
@@ -259,12 +259,12 @@
#define VI6_WPF_OUTFMT 0x100c
#define VI6_WPF_OUTFMT_PDV_MASK (0xff << 24)
#define VI6_WPF_OUTFMT_PDV_SHIFT 24
-#define VI6_WPF_OUTFMT_PXA (1 << 23)
-#define VI6_WPF_OUTFMT_ROT (1 << 18)
-#define VI6_WPF_OUTFMT_HFLP (1 << 17)
-#define VI6_WPF_OUTFMT_FLP (1 << 16)
-#define VI6_WPF_OUTFMT_SPYCS (1 << 15)
-#define VI6_WPF_OUTFMT_SPUVS (1 << 14)
+#define VI6_WPF_OUTFMT_PXA BIT(23)
+#define VI6_WPF_OUTFMT_ROT BIT(18)
+#define VI6_WPF_OUTFMT_HFLP BIT(17)
+#define VI6_WPF_OUTFMT_FLP BIT(16)
+#define VI6_WPF_OUTFMT_SPYCS BIT(15)
+#define VI6_WPF_OUTFMT_SPUVS BIT(14)
#define VI6_WPF_OUTFMT_DITH_DIS (0 << 12)
#define VI6_WPF_OUTFMT_DITH_EN (3 << 12)
#define VI6_WPF_OUTFMT_DITH_MASK (3 << 12)
@@ -273,18 +273,18 @@
#define VI6_WPF_OUTFMT_WRTM_BT709 (2 << 9)
#define VI6_WPF_OUTFMT_WRTM_BT709_EXT (3 << 9)
#define VI6_WPF_OUTFMT_WRTM_MASK (7 << 9)
-#define VI6_WPF_OUTFMT_CSC (1 << 8)
+#define VI6_WPF_OUTFMT_CSC BIT(8)
#define VI6_WPF_OUTFMT_WRFMT_MASK (0x7f << 0)
#define VI6_WPF_OUTFMT_WRFMT_SHIFT 0
#define VI6_WPF_DSWAP 0x1010
-#define VI6_WPF_DSWAP_P_LLS (1 << 3)
-#define VI6_WPF_DSWAP_P_LWS (1 << 2)
-#define VI6_WPF_DSWAP_P_WDS (1 << 1)
-#define VI6_WPF_DSWAP_P_BTS (1 << 0)
+#define VI6_WPF_DSWAP_P_LLS BIT(3)
+#define VI6_WPF_DSWAP_P_LWS BIT(2)
+#define VI6_WPF_DSWAP_P_WDS BIT(1)
+#define VI6_WPF_DSWAP_P_BTS BIT(0)
#define VI6_WPF_RNDCTRL 0x1014
-#define VI6_WPF_RNDCTRL_CBRM (1 << 28)
+#define VI6_WPF_RNDCTRL_CBRM BIT(28)
#define VI6_WPF_RNDCTRL_ABRM_TRUNC (0 << 24)
#define VI6_WPF_RNDCTRL_ABRM_ROUND (1 << 24)
#define VI6_WPF_RNDCTRL_ABRM_THRESH (2 << 24)
@@ -297,7 +297,7 @@
#define VI6_WPF_RNDCTRL_CLMD_MASK (3 << 12)
#define VI6_WPF_ROT_CTRL 0x1018
-#define VI6_WPF_ROT_CTRL_LN16 (1 << 17)
+#define VI6_WPF_ROT_CTRL_LN16 BIT(17)
#define VI6_WPF_ROT_CTRL_LMEM_WD_MASK (0x1fff << 0)
#define VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT 0
@@ -308,7 +308,7 @@
#define VI6_WPF_DSTM_ADDR_C1 0x102c
#define VI6_WPF_WRBCK_CTRL(n) (0x1034 + (n) * 0x100)
-#define VI6_WPF_WRBCK_CTRL_WBMD (1 << 0)
+#define VI6_WPF_WRBCK_CTRL_WBMD BIT(0)
/* -----------------------------------------------------------------------------
* UIF Control Registers
@@ -317,20 +317,20 @@
#define VI6_UIF_OFFSET 0x100
#define VI6_UIF_DISCOM_DOCMCR 0x1c00
-#define VI6_UIF_DISCOM_DOCMCR_CMPRU (1 << 16)
-#define VI6_UIF_DISCOM_DOCMCR_CMPR (1 << 0)
+#define VI6_UIF_DISCOM_DOCMCR_CMPRU BIT(16)
+#define VI6_UIF_DISCOM_DOCMCR_CMPR BIT(0)
#define VI6_UIF_DISCOM_DOCMSTR 0x1c04
-#define VI6_UIF_DISCOM_DOCMSTR_CMPPRE (1 << 1)
-#define VI6_UIF_DISCOM_DOCMSTR_CMPST (1 << 0)
+#define VI6_UIF_DISCOM_DOCMSTR_CMPPRE BIT(1)
+#define VI6_UIF_DISCOM_DOCMSTR_CMPST BIT(0)
#define VI6_UIF_DISCOM_DOCMCLSTR 0x1c08
-#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLPRE (1 << 1)
-#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLST (1 << 0)
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLPRE BIT(1)
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLST BIT(0)
#define VI6_UIF_DISCOM_DOCMIENR 0x1c0c
-#define VI6_UIF_DISCOM_DOCMIENR_CMPPREIEN (1 << 1)
-#define VI6_UIF_DISCOM_DOCMIENR_CMPIEN (1 << 0)
+#define VI6_UIF_DISCOM_DOCMIENR_CMPPREIEN BIT(1)
+#define VI6_UIF_DISCOM_DOCMIENR_CMPIEN BIT(0)
#define VI6_UIF_DISCOM_DOCMMDR 0x1c10
#define VI6_UIF_DISCOM_DOCMMDR_INTHRH(n) ((n) << 16)
@@ -338,7 +338,7 @@
#define VI6_UIF_DISCOM_DOCMPMR 0x1c14
#define VI6_UIF_DISCOM_DOCMPMR_CMPDFF(n) ((n) << 17)
#define VI6_UIF_DISCOM_DOCMPMR_CMPDFA(n) ((n) << 8)
-#define VI6_UIF_DISCOM_DOCMPMR_CMPDAUF (1 << 7)
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDAUF BIT(7)
#define VI6_UIF_DISCOM_DOCMPMR_SEL(n) ((n) << 0)
#define VI6_UIF_DISCOM_DOCMECRCR 0x1c18
@@ -365,7 +365,7 @@
#define VI6_DPR_HSI_ROUTE 0x2048
#define VI6_DPR_BRU_ROUTE 0x204c
#define VI6_DPR_ILV_BRS_ROUTE 0x2050
-#define VI6_DPR_ROUTE_BRSSEL (1 << 28)
+#define VI6_DPR_ROUTE_BRSSEL BIT(28)
#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
#define VI6_DPR_ROUTE_FXA_SHIFT 16
#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
@@ -407,10 +407,10 @@
#define VI6_SRU_CTRL0_PARAM1_MASK (0x1f << 8)
#define VI6_SRU_CTRL0_PARAM1_SHIFT 8
#define VI6_SRU_CTRL0_MODE_UPSCALE (4 << 4)
-#define VI6_SRU_CTRL0_PARAM2 (1 << 3)
-#define VI6_SRU_CTRL0_PARAM3 (1 << 2)
-#define VI6_SRU_CTRL0_PARAM4 (1 << 1)
-#define VI6_SRU_CTRL0_EN (1 << 0)
+#define VI6_SRU_CTRL0_PARAM2 BIT(3)
+#define VI6_SRU_CTRL0_PARAM3 BIT(2)
+#define VI6_SRU_CTRL0_PARAM4 BIT(1)
+#define VI6_SRU_CTRL0_EN BIT(0)
#define VI6_SRU_CTRL1 0x2204
#define VI6_SRU_CTRL1_PARAM5 0x7ff
@@ -427,18 +427,18 @@
#define VI6_UDS_OFFSET 0x100
#define VI6_UDS_CTRL 0x2300
-#define VI6_UDS_CTRL_AMD (1 << 30)
-#define VI6_UDS_CTRL_FMD (1 << 29)
-#define VI6_UDS_CTRL_BLADV (1 << 28)
-#define VI6_UDS_CTRL_AON (1 << 25)
-#define VI6_UDS_CTRL_ATHON (1 << 24)
-#define VI6_UDS_CTRL_BC (1 << 20)
-#define VI6_UDS_CTRL_NE_A (1 << 19)
-#define VI6_UDS_CTRL_NE_RCR (1 << 18)
-#define VI6_UDS_CTRL_NE_GY (1 << 17)
-#define VI6_UDS_CTRL_NE_BCB (1 << 16)
-#define VI6_UDS_CTRL_AMDSLH (1 << 2)
-#define VI6_UDS_CTRL_TDIPC (1 << 1)
+#define VI6_UDS_CTRL_AMD BIT(30)
+#define VI6_UDS_CTRL_FMD BIT(29)
+#define VI6_UDS_CTRL_BLADV BIT(28)
+#define VI6_UDS_CTRL_AON BIT(25)
+#define VI6_UDS_CTRL_ATHON BIT(24)
+#define VI6_UDS_CTRL_BC BIT(20)
+#define VI6_UDS_CTRL_NE_A BIT(19)
+#define VI6_UDS_CTRL_NE_RCR BIT(18)
+#define VI6_UDS_CTRL_NE_GY BIT(17)
+#define VI6_UDS_CTRL_NE_BCB BIT(16)
+#define VI6_UDS_CTRL_AMDSLH BIT(2)
+#define VI6_UDS_CTRL_TDIPC BIT(1)
#define VI6_UDS_SCALE 0x2304
#define VI6_UDS_SCALE_HMANT_MASK (0xf << 28)
@@ -477,12 +477,12 @@
#define VI6_UDS_HPHASE_HEDP_SHIFT 0
#define VI6_UDS_IPC 0x2318
-#define VI6_UDS_IPC_FIELD (1 << 27)
+#define VI6_UDS_IPC_FIELD BIT(27)
#define VI6_UDS_IPC_VEDP_MASK (0xfff << 0)
#define VI6_UDS_IPC_VEDP_SHIFT 0
#define VI6_UDS_HSZCLIP 0x231c
-#define VI6_UDS_HSZCLIP_HCEN (1 << 28)
+#define VI6_UDS_HSZCLIP_HCEN BIT(28)
#define VI6_UDS_HSZCLIP_HCL_OFST_MASK (0xff << 16)
#define VI6_UDS_HSZCLIP_HCL_OFST_SHIFT 16
#define VI6_UDS_HSZCLIP_HCL_SIZE_MASK (0x1fff << 0)
@@ -507,36 +507,36 @@
*/
#define VI6_LUT_CTRL 0x2800
-#define VI6_LUT_CTRL_EN (1 << 0)
+#define VI6_LUT_CTRL_EN BIT(0)
/* -----------------------------------------------------------------------------
* CLU Control Registers
*/
#define VI6_CLU_CTRL 0x2900
-#define VI6_CLU_CTRL_AAI (1 << 28)
-#define VI6_CLU_CTRL_MVS (1 << 24)
+#define VI6_CLU_CTRL_AAI BIT(28)
+#define VI6_CLU_CTRL_MVS BIT(24)
#define VI6_CLU_CTRL_AX1I_2D (3 << 14)
#define VI6_CLU_CTRL_AX2I_2D (1 << 12)
#define VI6_CLU_CTRL_OS0_2D (3 << 8)
#define VI6_CLU_CTRL_OS1_2D (1 << 6)
#define VI6_CLU_CTRL_OS2_2D (3 << 4)
-#define VI6_CLU_CTRL_M2D (1 << 1)
-#define VI6_CLU_CTRL_EN (1 << 0)
+#define VI6_CLU_CTRL_M2D BIT(1)
+#define VI6_CLU_CTRL_EN BIT(0)
/* -----------------------------------------------------------------------------
* HST Control Registers
*/
#define VI6_HST_CTRL 0x2a00
-#define VI6_HST_CTRL_EN (1 << 0)
+#define VI6_HST_CTRL_EN BIT(0)
/* -----------------------------------------------------------------------------
* HSI Control Registers
*/
#define VI6_HSI_CTRL 0x2b00
-#define VI6_HSI_CTRL_EN (1 << 0)
+#define VI6_HSI_CTRL_EN BIT(0)
/* -----------------------------------------------------------------------------
* BRS and BRU Control Registers
@@ -563,7 +563,7 @@
#define VI6_BRS_BASE 0x3900
#define VI6_BRU_INCTRL 0x0000
-#define VI6_BRU_INCTRL_NRM (1 << 28)
+#define VI6_BRU_INCTRL_NRM BIT(28)
#define VI6_BRU_INCTRL_DnON (1 << (16 + (n)))
#define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4))
#define VI6_BRU_INCTRL_DITHn_18BPP (1 << ((n) * 4))
@@ -597,7 +597,7 @@
#define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0
#define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4))
-#define VI6_BRU_CTRL_RBC (1 << 31)
+#define VI6_BRU_CTRL_RBC BIT(31)
#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
#define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20)
#define VI6_BRU_CTRL_DSTSEL_MASK (7 << 20)
@@ -610,7 +610,7 @@
#define VI6_BRU_CTRL_AROP_MASK (0xf << 0)
#define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4))
-#define VI6_BRU_BLD_CBES (1 << 31)
+#define VI6_BRU_BLD_CBES BIT(31)
#define VI6_BRU_BLD_CCMDX_DST_A (0 << 28)
#define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28)
#define VI6_BRU_BLD_CCMDX_SRC_A (2 << 28)
@@ -624,7 +624,7 @@
#define VI6_BRU_BLD_CCMDY_COEFY (4 << 24)
#define VI6_BRU_BLD_CCMDY_MASK (7 << 24)
#define VI6_BRU_BLD_CCMDY_SHIFT 24
-#define VI6_BRU_BLD_ABES (1 << 23)
+#define VI6_BRU_BLD_ABES BIT(23)
#define VI6_BRU_BLD_ACMDX_DST_A (0 << 20)
#define VI6_BRU_BLD_ACMDX_255_DST_A (1 << 20)
#define VI6_BRU_BLD_ACMDX_SRC_A (2 << 20)
@@ -662,11 +662,11 @@
#define VI6_HGO_SIZE_HSIZE_SHIFT 16
#define VI6_HGO_SIZE_VSIZE_SHIFT 0
#define VI6_HGO_MODE 0x3008
-#define VI6_HGO_MODE_STEP (1 << 10)
-#define VI6_HGO_MODE_MAXRGB (1 << 7)
-#define VI6_HGO_MODE_OFSB_R (1 << 6)
-#define VI6_HGO_MODE_OFSB_G (1 << 5)
-#define VI6_HGO_MODE_OFSB_B (1 << 4)
+#define VI6_HGO_MODE_STEP BIT(10)
+#define VI6_HGO_MODE_MAXRGB BIT(7)
+#define VI6_HGO_MODE_OFSB_R BIT(6)
+#define VI6_HGO_MODE_OFSB_G BIT(5)
+#define VI6_HGO_MODE_OFSB_B BIT(4)
#define VI6_HGO_MODE_HRATIO_SHIFT 2
#define VI6_HGO_MODE_VRATIO_SHIFT 0
#define VI6_HGO_LB_TH 0x300c
@@ -687,7 +687,7 @@
#define VI6_HGO_EXT_HIST_ADDR 0x335c
#define VI6_HGO_EXT_HIST_DATA 0x3360
#define VI6_HGO_REGRST 0x33fc
-#define VI6_HGO_REGRST_RCLEA (1 << 0)
+#define VI6_HGO_REGRST_RCLEA BIT(0)
/* -----------------------------------------------------------------------------
* HGT Control Registers
@@ -713,7 +713,7 @@
#define VI6_HGT_SUM 0x3754
#define VI6_HGT_LB_DET 0x3758
#define VI6_HGT_REGRST 0x37fc
-#define VI6_HGT_REGRST_RCLEA (1 << 0)
+#define VI6_HGT_REGRST_RCLEA BIT(0)
/* -----------------------------------------------------------------------------
* LIF Control Registers
@@ -724,9 +724,9 @@
#define VI6_LIF_CTRL 0x3b00
#define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16)
#define VI6_LIF_CTRL_OBTH_SHIFT 16
-#define VI6_LIF_CTRL_CFMT (1 << 4)
-#define VI6_LIF_CTRL_REQSEL (1 << 1)
-#define VI6_LIF_CTRL_LIF_EN (1 << 0)
+#define VI6_LIF_CTRL_CFMT BIT(4)
+#define VI6_LIF_CTRL_REQSEL BIT(1)
+#define VI6_LIF_CTRL_LIF_EN BIT(0)
#define VI6_LIF_CSBTH 0x3b04
#define VI6_LIF_CSBTH_HBTH_MASK (0x7ff << 16)
@@ -735,7 +735,7 @@
#define VI6_LIF_CSBTH_LBTH_SHIFT 0
#define VI6_LIF_LBA 0x3b0c
-#define VI6_LIF_LBA_LBA0 (1 << 31)
+#define VI6_LIF_LBA_LBA0 BIT(31)
#define VI6_LIF_LBA_LBA1_MASK (0xfff << 16)
#define VI6_LIF_LBA_LBA1_SHIFT 16
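
The register-bit conversions above swap open-coded ``(1 << n)`` values for BIT(n) from <linux/bits.h>, which shifts an unsigned long and so stays well-defined even for bit 31. A minimal sketch of the idiom, with invented EXAMPLE_* names that are not part of the VSP1 register map:

/* Minimal sketch of the BIT() idiom; EXAMPLE_* names are invented. */
#define DEMO_BIT(nr)		(1UL << (nr))	/* what BIT(nr) expands to */

#define EXAMPLE_CTRL_EN		DEMO_BIT(0)	/* same value as (1 << 0) */
#define EXAMPLE_CTRL_RBC	DEMO_BIT(31)	/* unsigned long, unlike (1 << 31) */

static inline unsigned long example_enable(unsigned long reg)
{
	return reg | EXAMPLE_CTRL_EN;	/* set only the enable bit */
}
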
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index fd98e483b2f4..5e59ed2c3614 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -956,12 +956,6 @@ vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
| V4L2_CAP_VIDEO_CAPTURE_MPLANE
| V4L2_CAP_VIDEO_OUTPUT_MPLANE;
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
- | V4L2_CAP_STREAMING;
- else
- cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
- | V4L2_CAP_STREAMING;
strscpy(cap->driver, "vsp1", sizeof(cap->driver));
strscpy(cap->card, video->video.name, sizeof(cap->card));
@@ -1268,11 +1262,15 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
video->pad.flags = MEDIA_PAD_FL_SOURCE;
video->video.vfl_dir = VFL_DIR_TX;
+ video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_STREAMING;
} else {
direction = "output";
video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
video->pad.flags = MEDIA_PAD_FL_SINK;
video->video.vfl_dir = VFL_DIR_RX;
+ video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
}
mutex_init(&video->lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index c9d5fdb2d407..b211380a11f2 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -491,15 +491,8 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- cap->device_caps = V4L2_CAP_STREAMING;
-
- if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- else
- cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
-
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS
- | dma->xdev->v4l2_caps;
+ cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
+ V4L2_CAP_DEVICE_CAPS;
strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
strscpy(cap->card, dma->video.name, sizeof(cap->card));
@@ -524,8 +517,6 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return -EINVAL;
f->pixelformat = dma->format.pixelformat;
- strscpy(f->description, dma->fmtinfo->description,
- sizeof(f->description));
return 0;
}
@@ -700,6 +691,11 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
dma->video.release = video_device_release_empty;
dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
dma->video.lock = &dma->lock;
+ dma->video.device_caps = V4L2_CAP_STREAMING;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else
+ dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;
video_set_drvdata(&dma->video, dma);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 08a825c3a3f6..6ad61b08a31a 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -25,21 +25,21 @@
static const struct xvip_video_format xvip_video_formats[] = {
{ XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
- 2, V4L2_PIX_FMT_YUYV, "4:2:2, packed, YUYV" },
+ 2, V4L2_PIX_FMT_YUYV },
{ XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
- 3, V4L2_PIX_FMT_YUV444, "4:4:4, packed, YUYV" },
+ 3, V4L2_PIX_FMT_YUV444 },
{ XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
- 3, 0, NULL },
+ 3, 0 },
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
- 1, V4L2_PIX_FMT_GREY, "Greyscale 8-bit" },
+ 1, V4L2_PIX_FMT_GREY },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, V4L2_PIX_FMT_SRGGB8, "Bayer 8-bit RGGB" },
+ 1, V4L2_PIX_FMT_SRGGB8 },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
- 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit GRBG" },
+ 1, V4L2_PIX_FMT_SGRBG8 },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
- 1, V4L2_PIX_FMT_SGBRG8, "Bayer 8-bit GBRG" },
+ 1, V4L2_PIX_FMT_SGBRG8 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
- 1, V4L2_PIX_FMT_SBGGR8, "Bayer 8-bit BGGR" },
+ 1, V4L2_PIX_FMT_SBGGR8 },
};
/**
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index ba939dd52818..f71e2b650453 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -12,6 +12,7 @@
#ifndef __XILINX_VIP_H__
#define __XILINX_VIP_H__
+#include <linux/bitops.h>
#include <linux/io.h>
#include <media/v4l2-subdev.h>
@@ -35,23 +36,23 @@ struct clk;
/* Xilinx Video IP Control Registers */
#define XVIP_CTRL_CONTROL 0x0000
-#define XVIP_CTRL_CONTROL_SW_ENABLE (1 << 0)
-#define XVIP_CTRL_CONTROL_REG_UPDATE (1 << 1)
-#define XVIP_CTRL_CONTROL_BYPASS (1 << 4)
-#define XVIP_CTRL_CONTROL_TEST_PATTERN (1 << 5)
-#define XVIP_CTRL_CONTROL_FRAME_SYNC_RESET (1 << 30)
-#define XVIP_CTRL_CONTROL_SW_RESET (1 << 31)
+#define XVIP_CTRL_CONTROL_SW_ENABLE BIT(0)
+#define XVIP_CTRL_CONTROL_REG_UPDATE BIT(1)
+#define XVIP_CTRL_CONTROL_BYPASS BIT(4)
+#define XVIP_CTRL_CONTROL_TEST_PATTERN BIT(5)
+#define XVIP_CTRL_CONTROL_FRAME_SYNC_RESET BIT(30)
+#define XVIP_CTRL_CONTROL_SW_RESET BIT(31)
#define XVIP_CTRL_STATUS 0x0004
-#define XVIP_CTRL_STATUS_PROC_STARTED (1 << 0)
-#define XVIP_CTRL_STATUS_EOF (1 << 1)
+#define XVIP_CTRL_STATUS_PROC_STARTED BIT(0)
+#define XVIP_CTRL_STATUS_EOF BIT(1)
#define XVIP_CTRL_ERROR 0x0008
-#define XVIP_CTRL_ERROR_SLAVE_EOL_EARLY (1 << 0)
-#define XVIP_CTRL_ERROR_SLAVE_EOL_LATE (1 << 1)
-#define XVIP_CTRL_ERROR_SLAVE_SOF_EARLY (1 << 2)
-#define XVIP_CTRL_ERROR_SLAVE_SOF_LATE (1 << 3)
+#define XVIP_CTRL_ERROR_SLAVE_EOL_EARLY BIT(0)
+#define XVIP_CTRL_ERROR_SLAVE_EOL_LATE BIT(1)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_EARLY BIT(2)
+#define XVIP_CTRL_ERROR_SLAVE_SOF_LATE BIT(3)
#define XVIP_CTRL_IRQ_ENABLE 0x000c
-#define XVIP_CTRL_IRQ_ENABLE_PROC_STARTED (1 << 0)
-#define XVIP_CTRL_IRQ_EOF (1 << 1)
+#define XVIP_CTRL_IRQ_ENABLE_PROC_STARTED BIT(0)
+#define XVIP_CTRL_IRQ_EOF BIT(1)
#define XVIP_CTRL_VERSION 0x0010
#define XVIP_CTRL_VERSION_MAJOR_MASK (0xff << 24)
#define XVIP_CTRL_VERSION_MAJOR_SHIFT 24
@@ -108,7 +109,6 @@ struct xvip_device {
* @code: media bus format code
* @bpp: bytes per pixel (when stored in memory)
* @fourcc: V4L2 pixel format FCC identifier
- * @description: format description, suitable for userspace
*/
struct xvip_video_format {
unsigned int vf_code;
@@ -117,7 +117,6 @@ struct xvip_video_format {
unsigned int code;
unsigned int bpp;
u32 fourcc;
- const char *description;
};
const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index edce0402155d..cc2856efea59 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -385,9 +385,9 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
asd = v4l2_async_notifier_add_fwnode_subdev(
&xdev->notifier, remote,
sizeof(struct xvip_graph_entity));
+ fwnode_handle_put(remote);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
- fwnode_handle_put(remote);
goto err_notifier_cleanup;
}
}
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 06400112aebb..a532f63aa9d9 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -125,7 +125,7 @@ struct gemtek {
#define BU2614_FMUN_SHIFT (BU2614_VOID2_BITS + BU2614_VOID2_SHIFT)
#define BU2614_TEST_SHIFT (BU2614_FMUN_BITS + BU2614_FMUN_SHIFT)
-#define MKMASK(field) (((1<<BU2614_##field##_BITS) - 1) << \
+#define MKMASK(field) (((1UL<<BU2614_##field##_BITS) - 1) << \
BU2614_##field##_SHIFT)
#define BU2614_PORT_MASK MKMASK(PORT)
#define BU2614_FREQ_MASK MKMASK(FREQ)
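
The MKMASK() change above promotes the shifted constant to unsigned long so fields whose bits reach the top of a 32-bit int cannot overflow. The sketch below shows the same field-mask idiom with made-up widths and shifts, not the real BU2614 layout:

#include <stdio.h>

#define DEMO_FREQ_BITS		16
#define DEMO_FREQ_SHIFT		0
#define DEMO_PORT_BITS		2
#define DEMO_PORT_SHIFT		(DEMO_FREQ_BITS + DEMO_FREQ_SHIFT)

/* 1UL keeps the shift in unsigned long arithmetic, as in the patch above */
#define DEMO_MASK(field) \
	(((1UL << DEMO_##field##_BITS) - 1) << DEMO_##field##_SHIFT)

int main(void)
{
	printf("freq mask: %#lx\n", DEMO_MASK(FREQ));	/* 0xffff */
	printf("port mask: %#lx\n", DEMO_MASK(PORT));	/* 0x30000 */
	return 0;
}
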
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 2fc009509c7c..dfb8b62f0e2b 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -16,7 +16,6 @@
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
-#include <stdarg.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 7d53422b3b56..7541698a0be1 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -330,8 +330,7 @@ end:
/*
* si470x_i2c_probe - probe for the device
*/
-static int si470x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si470x_i2c_probe(struct i2c_client *client)
{
struct si470x_device *radio;
int retval = 0;
@@ -544,7 +543,7 @@ static struct i2c_driver si470x_i2c_driver = {
.pm = &si470x_i2c_pm,
#endif
},
- .probe = si470x_i2c_probe,
+ .probe_new = si470x_i2c_probe,
.remove = si470x_i2c_remove,
.id_table = si470x_i2c_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 49073747b1e7..fedff68d8c49 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -734,7 +734,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* start radio */
retval = si470x_start_usb(radio);
if (retval < 0)
- goto err_all;
+ goto err_buf;
/* set initial frequency */
si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
@@ -749,6 +749,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
return 0;
err_all:
+ usb_kill_urb(radio->int_in_urb);
+err_buf:
kfree(radio->buffer);
err_ctrl:
v4l2_ctrl_handler_free(&radio->hdl);
@@ -822,6 +824,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
mutex_lock(&radio->lock);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
+ usb_kill_urb(radio->int_in_urb);
usb_set_intfdata(intf, NULL);
mutex_unlock(&radio->lock);
v4l2_device_put(&radio->v4l2_dev);
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 7d97de2fa56c..7f3aee495ed3 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1427,8 +1427,7 @@ static const struct v4l2_ctrl_config si4713_alt_freqs_ctrl = {
* I2C driver interface
*/
/* si4713_probe - probe for the device */
-static int si4713_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si4713_probe(struct i2c_client *client)
{
struct si4713_device *sdev;
struct v4l2_ctrl_handler *hdl;
@@ -1660,7 +1659,7 @@ static struct i2c_driver si4713_i2c_driver = {
.name = "si4713",
.of_match_table = of_match_ptr(si4713_of_match),
},
- .probe = si4713_probe,
+ .probe_new = si4713_probe,
.remove = si4713_remove,
.id_table = si4713_id,
};
diff --git a/drivers/media/radio/wl128x/fmdrv_common.h b/drivers/media/radio/wl128x/fmdrv_common.h
index 7d7a2b17aa76..6a287eadae75 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.h
+++ b/drivers/media/radio/wl128x/fmdrv_common.h
@@ -159,18 +159,18 @@ struct fm_event_msg_hdr {
#define FM_DISABLE 0
/* FLAG_GET register bits */
-#define FM_FR_EVENT (1 << 0)
-#define FM_BL_EVENT (1 << 1)
-#define FM_RDS_EVENT (1 << 2)
-#define FM_BBLK_EVENT (1 << 3)
-#define FM_LSYNC_EVENT (1 << 4)
-#define FM_LEV_EVENT (1 << 5)
-#define FM_IFFR_EVENT (1 << 6)
-#define FM_PI_EVENT (1 << 7)
-#define FM_PD_EVENT (1 << 8)
-#define FM_STIC_EVENT (1 << 9)
-#define FM_MAL_EVENT (1 << 10)
-#define FM_POW_ENB_EVENT (1 << 11)
+#define FM_FR_EVENT BIT(0)
+#define FM_BL_EVENT BIT(1)
+#define FM_RDS_EVENT BIT(2)
+#define FM_BBLK_EVENT BIT(3)
+#define FM_LSYNC_EVENT BIT(4)
+#define FM_LEV_EVENT BIT(5)
+#define FM_IFFR_EVENT BIT(6)
+#define FM_PI_EVENT BIT(7)
+#define FM_PD_EVENT BIT(8)
+#define FM_STIC_EVENT BIT(9)
+#define FM_MAL_EVENT BIT(10)
+#define FM_POW_ENB_EVENT BIT(11)
/*
 * Firmware files of FM. ASIC ID and ASIC version will be appended to this,
@@ -268,38 +268,38 @@ struct fm_event_msg_hdr {
* Represents an RDS group type & version.
* There are 15 groups, each group has 2 versions: A and B.
*/
-#define FM_RDS_GROUP_TYPE_MASK_0A ((unsigned long)1<<0)
-#define FM_RDS_GROUP_TYPE_MASK_0B ((unsigned long)1<<1)
-#define FM_RDS_GROUP_TYPE_MASK_1A ((unsigned long)1<<2)
-#define FM_RDS_GROUP_TYPE_MASK_1B ((unsigned long)1<<3)
-#define FM_RDS_GROUP_TYPE_MASK_2A ((unsigned long)1<<4)
-#define FM_RDS_GROUP_TYPE_MASK_2B ((unsigned long)1<<5)
-#define FM_RDS_GROUP_TYPE_MASK_3A ((unsigned long)1<<6)
-#define FM_RDS_GROUP_TYPE_MASK_3B ((unsigned long)1<<7)
-#define FM_RDS_GROUP_TYPE_MASK_4A ((unsigned long)1<<8)
-#define FM_RDS_GROUP_TYPE_MASK_4B ((unsigned long)1<<9)
-#define FM_RDS_GROUP_TYPE_MASK_5A ((unsigned long)1<<10)
-#define FM_RDS_GROUP_TYPE_MASK_5B ((unsigned long)1<<11)
-#define FM_RDS_GROUP_TYPE_MASK_6A ((unsigned long)1<<12)
-#define FM_RDS_GROUP_TYPE_MASK_6B ((unsigned long)1<<13)
-#define FM_RDS_GROUP_TYPE_MASK_7A ((unsigned long)1<<14)
-#define FM_RDS_GROUP_TYPE_MASK_7B ((unsigned long)1<<15)
-#define FM_RDS_GROUP_TYPE_MASK_8A ((unsigned long)1<<16)
-#define FM_RDS_GROUP_TYPE_MASK_8B ((unsigned long)1<<17)
-#define FM_RDS_GROUP_TYPE_MASK_9A ((unsigned long)1<<18)
-#define FM_RDS_GROUP_TYPE_MASK_9B ((unsigned long)1<<19)
-#define FM_RDS_GROUP_TYPE_MASK_10A ((unsigned long)1<<20)
-#define FM_RDS_GROUP_TYPE_MASK_10B ((unsigned long)1<<21)
-#define FM_RDS_GROUP_TYPE_MASK_11A ((unsigned long)1<<22)
-#define FM_RDS_GROUP_TYPE_MASK_11B ((unsigned long)1<<23)
-#define FM_RDS_GROUP_TYPE_MASK_12A ((unsigned long)1<<24)
-#define FM_RDS_GROUP_TYPE_MASK_12B ((unsigned long)1<<25)
-#define FM_RDS_GROUP_TYPE_MASK_13A ((unsigned long)1<<26)
-#define FM_RDS_GROUP_TYPE_MASK_13B ((unsigned long)1<<27)
-#define FM_RDS_GROUP_TYPE_MASK_14A ((unsigned long)1<<28)
-#define FM_RDS_GROUP_TYPE_MASK_14B ((unsigned long)1<<29)
-#define FM_RDS_GROUP_TYPE_MASK_15A ((unsigned long)1<<30)
-#define FM_RDS_GROUP_TYPE_MASK_15B ((unsigned long)1<<31)
+#define FM_RDS_GROUP_TYPE_MASK_0A BIT(0)
+#define FM_RDS_GROUP_TYPE_MASK_0B BIT(1)
+#define FM_RDS_GROUP_TYPE_MASK_1A BIT(2)
+#define FM_RDS_GROUP_TYPE_MASK_1B BIT(3)
+#define FM_RDS_GROUP_TYPE_MASK_2A BIT(4)
+#define FM_RDS_GROUP_TYPE_MASK_2B BIT(5)
+#define FM_RDS_GROUP_TYPE_MASK_3A BIT(6)
+#define FM_RDS_GROUP_TYPE_MASK_3B BIT(7)
+#define FM_RDS_GROUP_TYPE_MASK_4A BIT(8)
+#define FM_RDS_GROUP_TYPE_MASK_4B BIT(9)
+#define FM_RDS_GROUP_TYPE_MASK_5A BIT(10)
+#define FM_RDS_GROUP_TYPE_MASK_5B BIT(11)
+#define FM_RDS_GROUP_TYPE_MASK_6A BIT(12)
+#define FM_RDS_GROUP_TYPE_MASK_6B BIT(13)
+#define FM_RDS_GROUP_TYPE_MASK_7A BIT(14)
+#define FM_RDS_GROUP_TYPE_MASK_7B BIT(15)
+#define FM_RDS_GROUP_TYPE_MASK_8A BIT(16)
+#define FM_RDS_GROUP_TYPE_MASK_8B BIT(17)
+#define FM_RDS_GROUP_TYPE_MASK_9A BIT(18)
+#define FM_RDS_GROUP_TYPE_MASK_9B BIT(19)
+#define FM_RDS_GROUP_TYPE_MASK_10A BIT(20)
+#define FM_RDS_GROUP_TYPE_MASK_10B BIT(21)
+#define FM_RDS_GROUP_TYPE_MASK_11A BIT(22)
+#define FM_RDS_GROUP_TYPE_MASK_11B BIT(23)
+#define FM_RDS_GROUP_TYPE_MASK_12A BIT(24)
+#define FM_RDS_GROUP_TYPE_MASK_12B BIT(25)
+#define FM_RDS_GROUP_TYPE_MASK_13A BIT(26)
+#define FM_RDS_GROUP_TYPE_MASK_13B BIT(27)
+#define FM_RDS_GROUP_TYPE_MASK_14A BIT(28)
+#define FM_RDS_GROUP_TYPE_MASK_14B BIT(29)
+#define FM_RDS_GROUP_TYPE_MASK_15A BIT(30)
+#define FM_RDS_GROUP_TYPE_MASK_15B BIT(31)
/* RX Alternate Frequency info */
#define FM_RDS_MIN_AF 1
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index ea05e125016a..872d6441e512 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -413,6 +413,10 @@ static int iguanair_probe(struct usb_interface *intf,
int ret, pipein, pipeout;
struct usb_host_interface *idesc;
+ idesc = intf->altsetting;
+ if (idesc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!ir || !rc) {
@@ -427,18 +431,13 @@ static int iguanair_probe(struct usb_interface *intf,
ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
- if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
+ if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
+ !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
+ !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
ret = -ENOMEM;
goto out;
}
- idesc = intf->altsetting;
-
- if (idesc->desc.bNumEndpoints < 2) {
- ret = -ENODEV;
- goto out;
- }
-
ir->rc = rc;
ir->dev = &intf->dev;
ir->udev = udev;
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 7e457f26a595..094aa6a06315 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -81,10 +81,8 @@ static int img_ir_probe(struct platform_device *pdev)
/* Get resources from platform device */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "cannot find IRQ resource\n");
+ if (irq < 0)
return irq;
- }
/* Private driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7bee72108b0e..37a850421fbb 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1826,12 +1826,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
break;
/* iMON VFD, MCE IR */
case 0x46:
- case 0x7e:
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
allowed_protos = RC_PROTO_BIT_RC6_MCE;
break;
+ /* iMON VFD, iMON or MCE IR */
+ case 0x7e:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ allowed_protos |= RC_PROTO_BIT_RC6_MCE;
+ break;
/* iMON LCD, MCE IR */
case 0x9f:
dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index 25e56c5b13c0..d4aedcf76418 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -14,7 +14,7 @@ struct imon {
struct device *dev;
struct urb *ir_urb;
struct rc_dev *rcdev;
- u8 ir_buf[8] __aligned(__alignof__(u64));
+ __be64 ir_buf;
char phys[64];
};
@@ -29,14 +29,35 @@ struct imon {
static void imon_ir_data(struct imon *imon)
{
struct ir_raw_event rawir = {};
- u64 d = be64_to_cpup((__be64 *)imon->ir_buf) >> 24;
+ u64 data = be64_to_cpu(imon->ir_buf);
+ u8 packet_no = data & 0xff;
int offset = 40;
int bit;
- dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
+ if (packet_no == 0xff)
+ return;
+
+ dev_dbg(imon->dev, "data: %*ph", 8, &imon->ir_buf);
+
+ /*
+ * Only the first 5 bytes contain IR data. Right shift so we move
+ * the IR bits to the lower 40 bits.
+ */
+ data >>= 24;
do {
- bit = fls64(d & (BIT_ULL(offset) - 1));
+ /*
+	 * Find highest set bit which is less than or equal to offset
+ *
+ * offset is the bit above (base 0) where we start looking.
+ *
+ * data & (BIT_ULL(offset) - 1) masks off any unwanted bits,
+ * so we have just bits less than offset.
+ *
+ * fls will tell us the highest bit set plus 1 (or 0 if no
+ * bits are set).
+ */
+ bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
rawir.pulse = true;
@@ -49,7 +70,12 @@ static void imon_ir_data(struct imon *imon)
offset = bit;
}
- bit = fls64(~d & (BIT_ULL(offset) - 1));
+ /*
+ * Find highest clear bit which is less than offset.
+ *
+ * Just invert the data and use same trick as above.
+ */
+ bit = fls64(~data & (BIT_ULL(offset) - 1));
dev_dbg(imon->dev, "space: %d bits", offset - bit);
rawir.pulse = false;
@@ -59,7 +85,7 @@ static void imon_ir_data(struct imon *imon)
offset = bit;
} while (offset > 0);
- if (imon->ir_buf[7] == 0x0a) {
+ if (packet_no == 0x0a && !imon->rcdev->idle) {
ir_raw_event_set_idle(imon->rcdev, true);
ir_raw_event_handle(imon->rcdev);
}
@@ -72,8 +98,7 @@ static void imon_ir_rx(struct urb *urb)
switch (urb->status) {
case 0:
- if (imon->ir_buf[7] != 0xff)
- imon_ir_data(imon);
+ imon_ir_data(imon);
break;
case -ECONNRESET:
case -ENOENT:
@@ -129,7 +154,7 @@ static int imon_probe(struct usb_interface *intf,
imon->dev = &intf->dev;
usb_fill_int_urb(imon->ir_urb, udev,
usb_rcvintpipe(udev, ir_ep->bEndpointAddress),
- imon->ir_buf, sizeof(imon->ir_buf),
+ &imon->ir_buf, sizeof(imon->ir_buf),
imon_ir_rx, imon, ir_ep->bInterval);
rcdev = devm_rc_allocate_device(&intf->dev, RC_DRIVER_IR_RAW);
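
The imon_raw change above decodes the 40 IR bits with two fls64() scans per loop
iteration: a run of clear bits is reported as a pulse, a run of set bits as a
space, and offset shrinks until the word is consumed. The following host-side
sketch (not kernel code) walks the same run-length scan, emulating fls64() with
a compiler builtin and using an arbitrary made-up sample word:

/*
 * Host-side sketch of the run-length scan in imon_ir_data() above.
 * fls64() is emulated with a GCC/Clang builtin and the 40-bit sample
 * word is hard-coded; illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static int fls64_emul(uint64_t x)
{
    return x ? 64 - __builtin_clzll(x) : 0; /* highest set bit + 1, or 0 */
}

int main(void)
{
    /* 40 bits of IR data, MSB first: clear bits = pulse, set bits = space */
    uint64_t data = 0xF00FF000FFULL; /* arbitrary example pattern */
    int offset = 40;
    int bit;

    do {
        /* highest set bit below 'offset': the zeros above it form a pulse */
        bit = fls64_emul(data & ((1ULL << offset) - 1));
        if (bit < offset)
            printf("pulse: %d bits\n", offset - bit);
        offset = bit;

        /* highest clear bit below 'offset': the ones above it form a space */
        bit = fls64_emul(~data & ((1ULL << offset) - 1));
        printf("space: %d bits\n", offset - bit);
        offset = bit;
    } while (offset > 0);

    return 0;
}

With the sample word above this prints alternating space/pulse run lengths that
sum to 40 bits, mirroring what the driver feeds into ir_raw_event_store().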
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 85561f6555a2..32ccefeff57d 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -232,10 +232,8 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "irq can not get\n");
+ if (priv->irq < 0)
return priv->irq;
- }
rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev)
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 5b1399af6b3a..a56fc634d2d6 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-it913x-v1.o \
rc-it913x-v2.o \
rc-kaiomy.o \
+ rc-khadas.o \
rc-kworld-315u.o \
rc-kworld-pc150u.o \
rc-kworld-plus-tv-analog.o \
@@ -75,6 +76,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-nec-terratec-cinergy-xs.o \
rc-norwood.o \
rc-npgtech.o \
+ rc-odroid.o \
rc-pctv-sedna.o \
rc-pinnacle-color.o \
rc-pinnacle-grey.o \
@@ -94,6 +96,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
rc-tango.o \
+ rc-tanix-tx3mini.o \
+ rc-tanix-tx5max.o \
rc-tbs-nec.o \
rc-technisat-ts35.o \
rc-technisat-usb2.o \
@@ -113,8 +117,11 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
+ rc-wetek-hub.o \
+ rc-wetek-play2.o \
rc-winfast.o \
rc-winfast-usbii-deluxe.o \
rc-su3000.o \
rc-xbox-dvd.o \
+ rc-x96max.o \
rc-zx-irdec.o
diff --git a/drivers/media/rc/keymaps/rc-imon-rsc.c b/drivers/media/rc/keymaps/rc-imon-rsc.c
index 6f7ee4859682..38787dd0e4a0 100644
--- a/drivers/media/rc/keymaps/rc-imon-rsc.c
+++ b/drivers/media/rc/keymaps/rc-imon-rsc.c
@@ -7,7 +7,8 @@
//
// Note that this remote has a stick which has its own IR protocol,
-// with 16 directions. This is not supported yet.
+// with 16 directions. This is supported by the imon_rsc BPF decoder
+// in v4l-utils.
//
static struct rc_map_table imon_rsc[] = {
{ 0x801010, KEY_EXIT },
@@ -25,7 +26,7 @@ static struct rc_map_table imon_rsc[] = {
{ 0x80105c, KEY_NUMERIC_9 },
{ 0x801081, KEY_SCREEN }, /* Desktop */
{ 0x80105d, KEY_NUMERIC_0 },
- { 0x801082, KEY_MAX },
+ { 0x801082, KEY_ZOOM }, /* Maximise */
{ 0x801048, KEY_ESC },
{ 0x80104b, KEY_MEDIA }, /* Windows key */
{ 0x801083, KEY_MENU },
@@ -52,7 +53,7 @@ static struct rc_map_table imon_rsc[] = {
{ 0x80104e, KEY_STOP },
{ 0x801052, KEY_REWIND },
{ 0x801053, KEY_FASTFORWARD },
- { 0x801089, KEY_ZOOM } /* full screen */
+ { 0x801089, KEY_FULL_SCREEN } /* full screen */
};
static struct rc_map_list imon_rsc_map = {
diff --git a/drivers/media/rc/keymaps/rc-khadas.c b/drivers/media/rc/keymaps/rc-khadas.c
new file mode 100644
index 000000000000..ce4938444d90
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-khadas.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+/*
+ * Keytable for the Khadas VIM/EDGE SBC remote control
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table khadas[] = {
+ { 0x14, KEY_POWER },
+
+ { 0x03, KEY_UP },
+ { 0x02, KEY_DOWN },
+ { 0x0e, KEY_LEFT },
+ { 0x1a, KEY_RIGHT },
+ { 0x07, KEY_OK },
+
+ { 0x01, KEY_BACK },
+ { 0x5b, KEY_MUTE }, // mouse
+ { 0x13, KEY_MENU },
+
+ { 0x58, KEY_VOLUMEDOWN },
+ { 0x0b, KEY_VOLUMEUP },
+
+ { 0x48, KEY_HOME },
+};
+
+static struct rc_map_list khadas_map = {
+ .map = {
+ .scan = khadas,
+ .size = ARRAY_SIZE(khadas),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_KHADAS,
+ }
+};
+
+static int __init init_rc_map_khadas(void)
+{
+ return rc_map_register(&khadas_map);
+}
+
+static void __exit exit_rc_map_khadas(void)
+{
+ rc_map_unregister(&khadas_map);
+}
+
+module_init(init_rc_map_khadas)
+module_exit(exit_rc_map_khadas)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-odroid.c b/drivers/media/rc/keymaps/rc-odroid.c
new file mode 100644
index 000000000000..c6fbb64b5c41
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-odroid.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the HardKernel ODROID remote control
+//
+
+static struct rc_map_table odroid[] = {
+ { 0xb2dc, KEY_POWER },
+
+ { 0xb288, KEY_MUTE },
+ { 0xb282, KEY_HOME },
+
+ { 0xb2ca, KEY_UP },
+ { 0xb299, KEY_LEFT },
+ { 0xb2ce, KEY_OK },
+ { 0xb2c1, KEY_RIGHT },
+ { 0xb2d2, KEY_DOWN },
+
+ { 0xb2c5, KEY_MENU },
+ { 0xb29a, KEY_BACK },
+
+ { 0xb281, KEY_VOLUMEDOWN },
+ { 0xb280, KEY_VOLUMEUP },
+};
+
+static struct rc_map_list odroid_map = {
+ .map = {
+ .scan = odroid,
+ .size = ARRAY_SIZE(odroid),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_ODROID,
+ }
+};
+
+static int __init init_rc_map_odroid(void)
+{
+ return rc_map_register(&odroid_map);
+}
+
+static void __exit exit_rc_map_odroid(void)
+{
+ rc_map_unregister(&odroid_map);
+}
+
+module_init(init_rc_map_odroid)
+module_exit(exit_rc_map_odroid)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-tanix-tx3mini.c b/drivers/media/rc/keymaps/rc-tanix-tx3mini.c
new file mode 100644
index 000000000000..d486cd69afb2
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-tanix-tx3mini.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2018 Christian Hewitt
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Tanix TX3 mini STB remote control
+ */
+
+static struct rc_map_table tanix_tx3mini[] = {
+ { 0x8051, KEY_POWER },
+ { 0x804d, KEY_MUTE },
+
+ { 0x8009, KEY_RED },
+ { 0x8011, KEY_GREEN },
+ { 0x8054, KEY_YELLOW },
+ { 0x804f, KEY_BLUE },
+
+ { 0x8056, KEY_VOLUMEDOWN },
+ { 0x80bd, KEY_PREVIOUS },
+ { 0x80bb, KEY_NEXT },
+ { 0x804e, KEY_VOLUMEUP },
+
+ { 0x8053, KEY_HOME },
+ { 0x801b, KEY_BACK },
+
+ { 0x8026, KEY_UP },
+ { 0x8028, KEY_DOWN },
+ { 0x8025, KEY_LEFT },
+ { 0x8027, KEY_RIGHT },
+ { 0x800d, KEY_OK },
+
+ { 0x8049, KEY_MENU },
+ { 0x8052, KEY_EPG }, // mouse
+
+ { 0x8031, KEY_1 },
+ { 0x8032, KEY_2 },
+ { 0x8033, KEY_3 },
+
+ { 0x8034, KEY_4 },
+ { 0x8035, KEY_5 },
+ { 0x8036, KEY_6 },
+
+ { 0x8037, KEY_7 },
+ { 0x8038, KEY_8 },
+ { 0x8039, KEY_9 },
+
+ { 0x8058, KEY_SUBTITLE }, // 1/a
+ { 0x8030, KEY_0 },
+ { 0x8044, KEY_DELETE },
+};
+
+static struct rc_map_list tanix_tx3mini_map = {
+ .map = {
+ .scan = tanix_tx3mini,
+ .size = ARRAY_SIZE(tanix_tx3mini),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_TANIX_TX3MINI,
+ }
+};
+
+static int __init init_rc_map_tanix_tx3mini(void)
+{
+ return rc_map_register(&tanix_tx3mini_map);
+}
+
+static void __exit exit_rc_map_tanix_tx3mini(void)
+{
+ rc_map_unregister(&tanix_tx3mini_map);
+}
+
+module_init(init_rc_map_tanix_tx3mini)
+module_exit(exit_rc_map_tanix_tx3mini)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-tanix-tx5max.c b/drivers/media/rc/keymaps/rc-tanix-tx5max.c
new file mode 100644
index 000000000000..59aaabed80dd
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-tanix-tx5max.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2018 Christian Hewitt
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Tanix TX5 max STB remote control
+ */
+
+static struct rc_map_table tanix_tx5max[] = {
+ { 0x40404d, KEY_POWER },
+ { 0x404043, KEY_MUTE },
+
+ { 0x404017, KEY_VOLUMEDOWN },
+ { 0x404018, KEY_VOLUMEUP },
+
+ { 0x40400b, KEY_UP },
+ { 0x404010, KEY_LEFT },
+ { 0x404011, KEY_RIGHT },
+ { 0x40400e, KEY_DOWN },
+ { 0x40400d, KEY_OK },
+
+ { 0x40401a, KEY_HOME },
+ { 0x404045, KEY_MENU },
+ { 0x404042, KEY_BACK },
+
+ { 0x404001, KEY_1 },
+ { 0x404002, KEY_2 },
+ { 0x404003, KEY_3 },
+
+ { 0x404004, KEY_4 },
+ { 0x404005, KEY_5 },
+ { 0x404006, KEY_6 },
+
+ { 0x404007, KEY_7 },
+ { 0x404008, KEY_8 },
+ { 0x404009, KEY_9 },
+
+ { 0x404047, KEY_SUBTITLE }, // mouse
+ { 0x404000, KEY_0 },
+ { 0x40400c, KEY_DELETE },
+};
+
+static struct rc_map_list tanix_tx5max_map = {
+ .map = {
+ .scan = tanix_tx5max,
+ .size = ARRAY_SIZE(tanix_tx5max),
+ .rc_proto = RC_PROTO_NECX,
+ .name = RC_MAP_TANIX_TX5MAX,
+ }
+};
+
+static int __init init_rc_map_tanix_tx5max(void)
+{
+ return rc_map_register(&tanix_tx5max_map);
+}
+
+static void __exit exit_rc_map_tanix_tx5max(void)
+{
+ rc_map_unregister(&tanix_tx5max_map);
+}
+
+module_init(init_rc_map_tanix_tx5max)
+module_exit(exit_rc_map_tanix_tx5max)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-wetek-hub.c b/drivers/media/rc/keymaps/rc-wetek-hub.c
new file mode 100644
index 000000000000..b5a21aff45f5
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-wetek-hub.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2018 Christian Hewitt
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * This keymap is used with the WeTek Hub STB.
+ */
+
+static struct rc_map_table wetek_hub[] = {
+ { 0x77f1, KEY_POWER },
+
+ { 0x77f2, KEY_HOME },
+ { 0x77f3, KEY_MUTE }, // mouse
+
+ { 0x77f4, KEY_UP },
+ { 0x77f5, KEY_DOWN },
+ { 0x77f6, KEY_LEFT },
+ { 0x77f7, KEY_RIGHT },
+ { 0x77f8, KEY_OK },
+
+ { 0x77f9, KEY_BACK },
+ { 0x77fa, KEY_MENU },
+
+ { 0x77fb, KEY_VOLUMEUP },
+ { 0x77fc, KEY_VOLUMEDOWN },
+};
+
+static struct rc_map_list wetek_hub_map = {
+ .map = {
+ .scan = wetek_hub,
+ .size = ARRAY_SIZE(wetek_hub),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_WETEK_HUB,
+ }
+};
+
+static int __init init_rc_map_wetek_hub(void)
+{
+ return rc_map_register(&wetek_hub_map);
+}
+
+static void __exit exit_rc_map_wetek_hub(void)
+{
+ rc_map_unregister(&wetek_hub_map);
+}
+
+module_init(init_rc_map_wetek_hub)
+module_exit(exit_rc_map_wetek_hub)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-wetek-play2.c b/drivers/media/rc/keymaps/rc-wetek-play2.c
new file mode 100644
index 000000000000..bbbb11fa3c11
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-wetek-play2.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the WeTek Play 2 STB remote control
+//
+
+static struct rc_map_table wetek_play2[] = {
+ { 0x5e5f02, KEY_POWER },
+ { 0x5e5f46, KEY_SLEEP }, // tv
+ { 0x5e5f10, KEY_MUTE },
+
+ { 0x5e5f22, KEY_1 },
+ { 0x5e5f23, KEY_2 },
+ { 0x5e5f24, KEY_3 },
+
+ { 0x5e5f25, KEY_4 },
+ { 0x5e5f26, KEY_5 },
+ { 0x5e5f27, KEY_6 },
+
+ { 0x5e5f28, KEY_7 },
+ { 0x5e5f29, KEY_8 },
+ { 0x5e5f30, KEY_9 },
+
+ { 0x5e5f71, KEY_BACK },
+ { 0x5e5f21, KEY_0 },
+ { 0x5e5f72, KEY_CAPSLOCK },
+
+	// outer ring clockwise from top
+ { 0x5e5f03, KEY_HOME },
+ { 0x5e5f61, KEY_BACK },
+ { 0x5e5f77, KEY_CONFIG }, // mouse
+ { 0x5e5f83, KEY_EPG },
+ { 0x5e5f84, KEY_SCREEN }, // square
+ { 0x5e5f48, KEY_MENU },
+
+ // inner ring
+ { 0x5e5f50, KEY_UP },
+ { 0x5e5f4b, KEY_DOWN },
+ { 0x5e5f4c, KEY_LEFT },
+ { 0x5e5f4d, KEY_RIGHT },
+ { 0x5e5f47, KEY_OK },
+
+ { 0x5e5f44, KEY_VOLUMEUP },
+ { 0x5e5f43, KEY_VOLUMEDOWN },
+ { 0x5e5f4f, KEY_FAVORITES },
+ { 0x5e5f82, KEY_SUBTITLE }, // txt
+ { 0x5e5f41, KEY_PAGEUP },
+ { 0x5e5f42, KEY_PAGEDOWN },
+
+ { 0x5e5f73, KEY_RED },
+ { 0x5e5f74, KEY_GREEN },
+ { 0x5e5f75, KEY_YELLOW },
+ { 0x5e5f76, KEY_BLUE },
+
+ { 0x5e5f67, KEY_PREVIOUSSONG },
+ { 0x5e5f79, KEY_REWIND },
+ { 0x5e5f80, KEY_FASTFORWARD },
+ { 0x5e5f81, KEY_NEXTSONG },
+
+ { 0x5e5f04, KEY_RECORD },
+ { 0x5e5f2c, KEY_PLAYPAUSE },
+ { 0x5e5f2b, KEY_STOP },
+};
+
+static struct rc_map_list wetek_play2_map = {
+ .map = {
+ .scan = wetek_play2,
+ .size = ARRAY_SIZE(wetek_play2),
+ .rc_proto = RC_PROTO_NECX,
+ .name = RC_MAP_WETEK_PLAY2,
+ }
+};
+
+static int __init init_rc_map_wetek_play2(void)
+{
+ return rc_map_register(&wetek_play2_map);
+}
+
+static void __exit exit_rc_map_wetek_play2(void)
+{
+ rc_map_unregister(&wetek_play2_map);
+}
+
+module_init(init_rc_map_wetek_play2)
+module_exit(exit_rc_map_wetek_play2)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-x96max.c b/drivers/media/rc/keymaps/rc-x96max.c
new file mode 100644
index 000000000000..0998ec3320e4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-x96max.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the X96-max STB remote control
+//
+
+static struct rc_map_table x96max[] = {
+ { 0x140, KEY_POWER },
+
+ // ** TV CONTROL **
+ // SET
+ // AV/TV
+ // POWER
+ // VOLUME UP
+ // VOLUME DOWN
+
+ { 0x118, KEY_VOLUMEUP },
+ { 0x110, KEY_VOLUMEDOWN },
+
+ { 0x143, KEY_MUTE }, // config
+
+ { 0x100, KEY_EPG }, // mouse
+ { 0x119, KEY_BACK },
+
+ { 0x116, KEY_UP },
+ { 0x151, KEY_LEFT },
+ { 0x150, KEY_RIGHT },
+ { 0x11a, KEY_DOWN },
+ { 0x113, KEY_OK },
+
+ { 0x111, KEY_HOME },
+ { 0x14c, KEY_CONTEXT_MENU },
+
+ { 0x159, KEY_PREVIOUS },
+ { 0x15a, KEY_PLAYPAUSE },
+ { 0x158, KEY_NEXT },
+
+ { 0x147, KEY_MENU }, // @ key
+ { 0x101, KEY_NUMERIC_0 },
+ { 0x142, KEY_BACKSPACE },
+
+ { 0x14e, KEY_NUMERIC_1 },
+ { 0x10d, KEY_NUMERIC_2 },
+ { 0x10c, KEY_NUMERIC_3 },
+
+ { 0x14a, KEY_NUMERIC_4 },
+ { 0x109, KEY_NUMERIC_5 },
+ { 0x108, KEY_NUMERIC_6 },
+
+ { 0x146, KEY_NUMERIC_7 },
+ { 0x105, KEY_NUMERIC_8 },
+ { 0x104, KEY_NUMERIC_9 },
+};
+
+static struct rc_map_list x96max_map = {
+ .map = {
+ .scan = x96max,
+ .size = ARRAY_SIZE(x96max),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_X96MAX,
+ }
+};
+
+static int __init init_rc_map_x96max(void)
+{
+ return rc_map_register(&x96max_map);
+}
+
+static void __exit exit_rc_map_x96max(void)
+{
+ rc_map_unregister(&x96max_map);
+}
+
+module_init(init_rc_map_x96max)
+module_exit(exit_rc_map_x96max)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4d5351ebb940..3fc9829a9233 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -31,21 +31,22 @@
#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
-#define DRIVER_VERSION "1.94"
+#define DRIVER_VERSION "1.95"
#define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
#define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
"device driver"
#define DRIVER_NAME "mceusb"
+#define USB_TX_TIMEOUT 1000 /* in milliseconds */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
/* MCE constants */
-#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
+#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
#define MCE_TIME_UNIT 50 /* Approx 50us resolution */
-#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
-#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
-#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
+#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
+#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
+ /* Actual format is 0x80 + num_bytes */
#define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
#define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
#define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
@@ -461,6 +462,7 @@ struct mceusb_dev {
/* usb */
struct usb_device *usbdev;
+ struct usb_interface *usbintf;
struct urb *urb_in;
unsigned int pipe_in;
struct usb_endpoint_descriptor *usb_ep_out;
@@ -517,6 +519,7 @@ struct mceusb_dev {
unsigned long kevent_flags;
# define EVENT_TX_HALT 0
# define EVENT_RX_HALT 1
+# define EVENT_RST_PEND 31
};
/* MCE Device Command Strings, generally a port and command pair */
@@ -607,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
if (len <= skip)
return;
- dev_dbg(dev, "%cx data: %*ph (length=%d)",
- (out ? 't' : 'r'),
- min(len, buf_len - offset), buf + offset, len);
+ dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
+ (out ? 't' : 'r'), offset,
+ min(len, buf_len - offset), buf + offset, len, buf_len);
inout = out ? "Request" : "Got";
@@ -731,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
case MCE_RSP_CMD_ILLEGAL:
dev_dbg(dev, "Illegal PORT_IR command");
break;
+ case MCE_RSP_TX_TIMEOUT:
+ dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
+ break;
default:
dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
cmd, subcmd);
@@ -745,42 +751,107 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "End of raw IR data");
else if ((cmd != MCE_CMD_PORT_IR) &&
((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+ cmd & MCE_PACKET_LENGTH_MASK);
#endif
}
/*
* Schedule work that can't be done in interrupt handlers
- * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
+ * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
* Invokes mceusb_deferred_kevent() for recovering from
* error events specified by the kevent bit field.
*/
static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
{
set_bit(kevent, &ir->kevent_flags);
+
+ if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) {
+ dev_dbg(ir->dev, "kevent %d dropped pending USB Reset Device",
+ kevent);
+ return;
+ }
+
if (!schedule_work(&ir->kevent))
- dev_err(ir->dev, "kevent %d may have been dropped", kevent);
+ dev_dbg(ir->dev, "kevent %d already scheduled", kevent);
else
dev_dbg(ir->dev, "kevent %d scheduled", kevent);
}
-static void mce_async_callback(struct urb *urb)
+static void mce_write_callback(struct urb *urb)
{
- struct mceusb_dev *ir;
- int len;
-
if (!urb)
return;
- ir = urb->context;
+ complete(urb->context);
+}
+
+/*
+ * Write (TX/send) data to MCE device USB endpoint out.
+ * Used for IR blaster TX and MCE device commands.
+ *
+ * Return: The number of bytes written (> 0) or errno (< 0).
+ */
+static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
+{
+ int ret;
+ struct urb *urb;
+ struct device *dev = ir->dev;
+ unsigned char *buf_out;
+ struct completion tx_done;
+ unsigned long expire;
+ unsigned long ret_wait;
+
+ mceusb_dev_printdata(ir, data, size, 0, size, true);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (unlikely(!urb)) {
+ dev_err(dev, "Error: mce write couldn't allocate urb");
+ return -ENOMEM;
+ }
+
+ buf_out = kmalloc(size, GFP_KERNEL);
+ if (!buf_out) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ init_completion(&tx_done);
+
+ /* outbound data */
+ if (usb_endpoint_xfer_int(ir->usb_ep_out))
+ usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
+ buf_out, size, mce_write_callback, &tx_done,
+ ir->usb_ep_out->bInterval);
+ else
+ usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
+ buf_out, size, mce_write_callback, &tx_done);
+ memcpy(buf_out, data, size);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "Error: mce write submit urb error = %d", ret);
+ kfree(buf_out);
+ usb_free_urb(urb);
+ return ret;
+ }
+
+ expire = msecs_to_jiffies(USB_TX_TIMEOUT);
+ ret_wait = wait_for_completion_timeout(&tx_done, expire);
+ if (!ret_wait) {
+ dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
+ expire, USB_TX_TIMEOUT);
+ usb_kill_urb(urb);
+ ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
+ } else {
+ ret = urb->status;
+ }
+ if (ret >= 0)
+ ret = urb->actual_length; /* bytes written */
switch (urb->status) {
/* success */
case 0:
- len = urb->actual_length;
-
- mceusb_dev_printdata(ir, urb->transfer_buffer, len,
- 0, len, true);
break;
case -ECONNRESET:
@@ -790,140 +861,135 @@ static void mce_async_callback(struct urb *urb)
break;
case -EPIPE:
- dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
+ dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
urb->status);
mceusb_defer_kevent(ir, EVENT_TX_HALT);
break;
default:
- dev_err(ir->dev, "Error: request urb status = %d", urb->status);
+ dev_err(ir->dev, "Error: mce write urb status = %d",
+ urb->status);
break;
}
- /* the transfer buffer and urb were allocated in mce_request_packet */
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
-}
-
-/* request outgoing (send) usb packet - used to initialize remote */
-static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
- int size)
-{
- int res;
- struct urb *async_urb;
- struct device *dev = ir->dev;
- unsigned char *async_buf;
-
- async_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (unlikely(!async_urb)) {
- dev_err(dev, "Error, couldn't allocate urb!");
- return;
- }
-
- async_buf = kmalloc(size, GFP_KERNEL);
- if (!async_buf) {
- usb_free_urb(async_urb);
- return;
- }
+ dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
+ ret, ret_wait, expire, USB_TX_TIMEOUT,
+ urb->actual_length, urb->status);
- /* outbound data */
- if (usb_endpoint_xfer_int(ir->usb_ep_out))
- usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
- async_buf, size, mce_async_callback, ir,
- ir->usb_ep_out->bInterval);
- else
- usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
- async_buf, size, mce_async_callback, ir);
-
- memcpy(async_buf, data, size);
-
- dev_dbg(dev, "send request called (size=%#x)", size);
+ kfree(buf_out);
+ usb_free_urb(urb);
- res = usb_submit_urb(async_urb, GFP_ATOMIC);
- if (res) {
- dev_err(dev, "send request FAILED! (res=%d)", res);
- kfree(async_buf);
- usb_free_urb(async_urb);
- return;
- }
- dev_dbg(dev, "send request complete (res=%d)", res);
+ return ret;
}
-static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
{
int rsize = sizeof(DEVICE_RESUME);
if (ir->need_reset) {
ir->need_reset = false;
- mce_request_packet(ir, DEVICE_RESUME, rsize);
+ mce_write(ir, DEVICE_RESUME, rsize);
msleep(10);
}
- mce_request_packet(ir, data, size);
+ mce_write(ir, data, size);
msleep(10);
}
-/* Send data out the IR blaster port(s) */
+/*
+ * Transmit IR out the MCE device IR blaster port(s).
+ *
+ * Convert IR pulse/space sequence from LIRC to MCE format.
+ * Break up a long IR sequence into multiple parts (MCE IR data packets).
+ *
+ * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
+ * Pulses and spaces are implicit by their position.
+ * The first IR sample, txbuf[0], is always a pulse.
+ *
+ * u8 irbuf[] consists of multiple IR data packets for the MCE device.
+ * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
+ * An IR sample is 1-bit pulse/space flag with 7-bit time
+ * in MCE time units (50usec).
+ *
+ * Return: The number of IR samples sent (> 0) or errno (< 0).
+ */
static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
- int i, length, ret = 0;
- int cmdcount = 0;
- unsigned char cmdbuf[MCE_CMDBUF_SIZE];
-
- /* MCE tx init header */
- cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
- cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
- cmdbuf[cmdcount++] = ir->tx_mask;
+ u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
+ u8 irbuf[MCE_IRBUF_SIZE];
+ int ircount = 0;
+ unsigned int irsample;
+ int i, length, ret;
/* Send the set TX ports command */
- mce_async_out(ir, cmdbuf, cmdcount);
- cmdcount = 0;
-
- /* Generate mce packet data */
- for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
- txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
-
- do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
-
- /* Insert mce packet header every 4th entry */
- if ((cmdcount < MCE_CMDBUF_SIZE) &&
- (cmdcount % MCE_CODE_LENGTH) == 0)
- cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
-
- /* Insert mce packet data */
- if (cmdcount < MCE_CMDBUF_SIZE)
- cmdbuf[cmdcount++] =
- (txbuf[i] < MCE_PULSE_BIT ?
- txbuf[i] : MCE_MAX_PULSE_LENGTH) |
- (i & 1 ? 0x00 : MCE_PULSE_BIT);
- else {
- ret = -EINVAL;
- goto out;
+ cmdbuf[2] = ir->tx_mask;
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+
+ /* Generate mce IR data packet */
+ for (i = 0; i < count; i++) {
+ irsample = txbuf[i] / MCE_TIME_UNIT;
+
+ /* loop to support long pulses/spaces > 6350us (127*50us) */
+ while (irsample > 0) {
+ /* Insert IR header every 30th entry */
+ if (ircount % MCE_PACKET_SIZE == 0) {
+ /* Room for IR header and one IR sample? */
+ if (ircount >= MCE_IRBUF_SIZE - 1) {
+ /* Send near full buffer */
+ ret = mce_write(ir, irbuf, ircount);
+ if (ret < 0)
+ return ret;
+ ircount = 0;
+ }
+ irbuf[ircount++] = MCE_IRDATA_HEADER;
}
- } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
- (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
- }
-
- /* Check if we have room for the empty packet at the end */
- if (cmdcount >= MCE_CMDBUF_SIZE) {
- ret = -EINVAL;
- goto out;
- }
+ /* Insert IR sample */
+ if (irsample <= MCE_MAX_PULSE_LENGTH) {
+ irbuf[ircount] = irsample;
+ irsample = 0;
+ } else {
+ irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
+ irsample -= MCE_MAX_PULSE_LENGTH;
+ }
+ /*
+ * Even i = IR pulse
+ * Odd i = IR space
+ */
+ irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
+ ircount++;
+
+ /* IR buffer full? */
+ if (ircount >= MCE_IRBUF_SIZE) {
+ /* Fix packet length in last header */
+ length = ircount % MCE_PACKET_SIZE;
+ if (length > 0)
+ irbuf[ircount - length] -=
+ MCE_PACKET_SIZE - length;
+ /* Send full buffer */
+ ret = mce_write(ir, irbuf, ircount);
+ if (ret < 0)
+ return ret;
+ ircount = 0;
+ }
+ }
+ } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
/* Fix packet length in last header */
- length = cmdcount % MCE_CODE_LENGTH;
- cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
+ length = ircount % MCE_PACKET_SIZE;
+ if (length > 0)
+ irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
- /* All mce commands end with an empty packet (0x80) */
- cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
+ /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
+ irbuf[ircount++] = MCE_IRDATA_TRAILER;
- /* Transmit the command to the mce device */
- mce_async_out(ir, cmdbuf, cmdcount);
+ /* Send final buffer */
+ ret = mce_write(ir, irbuf, ircount);
+ if (ret < 0)
+ return ret;
-out:
- return ret ? ret : count;
+ return count;
}
/* Sets active IR outputs -- mce devices typically have two */
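
The rewritten mceusb_tx_ir() above converts LIRC pulse/space durations (in
microseconds) into MCE IR packets: each sample carries the duration in 50 us
units in its low 7 bits with bit 7 set for pulses, every packet of up to 30
samples is preceded by a 0x80 + length header, and a lone 0x80 byte terminates
the sequence. The host-side sketch below runs one short example pulse/space
sequence through the same packetisation; mce_emit() is a hypothetical stand-in
for mce_write(), and the driver's mid-sequence flushes when its 128-byte
buffer fills are omitted for brevity:

/*
 * Host-side sketch of the LIRC -> MCE packetisation done by mceusb_tx_ir().
 * Constants mirror the driver's values; mce_emit() just hexdumps what
 * mce_write() would send. Illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define MCE_TIME_UNIT         50    /* 50 us per sample unit */
#define MCE_MAX_PULSE_LENGTH  0x7f  /* 7-bit duration field */
#define MCE_PULSE_BIT         0x80  /* bit 7: pulse (set) or space (clear) */
#define MCE_PACKET_SIZE       31    /* header plus up to 30 samples */
#define MCE_IRDATA_HEADER     (0x80 + MCE_PACKET_SIZE - 1)
#define MCE_IRDATA_TRAILER    0x80

static void mce_emit(const uint8_t *buf, int len) /* stand-in for mce_write() */
{
    for (int i = 0; i < len; i++)
        printf("%02x ", buf[i]);
    printf("\n");
}

int main(void)
{
    /* pulse, space, pulse, ... durations in microseconds (LIRC style) */
    unsigned int txbuf[] = { 9000, 4500, 560, 560, 560, 1690, 560 };
    uint8_t irbuf[64];
    int ircount = 0;

    for (unsigned int i = 0; i < sizeof(txbuf) / sizeof(txbuf[0]); i++) {
        unsigned int irsample = txbuf[i] / MCE_TIME_UNIT;

        while (irsample > 0) { /* split samples longer than 127 units */
            if (ircount % MCE_PACKET_SIZE == 0)
                irbuf[ircount++] = MCE_IRDATA_HEADER;

            if (irsample <= MCE_MAX_PULSE_LENGTH) {
                irbuf[ircount] = irsample;
                irsample = 0;
            } else {
                irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
                irsample -= MCE_MAX_PULSE_LENGTH;
            }
            /* even i = pulse, odd i = space */
            irbuf[ircount++] |= (i & 1) ? 0 : MCE_PULSE_BIT;
        }
    }

    /* fix up the length encoded in the (possibly partial) last header */
    int tail = ircount % MCE_PACKET_SIZE;
    if (tail > 0)
        irbuf[ircount - tail] -= MCE_PACKET_SIZE - tail;

    irbuf[ircount++] = MCE_IRDATA_TRAILER;
    mce_emit(irbuf, ircount);
    return 0;
}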
@@ -963,7 +1029,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "disabling carrier modulation");
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
return 0;
}
@@ -977,7 +1043,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
carrier);
/* Transmit new carrier to mce device */
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
return 0;
}
}
@@ -1000,10 +1066,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
cmdbuf[2] = units >> 8;
cmdbuf[3] = units;
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
/* get receiver timeout value */
- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
return 0;
}
@@ -1028,7 +1094,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
ir->wideband_rx_enabled = false;
cmdbuf[2] = 1; /* port 1 is long range receiver */
}
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
/* response from device sets ir->learning_active */
return 0;
@@ -1051,7 +1117,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
ir->carrier_report_enabled = true;
if (!ir->learning_active) {
cmdbuf[2] = 2; /* port 2 is short range receiver */
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
} else {
ir->carrier_report_enabled = false;
@@ -1062,7 +1128,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
*/
if (ir->learning_active && !ir->wideband_rx_enabled) {
cmdbuf[2] = 1; /* port 1 is long range receiver */
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
}
@@ -1141,6 +1207,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
}
break;
case MCE_RSP_CMD_ILLEGAL:
+ case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
default:
@@ -1279,7 +1346,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
{
/* If we get no reply or an illegal command reply, it's ver 1, says MS */
ir->emver = 1;
- mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
+ mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
}
static void mceusb_gen1_init(struct mceusb_dev *ir)
@@ -1325,10 +1392,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get hw/sw revision? */
- mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
+ mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
kfree(data);
}
@@ -1336,13 +1403,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
/* device resume */
- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
/* get wake version (protocol, key, address) */
- mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+ mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
/* unknown what this one actually returns... */
- mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
+ mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
@@ -1356,24 +1423,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
ir->num_rxports = 2;
/* get number of tx and rx ports */
- mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+ mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
/* get the carrier and frequency */
- mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
+ mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
if (ir->num_txports && !ir->flags.no_tx)
/* get the transmitter bitmask */
- mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
+ mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
/* get receiver timeout value */
- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
/* get receiver sensor setting */
- mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+ mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
for (i = 0; i < ir->num_txports; i++) {
cmdbuf[2] = i;
- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
+ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
}
}
@@ -1382,7 +1449,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
if (ir->emver < 2)
return;
- mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
+ mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
}
/*
@@ -1398,28 +1465,59 @@ static void mceusb_deferred_kevent(struct work_struct *work)
container_of(work, struct mceusb_dev, kevent);
int status;
+ dev_err(ir->dev, "kevent handler called (flags 0x%lx)",
+ ir->kevent_flags);
+
+ if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) {
+ dev_err(ir->dev, "kevent handler canceled pending USB Reset Device");
+ return;
+ }
+
if (test_bit(EVENT_RX_HALT, &ir->kevent_flags)) {
usb_unlink_urb(ir->urb_in);
status = usb_clear_halt(ir->usbdev, ir->pipe_in);
+ dev_err(ir->dev, "rx clear halt status = %d", status);
if (status < 0) {
- dev_err(ir->dev, "rx clear halt error %d",
- status);
+ /*
+ * Unable to clear RX halt/stall.
+ * Will need to call usb_reset_device().
+ */
+ dev_err(ir->dev,
+ "stuck RX HALT state requires USB Reset Device to clear");
+ usb_queue_reset_device(ir->usbintf);
+ set_bit(EVENT_RST_PEND, &ir->kevent_flags);
+ clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
+
+ /* Cancel all other error events and handlers */
+ clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
+ return;
}
clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
- if (status == 0) {
- status = usb_submit_urb(ir->urb_in, GFP_KERNEL);
- if (status < 0) {
- dev_err(ir->dev,
- "rx unhalt submit urb error %d",
- status);
- }
+ status = usb_submit_urb(ir->urb_in, GFP_KERNEL);
+ if (status < 0) {
+ dev_err(ir->dev, "rx unhalt submit urb error = %d",
+ status);
}
}
if (test_bit(EVENT_TX_HALT, &ir->kevent_flags)) {
status = usb_clear_halt(ir->usbdev, ir->pipe_out);
- if (status < 0)
- dev_err(ir->dev, "tx clear halt error %d", status);
+ dev_err(ir->dev, "tx clear halt status = %d", status);
+ if (status < 0) {
+ /*
+ * Unable to clear TX halt/stall.
+ * Will need to call usb_reset_device().
+ */
+ dev_err(ir->dev,
+ "stuck TX HALT state requires USB Reset Device to clear");
+ usb_queue_reset_device(ir->usbintf);
+ set_bit(EVENT_RST_PEND, &ir->kevent_flags);
+ clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
+
+ /* Cancel all other error events and handlers */
+ clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
+ return;
+ }
clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
}
}
@@ -1581,6 +1679,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
if (!ir->urb_in)
goto urb_in_alloc_fail;
+ ir->usbintf = intf;
ir->usbdev = usb_get_dev(dev);
ir->dev = &intf->dev;
ir->len_in = maxp;
@@ -1688,6 +1787,8 @@ static void mceusb_dev_disconnect(struct usb_interface *intf)
struct usb_device *dev = interface_to_usbdev(intf);
struct mceusb_dev *ir = usb_get_intfdata(intf);
+ dev_dbg(&intf->dev, "%s called", __func__);
+
usb_set_intfdata(intf, NULL);
if (!ir)
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 72a7bbbf6b1f..51c6dd3406a0 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -117,10 +117,8 @@ static int meson_ir_probe(struct platform_device *pdev)
return PTR_ERR(ir->reg);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!ir->rc) {
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 50fb0aebb8d4..a0c94ab322c7 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -35,6 +35,11 @@
/* Fields containing pulse width data */
#define MTK_WIDTH_MASK (GENMASK(7, 0))
+/* IR threshold */
+#define MTK_IRTHD 0x14
+#define MTK_DG_CNT_MASK (GENMASK(12, 8))
+#define MTK_DG_CNT(x) ((x) << 8)
+
/* Bit to enable interrupt */
#define MTK_IRINT_EN BIT(0)
@@ -340,7 +345,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
ir->rc->map_name = map_name ?: RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
ir->rc->driver_name = MTK_IR_DEV;
- ir->rc->allowed_protocols = RC_PROTO_BIT_ALL;
+ ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = MTK_IR_SAMPLE;
ir->rc->timeout = MTK_MAX_SAMPLES * (MTK_IR_SAMPLE + 1);
@@ -353,10 +358,8 @@ static int mtk_ir_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ir);
ir->irq = platform_get_irq(pdev, 0);
- if (ir->irq < 0) {
- dev_err(dev, "no irq resource\n");
+ if (ir->irq < 0)
return -ENODEV;
- }
if (clk_prepare_enable(ir->clk)) {
dev_err(dev, "try to enable ir_clk failed\n");
@@ -398,6 +401,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
ir->data->fields[MTK_HW_PERIOD].reg);
+ /* Set de-glitch counter */
+ mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
+
/* Enable IR and PWM */
val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index aa719d0ae6b0..e222b4c98be4 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -39,11 +39,11 @@
/* Rx Interrupt Enable */
#define SUNXI_IR_RXINT_REG 0x2C
-/* Rx FIFO Overflow */
+/* Rx FIFO Overflow Interrupt Enable */
#define REG_RXINT_ROI_EN BIT(0)
-/* Rx Packet End */
+/* Rx Packet End Interrupt Enable */
#define REG_RXINT_RPEI_EN BIT(1)
-/* Rx FIFO Data Available */
+/* Rx FIFO Data Available Interrupt Enable */
#define REG_RXINT_RAI_EN BIT(4)
/* Rx FIFO available byte level */
@@ -51,6 +51,12 @@
/* Rx Interrupt Status */
#define SUNXI_IR_RXSTA_REG 0x30
+/* Rx FIFO Overflow */
+#define REG_RXSTA_ROI REG_RXINT_ROI_EN
+/* Rx Packet End */
+#define REG_RXSTA_RPE REG_RXINT_RPEI_EN
+/* Rx FIFO Data Available */
+#define REG_RXSTA_RA REG_RXINT_RAI_EN
/* RX FIFO Get Available Counter */
#define REG_RXSTA_GET_AC(val) (((val) >> 8) & (ir->fifo_size * 2 - 1))
/* Clear all interrupt status value */
@@ -72,6 +78,17 @@
/* Time after which device stops sending data in ms */
#define SUNXI_IR_TIMEOUT 120
+/**
+ * struct sunxi_ir_quirks - Differences between SoC variants.
+ *
+ * @has_reset: SoC needs reset deasserted.
+ * @fifo_size: size of the fifo.
+ */
+struct sunxi_ir_quirks {
+ bool has_reset;
+ int fifo_size;
+};
+
struct sunxi_ir {
spinlock_t ir_lock;
struct rc_dev *rc;
@@ -99,7 +116,7 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
/* clean all pending statuses */
writel(status | REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
- if (status & (REG_RXINT_RAI_EN | REG_RXINT_RPEI_EN)) {
+ if (status & (REG_RXSTA_RA | REG_RXSTA_RPE)) {
/* How many messages in fifo */
rc = REG_RXSTA_GET_AC(status);
/* Sanity check */
@@ -115,9 +132,9 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
}
}
- if (status & REG_RXINT_ROI_EN) {
+ if (status & REG_RXSTA_ROI) {
ir_raw_event_reset(ir->rc);
- } else if (status & REG_RXINT_RPEI_EN) {
+ } else if (status & REG_RXSTA_RPE) {
ir_raw_event_set_idle(ir->rc, true);
ir_raw_event_handle(ir->rc);
}
@@ -134,6 +151,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
+ const struct sunxi_ir_quirks *quirks;
struct resource *res;
struct sunxi_ir *ir;
u32 b_clk_freq = SUNXI_IR_BASE_CLK;
@@ -142,12 +160,15 @@ static int sunxi_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (!quirks) {
+ dev_err(&pdev->dev, "Failed to determine the quirks to use\n");
+ return -ENODEV;
+ }
+
spin_lock_init(&ir->ir_lock);
- if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
- ir->fifo_size = 64;
- else
- ir->fifo_size = 16;
+ ir->fifo_size = quirks->fifo_size;
/* Clock */
ir->apb_clk = devm_clk_get(dev, "apb");
@@ -164,13 +185,15 @@ static int sunxi_ir_probe(struct platform_device *pdev)
/* Base clock frequency (optional) */
of_property_read_u32(dn, "clock-frequency", &b_clk_freq);
- /* Reset (optional) */
- ir->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
- if (IS_ERR(ir->rst))
- return PTR_ERR(ir->rst);
- ret = reset_control_deassert(ir->rst);
- if (ret)
- return ret;
+ /* Reset */
+ if (quirks->has_reset) {
+ ir->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(ir->rst))
+ return PTR_ERR(ir->rst);
+ ret = reset_control_deassert(ir->rst);
+ if (ret)
+ return ret;
+ }
ret = clk_set_rate(ir->clk, b_clk_freq);
if (ret) {
@@ -233,7 +256,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
/* IRQ */
ir->irq = platform_get_irq(pdev, 0);
if (ir->irq < 0) {
- dev_err(dev, "no irq resource\n");
ret = ir->irq;
goto exit_free_dev;
}
@@ -306,10 +328,35 @@ static int sunxi_ir_remove(struct platform_device *pdev)
return 0;
}
+static const struct sunxi_ir_quirks sun4i_a10_ir_quirks = {
+ .has_reset = false,
+ .fifo_size = 16,
+};
+
+static const struct sunxi_ir_quirks sun5i_a13_ir_quirks = {
+ .has_reset = false,
+ .fifo_size = 64,
+};
+
+static const struct sunxi_ir_quirks sun6i_a31_ir_quirks = {
+ .has_reset = true,
+ .fifo_size = 64,
+};
+
static const struct of_device_id sunxi_ir_match[] = {
- { .compatible = "allwinner,sun4i-a10-ir", },
- { .compatible = "allwinner,sun5i-a13-ir", },
- {},
+ {
+ .compatible = "allwinner,sun4i-a10-ir",
+ .data = &sun4i_a10_ir_quirks,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-ir",
+ .data = &sun5i_a13_ir_quirks,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-ir",
+ .data = &sun6i_a31_ir_quirks,
+ },
+ {}
};
MODULE_DEVICE_TABLE(of, sunxi_ir_match);
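
The sunxi-cir rework above moves the per-SoC differences (FIFO size, whether a
reset line must be deasserted) into a quirks structure hung off the OF match
table's .data pointer and fetched with of_device_get_match_data(). A condensed
sketch of that pattern follows; the foo_ir names are hypothetical and this is
not a buildable driver, but of_device_get_match_data() is the real helper from
<linux/of_device.h>:

/* Condensed sketch of the quirks-via-match-data pattern used above. */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_ir_quirks {
    bool has_reset;
    int fifo_size;
};

static const struct foo_ir_quirks foo_v1_quirks = { .has_reset = false, .fifo_size = 16 };
static const struct foo_ir_quirks foo_v2_quirks = { .has_reset = true,  .fifo_size = 64 };

static int foo_ir_probe(struct platform_device *pdev)
{
    const struct foo_ir_quirks *quirks;

    quirks = of_device_get_match_data(&pdev->dev);
    if (!quirks)
        return -ENODEV; /* unknown compatible: refuse rather than guess */

    /* ... size the FIFO and handle the optional reset from *quirks ... */
    return 0;
}

static const struct of_device_id foo_ir_match[] = {
    { .compatible = "vendor,foo-v1-ir", .data = &foo_v1_quirks },
    { .compatible = "vendor,foo-v2-ir", .data = &foo_v2_quirks },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_ir_match);

Compared with checking compatible strings inside probe, adding a new SoC
variant then only needs a new quirks struct and a new match-table entry.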
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
index 08386abb9bbc..bcc49cb47de6 100644
--- a/drivers/media/spi/Kconfig
+++ b/drivers/media/spi/Kconfig
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
if VIDEO_V4L2
+comment "SPI drivers hidden by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+
menu "SPI helper chips"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
config VIDEO_GS1662
tristate "Gennum Serializers video"
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index a7108e575e9b..e104bb7766e1 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -15,8 +15,12 @@ config MEDIA_TUNER
select MEDIA_TUNER_TDA9887 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MC44S803 if MEDIA_SUBDRV_AUTOSELECT
+comment "Tuner drivers hidden by 'Autoselect ancillary drivers'"
+ depends on MEDIA_HIDE_ANCILLARY_SUBDRV
+ depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
+
menu "Customize TV tuners"
- visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST || EXPERT
+ visible if !MEDIA_HIDE_ANCILLARY_SUBDRV
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT || MEDIA_SDR_SUPPORT
config MEDIA_TUNER_SIMPLE
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index aa6861dcd3fd..574c3bb135d7 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -381,7 +381,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
goto corrupt;
}
- priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
+ priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL);
if (priv->firm[n].ptr == NULL) {
tuner_err("Not enough memory to load firmware file.\n");
rc = -ENOMEM;
@@ -394,7 +394,6 @@ static int load_all_firmwares(struct dvb_frontend *fe,
type, (unsigned long long)id, size);
}
- memcpy(priv->firm[n].ptr, p, size);
priv->firm[n].type = type;
priv->firm[n].id = id;
priv->firm[n].size = size;
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 43925e219d81..d9606738ce43 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -812,7 +812,7 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
goto corrupt;
}
- priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
+ priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL);
if (priv->firm[n].ptr == NULL) {
printk(KERN_ERR "Not enough memory to load firmware file.\n");
rc = -ENOMEM;
@@ -826,7 +826,6 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
type, (unsigned long long)id, size);
}
- memcpy(priv->firm[n].ptr, p, size);
priv->firm[n].type = type;
priv->firm[n].id = id;
priv->firm[n].size = size;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index b35231ffe503..751703db06f5 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -71,7 +71,6 @@ static const struct v4l2_frequency_band bands_rf[] = {
/* stream formats */
struct airspy_format {
- char *name;
u32 pixelformat;
u32 buffersize;
};
@@ -79,7 +78,6 @@ struct airspy_format {
/* format descriptions for capture and preview */
static struct airspy_format formats[] = {
{
- .name = "Real U12LE",
.pixelformat = V4L2_SDR_FMT_RU12LE,
.buffersize = BULK_BUFFER_SIZE,
},
@@ -622,7 +620,6 @@ static int airspy_enum_fmt_sdr_cap(struct file *file, void *priv,
if (f->index >= NUM_FORMATS)
return -EINVAL;
- strscpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 5e00019bce8a..d1895334cbbf 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1153,7 +1153,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
format->fmt.pix.sizeimage = width * height * 2;
format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
format->fmt.pix.field = V4L2_FIELD_INTERLACED;
- format->fmt.pix.priv = 0;
if (cmd == VIDIOC_TRY_FMT)
return 0;
@@ -1207,10 +1206,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
dprintk(1, "%s called\n", __func__);
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- strscpy(f->description, "Packed YUV2", sizeof(f->description));
-
- f->flags = 0;
f->pixelformat = V4L2_PIX_FMT_UYVY;
return 0;
@@ -1231,7 +1226,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.sizeimage = dev->frame_size;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; /* NTSC/PAL */
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- f->fmt.pix.priv = 0;
return 0;
}
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 17468f7d78ed..3ab80a7b4498 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -676,6 +676,10 @@ static int submit_urbs(struct camera_data *cam)
if (!urb) {
for (j = 0; j < i; j++)
usb_free_urb(cam->sbuf[j].urb);
+ for (j = 0; j < NUM_SBUF; j++) {
+ kfree(cam->sbuf[j].data);
+ cam->sbuf[j].data = NULL;
+ }
return -ENOMEM;
}
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 0feae825cebb..626264a56517 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -292,28 +292,13 @@ static int cpia2_s_input(struct file *file, void *fh, unsigned int i)
static int cpia2_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- int index = f->index;
-
- if (index < 0 || index > 1)
- return -EINVAL;
+ if (f->index > 1)
+ return -EINVAL;
- memset(f, 0, sizeof(*f));
- f->index = index;
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- switch(index) {
- case 0:
- strscpy(f->description, "MJPEG", sizeof(f->description));
+ if (f->index == 0)
f->pixelformat = V4L2_PIX_FMT_MJPEG;
- break;
- case 1:
- strscpy(f->description, "JPEG", sizeof(f->description));
+ else
f->pixelformat = V4L2_PIX_FMT_JPEG;
- break;
- default:
- return -EINVAL;
- }
-
return 0;
}
@@ -338,7 +323,6 @@ static int cpia2_try_fmt_vid_cap(struct file *file, void *fh,
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = cam->frame_size;
f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- f->fmt.pix.priv = 0;
switch (cpia2_match_video_size(f->fmt.pix.width, f->fmt.pix.height)) {
case VIDEOSIZE_VGA:
@@ -449,7 +433,6 @@ static int cpia2_g_fmt_vid_cap(struct file *file, void *fh,
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = cam->frame_size;
f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- f->fmt.pix.priv = 0;
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 2475f69a2f1c..6d218a036966 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1051,6 +1051,7 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
p_current_fw = p_fw;
vfree(p_current_fw);
p_current_fw = NULL;
+ vfree(p_buffer);
uninitGPIO(dev);
release_firmware(firmware);
dprintk(1, "Firmware upload successful.\n");
@@ -1592,7 +1593,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e0d98ba8fdbf..e123e74c549e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1351,7 +1351,7 @@ static void cx231xx_unregister_media_device(struct cx231xx *dev)
/*
* cx231xx_release_resources()
* unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
+ * called when the device gets disconnected or at module unload
*/
void cx231xx_release_resources(struct cx231xx *dev)
{
@@ -1924,7 +1924,7 @@ err_if:
/*
* cx231xx_usb_disconnect()
- * called when the device gets diconencted
+ * called when the device gets disconnected
* video device will be unregistered on v4l2_close in case it is still open
*/
static void cx231xx_usb_disconnect(struct usb_interface *interface)
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index a749baadc1f1..982cb56e97e9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -53,7 +53,7 @@ static DEFINE_MUTEX(cx231xx_devlist_mutex);
/*
* cx231xx_release_resources()
* unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
+ * called when the device gets disconnected or at module unload
*/
void cx231xx_remove_from_devlist(struct cx231xx *dev)
{
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index b651ac7713ea..9b51f07a729e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -80,7 +80,6 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
/* supported video standards */
static struct cx231xx_fmt format[] = {
{
- .name = "16bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.reg = 0,
@@ -1578,7 +1577,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (unlikely(f->index >= ARRAY_SIZE(format)))
return -EINVAL;
- strscpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
@@ -1839,7 +1837,7 @@ static int cx231xx_v4l2_open(struct file *filp)
/*
* cx231xx_release_resources()
* unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
+ * called when the device gets disconnected or at module unload
*/
void cx231xx_release_analog_resources(struct cx231xx *dev)
{
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 3efa8ff93c1c..b007611abc37 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -121,7 +121,6 @@
#define CX23417_RESET 9
struct cx23417_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int depth;
int flags;
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 8610487f2d72..617a306f6815 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -540,6 +540,8 @@ static int dvbsky_mygica_t230c_attach(struct dvb_usb_adapter *adap)
si2168_config.i2c_adapter = &i2c_adapter;
si2168_config.fe = &adap->fe[0];
si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ if (le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_MYGICA_T230C2)
+ si2168_config.ts_mode |= SI2168_TS_CLK_MANUAL;
si2168_config.ts_clock_inv = 1;
state->i2c_client_demod = dvb_module_probe("si2168", NULL,
@@ -550,11 +552,19 @@ static int dvbsky_mygica_t230c_attach(struct dvb_usb_adapter *adap)
/* attach tuner */
si2157_config.fe = adap->fe[0];
- si2157_config.if_port = 0;
-
- state->i2c_client_tuner = dvb_module_probe("si2157", "si2141",
- i2c_adapter,
- 0x60, &si2157_config);
+ if (le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_MYGICA_T230) {
+ si2157_config.if_port = 1;
+ state->i2c_client_tuner = dvb_module_probe("si2157", NULL,
+ i2c_adapter,
+ 0x60,
+ &si2157_config);
+ } else {
+ si2157_config.if_port = 0;
+ state->i2c_client_tuner = dvb_module_probe("si2157", "si2141",
+ i2c_adapter,
+ 0x60,
+ &si2157_config);
+ }
if (!state->i2c_client_tuner) {
dvb_module_release(state->i2c_client_demod);
return -ENODEV;
@@ -776,9 +786,15 @@ static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4,
&dvbsky_s960_props, "Terratec Cinergy S2 Rev.4",
RC_MAP_DVBSKY) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230",
+ RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2",
+ RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
{ }
};
MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 0c1fef118be4..e30305876840 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -309,7 +309,7 @@ static int ec168_streaming_ctrl(struct dvb_frontend *fe, int onoff)
/* DVB USB Driver stuff */
/* bInterfaceNumber 0 is HID
* bInterfaceNumber 1 is DVB-T */
-static struct dvb_usb_device_properties ec168_props = {
+static const struct dvb_usb_device_properties ec168_props = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index b784d9da1a82..c7197e534c02 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -353,7 +353,7 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
ret += i2c_transfer(&d->i2c_adap, &msg, 1);
/* send 32bit(satur, R, G, B) data in serial */
- mask = 1 << 31;
+ mask = 1UL << 31;
for (i = 0; i < 32; i++) {
buf[1] = power | FRIIO_CTL_STROBE;
if (sat_color & mask)
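
Note: the gl861/friio fix above is about integer promotion: with a 32-bit int, 1 << 31 shifts a value into the sign bit, which overflows signed arithmetic and is undefined behaviour; writing the constant as 1UL keeps the whole expression unsigned. Sketch:

	u32 mask = 1UL << 31;	/* well defined: unsigned shift */
	/* u32 bad = 1 << 31;	   undefined: signed int overflow on the shift */
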
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index bac0778f7def..f02fa0a67aa4 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -78,7 +78,6 @@ enum cxusb_table_index {
DVICO_BLUEBIRD_DUAL_4_REV_2,
CONEXANT_D680_DMB,
MYGICA_D689,
- MYGICA_T230,
NR__cxusb_table_index
};
@@ -456,26 +455,6 @@ static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
-static int cxusb_read_status(struct dvb_frontend *fe,
- enum fe_status *status)
-{
- struct dvb_usb_adapter *adap = (struct dvb_usb_adapter *)fe->dvb->priv;
- struct cxusb_state *state = (struct cxusb_state *)adap->dev->priv;
- int ret;
-
- ret = state->fe_read_status(fe, status);
-
- /* it need resync slave fifo when signal change from unlock to lock.*/
- if ((*status & FE_HAS_LOCK) && (!state->last_lock)) {
- mutex_lock(&state->stream_mutex);
- cxusb_streaming_ctrl(adap, 1);
- mutex_unlock(&state->stream_mutex);
- }
-
- state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
- return ret;
-}
-
static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
{
int ep = d->props.generic_bulk_ctrl_endpoint;
@@ -1374,86 +1353,6 @@ static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *d = adap->dev;
- struct cxusb_state *st = d->priv;
- struct i2c_adapter *adapter;
- struct i2c_client *client_demod;
- struct i2c_client *client_tuner;
- struct i2c_board_info info;
- struct si2168_config si2168_config;
- struct si2157_config si2157_config;
-
- /* Select required USB configuration */
- if (usb_set_interface(d->udev, 0, 0) < 0)
- err("set interface failed");
-
- /* Unblock all USB pipes */
- usb_clear_halt(d->udev,
- usb_sndbulkpipe(d->udev,
- d->props.generic_bulk_ctrl_endpoint));
- usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev,
- d->props.generic_bulk_ctrl_endpoint));
- usb_clear_halt(d->udev,
- usb_rcvbulkpipe(d->udev,
- d->props.adapter[0].fe[0].stream.endpoint));
-
- /* attach frontend */
- si2168_config.i2c_adapter = &adapter;
- si2168_config.fe = &adap->fe_adap[0].fe;
- si2168_config.ts_mode = SI2168_TS_PARALLEL;
- si2168_config.ts_clock_inv = 1;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strscpy(info.type, "si2168", I2C_NAME_SIZE);
- info.addr = 0x64;
- info.platform_data = &si2168_config;
- request_module(info.type);
- client_demod = i2c_new_device(&d->i2c_adap, &info);
- if (!client_demod || !client_demod->dev.driver)
- return -ENODEV;
-
- if (!try_module_get(client_demod->dev.driver->owner)) {
- i2c_unregister_device(client_demod);
- return -ENODEV;
- }
-
- st->i2c_client_demod = client_demod;
-
- /* attach tuner */
- memset(&si2157_config, 0, sizeof(si2157_config));
- si2157_config.fe = adap->fe_adap[0].fe;
- si2157_config.if_port = 1;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strscpy(info.type, "si2157", I2C_NAME_SIZE);
- info.addr = 0x60;
- info.platform_data = &si2157_config;
- request_module(info.type);
- client_tuner = i2c_new_device(adapter, &info);
- if (!client_tuner || !client_tuner->dev.driver) {
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- return -ENODEV;
- }
- if (!try_module_get(client_tuner->dev.driver->owner)) {
- i2c_unregister_device(client_tuner);
- module_put(client_demod->dev.driver->owner);
- i2c_unregister_device(client_demod);
- return -ENODEV;
- }
-
- st->i2c_client_tuner = client_tuner;
-
- /* hook fe: need to resync the slave fifo when signal locks. */
- mutex_init(&st->stream_mutex);
- st->last_lock = 0;
- st->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
- adap->fe_adap[0].fe->ops.read_status = cxusb_read_status;
-
- return 0;
-}
-
/*
* DViCO has shipped two devices with the same USB ID, but only one of them
* needs a firmware download. Check the device class details to see if they
@@ -1633,7 +1532,6 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
static struct dvb_usb_device_properties cxusb_aver_a868r_properties;
static struct dvb_usb_device_properties cxusb_d680_dmb_properties;
static struct dvb_usb_device_properties cxusb_mygica_d689_properties;
-static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
static int cxusb_medion_priv_init(struct dvb_usb_device *dvbdev)
{
@@ -1759,8 +1657,6 @@ static int cxusb_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
!dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
THIS_MODULE, NULL, adapter_nr) ||
- !dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
- THIS_MODULE, NULL, adapter_nr) ||
0)
return 0;
@@ -1862,9 +1758,6 @@ static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
[MYGICA_D689] = {
USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689)
},
- [MYGICA_T230] = {
- USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230)
- },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, cxusb_table);
@@ -2535,60 +2428,6 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
}
};
-static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
- .caps = DVB_USB_IS_AN_I2C_ADAPTER,
-
- .usb_ctrl = CYPRESS_FX2,
-
- .size_of_priv = sizeof(struct cxusb_state),
-
- .num_adapters = 1,
- .adapter = {
- {
- .num_frontends = 1,
- .fe = {{
- .streaming_ctrl = cxusb_streaming_ctrl,
- .frontend_attach = cxusb_mygica_t230_frontend_attach,
-
- /* parameter for the MPEG2-data transfer */
- .stream = {
- .type = USB_BULK,
- .count = 5,
- .endpoint = 0x02,
- .u = {
- .bulk = {
- .buffersize = 8192,
- }
- }
- },
- } },
- },
- },
-
- .power_ctrl = cxusb_d680_dmb_power_ctrl,
-
- .i2c_algo = &cxusb_i2c_algo,
-
- .generic_bulk_ctrl_endpoint = 0x01,
-
- .rc.core = {
- .rc_interval = 100,
- .rc_codes = RC_MAP_D680_DMB,
- .module_name = KBUILD_MODNAME,
- .rc_query = cxusb_d680_dmb_rc_query,
- .allowed_protos = RC_PROTO_BIT_UNKNOWN,
- },
-
- .num_device_descs = 1,
- .devices = {
- {
- "Mygica T230 DVB-T/T2/C",
- { NULL },
- { &cxusb_table[MYGICA_T230], NULL },
- },
- }
-};
-
static struct usb_driver cxusb_driver = {
.name = "dvb_usb_cxusb",
.probe = cxusb_probe,
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 66d685065e06..ab7a100ec84f 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -2439,9 +2439,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
8, 0x0486,
};
+ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+ return -ENODEV;
if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
return -ENODEV;
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (!i2c)
+ return -ENODEV;
if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
return -ENODEV;
dib0700_set_i2c_speed(adap->dev, 1500);
@@ -2517,10 +2521,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
0, 0x00ef,
8, 0x0406,
};
+ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+ return -ENODEV;
i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
return -ENODEV;
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (!i2c)
+ return -ENODEV;
if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
return -ENODEV;
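
Note: the dib0700 hunks add two guards to the dib9090/nim9090md tuner attach paths: bail out early when the dib9000 frontend driver is not built, and never dereference the I2C master pointer if dib9000_get_i2c_master() failed. A sketch of the shape, assuming fe and i2c as in the surrounding functions:

	if (!IS_ENABLED(CONFIG_DVB_DIB9000))
		return -ENODEV;		/* compile-time dependency missing */

	i2c = dib9000_get_i2c_master(fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
	if (!i2c)
		return -ENODEV;		/* the helper can fail; do not dereference */
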
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index d6b36e4f33d2..441d878fc22c 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -909,14 +909,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
&a->dev->i2c_adap);
if (!a->fe_adap[0].fe)
return -ENODEV;
-
- /*
- * dvb_frontend will call dvb_detach for both stb0899_detach
- * and stb0899_release but we only do dvb_attach(stb0899_attach).
- * Increment the module refcount instead.
- */
- symbol_get(stb0899_attach);
-
if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
&a->dev->i2c_adap)) == NULL)
err("Cannot attach lnbp22\n");
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index c659e18b358b..676d233d46d5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
static int technisat_usb2_get_ir(struct dvb_usb_device *d)
{
struct technisat_usb2_state *state = d->priv;
- u8 *buf = state->buf;
- u8 *b;
- int ret;
struct ir_raw_event ev;
+ u8 *buf = state->buf;
+ int i, ret;
buf[0] = GET_IR_DATA_VENDOR_REQUEST;
buf[1] = 0x08;
@@ -647,26 +646,25 @@ unlock:
return 0; /* no key pressed */
/* decoding */
- b = buf+1;
#if 0
deb_rc("RC: %d ", ret);
- debug_dump(b, ret, deb_rc);
+ debug_dump(buf + 1, ret, deb_rc);
#endif
ev.pulse = 0;
- while (1) {
- ev.pulse = !ev.pulse;
- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
- ir_raw_event_store(d->rc_dev, &ev);
-
- b++;
- if (*b == 0xff) {
+ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+ if (buf[i] == 0xff) {
ev.pulse = 0;
ev.duration = 888888*2;
ir_raw_event_store(d->rc_dev, &ev);
break;
}
+
+ ev.pulse = !ev.pulse;
+ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+ FIRMWARE_CLOCK_TICK) / 1000;
+ ir_raw_event_store(d->rc_dev, &ev);
}
ir_raw_event_handle(d->rc_dev);
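
Note: the technisat IR rework above replaces an open-ended pointer walk that relied on finding a 0xff terminator with an index loop bounded by the size of the state buffer, so malformed device data can no longer run past the end. A minimal sketch of the bounded scan (demo_decode is illustrative only):

	static void demo_decode(const u8 *buf, size_t len)
	{
		size_t i;

		for (i = 1; i < len; i++) {
			if (buf[i] == 0xff)	/* terminator from the firmware */
				break;
			/* consume buf[i] here */
		}
	}
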
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 1283c7ca9ad5..5983e72a0622 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3566,13 +3566,12 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
static int em28xx_duplicate_dev(struct em28xx *dev)
{
int nr;
- struct em28xx *sec_dev = kzalloc(sizeof(*sec_dev), GFP_KERNEL);
+ struct em28xx *sec_dev = kmemdup(dev, sizeof(*sec_dev), GFP_KERNEL);
if (!sec_dev) {
dev->dev_next = NULL;
return -ENOMEM;
}
- memcpy(sec_dev, dev, sizeof(*sec_dev));
/* Check to see next free device and mark as used */
do {
nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
@@ -4020,7 +4019,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
dev->dev_next->disconnected = 1;
dev_info(&dev->intf->dev, "Disconnecting %s\n",
dev->dev_next->name);
- flush_request_modules(dev->dev_next);
}
dev->disconnected = 1;
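
Note: em28xx_duplicate_dev() now uses kmemdup(), which allocates and copies in a single call; the separate kzalloc() plus memcpy() pair goes away (zeroing first was redundant since every byte is overwritten by the copy). The zr364xx hunk further down makes the same conversion for its control-message buffer. The idiom in isolation:

	void *copy = kmemdup(src, len, GFP_KERNEL);	/* alloc + memcpy in one call */
	if (!copy)
		return -ENOMEM;
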
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 2b8c84a5c9a8..e6088b5d1b80 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -931,7 +931,7 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
usb_bufs->buf = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
if (!usb_bufs->buf) {
- kfree(usb_bufs->buf);
+ kfree(usb_bufs->urb);
return -ENOMEM;
}
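
Note: the em28xx_alloc_urbs() fix corrects the error path: when the second allocation (the buf pointer array) fails, the old code freed the pointer that had just failed to allocate (a no-op on NULL) and leaked the urb array allocated immediately before it. Sketch of the corrected unwind, assuming the two kcalloc() calls sit back to back as in the function above:

	usb_bufs->urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!usb_bufs->urb)
		return -ENOMEM;

	usb_bufs->buf = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!usb_bufs->buf) {
		kfree(usb_bufs->urb);	/* unwind the previous allocation */
		return -ENOMEM;
	}
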
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 0512e1959394..b0f7390e4b4f 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -102,37 +102,30 @@ MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
/* supported video standards */
static struct em28xx_fmt format[] = {
{
- .name = "16 bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.reg = EM28XX_OUTFMT_YUV422_Y0UY1V,
}, {
- .name = "16 bpp RGB 565, LE",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.reg = EM28XX_OUTFMT_RGB_16_656,
}, {
- .name = "8 bpp Bayer RGRG..GBGB",
.fourcc = V4L2_PIX_FMT_SRGGB8,
.depth = 8,
.reg = EM28XX_OUTFMT_RGB_8_RGRG,
}, {
- .name = "8 bpp Bayer BGBG..GRGR",
.fourcc = V4L2_PIX_FMT_SBGGR8,
.depth = 8,
.reg = EM28XX_OUTFMT_RGB_8_BGBG,
}, {
- .name = "8 bpp Bayer GRGR..BGBG",
.fourcc = V4L2_PIX_FMT_SGRBG8,
.depth = 8,
.reg = EM28XX_OUTFMT_RGB_8_GRGR,
}, {
- .name = "8 bpp Bayer GBGB..RGRG",
.fourcc = V4L2_PIX_FMT_SGBRG8,
.depth = 8,
.reg = EM28XX_OUTFMT_RGB_8_GBGB,
}, {
- .name = "12 bpp YUV411",
.fourcc = V4L2_PIX_FMT_YUV411P,
.depth = 12,
.reg = EM28XX_OUTFMT_YUV411,
@@ -1517,7 +1510,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
else
f->fmt.pix.field = v4l2->interlaced_fieldmode ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
- f->fmt.pix.priv = 0;
return 0;
}
@@ -2011,7 +2003,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (unlikely(f->index >= ARRAY_SIZE(format)))
return -EINVAL;
- strscpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
@@ -2208,7 +2199,7 @@ static int em28xx_v4l2_open(struct file *filp)
/*
* em28xx_v4l2_fini()
* unregisters the v4l2,i2c and usb devices
- * called when the device gets disconected or at module unload
+ * called when the device gets disconnected or at module unload
*/
static int em28xx_v4l2_fini(struct em28xx *dev)
{
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index a551072e62ed..c8bc59059a19 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -251,13 +251,11 @@ struct em28xx_usb_ctl {
/**
* struct em28xx_fmt - Struct to enumberate video formats
*
- * @name: Name for the video standard
* @fourcc: v4l2 format id
* @depth: mean number of bits to represent a pixel
* @reg: em28xx register value to set it
*/
struct em28xx_fmt {
- char *name;
u32 fourcc;
int depth;
int reg;
@@ -657,7 +655,7 @@ struct em28xx {
enum em28xx_chip_id chip_id;
unsigned int is_em25xx:1; // em25xx/em276x/7x/8x family bridge
- unsigned int disconnected:1; // device has been diconnected
+ unsigned int disconnected:1; // device has been disconnected
unsigned int has_video:1;
unsigned int is_audio_only:1;
unsigned int is_webcam:1;
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 88edfef80b40..0b3d185f3cb0 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -285,33 +285,22 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- char *desc = NULL;
-
switch (fmt->index) {
case 0:
fmt->pixelformat = V4L2_PIX_FMT_MJPEG;
- desc = "Motion JPEG";
break;
case 1:
fmt->pixelformat = V4L2_PIX_FMT_MPEG1;
- desc = "MPEG-1 ES";
break;
case 2:
fmt->pixelformat = V4L2_PIX_FMT_MPEG2;
- desc = "MPEG-2 ES";
break;
case 3:
fmt->pixelformat = V4L2_PIX_FMT_MPEG4;
- desc = "MPEG-4 ES";
break;
default:
return -EINVAL;
}
- fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fmt->flags = V4L2_FMT_FLAG_COMPRESSED;
-
- strscpy(fmt->description, desc, sizeof(fmt->description));
-
return 0;
}
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 179d4d642dae..49e75a1a1f3f 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -505,9 +505,9 @@ static int s2250_probe(struct i2c_client *client,
struct go7007 *go = i2c_get_adapdata(adapter);
struct go7007_usb *usb = go->hpi_context;
- audio = i2c_new_dummy(adapter, TLV320_ADDRESS >> 1);
- if (audio == NULL)
- return -ENOMEM;
+ audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1);
+ if (IS_ERR(audio))
+ return PTR_ERR(audio);
state = kzalloc(sizeof(struct s2250), GFP_KERNEL);
if (state == NULL) {
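
Note: s2250_probe() is converted to i2c_new_dummy_device(), the ERR_PTR variant of the old helper: it never returns NULL, so failure is detected with IS_ERR() and the encoded error is propagated instead of a blanket -ENOMEM. Sketch:

	audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1);
	if (IS_ERR(audio))
		return PTR_ERR(audio);	/* propagates the real error code */
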
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index 4a449c62fc32..b05fa227ffb2 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -253,7 +253,7 @@ int go7007_snd_init(struct go7007 *go)
return ret;
}
strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
- strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver));
+ strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname));
strscpy(gosnd->card->longname, gosnd->card->shortname,
sizeof(gosnd->card->longname));
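
Note: the snd-go7007 one-liner fixes a copy-and-paste bound: the shortname copy was limited by sizeof() of the driver field rather than of the destination. The rule of thumb, sketched with card and name as stand-ins for the fields in the function above:

	/* the third argument must be the size of the destination buffer */
	strscpy(card->shortname, name, sizeof(card->shortname));
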
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index be11f7830bca..4add2b12d330 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1024,27 +1024,18 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL; /* no more format */
fmtdesc->pixelformat = fmt_tb[index];
- if (gspca_dev->cam.cam_mode[i].sizeimage <
- gspca_dev->cam.cam_mode[i].width *
- gspca_dev->cam.cam_mode[i].height)
- fmtdesc->flags = V4L2_FMT_FLAG_COMPRESSED;
- fmtdesc->description[0] = fmtdesc->pixelformat & 0xff;
- fmtdesc->description[1] = (fmtdesc->pixelformat >> 8) & 0xff;
- fmtdesc->description[2] = (fmtdesc->pixelformat >> 16) & 0xff;
- fmtdesc->description[3] = fmtdesc->pixelformat >> 24;
- fmtdesc->description[4] = '\0';
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
+static int vidioc_g_fmt_vid_cap(struct file *file, void *_priv,
+ struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
+ u32 priv = fmt->fmt.pix.priv;
fmt->fmt.pix = gspca_dev->pixfmt;
- /* some drivers use priv internally, zero it before giving it back to
- the core */
- fmt->fmt.pix.priv = 0;
+ /* some drivers use priv internally, so keep the original value */
+ fmt->fmt.pix.priv = priv;
return 0;
}
@@ -1079,27 +1070,27 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
fmt->fmt.pix.height = h;
gspca_dev->sd_desc->try_fmt(gspca_dev, fmt);
}
- /* some drivers use priv internally, zero it before giving it back to
- the core */
- fmt->fmt.pix.priv = 0;
return mode; /* used when s_fmt */
}
-static int vidioc_try_fmt_vid_cap(struct file *file,
- void *priv,
- struct v4l2_format *fmt)
+static int vidioc_try_fmt_vid_cap(struct file *file, void *_priv,
+ struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
+ u32 priv = fmt->fmt.pix.priv;
if (try_fmt_vid_cap(gspca_dev, fmt) < 0)
return -EINVAL;
+ /* some drivers use priv internally, so keep the original value */
+ fmt->fmt.pix.priv = priv;
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
+static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv,
+ struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
+ u32 priv = fmt->fmt.pix.priv;
int mode;
if (vb2_is_busy(&gspca_dev->queue))
@@ -1115,6 +1106,8 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
gspca_dev->pixfmt = fmt->fmt.pix;
else
gspca_dev->pixfmt = gspca_dev->cam.cam_mode[mode];
+ /* some drivers use priv internally, so keep the original value */
+ fmt->fmt.pix.priv = priv;
return 0;
}
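
Note: the gspca core change stops zeroing fmt->fmt.pix.priv on behalf of the caller; instead the incoming value is saved and written back after the format callbacks run, since some sub-drivers use the field for their own bookkeeping. A sketch of the save/restore wrapper, where demo_try_fmt_internal is a placeholder for whatever may clobber priv:

	static int demo_try_fmt(struct file *file, void *fh,
				struct v4l2_format *fmt)
	{
		u32 priv = fmt->fmt.pix.priv;		/* caller's value */
		int ret = demo_try_fmt_internal(fmt);	/* may scribble on priv */

		fmt->fmt.pix.priv = priv;		/* restore before returning */
		return ret;
	}
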
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index d8e40137a204..53db9a2895ea 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -114,6 +114,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, 2);
}
}
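
Note: the konica hunk is the first of a series (nw80x, ov519, ov534, se401, sn9c20x, sonixb, sonixj, spca1528, sq930x, sunplus, vc032x and w996Xcf below, plus ov534_9 which returns 0 instead): when a register read fails, the USB buffer is forced to a known state, because callers read gspca_dev->usb_buf without checking usb_err and would otherwise see stale or uninitialized bytes. Pattern sketch, with demo_usb_read standing in for the actual control transfer:

	static void demo_reg_r(struct gspca_dev *gspca_dev, u16 value, int len)
	{
		int ret = demo_usb_read(gspca_dev, value, len);	/* hypothetical */

		if (ret < 0) {
			pr_err("reg_r err %d\n", ret);
			gspca_dev->usb_err = ret;
			/* hand callers a defined buffer even on failure */
			memset(gspca_dev->usb_buf, 0, len);
		}
	}
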
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 59649704beba..880f569bda30 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -1572,6 +1572,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
return;
}
if (len == 1)
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index cfb1f53bc17e..f417dfc0b872 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2073,6 +2073,11 @@ static int reg_r(struct sd *sd, u16 index)
} else {
gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
return ret;
@@ -2101,6 +2106,11 @@ static int reg_r8(struct sd *sd,
} else {
gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, 8);
}
return ret;
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 56521c991db4..185c1f10fb30 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -693,6 +693,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
if (ret < 0) {
pr_err("read failed %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
return gspca_dev->usb_buf[0];
}
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index 867f860a9650..91efc650cf76 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -1145,6 +1145,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ return 0;
}
return gspca_dev->usb_buf[0];
}
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 061deee138c3..e087cfb5980b 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -101,6 +101,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
pr_err("read req failed req %#04x error %d\n",
req, err);
gspca_dev->usb_err = err;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
}
}
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index b43f89fee6c1..2a6d0a1265a7 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -124,6 +124,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
}
},
{
+ .ident = "MSI MS-1039",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
+ }
+ },
+ {
.ident = "MSI MS-1632",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
@@ -909,6 +916,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
if (unlikely(result < 0 || result != length)) {
pr_err("Read register %02x failed %d\n", reg, result);
gspca_dev->usb_err = result;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 046fc2c2a135..4d655e2da9cb 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -453,6 +453,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
dev_err(gspca_dev->v4l2_dev.dev,
"Error reading register %02x: %d\n", value, res);
gspca_dev->usb_err = res;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
}
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 50a6c8425827..2e1bd2df8304 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1162,6 +1162,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index 2ae03b60163f..ccc477944ef8 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -71,6 +71,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index d1ba0888d798..c3610247a90e 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -425,6 +425,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r %04x failed %d\n", value, ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index d0ddfa957ca9..f4a4222f0d2e 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -255,6 +255,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
index 588a847ea483..4cb7c92ea132 100644
--- a/drivers/media/usb/gspca/vc032x.c
+++ b/drivers/media/usb/gspca/vc032x.c
@@ -2906,6 +2906,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
static void reg_r(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index 16b679c2de21..a8350ee9712f 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -133,6 +133,11 @@ static int w9968cf_read_sb(struct sd *sd)
} else {
pr_err("Read SB reg [01] failed\n");
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(sd->gspca_dev.usb_buf, 0, 2);
}
udelay(W9968CF_I2C_BUS_DELAY);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 9b9d894d29bc..b75c18a012a7 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -137,6 +137,7 @@ static int device_authorization(struct hdpvr_device *dev)
dev->fw_ver = dev->usbc_buf[1];
+ dev->usbc_buf[46] = '\0';
v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
dev->fw_ver, &dev->usbc_buf[2]);
@@ -271,6 +272,7 @@ static int hdpvr_probe(struct usb_interface *interface,
#endif
size_t buffer_size;
int i;
+ int dev_num;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
@@ -368,8 +370,17 @@ static int hdpvr_probe(struct usb_interface *interface,
}
#endif
+ dev_num = atomic_inc_return(&dev_nr);
+ if (dev_num >= HDPVR_MAX) {
+ v4l2_err(&dev->v4l2_dev,
+ "max device number reached, device register failed\n");
+ atomic_dec(&dev_nr);
+ retval = -ENODEV;
+ goto reg_fail;
+ }
+
retval = hdpvr_register_videodev(dev, &interface->dev,
- video_nr[atomic_inc_return(&dev_nr)]);
+ video_nr[dev_num]);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
goto reg_fail;
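
Note: the hdpvr probe fix bounds the device number before it is used to index video_nr[]: atomic_inc_return() hands out the slot, the result is checked against HDPVR_MAX, and the counter is decremented again when registration is refused so the slot is not leaked. Condensed sketch of the pattern (error handling reduced to a plain return, parent standing for &interface->dev):

	dev_num = atomic_inc_return(&dev_nr);
	if (dev_num >= HDPVR_MAX) {
		atomic_dec(&dev_nr);	/* give the slot back */
		return -ENODEV;
	}
	retval = hdpvr_register_videodev(dev, parent, video_nr[dev_num]);
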
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index bc5975b17c0c..785c8508a46e 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -193,8 +193,6 @@ static int hdpvr_activate_ir(struct hdpvr_device *dev)
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
{
- int retval = -ENOMEM;
-
hdpvr_activate_ir(dev);
dev->i2c_adapter = hdpvr_i2c_adapter_template;
@@ -202,9 +200,7 @@ int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
i2c_set_adapdata(&dev->i2c_adapter, dev);
- retval = i2c_add_adapter(&dev->i2c_adapter);
-
- return retval;
+ return i2c_add_adapter(&dev->i2c_adapter);
}
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 5b3e67b80627..bad71d863d39 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -987,9 +987,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
if (f->index != 0)
return -EINVAL;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- strscpy(f->description, "MPEG2-TS with AVC/AAC streams",
- sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
return 0;
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 4c9b2a12acfb..65be6f140fe8 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -66,7 +66,6 @@ static const struct v4l2_frequency_band bands[] = {
/* stream formats */
struct msi2500_format {
- char *name;
u32 pixelformat;
u32 buffersize;
};
@@ -74,27 +73,21 @@ struct msi2500_format {
/* format descriptions for capture and preview */
static struct msi2500_format formats[] = {
{
- .name = "Complex S8",
.pixelformat = V4L2_SDR_FMT_CS8,
.buffersize = 3 * 1008,
#if 0
}, {
- .name = "10+2-bit signed",
.pixelformat = MSI2500_PIX_FMT_SDR_MSI2500_384,
}, {
- .name = "12-bit signed",
.pixelformat = MSI2500_PIX_FMT_SDR_S12,
#endif
}, {
- .name = "Complex S14LE",
.pixelformat = V4L2_SDR_FMT_CS14LE,
.buffersize = 3 * 1008,
}, {
- .name = "Complex U8 (emulated)",
.pixelformat = V4L2_SDR_FMT_CU8,
.buffersize = 3 * 1008,
}, {
- .name = "Complex U16LE (emulated)",
.pixelformat = V4L2_SDR_FMT_CU16LE,
.buffersize = 3 * 1008,
},
@@ -904,7 +897,6 @@ static int msi2500_enum_fmt_sdr_cap(struct file *file, void *priv,
if (f->index >= dev->num_formats)
return -EINVAL;
- strscpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index 79f0e0c6df37..8e81af537901 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -39,7 +39,7 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
int ret;
int mode16 = 0;
unsigned pcnt,tcnt;
- eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
+ eeprom = kzalloc(EEPROM_SIZE, GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Failed to allocate memory required to read eeprom");
@@ -74,7 +74,6 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
(1) we're only fetching part of the eeprom, and (2) if we were
getting the whole thing our I2C driver can't grab it in one
pass - which is what tveeprom is otherwise going to attempt */
- memset(eeprom,0,EEPROM_SIZE);
for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
pcnt = 16;
if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
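
Note: pvr2_eeprom_fetch() switches to kzalloc(), which returns already-zeroed memory, so the later memset(eeprom, 0, EEPROM_SIZE) in the same function becomes redundant and is removed; pvrusb2-hdw.c and ttusb_dec.c below get the same treatment. The idiom in isolation:

	eeprom = kzalloc(EEPROM_SIZE, GFP_KERNEL);	/* allocated and zeroed */
	if (!eeprom)
		return NULL;	/* the driver also logs a trace message here */
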
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 6fe8b9af858a..1cfb7cf64131 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -660,7 +660,7 @@ static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
{
if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
return 0;
- return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
+ return ((1UL << v) & cptr->hdw->input_allowed_mask) != 0;
}
static int ctrl_set_input(struct pvr2_ctrl *cptr,int m,int v)
@@ -784,7 +784,7 @@ static int ctrl_cx2341x_set(struct pvr2_ctrl *cptr,int m,int v)
static unsigned int ctrl_cx2341x_getv4lflags(struct pvr2_ctrl *cptr)
{
- struct v4l2_queryctrl qctrl;
+ struct v4l2_queryctrl qctrl = {};
struct pvr2_ctl_info *info;
qctrl.id = cptr->info->v4l_id;
cx2341x_ctrl_query(&cptr->hdw->enc_ctl_state,&qctrl);
@@ -2445,7 +2445,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
/* Ensure that default input choice is a valid one. */
m = hdw->input_avail_mask;
if (m) for (idx = 0; idx < (sizeof(m) << 3); idx++) {
- if (!((1 << idx) & m)) continue;
+ if (!((1UL << idx) & m)) continue;
hdw->input_val = idx;
break;
}
@@ -2501,11 +2501,11 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
// Initialize control data regarding video standard masks
valid_std_mask = pvr2_std_get_usable();
for (idx = 0; idx < 32; idx++) {
- if (!(valid_std_mask & (1 << idx))) continue;
+ if (!(valid_std_mask & (1UL << idx))) continue;
cnt1 = pvr2_std_id_to_str(
hdw->std_mask_names[idx],
sizeof(hdw->std_mask_names[idx])-1,
- 1 << idx);
+ 1UL << idx);
hdw->std_mask_names[idx][cnt1] = 0;
}
cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDAVAIL);
@@ -3329,7 +3329,7 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
int ret;
int mode16 = 0;
unsigned pcnt,tcnt;
- eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
+ eeprom = kzalloc(EEPROM_SIZE, GFP_KERNEL);
if (!eeprom) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Failed to allocate memory required to read eeprom");
@@ -3364,7 +3364,6 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
(1) we're only fetching part of the eeprom, and (2) if we were
getting the whole thing our I2C driver can't grab it in one
pass - which is what tveeprom is otherwise going to attempt */
- memset(eeprom,0,EEPROM_SIZE);
for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
pcnt = 16;
if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
@@ -4673,7 +4672,7 @@ static unsigned int print_input_mask(unsigned int msk,
unsigned int idx,ccnt;
unsigned int tcnt = 0;
for (idx = 0; idx < ARRAY_SIZE(control_values_input); idx++) {
- if (!((1 << idx) & msk)) continue;
+ if (!((1UL << idx) & msk)) continue;
ccnt = scnprintf(buf+tcnt,
acnt-tcnt,
"%s%s",
@@ -5100,7 +5099,7 @@ int pvr2_hdw_set_input_allowed(struct pvr2_hdw *hdw,
break;
}
hdw->input_allowed_mask = nv;
- if ((1 << hdw->input_val) & hdw->input_allowed_mask) {
+ if ((1UL << hdw->input_val) & hdw->input_allowed_mask) {
/* Current mode is still in the allowed mask, so
we're done. */
break;
@@ -5113,7 +5112,7 @@ int pvr2_hdw_set_input_allowed(struct pvr2_hdw *hdw,
}
m = hdw->input_allowed_mask;
for (idx = 0; idx < (sizeof(m) << 3); idx++) {
- if (!((1 << idx) & m)) continue;
+ if (!((1UL << idx) & m)) continue;
pvr2_hdw_set_input(hdw,idx);
break;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 0aff2f396392..a34717eba409 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1003,7 +1003,7 @@ static int pvr2_v4l2_open(struct file *file)
input_mask &= pvr2_hdw_get_input_available(hdw);
input_cnt = 0;
for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) {
- if (input_mask & (1 << idx)) input_cnt++;
+ if (input_mask & (1UL << idx)) input_cnt++;
}
fhp->input_cnt = input_cnt;
fhp->input_map = kzalloc(input_cnt,GFP_KERNEL);
@@ -1018,7 +1018,7 @@ static int pvr2_v4l2_open(struct file *file)
}
input_cnt = 0;
for (idx = 0; idx < (sizeof(input_mask) << 3); idx++) {
- if (!(input_mask & (1 << idx))) continue;
+ if (!(input_mask & (1UL << idx))) continue;
fhp->input_map[input_cnt++] = idx;
}
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 76c498cccc49..2f135d533af6 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -873,14 +873,9 @@ static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc
case 0:
/* RAW format */
f->pixelformat = pdev->type <= 646 ? V4L2_PIX_FMT_PWC1 : V4L2_PIX_FMT_PWC2;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- strscpy(f->description, "Raw Philips Webcam",
- sizeof(f->description));
break;
case 1:
f->pixelformat = V4L2_PIX_FMT_YUV420;
- strscpy(f->description, "4:2:0, planar, Y-Cb-Cr",
- sizeof(f->description));
break;
default:
return -EINVAL;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index aa90558479f7..329ec8089592 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -273,7 +273,6 @@ static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
}
struct s2255_fmt {
- char *name;
u32 fourcc;
int depth;
};
@@ -385,29 +384,23 @@ MODULE_DEVICE_TABLE(usb, s2255_table);
/* JPEG formats must be defined last to support jpeg_enable parameter */
static const struct s2255_fmt formats[] = {
{
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16
}, {
- .name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16
}, {
- .name = "4:2:2, planar, YUV422P",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16
}, {
- .name = "8bpp GREY",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8
}, {
- .name = "JPG",
.fourcc = V4L2_PIX_FMT_JPEG,
.depth = 24
}, {
- .name = "MJPG",
.fourcc = V4L2_PIX_FMT_MJPEG,
.depth = 24
}
@@ -737,7 +730,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[index].fourcc == V4L2_PIX_FMT_MJPEG)))
return -EINVAL;
- strscpy(f->description, formats[index].name, sizeof(f->description));
f->pixelformat = formats[index].fourcc;
return 0;
}
@@ -759,7 +751,6 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline = f->fmt.pix.width * (vc->fmt->depth >> 3);
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
return 0;
}
@@ -811,7 +802,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
dprintk(vc->dev, 50, "%s: set width %d height %d field %d\n", __func__,
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index b71a0f4b40b5..bcd14c66e8df 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -46,7 +46,6 @@ struct stk1160_decimate_ctrl {
/* supported video standards */
static struct stk1160_fmt format[] = {
{
- .name = "16 bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
}
@@ -346,7 +345,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index != 0)
return -EINVAL;
- strscpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
}
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 099ce2a2f021..a31ea1c80f25 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -102,7 +102,6 @@ struct stk1160_isoc_ctl {
};
struct stk1160_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int depth;
};
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index be8041e3e6b8..cfca3c70599b 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -857,23 +857,18 @@ static int stk_vidioc_enum_fmt_vid_cap(struct file *filp,
switch (fmtd->index) {
case 0:
fmtd->pixelformat = V4L2_PIX_FMT_RGB565;
- strscpy(fmtd->description, "r5g6b5", sizeof(fmtd->description));
break;
case 1:
fmtd->pixelformat = V4L2_PIX_FMT_RGB565X;
- strscpy(fmtd->description, "r5g6b5BE", sizeof(fmtd->description));
break;
case 2:
fmtd->pixelformat = V4L2_PIX_FMT_UYVY;
- strscpy(fmtd->description, "yuv4:2:2", sizeof(fmtd->description));
break;
case 3:
fmtd->pixelformat = V4L2_PIX_FMT_SBGGR8;
- strscpy(fmtd->description, "Raw bayer", sizeof(fmtd->description));
break;
case 4:
fmtd->pixelformat = V4L2_PIX_FMT_YUYV;
- strscpy(fmtd->description, "yuv4:2:2", sizeof(fmtd->description));
break;
default:
return -EINVAL;
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 23df50aa0a4a..5358cd8c4603 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -1328,7 +1328,7 @@ put_device:
/*
* tm6000_usb_disconnect()
- * called when the device gets diconencted
+ * called when the device gets disconnected
* video device will be unregistered on v4l2_close in case it is still open
*/
static void tm6000_usb_disconnect(struct usb_interface *interface)
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index e4d2dcd5cc0f..19c90fa9e443 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb)
printk(KERN_ERR "tm6000: error %s\n", __func__);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
+ dev->dvb->bulk_urb = NULL;
}
}
}
@@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
if (!dvb->bulk_urb->transfer_buffer) {
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return -ENOMEM;
}
@@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
kfree(dvb->bulk_urb->transfer_buffer);
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return ret;
}
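
Note: the tm6000-dvb fixes clear dvb->bulk_urb every time the URB (or its transfer buffer) is freed, so later teardown paths that test or reuse the pointer cannot touch a stale URB or free it twice. The core of the idiom:

	usb_free_urb(dvb->bulk_urb);
	dvb->bulk_urb = NULL;	/* later code can safely test the pointer */
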
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 85fcddfb0202..c07a81a6cbe2 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -52,15 +52,12 @@ EXPORT_SYMBOL_GPL(tm6000_debug);
static struct tm6000_fmt format[] = {
{
- .name = "4:2:2, packed, YVY2",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
}, {
- .name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
}, {
- .name = "A/V + VBI mux packet",
.fourcc = V4L2_PIX_FMT_TM6000,
.depth = 16,
}
@@ -875,7 +872,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index >= ARRAY_SIZE(format))
return -EINVAL;
- strscpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
}
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index 0864ed7314eb..bf396544da9a 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -64,7 +64,6 @@ struct tm6000_input {
*/
struct tm6000_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int depth;
};
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 1d0afa340f47..3198f9624b7c 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -319,7 +319,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
dprintk("%s\n", __func__);
- b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+ b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
if (!b)
return -ENOMEM;
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 51f784479e91..3d9284a09ee5 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -633,8 +633,6 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
if (f->index > 0)
return -EINVAL;
- strscpy(f->description, "16 bpp YUY2, 4:2:2, packed",
- sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_YUYV;
return 0;
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 93750af82d98..cdc66adda755 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -87,14 +87,14 @@
static int usbvision_nr;
static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
- { 1, 1, 8, V4L2_PIX_FMT_GREY , "GREY" },
- { 1, 2, 16, V4L2_PIX_FMT_RGB565 , "RGB565" },
- { 1, 3, 24, V4L2_PIX_FMT_RGB24 , "RGB24" },
- { 1, 4, 32, V4L2_PIX_FMT_RGB32 , "RGB32" },
- { 1, 2, 16, V4L2_PIX_FMT_RGB555 , "RGB555" },
- { 1, 2, 16, V4L2_PIX_FMT_YUYV , "YUV422" },
- { 1, 2, 12, V4L2_PIX_FMT_YVU420 , "YUV420P" }, /* 1.5 ! */
- { 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" }
+ { 1, 1, 8, V4L2_PIX_FMT_GREY },
+ { 1, 2, 16, V4L2_PIX_FMT_RGB565 },
+ { 1, 3, 24, V4L2_PIX_FMT_RGB24 },
+ { 1, 4, 32, V4L2_PIX_FMT_RGB32 },
+ { 1, 2, 16, V4L2_PIX_FMT_RGB555 },
+ { 1, 2, 16, V4L2_PIX_FMT_YUYV },
+ { 1, 2, 12, V4L2_PIX_FMT_YVU420 }, /* 1.5 ! */
+ { 1, 2, 16, V4L2_PIX_FMT_YUV422P }
};
/* Function prototypes */
@@ -796,8 +796,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
{
if (vfd->index >= USBVISION_SUPPORTED_PALETTES - 1)
return -EINVAL;
- strscpy(vfd->description, usbvision_v4l2_format[vfd->index].desc,
- sizeof(vfd->description));
vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
return 0;
}
@@ -967,7 +965,6 @@ static ssize_t usbvision_read(struct file *file, char __user *buf,
__func__,
(unsigned long)count, frame->bytes_read);
-#if 1
/*
* FIXME:
* For now, forget the frame if it has not been read in one shot.
@@ -976,15 +973,6 @@ static ssize_t usbvision_read(struct file *file, char __user *buf,
/* Mark it as available to be used again. */
frame->grabstate = frame_state_unused;
-#else
- if (frame->bytes_read >= frame->scanlength) {
- /* All data has been read */
- frame->bytes_read = 0;
-
- /* Mark it as available to be used again. */
- frame->grabstate = frame_state_unused;
- }
-#endif
return count;
}
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 4198f972a47b..11539578e8d2 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -264,7 +264,6 @@ struct usbvision_v4l2_format_st {
int bytes_per_pixel;
int depth;
int format;
- char *desc;
};
#define USBVISION_SUPPORTED_PALETTES ARRAY_SIZE(usbvision_v4l2_format)
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 203329cadbc4..0335e69b70ab 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -253,7 +253,6 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace = format->colorspace;
- fmt->fmt.pix.priv = 0;
if (uvc_format != NULL)
*uvc_format = format;
@@ -290,7 +289,6 @@ static int uvc_v4l2_get_format(struct uvc_streaming *stream,
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = stream->ctrl.dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace = format->colorspace;
- fmt->fmt.pix.priv = 0;
done:
mutex_unlock(&stream->mutex);
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index a9bcba4fa9c6..637962825d7a 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -141,7 +141,6 @@ struct zr364xx_pipeinfo {
};
struct zr364xx_fmt {
- char *name;
u32 fourcc;
int depth;
};
@@ -149,7 +148,6 @@ struct zr364xx_fmt {
/* image formats. */
static const struct zr364xx_fmt formats[] = {
{
- .name = "JPG",
.fourcc = V4L2_PIX_FMT_JPEG,
.depth = 24
}
@@ -199,12 +197,10 @@ static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
{
int status;
- unsigned char *transfer_buffer = kmalloc(size, GFP_KERNEL);
+ unsigned char *transfer_buffer = kmemdup(cp, size, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
- memcpy(transfer_buffer, cp, size);
-
status = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
@@ -376,8 +372,7 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
vb);
int rc;
- DBG("%s, field=%d, fmt name = %s\n", __func__, field,
- cam->fmt ? cam->fmt->name : "");
+ DBG("%s, field=%d\n", __func__, field);
if (!cam->fmt)
return -EINVAL;
@@ -751,8 +746,6 @@ static int zr364xx_vidioc_enum_fmt_vid_cap(struct file *file,
{
if (f->index > 0)
return -EINVAL;
- f->flags = V4L2_FMT_FLAG_COMPRESSED;
- strscpy(f->description, formats[0].name, sizeof(f->description));
f->pixelformat = formats[0].fourcc;
return 0;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 7c5f62f196e5..39e3fb30ba0b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -11,6 +11,11 @@ config VIDEO_V4L2
select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
default (I2C || I2C=n) && VIDEO_DEV
+config VIDEO_V4L2_I2C
+ bool
+ depends on I2C && VIDEO_V4L2
+ default y
+
config VIDEO_ADV_DEBUG
bool "Enable advanced debug functionality on V4L2 drivers"
help
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 9ee57e1efefe..786bd1ec4d1b 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -7,18 +7,15 @@ tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
- v4l2-async.o
-ifeq ($(CONFIG_COMPAT),y)
- videodev-objs += v4l2-compat-ioctl32.o
-endif
-obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
-ifeq ($(CONFIG_TRACEPOINTS),y)
- videodev-objs += v4l2-trace.o
-endif
+ v4l2-async.o v4l2-common.o
+videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
+videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
+videodev-$(CONFIG_SPI) += v4l2-spi.o
+videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
-obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 8d307b538f52..8bde33c21ce4 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -534,7 +534,7 @@ static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd, *tmp;
- if (!notifier)
+ if (!notifier || !notifier->asd_list.next)
return;
list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
@@ -593,10 +593,11 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
return ERR_PTR(-ENOMEM);
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- asd->match.fwnode = fwnode;
+ asd->match.fwnode = fwnode_handle_get(fwnode);
ret = v4l2_async_notifier_add_subdev(notifier, asd);
if (ret) {
+ fwnode_handle_put(fwnode);
kfree(asd);
return ERR_PTR(ret);
}
@@ -605,6 +606,29 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
+int
+v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ struct v4l2_async_subdev *asd)
+{
+ struct fwnode_handle *remote;
+ int ret;
+
+ remote = fwnode_graph_get_remote_port_parent(endpoint);
+ if (!remote)
+ return -ENOTCONN;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode = remote;
+
+ ret = v4l2_async_notifier_add_subdev(notif, asd);
+ if (ret)
+ fwnode_handle_put(remote);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);
+
struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
int adapter_id, unsigned short address,
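
Note: the v4l2-async changes tighten fwnode reference counting (and guard the cleanup path against an uninitialised asd_list): the notifier now takes its own reference on the fwnode it stores via fwnode_handle_get() and drops exactly that reference on the failure path, and the new v4l2_async_notifier_add_fwnode_remote_subdev() helper puts the remote-port reference it acquired if adding the subdev fails. The guiding rule, sketched with the same calls as the hunk above:

	asd->match.fwnode = fwnode_handle_get(fwnode);	/* +1, owned by the notifier */

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);	/* undo the get above */
		kfree(asd);
		return ERR_PTR(ret);
	}
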
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f8ad1c580a3e..62f7aa92ac29 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -40,10 +40,6 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/i2c.h>
-#if defined(CONFIG_SPI)
-#include <linux/spi/spi.h>
-#endif
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
@@ -54,10 +50,6 @@
#include <linux/videodev2.h>
-MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr");
-MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers");
-MODULE_LICENSE("GPL");
-
/*
*
* V 4 L 2 D R I V E R H E L P E R A P I
@@ -95,212 +87,6 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
-/* I2C Helper functions */
-
-#if IS_ENABLED(CONFIG_I2C)
-
-void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client,
- const char *devname, const char *postfix)
-{
- if (!devname)
- devname = client->dev.driver->name;
- if (!postfix)
- postfix = "";
-
- snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
- i2c_adapter_id(client->adapter), client->addr);
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
-
-void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
- const struct v4l2_subdev_ops *ops)
-{
- v4l2_subdev_init(sd, ops);
- sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
- /* the owner is the same as the i2c_client's driver owner */
- sd->owner = client->dev.driver->owner;
- sd->dev = &client->dev;
- /* i2c_client and v4l2_subdev point to one another */
- v4l2_set_subdevdata(sd, client);
- i2c_set_clientdata(client, sd);
- v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
-
-/* Load an i2c sub-device. */
-struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, struct i2c_board_info *info,
- const unsigned short *probe_addrs)
-{
- struct v4l2_subdev *sd = NULL;
- struct i2c_client *client;
-
- BUG_ON(!v4l2_dev);
-
- request_module(I2C_MODULE_PREFIX "%s", info->type);
-
- /* Create the i2c client */
- if (info->addr == 0 && probe_addrs)
- client = i2c_new_probed_device(adapter, info, probe_addrs,
- NULL);
- else
- client = i2c_new_device(adapter, info);
-
- /* Note: by loading the module first we are certain that c->driver
- will be set if the driver was found. If the module was not loaded
- first, then the i2c core tries to delay-load the module for us,
- and then c->driver is still NULL until the module is finally
- loaded. This delay-load mechanism doesn't work if other drivers
- want to use the i2c device, so explicitly loading the module
- is the best alternative. */
- if (client == NULL || client->dev.driver == NULL)
- goto error;
-
- /* Lock the module so we can safely get the v4l2_subdev pointer */
- if (!try_module_get(client->dev.driver->owner))
- goto error;
- sd = i2c_get_clientdata(client);
-
- /* Register with the v4l2_device which increases the module's
- use count as well. */
- if (v4l2_device_register_subdev(v4l2_dev, sd))
- sd = NULL;
- /* Decrease the module use count to match the first try_module_get. */
- module_put(client->dev.driver->owner);
-
-error:
- /* If we have a client but no subdev, then something went wrong and
- we must unregister the client. */
- if (client && sd == NULL)
- i2c_unregister_device(client);
- return sd;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
-
-struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
- struct i2c_adapter *adapter, const char *client_type,
- u8 addr, const unsigned short *probe_addrs)
-{
- struct i2c_board_info info;
-
- /* Setup the i2c board info with the device type and
- the device address. */
- memset(&info, 0, sizeof(info));
- strscpy(info.type, client_type, sizeof(info.type));
- info.addr = addr;
-
- return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
-
-/* Return i2c client address of v4l2_subdev. */
-unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return client ? client->addr : I2C_CLIENT_END;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
-
-/* Return a list of I2C tuner addresses to probe. Use only if the tuner
- addresses are unknown. */
-const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
-{
- static const unsigned short radio_addrs[] = {
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
- 0x10,
-#endif
- 0x60,
- I2C_CLIENT_END
- };
- static const unsigned short demod_addrs[] = {
- 0x42, 0x43, 0x4a, 0x4b,
- I2C_CLIENT_END
- };
- static const unsigned short tv_addrs[] = {
- 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
- 0x60, 0x61, 0x62, 0x63, 0x64,
- I2C_CLIENT_END
- };
-
- switch (type) {
- case ADDRS_RADIO:
- return radio_addrs;
- case ADDRS_DEMOD:
- return demod_addrs;
- case ADDRS_TV:
- return tv_addrs;
- case ADDRS_TV_WITH_DEMOD:
- return tv_addrs + 4;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
-
-#endif /* defined(CONFIG_I2C) */
-
-#if defined(CONFIG_SPI)
-
-/* Load an spi sub-device. */
-
-void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
- const struct v4l2_subdev_ops *ops)
-{
- v4l2_subdev_init(sd, ops);
- sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
- /* the owner is the same as the spi_device's driver owner */
- sd->owner = spi->dev.driver->owner;
- sd->dev = &spi->dev;
- /* spi_device and v4l2_subdev point to one another */
- v4l2_set_subdevdata(sd, spi);
- spi_set_drvdata(spi, sd);
- /* initialize name */
- snprintf(sd->name, sizeof(sd->name), "%s %s",
- spi->dev.driver->name, dev_name(&spi->dev));
-}
-EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
-
-struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
- struct spi_master *master, struct spi_board_info *info)
-{
- struct v4l2_subdev *sd = NULL;
- struct spi_device *spi = NULL;
-
- BUG_ON(!v4l2_dev);
-
- if (info->modalias[0])
- request_module(info->modalias);
-
- spi = spi_new_device(master, info);
-
- if (spi == NULL || spi->dev.driver == NULL)
- goto error;
-
- if (!try_module_get(spi->dev.driver->owner))
- goto error;
-
- sd = spi_get_drvdata(spi);
-
- /* Register with the v4l2_device which increases the module's
- use count as well. */
- if (v4l2_device_register_subdev(v4l2_dev, sd))
- sd = NULL;
-
- /* Decrease the module use count to match the first try_module_get. */
- module_put(spi->dev.driver->owner);
-
-error:
- /* If we have a client but no subdev, then something went wrong and
- we must unregister the client. */
- if (!sd)
- spi_unregister_device(spi);
-
- return sd;
-}
-EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
-
-#endif /* defined(CONFIG_SPI) */
-
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
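The clamp-and-align rule described in the comment above can be expressed as a small helper; the sketch below is illustrative only (the function name and the exact rounding strategy are assumptions, not necessarily the in-tree implementation):

static unsigned int clamp_align_sketch(unsigned int x, unsigned int min,
				       unsigned int max, unsigned int align)
{
	/* Bits that must be zero for a value aligned to 2^align. */
	unsigned int mask = ~((1U << align) - 1);

	/* Clamp to the aligned ends of the range: min rounded up, max rounded down. */
	if (x < ((min + ~mask) & mask))
		x = (min + ~mask) & mask;
	if (x > (max & mask))
		x = max & mask;

	/* Round to the nearest multiple of 2^align, e.g. 20 -> 16 for align=4. */
	if (align)
		x = (x + (1U << (align - 1))) & mask;

	return x;
}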
@@ -455,11 +241,15 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 371537dd8cd3..1d8f38824631 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -6,6 +6,8 @@
*/
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -16,6 +18,12 @@
#include <media/v4l2-event.h>
#include <media/v4l2-dev.h>
+#define dprintk(vdev, fmt, arg...) do { \
+ if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
+ printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \
+ __func__, video_device_node_name(vdev), ##arg); \
+} while (0)
+
#define has_op(master, op) \
(master->ops && master->ops->op)
#define call_op(master, op) \
@@ -394,6 +402,16 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Explicit",
NULL,
};
+ static const char * const h264_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const h264_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
static const char * const mpeg_mpeg2_level[] = {
"Low",
"Main",
@@ -625,6 +643,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return h264_fp_arrangement_type;
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
return h264_fmo_map_type;
+ case V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE:
+ return h264_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_START_CODE:
+ return h264_start_code;
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
return mpeg_mpeg2_level;
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
@@ -844,6 +866,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
case V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS: return "H264 Slice Parameters";
case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS: return "H264 Decode Parameters";
+ case V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE: return "H264 Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_START_CODE: return "H264 Start Code";
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
@@ -885,6 +909,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER: return "VP8 Frame Header";
/* HEVC controls */
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
@@ -1211,6 +1236,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_START_CODE:
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
@@ -1345,6 +1372,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
+ *type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1629,10 +1659,105 @@ static void std_log(const struct v4l2_ctrl *ctrl)
})
/* Validate a new control */
+
+#define zero_padding(s) \
+ memset(&(s).padding, 0, sizeof((s).padding))
+
+/*
+ * Compound controls validation requires setting unused fields/flags to zero
+ * in order to properly detect unchanged controls with std_equal's memcmp.
+ */
+static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
+ struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ p_mpeg2_slice_params = p;
+
+ switch (p_mpeg2_slice_params->sequence.chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_structure) {
+ case 1: /* interlaced top field */
+ case 2: /* interlaced bottom field */
+ case 3: /* progressive */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ break;
+
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SPS:
+ case V4L2_CTRL_TYPE_H264_PPS:
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ break;
+
+ case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
+ p_vp8_frame_header = p;
+
+ switch (p_vp8_frame_header->num_dct_parts) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_padding(p_vp8_frame_header->segment_header);
+ zero_padding(p_vp8_frame_header->lf_header);
+ zero_padding(p_vp8_frame_header->quant_header);
+ zero_padding(p_vp8_frame_header->entropy_header);
+ zero_padding(p_vp8_frame_header->coder_state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
- struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
size_t len;
u64 offset;
s64 val;
@@ -1695,63 +1820,8 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
return -ERANGE;
return 0;
- case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
- p_mpeg2_slice_params = ptr.p;
-
- switch (p_mpeg2_slice_params->sequence.chroma_format) {
- case 1: /* 4:2:0 */
- case 2: /* 4:2:2 */
- case 3: /* 4:4:4 */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
- case 0: /* 8 bits */
- case 1: /* 9 bits */
- case 2: /* 10 bits */
- case 3: /* 11 bits */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.picture_structure) {
- case 1: /* interlaced top field */
- case 2: /* interlaced bottom field */
- case 3: /* progressive */
- break;
- default:
- return -EINVAL;
- }
-
- switch (p_mpeg2_slice_params->picture.picture_coding_type) {
- case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
- case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-
- case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
- return 0;
-
- case V4L2_CTRL_TYPE_FWHT_PARAMS:
- return 0;
-
- case V4L2_CTRL_TYPE_H264_SPS:
- case V4L2_CTRL_TYPE_H264_PPS:
- case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
- case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
- case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
- return 0;
-
default:
- return -EINVAL;
+ return std_validate_compound(ctrl, idx, ptr);
}
}
@@ -2348,6 +2418,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
break;
+ case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
+ elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -3217,6 +3290,7 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
bool get)
{
struct v4l2_ctrl_helper *h;
@@ -3234,20 +3308,31 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (cs->which &&
cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
- V4L2_CTRL_ID2WHICH(id) != cs->which)
+ V4L2_CTRL_ID2WHICH(id) != cs->which) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
return -EINVAL;
+ }
/* Old-style private controls are not allowed for
extended controls */
- if (id >= V4L2_CID_PRIVATE_BASE)
+ if (id >= V4L2_CID_PRIVATE_BASE) {
+ dprintk(vdev,
+ "old-style private controls not allowed\n");
return -EINVAL;
+ }
ref = find_ref_lock(hdl, id);
- if (ref == NULL)
+ if (ref == NULL) {
+ dprintk(vdev, "cannot find control id 0x%x\n", id);
return -EINVAL;
+ }
h->ref = ref;
ctrl = ref->ctrl;
- if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
+ dprintk(vdev, "control id 0x%x is disabled\n", id);
return -EINVAL;
+ }
if (ctrl->cluster[0]->ncontrols > 1)
have_clusters = true;
@@ -3257,10 +3342,17 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
unsigned tot_size = ctrl->elems * ctrl->elem_size;
if (c->size < tot_size) {
+ /*
+ * In the get case the application first
+ * queries to obtain the size of the control.
+ */
if (get) {
c->size = tot_size;
return -ENOSPC;
}
+ dprintk(vdev,
+ "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
+ id, c->size, tot_size);
return -EFAULT;
}
c->size = tot_size;
@@ -3321,7 +3413,8 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
/* Get extended controls. Allocates the helpers array if needed. */
static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs)
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -3347,7 +3440,7 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, true);
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
@@ -3440,8 +3533,8 @@ v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
return obj;
}
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
- struct v4l2_ext_controls *cs)
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
{
struct media_request_object *obj = NULL;
struct media_request *req = NULL;
@@ -3477,7 +3570,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
req_obj);
}
- ret = v4l2_g_ext_ctrls_common(hdl, cs);
+ ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
if (obj) {
media_request_unlock_for_access(req);
@@ -3620,7 +3713,9 @@ static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
/* Validate controls. */
static int validate_ctrls(struct v4l2_ext_controls *cs,
- struct v4l2_ctrl_helper *helpers, bool set)
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool set)
{
unsigned i;
int ret = 0;
@@ -3632,16 +3727,24 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
cs->error_idx = i;
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ dprintk(vdev,
+ "control id 0x%x is read-only\n",
+ ctrl->id);
return -EACCES;
+ }
/* This test is also done in try_set_control_cluster() which
is called in atomic context, so that has the final say,
but it makes sense to do an up-front check as well. Once
an error occurs in try_set_control_cluster() some other
controls may have been set already and we want to do a
best-effort to avoid that. */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
+ dprintk(vdev,
+ "control id 0x%x is grabbed, cannot set\n",
+ ctrl->id);
return -EBUSY;
+ }
/*
* Skip validation for now if the payload needs to be copied
* from userspace into kernelspace. We'll validate those later.
@@ -3676,7 +3779,8 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master)
/* Try or try-and-set controls */
static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs, bool set)
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -3686,13 +3790,19 @@ static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
cs->error_idx = cs->count;
/* Default value cannot be changed */
- if (cs->which == V4L2_CTRL_WHICH_DEF_VAL)
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
+ dprintk(vdev, "%s: cannot change default value\n",
+ video_device_node_name(vdev));
return -EINVAL;
+ }
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
- if (hdl == NULL)
+ if (hdl == NULL) {
+ dprintk(vdev, "%s: invalid null control handler\n",
+ video_device_node_name(vdev));
return -EINVAL;
+ }
if (cs->count == 0)
return class_check(hdl, cs->which);
@@ -3703,9 +3813,9 @@ static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
if (!helpers)
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, false);
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
if (!ret)
- ret = validate_ctrls(cs, helpers, set);
+ ret = validate_ctrls(cs, helpers, vdev, set);
if (ret && set)
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++) {
@@ -3790,7 +3900,9 @@ static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
}
static int try_set_ext_ctrls(struct v4l2_fh *fh,
- struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
struct v4l2_ext_controls *cs, bool set)
{
struct media_request_object *obj = NULL;
@@ -3798,21 +3910,39 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh,
int ret;
if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
- if (!mdev || cs->request_fd < 0)
+ if (!mdev) {
+ dprintk(vdev, "%s: missing media device\n",
+ video_device_node_name(vdev));
return -EINVAL;
+ }
+
+ if (cs->request_fd < 0) {
+ dprintk(vdev, "%s: invalid request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return -EINVAL;
+ }
req = media_request_get_by_fd(mdev, cs->request_fd);
- if (IS_ERR(req))
+ if (IS_ERR(req)) {
+ dprintk(vdev, "%s: cannot find request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
return PTR_ERR(req);
+ }
ret = media_request_lock_for_update(req);
if (ret) {
+ dprintk(vdev, "%s: cannot lock request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
media_request_put(req);
return ret;
}
obj = v4l2_ctrls_find_req_obj(hdl, req, set);
if (IS_ERR(obj)) {
+ dprintk(vdev,
+ "%s: cannot find request object for request fd %d\n",
+ video_device_node_name(vdev),
+ cs->request_fd);
media_request_unlock_for_update(req);
media_request_put(req);
return PTR_ERR(obj);
@@ -3821,7 +3951,11 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh,
req_obj);
}
- ret = try_set_ext_ctrls_common(fh, hdl, cs, set);
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
if (obj) {
media_request_unlock_for_update(req);
@@ -3832,17 +3966,22 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh,
return ret;
}
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(NULL, hdl, mdev, cs, false);
+ return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
-int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct media_device *mdev, struct v4l2_ext_controls *cs)
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(fh, hdl, mdev, cs, true);
+ return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index cbb74f748555..4037689a945a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -859,6 +859,9 @@ int __video_register_device(struct video_device *vdev,
/* the v4l2_dev pointer MUST be present */
if (WARN_ON(!vdev->v4l2_dev))
return -EINVAL;
+ /* the device_caps field MUST be set for all but subdevs */
+ if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
+ return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
@@ -1089,7 +1092,7 @@ static void __exit videodev_exit(void)
subsys_initcall(videodev_init);
module_exit(videodev_exit)
-MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>, Bill Dirks, Justin Schoeman, Gerd Knorr");
+MODULE_DESCRIPTION("Video4Linux2 core driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index aa277f5bc862..63d6b147b21e 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -9,11 +9,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/module.h>
-#include <linux/i2c.h>
#include <linux/slab.h>
-#if defined(CONFIG_SPI)
-#include <linux/spi/spi.h>
-#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -102,37 +98,10 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
-#if IS_ENABLED(CONFIG_I2C)
- if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- /*
- * We need to unregister the i2c client
- * explicitly. We cannot rely on
- * i2c_del_adapter to always unregister
- * clients for us, since if the i2c bus is a
- * platform bus, then it is never deleted.
- *
- * Device tree or ACPI based devices must not
- * be unregistered as they have not been
- * registered by us, and would not be
- * re-created by just probing the V4L2 driver.
- */
- if (client &&
- !client->dev.of_node && !client->dev.fwnode)
- i2c_unregister_device(client);
- continue;
- }
-#endif
-#if defined(CONFIG_SPI)
- if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
- struct spi_device *spi = v4l2_get_subdevdata(sd);
-
- if (spi && !spi->dev.of_node && !spi->dev.fwnode)
- spi_unregister_device(spi);
- continue;
- }
-#endif
+ if (sd->flags & V4L2_SUBDEV_FL_IS_I2C)
+ v4l2_i2c_subdev_unregister(sd);
+ else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI)
+ v4l2_spi_subdev_unregister(sd);
}
/* Mark as unregistered, thus preventing duplicate unregistrations */
v4l2_dev->name[0] = '\0';
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 7e740d332a54..3bd1888787eb 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -163,7 +163,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
pr_debug("no lane mapping given, using defaults\n");
}
- rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0);
+ rval = fwnode_property_count_u32(fwnode, "data-lanes");
if (rval > 0) {
num_data_lanes =
min_t(int, V4L2_FWNODE_CSI2_MAX_DATA_LANES, rval);
@@ -191,8 +191,7 @@ static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
pr_debug("lane %u position %u\n", i, array[i]);
}
- rval = fwnode_property_read_u32_array(fwnode, "lane-polarities", NULL,
- 0);
+ rval = fwnode_property_count_u32(fwnode, "lane-polarities");
if (rval > 0) {
if (rval != 1 + num_data_lanes /* clock+data */) {
pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
@@ -525,8 +524,7 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
if (rval < 0)
return rval;
- rval = fwnode_property_read_u64_array(fwnode, "link-frequencies",
- NULL, 0);
+ rval = fwnode_property_count_u64(fwnode, "link-frequencies");
if (rval > 0) {
unsigned int i;
@@ -777,23 +775,17 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
args.fwnode,
sizeof(*asd));
+ fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
/* not an error if asd already exists */
- if (ret == -EEXIST) {
- fwnode_handle_put(args.fwnode);
+ if (PTR_ERR(asd) == -EEXIST)
continue;
- }
- goto error;
+ return PTR_ERR(asd);
}
}
return 0;
-
-error:
- fwnode_handle_put(args.fwnode);
- return ret;
}
/*
@@ -1083,23 +1075,18 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
sizeof(*asd));
+ fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
/* not an error if asd already exists */
- if (ret == -EEXIST) {
- fwnode_handle_put(fwnode);
+ if (ret == -EEXIST)
continue;
- }
- goto error;
+ return PTR_ERR(asd);
}
}
return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
-
-error:
- fwnode_handle_put(fwnode);
- return ret;
}
int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev,
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
new file mode 100644
index 000000000000..5bf99e7c0c09
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-i2c - I2C helpers for Video4Linux2
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /*
+ * We need to unregister the i2c client
+ * explicitly. We cannot rely on
+ * i2c_del_adapter to always unregister
+ * clients for us, since if the i2c bus is a
+ * platform bus, then it is never deleted.
+ *
+ * Device tree or ACPI based devices must not
+ * be unregistered as they have not been
+ * registered by us, and would not be
+ * re-created by just probing the V4L2 driver.
+ */
+ if (client && !client->dev.of_node && !client->dev.fwnode)
+ i2c_unregister_device(client);
+}
+
+void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd,
+ struct i2c_client *client,
+ const char *devname, const char *postfix)
+{
+ if (!devname)
+ devname = client->dev.driver->name;
+ if (!postfix)
+ postfix = "";
+
+ snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
+ i2c_adapter_id(client->adapter), client->addr);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
+
+void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
+ /* the owner is the same as the i2c_client's driver owner */
+ sd->owner = client->dev.driver->owner;
+ sd->dev = &client->dev;
+ /* i2c_client and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, client);
+ i2c_set_clientdata(client, sd);
+ v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
+
+/* Load an i2c sub-device. */
+struct v4l2_subdev
+*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ if (!v4l2_dev)
+ return NULL;
+
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_probed_device(adapter, info, probe_addrs,
+ NULL);
+ else
+ client = i2c_new_device(adapter, info);
+
+ /*
+ * Note: by loading the module first we are certain that c->driver
+ * will be set if the driver was found. If the module was not loaded
+ * first, then the i2c core tries to delay-load the module for us,
+ * and then c->driver is still NULL until the module is finally
+ * loaded. This delay-load mechanism doesn't work if other drivers
+ * want to use the i2c device, so explicitly loading the module
+ * is the best alternative.
+ */
+ if (!client || !client->dev.driver)
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->dev.driver->owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->dev.driver->owner);
+
+error:
+ /*
+ * If we have a client but no subdev, then something went wrong and
+ * we must unregister the client.
+ */
+ if (client && !sd)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *client_type,
+ u8 addr,
+ const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /*
+ * Setup the i2c board info with the device type and
+ * the device address.
+ */
+ memset(&info, 0, sizeof(info));
+ strscpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info,
+ probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
+
+/* Return i2c client address of v4l2_subdev. */
+unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return client ? client->addr : I2C_CLIENT_END;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
+
+/*
+ * Return a list of I2C tuner addresses to probe. Use only if the tuner
+ * addresses are unknown.
+ */
+const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
+{
+ static const unsigned short radio_addrs[] = {
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
+ 0x10,
+#endif
+ 0x60,
+ I2C_CLIENT_END
+ };
+ static const unsigned short demod_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b,
+ I2C_CLIENT_END
+ };
+ static const unsigned short tv_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
+ 0x60, 0x61, 0x62, 0x63, 0x64,
+ I2C_CLIENT_END
+ };
+
+ switch (type) {
+ case ADDRS_RADIO:
+ return radio_addrs;
+ case ADDRS_DEMOD:
+ return demod_addrs;
+ case ADDRS_TV:
+ return tv_addrs;
+ case ADDRS_TV_WITH_DEMOD:
+ return tv_addrs + 4;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
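For context, a bridge driver instantiates a sub-device through the helper above roughly as follows. This is only a sketch: the driver name, I2C address and state layout are illustrative assumptions, not a real binding.

	struct v4l2_subdev *sd;

	/* "demod_drv" and 0x43 are placeholder values for this example. */
	sd = v4l2_i2c_new_subdev(&state->v4l2_dev, state->i2c_adap,
				 "demod_drv", 0x43, NULL);
	if (!sd)
		return -ENODEV;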
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b1f4b991dba6..51b912743f0f 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1057,14 +1057,19 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_querycap(file, fh, cap);
- cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
/*
- * Drivers MUST fill in device_caps, so check for this and
- * warn if it was forgotten.
+ * Drivers must not change device_caps, so check for this and
+ * warn if this happened.
+ */
+ WARN_ON(cap->device_caps != vfd->device_caps);
+ /*
+ * Check that capabilities is a superset of
+ * vfd->device_caps | V4L2_CAP_DEVICE_CAPS
*/
- WARN(!(cap->capabilities & V4L2_CAP_DEVICE_CAPS) ||
- !cap->device_caps, "Bad caps for driver %s, %x %x",
- cap->driver, cap->capabilities, cap->device_caps);
+ WARN_ON((cap->capabilities &
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) !=
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS));
+ cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
return ret;
@@ -1169,9 +1174,21 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break;
case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break;
case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break;
+ case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break;
case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break;
case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break;
case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break;
case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break;
case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break;
case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break;
@@ -1186,6 +1203,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
@@ -1301,13 +1322,14 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break;
case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break;
case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break;
- case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit signed deltas"; break;
- case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit signed deltas"; break;
- case V4L2_TCH_FMT_TU16: descr = "16-bit unsigned touch data"; break;
- case V4L2_TCH_FMT_TU08: descr = "8-bit unsigned touch data"; break;
+ case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break;
+ case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break;
case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
- case V4L2_META_FMT_UVC: descr = "UVC payload header metadata"; break;
+ case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
+ case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
default:
/* Compressed formats */
@@ -1321,16 +1343,17 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_H264: descr = "H.264"; break;
case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break;
case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break;
- case V4L2_PIX_FMT_H264_SLICE_RAW: descr = "H.264 Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break;
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
- case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break;
+ case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break;
case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break;
case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
+ case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
@@ -1365,14 +1388,14 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
(char)((fmt->pixelformat >> 8) & 0x7f),
(char)((fmt->pixelformat >> 16) & 0x7f),
(char)((fmt->pixelformat >> 24) & 0x7f),
- (fmt->pixelformat & (1 << 31)) ? "-BE" : "");
+ (fmt->pixelformat & (1UL << 31)) ? "-BE" : "");
break;
}
}
if (descr)
WARN_ON(strscpy(fmt->description, descr, sz) < 0);
- fmt->flags = flags;
+ fmt->flags |= flags;
}
static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
@@ -1645,6 +1668,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
unsigned int i;
@@ -1661,6 +1685,8 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
@@ -2165,9 +2191,11 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
@@ -2184,9 +2212,11 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
@@ -2203,9 +2233,11 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 4f5176702937..19937dd3c6f6 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -603,11 +603,10 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
- struct poll_table_struct *wait)
+static __poll_t v4l2_m2m_poll_for_data(struct file *file,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
{
- struct video_device *vfd = video_devdata(file);
- __poll_t req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
__poll_t rc = 0;
@@ -619,16 +618,6 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
poll_wait(file, &src_q->done_wq, wait);
poll_wait(file, &dst_q->done_wq, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- rc = EPOLLPRI;
- if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
- return rc;
- }
-
/*
* There has to be at least one buffer queued on each queued_list, which
* means either in driver already or waiting for driver to claim it
@@ -637,10 +626,8 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if ((!src_q->streaming || src_q->error ||
list_empty(&src_q->queued_list)) &&
(!dst_q->streaming || dst_q->error ||
- list_empty(&dst_q->queued_list))) {
- rc |= EPOLLERR;
- goto end;
- }
+ list_empty(&dst_q->queued_list)))
+ return EPOLLERR;
spin_lock_irqsave(&dst_q->done_lock, flags);
if (list_empty(&dst_q->done_list)) {
@@ -650,7 +637,7 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
*/
if (dst_q->last_buffer_dequeued) {
spin_unlock_irqrestore(&dst_q->done_lock, flags);
- return rc | EPOLLIN | EPOLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
}
spin_unlock_irqrestore(&dst_q->done_lock, flags);
@@ -673,7 +660,27 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
rc |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&dst_q->done_lock, flags);
-end:
+ return rc;
+}
+
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+
+ if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
+ rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ rc |= EPOLLPRI;
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
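With this split, the data-queue checks (and their EPOLLERR result) run only when EPOLLIN/EPOLLOUT/EPOLLRDNORM/EPOLLWRNORM were requested, so an application that polls solely for V4L2 events no longer gets a spurious error while both queues are empty. A minimal user-space sketch of that case (the descriptor name is illustrative; needs <poll.h>, <sys/ioctl.h> and <linux/videodev2.h>):

	struct pollfd pfd = { .fd = video_fd, .events = POLLPRI };
	struct v4l2_event ev;

	/* Wait for a V4L2 event only; empty queues are not an error here. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		ioctl(video_fd, VIDIOC_DQEVENT, &ev);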
diff --git a/drivers/media/v4l2-core/v4l2-spi.c b/drivers/media/v4l2-core/v4l2-spi.c
new file mode 100644
index 000000000000..eadecdff7349
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-spi.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-spi - SPI helpers for Video4Linux2
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi && !spi->dev.of_node && !spi->dev.fwnode)
+ spi_unregister_device(spi);
+}
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ sd->dev = &spi->dev;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ snprintf(sd->name, sizeof(sd->name), "%s %s",
+ spi->dev.driver->name, dev_name(&spi->dev));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master,
+ struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ if (!v4l2_dev)
+ return NULL;
+ if (info->modalias[0])
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (!spi || !spi->dev.driver)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /*
+ * If we have an spi_device but no subdev, then something went wrong and
+ * we must unregister the spi_device.
+ */
+ if (!sd)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
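A SPI sub-device driver is expected to wire itself up with v4l2_spi_subdev_init() from its probe routine, along these lines (a sketch only; the foo_* names and the state structure are hypothetical):

static int foo_probe(struct spi_device *spi)
{
	struct foo_state *state;

	state = devm_kzalloc(&spi->dev, sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Links the subdev and the spi_device and initializes sd->name. */
	v4l2_spi_subdev_init(&state->sd, spi, &foo_subdev_ops);
	return 0;
}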
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 21fb90d66bfc..f725cd9b66b9 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -124,7 +124,7 @@ static inline int check_which(__u32 which)
static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (sd->entity.graph_obj.mdev) {
+ if (sd->entity.num_pads) {
if (pad >= sd->entity.num_pads)
return -EINVAL;
return 0;
@@ -372,19 +372,19 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_g_ext_ctrls(vfh->ctrl_handler,
- sd->v4l2_dev->mdev, arg);
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
- sd->v4l2_dev->mdev, arg);
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
return v4l2_try_ext_ctrls(vfh->ctrl_handler,
- sd->v4l2_dev->mdev, arg);
+ vdev, sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 7ef3e4d22bf6..939fc11cf080 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1123,7 +1123,6 @@ __poll_t videobuf_poll_stream(struct file *file,
struct videobuf_buffer *buf = NULL;
__poll_t rc = 0;
- poll_wait(file, &buf->done, wait);
videobuf_queue_lock(q);
if (q->streaming) {
if (!list_empty(&q->stream))
@@ -1143,7 +1142,9 @@ __poll_t videobuf_poll_stream(struct file *file,
}
buf = q->read_buf;
}
- if (!buf)
+ if (buf)
+ poll_wait(file, &buf->done, wait);
+ else
rc = EPOLLERR;
if (0 == rc) {
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 42ab43affbff..439d7d886873 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -41,17 +41,40 @@
#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
#define F_MMU_EN BIT(0)
+/* SMI COMMON */
+#define SMI_BUS_SEL 0x220
+#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
+/* All larbs use MMU0 by default. Only specify the mmu1 larbs here. */
+#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+enum mtk_smi_gen {
+ MTK_SMI_GEN1,
+ MTK_SMI_GEN2
+};
+
+struct mtk_smi_common_plat {
+ enum mtk_smi_gen gen;
+ bool has_gals;
+ u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+};
+
struct mtk_smi_larb_gen {
- bool need_larbid;
int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *);
+ unsigned int larb_direct_to_common_mask;
+ bool has_gals;
};
struct mtk_smi {
struct device *dev;
struct clk *clk_apb, *clk_smi;
+ struct clk *clk_gals0, *clk_gals1;
struct clk *clk_async; /*only needed by mt2701*/
- void __iomem *smi_ao_base;
+ union {
+ void __iomem *smi_ao_base; /* only for gen1 */
+ void __iomem *base; /* only for gen2 */
+ };
+ const struct mtk_smi_common_plat *plat;
};
struct mtk_smi_larb { /* larb: local arbiter */
@@ -63,82 +86,56 @@ struct mtk_smi_larb { /* larb: local arbiter */
u32 *mmu;
};
-enum mtk_smi_gen {
- MTK_SMI_GEN1,
- MTK_SMI_GEN2
-};
-
-static int mtk_smi_enable(const struct mtk_smi *smi)
+static int mtk_smi_clk_enable(const struct mtk_smi *smi)
{
int ret;
- ret = pm_runtime_get_sync(smi->dev);
- if (ret < 0)
- return ret;
-
ret = clk_prepare_enable(smi->clk_apb);
if (ret)
- goto err_put_pm;
+ return ret;
ret = clk_prepare_enable(smi->clk_smi);
if (ret)
goto err_disable_apb;
+ ret = clk_prepare_enable(smi->clk_gals0);
+ if (ret)
+ goto err_disable_smi;
+
+ ret = clk_prepare_enable(smi->clk_gals1);
+ if (ret)
+ goto err_disable_gals0;
+
return 0;
+err_disable_gals0:
+ clk_disable_unprepare(smi->clk_gals0);
+err_disable_smi:
+ clk_disable_unprepare(smi->clk_smi);
err_disable_apb:
clk_disable_unprepare(smi->clk_apb);
-err_put_pm:
- pm_runtime_put_sync(smi->dev);
return ret;
}
-static void mtk_smi_disable(const struct mtk_smi *smi)
+static void mtk_smi_clk_disable(const struct mtk_smi *smi)
{
+ clk_disable_unprepare(smi->clk_gals1);
+ clk_disable_unprepare(smi->clk_gals0);
clk_disable_unprepare(smi->clk_smi);
clk_disable_unprepare(smi->clk_apb);
- pm_runtime_put_sync(smi->dev);
}
int mtk_smi_larb_get(struct device *larbdev)
{
- struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
- const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
- struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
- int ret;
-
- /* Enable the smi-common's power and clocks */
- ret = mtk_smi_enable(common);
- if (ret)
- return ret;
+ int ret = pm_runtime_get_sync(larbdev);
- /* Enable the larb's power and clocks */
- ret = mtk_smi_enable(&larb->smi);
- if (ret) {
- mtk_smi_disable(common);
- return ret;
- }
-
- /* Configure the iommu info for this larb */
- larb_gen->config_port(larbdev);
-
- return 0;
+ return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
void mtk_smi_larb_put(struct device *larbdev)
{
- struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
- struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
-
- /*
- * Don't de-configure the iommu info for this larb since there may be
- * several modules in this larb.
- * The iommu info will be reset after power off.
- */
-
- mtk_smi_disable(&larb->smi);
- mtk_smi_disable(common);
+ pm_runtime_put_sync(larbdev);
}
EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
@@ -146,39 +143,26 @@ static int
mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- struct mtk_smi_iommu *smi_iommu = data;
+ struct mtk_smi_larb_iommu *larb_mmu = data;
unsigned int i;
- if (larb->larb_gen->need_larbid) {
- larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu;
- return 0;
- }
-
- /*
- * If there is no larbid property, Loop to find the corresponding
- * iommu information.
- */
- for (i = 0; i < smi_iommu->larb_nr; i++) {
- if (dev == smi_iommu->larb_imu[i].dev) {
- /* The 'mmu' may be updated in iommu-attach/detach. */
- larb->mmu = &smi_iommu->larb_imu[i].mmu;
+ for (i = 0; i < MTK_LARB_NR_MAX; i++) {
+ if (dev == larb_mmu[i].dev) {
+ larb->larbid = i;
+ larb->mmu = &larb_mmu[i].mmu;
return 0;
}
}
return -ENODEV;
}
-static void mtk_smi_larb_config_port_mt2712(struct device *dev)
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg;
int i;
- /*
- * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly.
- * Don't need to set it again.
- */
- if (larb->larbid == 8 || larb->larbid == 9)
+ if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
return;
for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
@@ -243,7 +227,6 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
- .need_larbid = true,
.port_in_larb = {
LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
@@ -252,8 +235,15 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
- .need_larbid = true,
- .config_port = mtk_smi_larb_config_port_mt2712,
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
+ .has_gals = true,
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
+ /* IPU0 | IPU1 | CCU */
};
static const struct of_device_id mtk_smi_larb_of_ids[] = {
@@ -269,6 +259,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt2712-smi-larb",
.data = &mtk_smi_larb_mt2712
},
+ {
+ .compatible = "mediatek,mt8183-smi-larb",
+ .data = &mtk_smi_larb_mt8183
+ },
{}
};
@@ -279,7 +273,6 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *smi_node;
struct platform_device *smi_pdev;
- int err;
larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
if (!larb)
@@ -298,16 +291,16 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
larb->smi.clk_smi = devm_clk_get(dev, "smi");
if (IS_ERR(larb->smi.clk_smi))
return PTR_ERR(larb->smi.clk_smi);
- larb->smi.dev = dev;
- if (larb->larb_gen->need_larbid) {
- err = of_property_read_u32(dev->of_node, "mediatek,larb-id",
- &larb->larbid);
- if (err) {
- dev_err(dev, "missing larbid property\n");
- return err;
- }
+ if (larb->larb_gen->has_gals) {
+ /* The larb may still not have gals even if the SoC supports them. */
+ larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
+ if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
+ larb->smi.clk_gals0 = NULL;
+ else if (IS_ERR(larb->smi.clk_gals0))
+ return PTR_ERR(larb->smi.clk_gals0);
}
+ larb->smi.dev = dev;
smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
if (!smi_node)
@@ -336,27 +329,86 @@ static int mtk_smi_larb_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ int ret;
+
+ /* Power on smi-common. */
+ ret = pm_runtime_get_sync(larb->smi_common_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+ return ret;
+ }
+
+ ret = mtk_smi_clk_enable(&larb->smi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable clock(%d).\n", ret);
+ pm_runtime_put_sync(larb->smi_common_dev);
+ return ret;
+ }
+
+ /* Configure the basic setting for this larb */
+ larb_gen->config_port(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ mtk_smi_clk_disable(&larb->smi);
+ pm_runtime_put_sync(larb->smi_common_dev);
+ return 0;
+}
+
+static const struct dev_pm_ops smi_larb_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+};
+
static struct platform_driver mtk_smi_larb_driver = {
.probe = mtk_smi_larb_probe,
.remove = mtk_smi_larb_remove,
.driver = {
.name = "mtk-smi-larb",
.of_match_table = mtk_smi_larb_of_ids,
+ .pm = &smi_larb_pm_ops,
}
};
+static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
+ .gen = MTK_SMI_GEN1,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
+ .gen = MTK_SMI_GEN2,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
{
.compatible = "mediatek,mt8173-smi-common",
- .data = (void *)MTK_SMI_GEN2
+ .data = &mtk_smi_common_gen2,
},
{
.compatible = "mediatek,mt2701-smi-common",
- .data = (void *)MTK_SMI_GEN1
+ .data = &mtk_smi_common_gen1,
},
{
.compatible = "mediatek,mt2712-smi-common",
- .data = (void *)MTK_SMI_GEN2
+ .data = &mtk_smi_common_gen2,
+ },
+ {
+ .compatible = "mediatek,mt8183-smi-common",
+ .data = &mtk_smi_common_mt8183,
},
{}
};
@@ -366,13 +418,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_smi *common;
struct resource *res;
- enum mtk_smi_gen smi_gen;
int ret;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
return -ENOMEM;
common->dev = dev;
+ common->plat = of_device_get_match_data(dev);
common->clk_apb = devm_clk_get(dev, "apb");
if (IS_ERR(common->clk_apb))
@@ -382,14 +434,23 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->clk_smi))
return PTR_ERR(common->clk_smi);
+ if (common->plat->has_gals) {
+ common->clk_gals0 = devm_clk_get(dev, "gals0");
+ if (IS_ERR(common->clk_gals0))
+ return PTR_ERR(common->clk_gals0);
+
+ common->clk_gals1 = devm_clk_get(dev, "gals1");
+ if (IS_ERR(common->clk_gals1))
+ return PTR_ERR(common->clk_gals1);
+ }
+
/*
* for mtk smi gen 1, we need to get the ao(always on) base to config
* m4u port, and we need to enable the async clock to transform the smi
* clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* base.
*/
- smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev);
- if (smi_gen == MTK_SMI_GEN1) {
+ if (common->plat->gen == MTK_SMI_GEN1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
common->smi_ao_base = devm_ioremap_resource(dev, res);
if (IS_ERR(common->smi_ao_base))
@@ -402,6 +463,11 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
ret = clk_prepare_enable(common->clk_async);
if (ret)
return ret;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ common->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(common->base))
+ return PTR_ERR(common->base);
}
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
@@ -414,12 +480,42 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+{
+ struct mtk_smi *common = dev_get_drvdata(dev);
+ u32 bus_sel = common->plat->bus_sel;
+ int ret;
+
+ ret = mtk_smi_clk_enable(common);
+ if (ret) {
+ dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+ return ret;
+ }
+
+ if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
+ writel(bus_sel, common->base + SMI_BUS_SEL);
+ return 0;
+}
+
+static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
+{
+ struct mtk_smi *common = dev_get_drvdata(dev);
+
+ mtk_smi_clk_disable(common);
+ return 0;
+}
+
+static const struct dev_pm_ops smi_common_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+};
+
static struct platform_driver mtk_smi_common_driver = {
.probe = mtk_smi_common_probe,
.remove = mtk_smi_common_remove,
.driver = {
.name = "mtk-smi-common",
.of_match_table = mtk_smi_common_of_ids,
+ .pm = &smi_common_pm_ops,
}
};
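
The mtk-smi hunks above replace the old enum cast through a void match-data pointer with per-SoC platform-data structs looked up via of_device_get_match_data(). A minimal sketch of that pattern follows, with foo_* names standing in for the real driver symbols:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_plat {
	bool has_gals;
	u32  bus_sel;
};

static const struct foo_plat foo_plat_a = { .has_gals = false };
static const struct foo_plat foo_plat_b = { .has_gals = true, .bus_sel = 0xa4 };

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo-a", .data = &foo_plat_a },
	{ .compatible = "vendor,foo-b", .data = &foo_plat_b },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* Each compatible entry points at a const plat struct, so no cast. */
	const struct foo_plat *plat = of_device_get_match_data(&pdev->dev);

	if (!plat)
		return -EINVAL;
	/* plat->has_gals and plat->bus_sel then drive SoC-specific setup. */
	return 0;
}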
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 384927ebde74..d9ee8e3dc72d 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1087,7 +1087,7 @@ static u16 msb_get_free_block(struct msb_data *msb, int zone)
pos %= msb->free_block_count[zone];
- dbg_verbose("have %d choices for a free block, selected randomally: %d",
+ dbg_verbose("have %d choices for a free block, selected randomly: %d",
msb->free_block_count[zone], pos);
pba = find_next_zero_bit(msb->used_blocks_bitmap,
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 2932f421b3ea..dd3a1f3dcc19 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -847,8 +847,7 @@ static void r592_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int r592_suspend(struct device *core_dev)
{
- struct pci_dev *pdev = to_pci_dev(core_dev);
- struct r592_device *dev = pci_get_drvdata(pdev);
+ struct r592_device *dev = dev_get_drvdata(core_dev);
r592_clear_interrupts(dev);
memstick_suspend_host(dev->host);
@@ -858,8 +857,7 @@ static int r592_suspend(struct device *core_dev)
static int r592_resume(struct device *core_dev)
{
- struct pci_dev *pdev = to_pci_dev(core_dev);
- struct r592_device *dev = pci_get_drvdata(pdev);
+ struct r592_device *dev = dev_get_drvdata(core_dev);
r592_clear_interrupts(dev);
r592_enable_device(dev, false);
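
The r592 and alcor_pci PM hunks drop the to_pci_dev()/pci_get_drvdata() pair in favor of dev_get_drvdata(), which is equivalent because pci_set_drvdata() stores the pointer in the embedded struct device. A sketch of the simplified callback, with foo_* names as placeholders:

#include <linux/device.h>
#include <linux/pm.h>

struct foo_priv { int irq; };

static int foo_suspend(struct device *dev)
{
	/* Same pointer as pci_get_drvdata(to_pci_dev(dev)). */
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (!priv)
		return -ENODEV;
	/* quiesce the hardware described by priv ... */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, NULL);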
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f129f9678940..c8cbde59bbf6 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1105,7 +1105,6 @@ config MFD_SI476X_CORE
config MFD_SM501
tristate "Silicon Motion SM501"
depends on HAS_DMA
- select DMA_DECLARE_COHERENT
---help---
This is the core driver for the Silicon Motion SM501 multimedia
companion chip. This device is a multifunction device which may
@@ -1714,7 +1713,6 @@ config MFD_TC6393XB
select GPIOLIB
select MFD_CORE
select MFD_TMIO
- select DMA_DECLARE_COHERENT
help
Support for Toshiba Mobile IO Controller TC6393XB
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 9f58cb2d3789..78ee4b28fca2 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -321,18 +321,9 @@ static const struct file_operations aat2870_reg_fops = {
static void aat2870_init_debugfs(struct aat2870_data *aat2870)
{
aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
- if (!aat2870->dentry_root) {
- dev_warn(aat2870->dev,
- "Failed to create debugfs root directory\n");
- return;
- }
- aat2870->dentry_reg = debugfs_create_file("regs", 0644,
- aat2870->dentry_root,
- aat2870, &aat2870_reg_fops);
- if (!aat2870->dentry_reg)
- dev_warn(aat2870->dev,
- "Failed to create debugfs register file\n");
+ debugfs_create_file("regs", 0644, aat2870->dentry_root, aat2870,
+ &aat2870_reg_fops);
}
#else
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e350ab64238e..9f3dbc31d3e9 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -575,58 +575,27 @@ static const struct file_operations ab3100_get_set_reg_fops = {
.llseek = noop_llseek,
};
-static struct dentry *ab3100_dir;
-static struct dentry *ab3100_reg_file;
static struct ab3100_get_set_reg_priv ab3100_get_priv;
-static struct dentry *ab3100_get_reg_file;
static struct ab3100_get_set_reg_priv ab3100_set_priv;
-static struct dentry *ab3100_set_reg_file;
static void ab3100_setup_debugfs(struct ab3100 *ab3100)
{
- int err;
+ struct dentry *ab3100_dir;
ab3100_dir = debugfs_create_dir("ab3100", NULL);
- if (!ab3100_dir)
- goto exit_no_debugfs;
- ab3100_reg_file = debugfs_create_file("registers",
- S_IRUGO, ab3100_dir, ab3100,
- &ab3100_registers_fops);
- if (!ab3100_reg_file) {
- err = -ENOMEM;
- goto exit_destroy_dir;
- }
+ debugfs_create_file("registers", S_IRUGO, ab3100_dir, ab3100,
+ &ab3100_registers_fops);
ab3100_get_priv.ab3100 = ab3100;
ab3100_get_priv.mode = false;
- ab3100_get_reg_file = debugfs_create_file("get_reg",
- S_IWUSR, ab3100_dir, &ab3100_get_priv,
- &ab3100_get_set_reg_fops);
- if (!ab3100_get_reg_file) {
- err = -ENOMEM;
- goto exit_destroy_reg;
- }
+ debugfs_create_file("get_reg", S_IWUSR, ab3100_dir, &ab3100_get_priv,
+ &ab3100_get_set_reg_fops);
ab3100_set_priv.ab3100 = ab3100;
ab3100_set_priv.mode = true;
- ab3100_set_reg_file = debugfs_create_file("set_reg",
- S_IWUSR, ab3100_dir, &ab3100_set_priv,
- &ab3100_get_set_reg_fops);
- if (!ab3100_set_reg_file) {
- err = -ENOMEM;
- goto exit_destroy_get_reg;
- }
- return;
-
- exit_destroy_get_reg:
- debugfs_remove(ab3100_get_reg_file);
- exit_destroy_reg:
- debugfs_remove(ab3100_reg_file);
- exit_destroy_dir:
- debugfs_remove(ab3100_dir);
- exit_no_debugfs:
- return;
+ debugfs_create_file("set_reg", S_IWUSR, ab3100_dir, &ab3100_set_priv,
+ &ab3100_get_set_reg_fops);
}
#else
static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index b3f8d359f409..c4751fb9bc22 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -122,17 +122,11 @@ static const struct file_operations ab3100_otp_operations = {
.release = single_release,
};
-static int __init ab3100_otp_init_debugfs(struct device *dev,
- struct ab3100_otp *otp)
+static void __init ab3100_otp_init_debugfs(struct device *dev,
+ struct ab3100_otp *otp)
{
otp->debugfs = debugfs_create_file("ab3100_otp", S_IFREG | S_IRUGO,
- NULL, otp,
- &ab3100_otp_operations);
- if (!otp->debugfs) {
- dev_err(dev, "AB3100 debugfs OTP file registration failed!\n");
- return -ENOENT;
- }
- return 0;
+ NULL, otp, &ab3100_otp_operations);
}
static void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp)
@@ -141,10 +135,9 @@ static void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp)
}
#else
/* Compile this out if debugfs not selected */
-static inline int __init ab3100_otp_init_debugfs(struct device *dev,
- struct ab3100_otp *otp)
+static inline void __init ab3100_otp_init_debugfs(struct device *dev,
+ struct ab3100_otp *otp)
{
- return 0;
}
static inline void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp)
@@ -211,9 +204,7 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
}
/* debugfs entries */
- err = ab3100_otp_init_debugfs(&pdev->dev, otp);
- if (err)
- goto err;
+ ab3100_otp_init_debugfs(&pdev->dev, otp);
return 0;
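
The debugfs conversions in the mfd hunks above and below rely on debugfs_create_dir()/debugfs_create_file() not needing their return values checked: on failure they return ERR_PTR values that are safe to pass back in as a parent, and debugfs failures are not meant to be fatal. A sketch of the resulting idiom, with foo_* names as placeholders:

#include <linux/debugfs.h>
#include <linux/fs.h>

struct foo_data { int reg; };
static const struct file_operations foo_regs_fops; /* would normally carry .read/.write */

static void foo_init_debugfs(struct foo_data *foo)
{
	struct dentry *dir = debugfs_create_dir("foo", NULL);

	/* No IS_ERR/NULL checks: an ERR_PTR parent is handled internally. */
	debugfs_create_file("regs", 0444, dir, foo, &foo_regs_fops);
}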
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index d24c6ecccb88..567a34b073dd 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2644,12 +2644,10 @@ static const struct file_operations ab8500_hwreg_fops = {
.owner = THIS_MODULE,
};
-static struct dentry *ab8500_dir;
-static struct dentry *ab8500_gpadc_dir;
-
static int ab8500_debug_probe(struct platform_device *plf)
{
- struct dentry *file;
+ struct dentry *ab8500_dir;
+ struct dentry *ab8500_gpadc_dir;
struct ab8500 *ab8500;
struct resource *res;
@@ -2694,47 +2692,22 @@ static int ab8500_debug_probe(struct platform_device *plf)
}
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
- if (!ab8500_dir)
- goto err;
ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
ab8500_dir);
- if (!ab8500_gpadc_dir)
- goto err;
-
- file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_bank_registers_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_all_banks_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_address_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("irq-subscribe",
- (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir,
- &plf->dev, &ab8500_subscribe_fops);
- if (!file)
- goto err;
+
+ debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
+ &plf->dev, &ab8500_bank_registers_fops);
+ debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
+ &plf->dev, &ab8500_all_banks_fops);
+ debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ debugfs_create_file("register-address", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ debugfs_create_file("register-value", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
if (is_ab8500(ab8500)) {
debug_ranges = ab8500_debug_ranges;
@@ -2750,194 +2723,93 @@ static int ab8500_debug_probe(struct platform_device *plf)
num_interrupt_lines = AB8540_NR_IRQS;
}
- file = debugfs_create_file("interrupts", (S_IRUGO), ab8500_dir,
- &plf->dev, &ab8500_interrupts_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("irq-unsubscribe",
- (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir,
- &plf->dev, &ab8500_unsubscribe_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("all-modem-registers",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_modem_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bat_ctrl_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir,
- &plf->dev, &ab8500_gpadc_btemp_ball_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("main_charger_v",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_v_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("acc_detect1",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect1_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("acc_detect2",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect2_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux1_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux2_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_bat_v_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_vbus_v_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("main_charger_c",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_c_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("usb_charger_c",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir,
- &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bk_bat_v_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_die_temp_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_id_fops);
- if (!file)
- goto err;
-
+ debugfs_create_file("interrupts", (S_IRUGO), ab8500_dir, &plf->dev,
+ &ab8500_interrupts_fops);
+ debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir,
+ &plf->dev, &ab8500_hwreg_fops);
+ debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_modem_fops);
+ debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_bat_ctrl_fops);
+ debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_btemp_ball_fops);
+ debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_charger_v_fops);
+ debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_acc_detect1_fops);
+ debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_acc_detect2_fops);
+ debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_aux1_fops);
+ debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_aux2_fops);
+ debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_bat_v_fops);
+ debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_vbus_v_fops);
+ debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_charger_c_fops);
+ debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_usb_charger_c_fops);
+ debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_bk_bat_v_fops);
+ debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_die_temp_fops);
+ debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_usb_id_fops);
if (is_ab8540(ab8500)) {
- file = debugfs_create_file("xtal_temp",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_xtal_temp_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("vbattruemeas",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("batctrl_and_ibat",
- (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir,
- &plf->dev,
- &ab8540_gpadc_bat_ctrl_and_ibat_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("vbatmeas_and_ibat",
- (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_meas_and_ibat_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("vbattruemeas_and_ibat",
- (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir,
- &plf->dev,
- &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("battemp_and_ibat",
- (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir,
- &plf->dev, &ab8540_gpadc_bat_temp_and_ibat_fops);
- if (!file)
- goto err;
- file = debugfs_create_file("otp_calib",
- (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir,
- &plf->dev, &ab8540_gpadc_otp_calib_fops);
- if (!file)
- goto err;
+ debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_xtal_temp_fops);
+ debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_vbat_true_meas_fops);
+ debugfs_create_file("batctrl_and_ibat", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_bat_ctrl_and_ibat_fops);
+ debugfs_create_file("vbatmeas_and_ibat", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_vbat_meas_and_ibat_fops);
+ debugfs_create_file("vbattruemeas_and_ibat", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
+ debugfs_create_file("battemp_and_ibat", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_bat_temp_and_ibat_fops);
+ debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_otp_calib_fops);
}
- file = debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_avg_sample_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_edge_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_timer_fops);
- if (!file)
- goto err;
-
- file = debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_conv_type_fops);
- if (!file)
- goto err;
+ debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_avg_sample_fops);
+ debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_trig_edge_fops);
+ debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_trig_timer_fops);
+ debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_conv_type_fops);
return 0;
-
-err:
- debugfs_remove_recursive(ab8500_dir);
- dev_err(&plf->dev, "failed to create debugfs entries.\n");
-
- return -ENOMEM;
}
static struct platform_driver ab8500_debug_driver = {
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 2ee14d8a6d31..d2a13a547a3c 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -88,16 +88,6 @@ static struct regmap_config altr_sysmgr_regmap_cfg = {
};
/**
- * sysmgr_match_phandle
- * Matching function used by driver_find_device().
- * Return: True if match is found, otherwise false.
- */
-static int sysmgr_match_phandle(struct device *dev, const void *data)
-{
- return dev->of_node == (const struct device_node *)data;
-}
-
-/**
* altr_sysmgr_regmap_lookup_by_phandle
* Find the sysmgr previously configured in probe() and return the regmap property.
* Return: regmap if found or error if not found.
@@ -117,8 +107,8 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
if (!sysmgr_np)
return ERR_PTR(-ENODEV);
- dev = driver_find_device(&altr_sysmgr_driver.driver, NULL,
- (void *)sysmgr_np, sysmgr_match_phandle);
+ dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
+ (void *)sysmgr_np);
of_node_put(sysmgr_np);
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 3f21e26b8d36..90e0f21bc49c 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -1590,8 +1590,10 @@ static unsigned long dsiclk_rate(u8 n)
switch (divsel) {
case PRCM_DSI_PLLOUT_SEL_PHI_4:
div *= 2;
+ /* Fall through */
case PRCM_DSI_PLLOUT_SEL_PHI_2:
div *= 2;
+ /* Fall through */
case PRCM_DSI_PLLOUT_SEL_PHI:
return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW) / div;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 792b855a9104..4798d9f3f9d5 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev)
i, r);
}
}
- /* Fall through as HSIC mode needs utmi_clk */
+ /* Fall through - as HSIC mode needs utmi_clk */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i])) {
@@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev)
if (!IS_ERR(omap->hsic480m_clk[i]))
clk_disable_unprepare(omap->hsic480m_clk[i]);
- /* Fall through as utmi_clks were used in HSIC mode */
+ /* Fall through - as utmi_clks were used in HSIC mode */
case OMAP_EHCI_PORT_MODE_TLL:
if (!IS_ERR(omap->utmi_clk[i]))
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
return 0;
}
-static int rk8xx_suspend(struct device *dev)
+static int __maybe_unused rk8xx_suspend(struct device *dev)
{
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
return ret;
}
-static int rk8xx_resume(struct device *dev)
+static int __maybe_unused rk8xx_resume(struct device *dev)
{
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
return ret;
}
-SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
+static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
static struct i2c_driver rk808_i2c_driver = {
.driver = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6abfc8e92fcc..c55b63750757 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -126,18 +126,6 @@ config INTEL_MID_PTI
an Intel Atom (non-netbook) mobile device containing a MIPI
P1149.7 standard implementation.
-config SGI_IOC4
- tristate "SGI IOC4 Base IO support"
- depends on PCI
- ---help---
- This option enables basic support for the IOC4 chip on certain
- SGI IO controller cards (IO9, IO10, and PCI-RT). This option
- does not enable any specific functions on such a card, but provides
- necessary infrastructure for other drivers to utilize.
-
- If you have an SGI Altix with an IOC4-based card say Y.
- Otherwise say N.
-
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
@@ -200,9 +188,8 @@ config ENCLOSURE_SERVICES
config SGI_XP
tristate "Support communication between SGI SSIs"
depends on NET
- depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
- select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
- select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
+ depends on (IA64_SGI_UV || X86_UV) && SMP
+ depends on X86_64 || BROKEN
select SGI_GRU if X86_64 && SMP
---help---
An SGI machine can be divided into multiple Single System
@@ -375,15 +362,6 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
-config SPEAR13XX_PCIE_GADGET
- bool "PCIe gadget support for SPEAr13XX platform"
- depends on ARCH_SPEAR13XX && BROKEN
- help
- This option enables gadget support for PCIe controller. If
- board file defines any controller as PCIe endpoint then a sysfs
- entry will be created for that controller. User can use these
- sysfs node to configure PCIe EP as per his requirements.
-
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
@@ -465,6 +443,7 @@ config PCI_ENDPOINT_TEST
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
+ depends on HAS_IOMEM
help
This option enables support for the Xilinx SDFEC (Soft Decision
Forward Error Correction) driver. This enables a char driver
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index abd8ae249746..c1860d35dc7e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
-obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
@@ -37,7 +36,6 @@ obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
-obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index bcb10fa4bc3a..259fe1dfec03 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -334,8 +334,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alcor_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
+ struct alcor_pci_priv *priv = dev_get_drvdata(dev);
alcor_pci_aspm_ctrl(priv, 1);
return 0;
@@ -344,8 +343,7 @@ static int alcor_suspend(struct device *dev)
static int alcor_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
+ struct alcor_pci_priv *priv = dev_get_drvdata(dev);
alcor_pci_aspm_ctrl(priv, 0);
return 0;
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f88094719552..0f791bfdc1f5 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -5,6 +5,7 @@ config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
select NVMEM
+ select NVMEM_SYSFS
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs
@@ -34,6 +35,7 @@ config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
select NVMEM
+ select NVMEM_SYSFS
help
Enable this driver to get read/write support to most SPI EEPROMs,
after you configure the board init code to know about each eeprom
@@ -43,13 +45,16 @@ config EEPROM_AT25
will be called at25.
config EEPROM_LEGACY
- tristate "Old I2C EEPROM reader"
+ tristate "Old I2C EEPROM reader (DEPRECATED)"
depends on I2C && SYSFS
help
If you say yes here you get read-only access to the EEPROM data
available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
EEPROMs could theoretically be available on other devices as well.
+ This driver is deprecated and will be removed soon; please use the
+ better at24 driver instead.
+
This driver can also be built as a module. If so, the module
will be called eeprom.
@@ -80,6 +85,7 @@ config EEPROM_93XX46
depends on SPI && SYSFS
select REGMAP
select NVMEM
+ select NVMEM_SYSFS
help
Driver for the microwire EEPROM chipsets 93xx46x. The driver
supports both read and write commands and also the command to
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 35bf2477693d..518945b2f737 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client)
nvmem_config.name = dev_name(dev);
nvmem_config.dev = dev;
nvmem_config.read_only = !writable;
- nvmem_config.root_only = true;
+ nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
nvmem_config.owner = THIS_MODULE;
nvmem_config.compat = true;
nvmem_config.base_dev = dev;
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 6f00c33cfe22..b081c67416d7 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -195,13 +195,13 @@ static int ee1004_probe(struct i2c_client *client,
mutex_lock(&ee1004_bus_lock);
if (++ee1004_dev_count == 1) {
for (cnr = 0; cnr < 2; cnr++) {
- ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
+ ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter,
EE1004_ADDR_SET_PAGE + cnr);
- if (!ee1004_set_page[cnr]) {
+ if (IS_ERR(ee1004_set_page[cnr])) {
dev_err(&client->dev,
"address 0x%02x unavailable\n",
EE1004_ADDR_SET_PAGE + cnr);
- err = -EADDRINUSE;
+ err = PTR_ERR(ee1004_set_page[cnr]);
goto err_clients;
}
}
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 4d0cb90f4aeb..9da81f6d4a1c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -150,9 +150,9 @@ static int max6875_probe(struct i2c_client *client,
return -ENOMEM;
/* A fake client is created on the odd address */
- data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1);
- if (!data->fake_client) {
- err = -ENOMEM;
+ data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1);
+ if (IS_ERR(data->fake_client)) {
+ err = PTR_ERR(data->fake_client);
goto exit_kfree;
}
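
The ee1004 and max6875 hunks switch from i2c_new_dummy(), which returns NULL on failure, to i2c_new_dummy_device(), which returns an ERR_PTR carrying the real error code so callers can propagate it. A sketch of the conversion, with foo_* names as placeholders:

#include <linux/err.h>
#include <linux/i2c.h>

static int foo_attach_shadow_address(struct i2c_client *client)
{
	struct i2c_client *dummy;

	dummy = i2c_new_dummy_device(client->adapter, client->addr + 1);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* was: if (!dummy) return -ENOMEM; */

	/* ... use the second address, then release it ... */
	i2c_unregister_device(dummy);
	return 0;
}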
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 98603e235cf0..47ae84afac2e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -33,7 +33,6 @@
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (64 * 1024 * 1024)
-#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
/* Retrieves number of input buffers from the scalars parameter */
@@ -186,6 +185,7 @@ struct fastrpc_channel_ctx {
struct idr ctx_idr;
struct list_head users;
struct miscdevice miscdev;
+ struct kref refcount;
};
struct fastrpc_user {
@@ -279,8 +279,11 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
- if (!buf->virt)
+ if (!buf->virt) {
+ mutex_destroy(&buf->lock);
+ kfree(buf);
return -ENOMEM;
+ }
if (fl->sctx && fl->sctx->sid)
buf->phys += ((u64)fl->sctx->sid << 32);
@@ -290,6 +293,25 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return 0;
}
+static void fastrpc_channel_ctx_free(struct kref *ref)
+{
+ struct fastrpc_channel_ctx *cctx;
+
+ cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
+
+ kfree(cctx);
+}
+
+static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
+{
+ kref_get(&cctx->refcount);
+}
+
+static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
+{
+ kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
+}
+
static void fastrpc_context_free(struct kref *ref)
{
struct fastrpc_invoke_ctx *ctx;
@@ -313,6 +335,8 @@ static void fastrpc_context_free(struct kref *ref)
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
+
+ fastrpc_channel_ctx_put(cctx);
}
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
@@ -419,6 +443,9 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
fastrpc_get_buff_overlaps(ctx);
}
+ /* Released in fastrpc_context_put() */
+ fastrpc_channel_ctx_get(cctx);
+
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
@@ -448,6 +475,7 @@ err_idr:
spin_lock(&user->lock);
list_del(&ctx->node);
spin_unlock(&user->lock);
+ fastrpc_channel_ctx_put(cctx);
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
@@ -522,6 +550,7 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_del(&a->node);
mutex_unlock(&buffer->lock);
+ sg_free_table(&a->sgt);
kfree(a);
}
@@ -884,6 +913,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (!fl->sctx)
return -EINVAL;
+ if (!fl->cctx->rpdev)
+ return -EPIPE;
+
ctx = fastrpc_context_alloc(fl, kernel, sc, args);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1120,6 +1152,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
}
fastrpc_session_free(cctx, fl->sctx);
+ fastrpc_channel_ctx_put(cctx);
mutex_destroy(&fl->mutex);
kfree(fl);
@@ -1138,6 +1171,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
if (!fl)
return -ENOMEM;
+ /* Released in fastrpc_device_release() */
+ fastrpc_channel_ctx_get(cctx);
+
filp->private_data = fl;
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
@@ -1163,26 +1199,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
return 0;
}
-static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
-{
- struct dma_buf *buf;
- int info;
-
- if (copy_from_user(&info, argp, sizeof(info)))
- return -EFAULT;
-
- buf = dma_buf_get(info);
- if (IS_ERR_OR_NULL(buf))
- return -EINVAL;
- /*
- * one for the last get and other for the ALLOC_DMA_BUFF ioctl
- */
- dma_buf_put(buf);
- dma_buf_put(buf);
-
- return 0;
-}
-
static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_alloc_dma_buf bp;
@@ -1218,8 +1234,6 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
return -EFAULT;
}
- get_dma_buf(buf->dmabuf);
-
return 0;
}
@@ -1287,9 +1301,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
break;
- case FASTRPC_IOCTL_FREE_DMA_BUFF:
- err = fastrpc_dmabuf_free(fl, argp);
- break;
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
@@ -1395,10 +1406,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
int i, err, domain_id = -1;
const char *domain;
- data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
err = of_property_read_string(rdev->of_node, "label", &domain);
if (err) {
dev_info(rdev, "FastRPC Domain not specified in DT\n");
@@ -1417,6 +1424,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -EINVAL;
}
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
data->miscdev.minor = MISC_DYNAMIC_MINOR;
data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
domains[domain_id]);
@@ -1425,6 +1436,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
if (err)
return err;
+ kref_init(&data->refcount);
+
dev_set_drvdata(&rpdev->dev, data);
dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
INIT_LIST_HEAD(&data->users);
@@ -1459,7 +1472,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
misc_deregister(&cctx->miscdev);
of_platform_depopulate(&rpdev->dev);
- kfree(cctx);
+
+ cctx->rpdev = NULL;
+ fastrpc_channel_ctx_put(cctx);
}
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
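
The fastrpc hunks above put the channel context under a kref so that open file descriptors and in-flight invoke contexts keep it alive after rpmsg remove(); remove() now only clears rpdev and drops the probe reference, and the last put frees the object. A sketch of that lifetime pattern, with foo_* names as placeholders:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo_ctx {
	struct kref refcount;
	/* ... channel state ... */
};

static void foo_ctx_free(struct kref *ref)
{
	kfree(container_of(ref, struct foo_ctx, refcount));
}

static struct foo_ctx *foo_ctx_create(void)
{
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		kref_init(&ctx->refcount);	/* reference owned by probe() */
	return ctx;
}

/* Every opener or in-flight user pairs one get with one put. */
static void foo_ctx_get(struct foo_ctx *ctx) { kref_get(&ctx->refcount); }
static void foo_ctx_put(struct foo_ctx *ctx) { kref_put(&ctx->refcount, foo_ctx_free); }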
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
index 2c01461701a3..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/asid.c
@@ -18,7 +18,7 @@ int hl_asid_init(struct hl_device *hdev)
mutex_init(&hdev->asid_mutex);
- /* ASID 0 is reserved for KMD and device CPU */
+ /* ASID 0 is reserved for the kernel driver and device CPU */
set_bit(0, hdev->asid_bitmap);
return 0;
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index e495f44064fa..53fddbd8e693 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -397,7 +397,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
HL_KERNEL_ASID_ID);
if (rc) {
- dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
+ dev_err(hdev->dev,
+ "Failed to allocate CB for the kernel driver %d\n", rc);
return NULL;
}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 6ad83d5ef4b0..a9ac045dcfde 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -178,11 +178,23 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
- int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+ hdev->asic_funcs->hw_queues_lock(hdev);
- WARN_ONCE((cs_cnt < 0),
- "hl%d: error in CS active cnt %d\n",
- hdev->id, cs_cnt);
+ hdev->cs_active_cnt--;
+ if (!hdev->cs_active_cnt) {
+ struct hl_device_idle_busy_ts *ts;
+
+ ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
+ ts->busy_to_idle_ts = ktime_get();
+
+ if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
+ hdev->idle_busy_ts_idx = 0;
+ } else if (hdev->cs_active_cnt < 0) {
+ dev_crit(hdev->dev, "CS active cnt %d is negative\n",
+ hdev->cs_active_cnt);
+ }
+
+ hdev->asic_funcs->hw_queues_unlock(hdev);
hl_int_hw_queue_update_ci(cs);
@@ -305,6 +317,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
spin_unlock(&ctx->cs_lock);
+ dev_dbg(hdev->dev,
+ "Rejecting CS because of too many in-flights CS\n");
rc = -EAGAIN;
goto free_fence;
}
@@ -395,8 +409,9 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
return NULL;
}
- if (hw_queue_prop->kmd_only) {
- dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
+ if (hw_queue_prop->driver_only) {
+ dev_err(hdev->dev,
+ "Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
return NULL;
} else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
@@ -683,7 +698,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = hl_poll_timeout_memory(hdev,
&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
- 100, jiffies_to_usecs(hdev->timeout_jiffies));
+ 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
if (rc == -ETIMEDOUT) {
dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 8682590e3f6e..17db7b3dfb4c 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,12 +26,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
dma_fence_put(ctx->cs_pending[i]);
if (ctx->asid != HL_KERNEL_ASID_ID) {
- /*
- * The engines are stopped as there is no executing CS, but the
+ /* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
+ * Stop only if this is the compute context, as there can be
+ * only one compute context
*/
- if (hdev->in_debug)
+ if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
hl_device_set_debug_mode(hdev, false);
hl_vm_ctx_fini(ctx);
@@ -67,29 +68,36 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
goto out_err;
}
+ mutex_lock(&mgr->ctx_lock);
+ rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&mgr->ctx_lock);
+
+ if (rc < 0) {
+ dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
+ goto free_ctx;
+ }
+
+ ctx->handle = rc;
+
rc = hl_ctx_init(hdev, ctx, false);
if (rc)
- goto free_ctx;
+ goto remove_from_idr;
hl_hpriv_get(hpriv);
ctx->hpriv = hpriv;
- /* TODO: remove for multiple contexts */
+ /* TODO: remove for multiple contexts per process */
hpriv->ctx = ctx;
- hdev->user_ctx = ctx;
- mutex_lock(&mgr->ctx_lock);
- rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
- mutex_unlock(&mgr->ctx_lock);
-
- if (rc < 0) {
- dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
- hl_ctx_free(hdev, ctx);
- goto out_err;
- }
+ /* TODO: remove the following line for multiple process support */
+ hdev->compute_ctx = ctx;
return 0;
+remove_from_idr:
+ mutex_lock(&mgr->ctx_lock);
+ idr_remove(&mgr->ctx_handles, ctx->handle);
+ mutex_unlock(&mgr->ctx_lock);
free_ctx:
kfree(ctx);
out_err:
@@ -120,7 +128,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
ctx->thread_ctx_switch_wait_token = 0;
if (is_kernel_ctx) {
- ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
+ ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_mmu_ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "Failed to init mmu ctx module\n");
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 18e499c900c7..87f37ac31ccd 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -29,7 +29,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
@@ -55,12 +55,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
- pkt.value = __cpu_to_le64(val);
+ pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
@@ -81,10 +81,10 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.led_index = __cpu_to_le32(led);
- pkt.value = __cpu_to_le64(state);
+ pkt.led_index = cpu_to_le32(led);
+ pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
@@ -370,7 +370,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
- ctx = hdev->user_ctx;
+ ctx = hdev->compute_ctx;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
@@ -533,7 +533,7 @@ out:
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
- struct hl_ctx *ctx = hdev->user_ctx;
+ struct hl_ctx *ctx = hdev->compute_ctx;
u64 hop_addr, hop_pte_addr, hop_pte;
u64 offset_mask = HOP4_MASK | OFFSET_MASK;
int rc = 0;
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0c4894dd9c02..459fee70a597 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,10 +42,12 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
+ struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
+ ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -53,13 +55,12 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->restore_phase_mutex);
- kfree(hpriv);
-
- /* Now the FD is really closed */
- atomic_dec(&hdev->fd_open_cnt);
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ hdev->compute_ctx = NULL;
+ mutex_unlock(&hdev->fpriv_list_lock);
- /* This allows a new user context to open the device */
- hdev->user_ctx = NULL;
+ kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -94,6 +95,24 @@ static int hl_device_release(struct inode *inode, struct file *filp)
return 0;
}
+static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+{
+ struct hl_fpriv *hpriv = filp->private_data;
+ struct hl_device *hdev;
+
+ filp->private_data = NULL;
+
+ hdev = hpriv->hdev;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ kfree(hpriv);
+
+ return 0;
+}
+
/*
* hl_mmap - mmap function for habanalabs device
*
@@ -124,55 +143,102 @@ static const struct file_operations hl_ops = {
.compat_ioctl = hl_ioctl
};
+static const struct file_operations hl_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .open = hl_device_open_ctrl,
+ .release = hl_device_release_ctrl,
+ .unlocked_ioctl = hl_ioctl_control,
+ .compat_ioctl = hl_ioctl_control
+};
+
+static void device_release_func(struct device *dev)
+{
+ kfree(dev);
+}
+
/*
- * device_setup_cdev - setup cdev and device for habanalabs device
+ * device_init_cdev - Initialize cdev and device for habanalabs device
*
* @hdev: pointer to habanalabs device structure
* @hclass: pointer to the class object of the device
* @minor: minor number of the specific device
- * @fpos : file operations to install for this device
+ * @fpos: file operations to install for this device
+ * @name: name of the device as it will appear in the filesystem
+ * @cdev: pointer to the char device object that will be initialized
+ * @dev: pointer to the device object that will be initialized
*
- * Create a cdev and a Linux device for habanalabs's device. Need to be
- * called at the end of the habanalabs device initialization process,
- * because this function exposes the device to the user
+ * Initialize a cdev and a Linux device for habanalabs's device.
*/
-static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
- int minor, const struct file_operations *fops)
+static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
+ int minor, const struct file_operations *fops,
+ char *name, struct cdev *cdev,
+ struct device **dev)
{
- int err, devno = MKDEV(hdev->major, minor);
- struct cdev *hdev_cdev = &hdev->cdev;
- char *name;
+ cdev_init(cdev, fops);
+ cdev->owner = THIS_MODULE;
- name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
- if (!name)
+ *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
+ if (!*dev)
return -ENOMEM;
- cdev_init(hdev_cdev, fops);
- hdev_cdev->owner = THIS_MODULE;
- err = cdev_add(hdev_cdev, devno, 1);
- if (err) {
- pr_err("Failed to add char device %s\n", name);
- goto err_cdev_add;
+ device_initialize(*dev);
+ (*dev)->devt = MKDEV(hdev->major, minor);
+ (*dev)->class = hclass;
+ (*dev)->release = device_release_func;
+ dev_set_drvdata(*dev, hdev);
+ dev_set_name(*dev, "%s", name);
+
+ return 0;
+}
+
+static int device_cdev_sysfs_add(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = cdev_device_add(&hdev->cdev, hdev->dev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to add a char device to the system\n");
+ return rc;
}
- hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
- if (IS_ERR(hdev->dev)) {
- pr_err("Failed to create device %s\n", name);
- err = PTR_ERR(hdev->dev);
- goto err_device_create;
+ rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to add a control char device to the system\n");
+ goto delete_cdev_device;
}
- dev_set_drvdata(hdev->dev, hdev);
+ /* hl_sysfs_init() must be done after adding the device to the system */
+ rc = hl_sysfs_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize sysfs\n");
+ goto delete_ctrl_cdev_device;
+ }
- kfree(name);
+ hdev->cdev_sysfs_created = true;
return 0;
-err_device_create:
- cdev_del(hdev_cdev);
-err_cdev_add:
- kfree(name);
- return err;
+delete_ctrl_cdev_device:
+ cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+delete_cdev_device:
+ cdev_device_del(&hdev->cdev, hdev->dev);
+ return rc;
+}
+
+static void device_cdev_sysfs_del(struct hl_device *hdev)
+{
+ /* device_release() won't be called so must free devices explicitly */
+ if (!hdev->cdev_sysfs_created) {
+ kfree(hdev->dev_ctrl);
+ kfree(hdev->dev);
+ return;
+ }
+
+ hl_sysfs_fini(hdev);
+ cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+ cdev_device_del(&hdev->cdev, hdev->dev);
}
/*
@@ -227,20 +293,29 @@ static int device_early_init(struct hl_device *hdev)
goto free_eq_wq;
}
+ hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
+ sizeof(struct hl_device_idle_busy_ts),
+ (GFP_KERNEL | __GFP_ZERO));
+ if (!hdev->idle_busy_ts_arr) {
+ rc = -ENOMEM;
+ goto free_chip_info;
+ }
+
hl_cb_mgr_init(&hdev->kernel_cb_mgr);
- mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
mutex_init(&hdev->debug_lock);
mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
+ INIT_LIST_HEAD(&hdev->fpriv_list);
+ mutex_init(&hdev->fpriv_list_lock);
atomic_set(&hdev->in_reset, 0);
- atomic_set(&hdev->fd_open_cnt, 0);
- atomic_set(&hdev->cs_active_cnt, 0);
return 0;
+free_chip_info:
+ kfree(hdev->hl_chip_info);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
@@ -266,8 +341,11 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->debug_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
+ mutex_destroy(&hdev->fpriv_list_lock);
+
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
+ kfree(hdev->idle_busy_ts_arr);
kfree(hdev->hl_chip_info);
destroy_workqueue(hdev->eq_wq);
@@ -277,8 +355,6 @@ static void device_early_fini(struct hl_device *hdev)
if (hdev->asic_funcs->early_fini)
hdev->asic_funcs->early_fini(hdev);
-
- mutex_destroy(&hdev->fd_open_cnt_lock);
}
static void set_freq_to_low_job(struct work_struct *work)
@@ -286,9 +362,13 @@ static void set_freq_to_low_job(struct work_struct *work)
struct hl_device *hdev = container_of(work, struct hl_device,
work_freq.work);
- if (atomic_read(&hdev->fd_open_cnt) == 0)
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (!hdev->compute_ctx)
hl_device_set_frequency(hdev, PLL_LOW);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
schedule_delayed_work(&hdev->work_freq,
usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}
@@ -338,7 +418,7 @@ static int device_late_init(struct hl_device *hdev)
hdev->high_pll = hdev->asic_prop.high_pll;
/* force setting to low frequency */
- atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+ hdev->curr_pll_profile = PLL_LOW;
if (hdev->pm_mng_profile == PM_AUTO)
hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
@@ -381,44 +461,128 @@ static void device_late_fini(struct hl_device *hdev)
hdev->late_init_done = false;
}
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+{
+ struct hl_device_idle_busy_ts *ts;
+ ktime_t zero_ktime, curr = ktime_get();
+ u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
+ s64 period_us, last_start_us, last_end_us, last_busy_time_us,
+ total_busy_time_us = 0, total_busy_time_ms;
+
+ zero_ktime = ktime_set(0, 0);
+ period_us = period_ms * USEC_PER_MSEC;
+ ts = &hdev->idle_busy_ts_arr[last_index];
+
+ /* check the case where the device is currently idle */
+ if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
+ !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
+
+ last_index--;
+ /* Handle case idle_busy_ts_idx was 0 */
+ if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+ last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+ ts = &hdev->idle_busy_ts_arr[last_index];
+ }
+
+ while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
+ /* Check if we are in the last sample case, i.e. if the sample
+ * began before the sampling period. This could be a real
+ * sample or 0, so both cases need to be handled
+ */
+ last_start_us = ktime_to_us(
+ ktime_sub(curr, ts->idle_to_busy_ts));
+
+ if (last_start_us > period_us) {
+
+ /* First check two cases:
+ * 1. If the device is currently busy
+ * 2. If the device was idle during the whole sampling
+ * period
+ */
+
+ if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
+ /* Check if the device is currently busy */
+ if (ktime_compare(ts->idle_to_busy_ts,
+ zero_ktime))
+ return 100;
+
+ /* We either didn't have any activity or we
+ * reached an entry which is 0. Either way,
+ * exit and return what was accumulated so far
+ */
+ break;
+ }
+
+ /* If sample has finished, check it is relevant */
+ last_end_us = ktime_to_us(
+ ktime_sub(curr, ts->busy_to_idle_ts));
+
+ if (last_end_us > period_us)
+ break;
+
+ /* It is relevant so add it but with adjustment */
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(ts->busy_to_idle_ts,
+ ts->idle_to_busy_ts));
+ total_busy_time_us += last_busy_time_us -
+ (last_start_us - period_us);
+ break;
+ }
+
+ /* Check if the sample is finished or still open */
+ if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(ts->busy_to_idle_ts,
+ ts->idle_to_busy_ts));
+ else
+ last_busy_time_us = ktime_to_us(
+ ktime_sub(curr, ts->idle_to_busy_ts));
+
+ total_busy_time_us += last_busy_time_us;
+
+ last_index--;
+ /* Handle case idle_busy_ts_idx was 0 */
+ if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+ last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+ ts = &hdev->idle_busy_ts_arr[last_index];
+
+ overlap_cnt++;
+ }
+
+ total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
+ USEC_PER_MSEC);
+
+ return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
+}
+
/*
* hl_device_set_frequency - set the frequency of the device
*
* @hdev: pointer to habanalabs device structure
* @freq: the new frequency value
*
- * Change the frequency if needed.
- * We allose to set PLL to low only if there is no user process
- * Returns 0 if no change was done, otherwise returns 1;
+ * Change the frequency if needed. This function has no protection against
+ * concurrency, therefore it is assumed that the calling function has protected
+ * itself against the case of calling this function from multiple threads with
+ * different values
+ *
+ * Returns 0 if no change was done, otherwise returns 1
*/
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
- enum hl_pll_frequency old_freq =
- (freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
- int ret;
-
- if (hdev->pm_mng_profile == PM_MANUAL)
- return 0;
-
- ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
- if (ret == freq)
- return 0;
-
- /*
- * in case we want to lower frequency, check if device is not
- * opened. We must have a check here to workaround race condition with
- * hl_device_open
- */
- if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
- atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+ if ((hdev->pm_mng_profile == PM_MANUAL) ||
+ (hdev->curr_pll_profile == freq))
return 0;
- }
dev_dbg(hdev->dev, "Changing device frequency to %s\n",
freq == PLL_HIGH ? "high" : "low");
hdev->asic_funcs->set_pll_profile(hdev, freq);
+ hdev->curr_pll_profile = freq;
+
return 1;
}
@@ -449,19 +613,8 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- if (atomic_read(&hdev->fd_open_cnt) > 1) {
- dev_err(hdev->dev,
- "Failed to enable debug mode. More then a single user is using the device\n");
- rc = -EPERM;
- goto unlock_fd_open_lock;
- }
-
hdev->in_debug = 1;
-unlock_fd_open_lock:
- mutex_unlock(&hdev->fd_open_cnt_lock);
out:
mutex_unlock(&hdev->debug_lock);
@@ -568,6 +721,7 @@ disable_device:
static void device_kill_open_processes(struct hl_device *hdev)
{
u16 pending_total, pending_cnt;
+ struct hl_fpriv *hpriv;
struct task_struct *task = NULL;
if (hdev->pldm)
@@ -575,32 +729,31 @@ static void device_kill_open_processes(struct hl_device *hdev)
else
pending_total = HL_PENDING_RESET_PER_SEC;
- pending_cnt = pending_total;
-
- /* Flush all processes that are inside hl_open */
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
-
- pending_cnt--;
-
- dev_info(hdev->dev,
- "Can't HARD reset, waiting for user to close FD\n");
+ /* Give the user time to close the FD, and let processes that are
+ * inside hl_device_open finish
+ */
+ if (!list_empty(&hdev->fpriv_list))
ssleep(1);
- }
- if (atomic_read(&hdev->fd_open_cnt)) {
- task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
- PIDTYPE_PID);
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ /* This section must be protected because we are dereferencing
+ * pointers that are freed if the process exits
+ */
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
- dev_info(hdev->dev, "Killing user processes\n");
+ dev_info(hdev->dev, "Killing user process pid=%d\n",
+ task_pid_nr(task));
send_sig(SIGKILL, task, 1);
- msleep(100);
+ usleep_range(1000, 10000);
put_task_struct(task);
}
}
+ mutex_unlock(&hdev->fpriv_list_lock);
+
/* We killed the open users, but because the driver cleans up after the
* user contexts are closed (e.g. mmu mappings), we need to wait again
* to make sure the cleaning phase is finished before continuing with
@@ -609,19 +762,18 @@ static void device_kill_open_processes(struct hl_device *hdev)
pending_cnt = pending_total;
- while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+ while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+ dev_info(hdev->dev,
+ "Waiting for all unmap operations to finish before hard reset\n");
pending_cnt--;
ssleep(1);
}
- if (atomic_read(&hdev->fd_open_cnt))
+ if (!list_empty(&hdev->fpriv_list))
dev_crit(hdev->dev,
"Going to hard reset with open user contexts\n");
-
- mutex_unlock(&hdev->fd_open_cnt_lock);
-
}
static void device_hard_reset_pending(struct work_struct *work)
@@ -630,8 +782,6 @@ static void device_hard_reset_pending(struct work_struct *work)
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
- device_kill_open_processes(hdev);
-
hl_device_reset(hdev, true, true);
kfree(device_reset_work);
@@ -679,13 +829,16 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
+ /* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
dev_err(hdev->dev, "Going to RESET device!\n");
}
@@ -736,6 +889,13 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
+ /* Kill processes here after CS rollback. This is because the process
+ * can't really exit until all its CSs are done, which is what we
+ * do in cs rollback
+ */
+ if (from_hard_reset_thread)
+ device_kill_open_processes(hdev);
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
@@ -754,12 +914,24 @@ again:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
+ hdev->idle_busy_ts_idx = 0;
+ hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
+ hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);
+
+ if (hdev->cs_active_cnt)
+ dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
+ hdev->cs_active_cnt);
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
/* Make sure the context switch phase will run again */
- if (hdev->user_ctx) {
- atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
- hdev->user_ctx->thread_ctx_switch_wait_token = 0;
+ if (hdev->compute_ctx) {
+ atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
+ hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
}
+ mutex_unlock(&hdev->fpriv_list_lock);
+
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@@ -788,7 +960,7 @@ again:
goto out_err;
}
- hdev->user_ctx = NULL;
+ hdev->compute_ctx = NULL;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
@@ -849,6 +1021,8 @@ again:
else
hdev->soft_reset_cnt++;
+ dev_warn(hdev->dev, "Successfully finished resetting the device\n");
+
return 0;
out_err:
@@ -883,17 +1057,43 @@ out_err:
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
int i, rc, cq_ready_cnt;
+ char *name;
+ bool add_cdev_sysfs_on_err = false;
+
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
+ if (!name) {
+ rc = -ENOMEM;
+ goto out_disabled;
+ }
+
+ /* Initialize cdev and device structures */
+ rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
+ &hdev->cdev, &hdev->dev);
- /* Create device */
- rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
+ kfree(name);
if (rc)
goto out_disabled;
+ name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
+ if (!name) {
+ rc = -ENOMEM;
+ goto free_dev;
+ }
+
+ /* Initialize cdev and device structures for control device */
+ rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
+ name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
+
+ kfree(name);
+
+ if (rc)
+ goto free_dev;
+
/* Initialize ASIC function pointers and perform early init */
rc = device_early_init(hdev);
if (rc)
- goto release_device;
+ goto free_dev_ctrl;
/*
* Start calling ASIC initialization. First S/W then H/W and finally
@@ -965,12 +1165,13 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto mmu_fini;
}
- hdev->user_ctx = NULL;
+ hdev->compute_ctx = NULL;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
dev_err(hdev->dev, "failed to initialize kernel context\n");
- goto free_ctx;
+ kfree(hdev->kernel_ctx);
+ goto mmu_fini;
}
rc = hl_cb_pool_init(hdev);
@@ -979,12 +1180,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
- rc = hl_sysfs_init(hdev);
- if (rc) {
- dev_err(hdev->dev, "failed to initialize sysfs\n");
- goto free_cb_pool;
- }
-
hl_debugfs_add_device(hdev);
if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
@@ -993,6 +1188,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->asic_funcs->hw_fini(hdev, true);
}
+ /*
+ * From this point, in case of an error, add char devices and create
+ * sysfs nodes as part of the error flow, to allow debugging.
+ */
+ add_cdev_sysfs_on_err = true;
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1029,9 +1230,24 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
/*
- * hl_hwmon_init must be called after device_late_init, because only
+ * Expose devices and sysfs nodes to user.
+ * From here there is no need to add char devices and create sysfs nodes
+ * in case of an error.
+ */
+ add_cdev_sysfs_on_err = false;
+ rc = device_cdev_sysfs_add(hdev);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add char devices and sysfs nodes\n");
+ rc = 0;
+ goto out_disabled;
+ }
+
+ /*
+ * hl_hwmon_init() must be called after device_late_init(), because only
* there we get the information from the device about which
- * hwmon-related sensors the device supports
+ * hwmon-related sensors the device supports.
+ * Furthermore, it must be done after adding the device to the system.
*/
rc = hl_hwmon_init(hdev);
if (rc) {
@@ -1047,14 +1263,10 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
return 0;
-free_cb_pool:
- hl_cb_pool_fini(hdev);
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
"kernel ctx is still alive on initialization failure\n");
-free_ctx:
- kfree(hdev->kernel_ctx);
mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
@@ -1069,18 +1281,21 @@ sw_fini:
hdev->asic_funcs->sw_fini(hdev);
early_fini:
device_early_fini(hdev);
-release_device:
- device_destroy(hclass, hdev->dev->devt);
- cdev_del(&hdev->cdev);
+free_dev_ctrl:
+ kfree(hdev->dev_ctrl);
+free_dev:
+ kfree(hdev->dev);
out_disabled:
hdev->disabled = true;
+ if (add_cdev_sysfs_on_err)
+ device_cdev_sysfs_add(hdev);
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id);
+ hdev->id / 2);
else
pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id);
+ hdev->id / 2);
return rc;
}
@@ -1121,16 +1336,17 @@ void hl_device_fini(struct hl_device *hdev)
/* Mark device as disabled */
hdev->disabled = true;
- /*
- * Flush anyone that is inside the critical section of enqueue
+ /* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
- hdev->hard_reset_pending = true;
+ /* Flush anyone that is inside device open */
+ mutex_lock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_list_lock);
- device_kill_open_processes(hdev);
+ hdev->hard_reset_pending = true;
hl_hwmon_fini(hdev);
@@ -1138,8 +1354,6 @@ void hl_device_fini(struct hl_device *hdev)
hl_debugfs_remove_device(hdev);
- hl_sysfs_fini(hdev);
-
/*
* Halt the engines and disable interrupts so we won't get any more
* completions from H/W and we won't have any accesses from the
@@ -1150,6 +1364,12 @@ void hl_device_fini(struct hl_device *hdev)
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
+ /* Kill processes here after CS rollback. This is because the process
+ * can't really exit until all its CSs are done, which is what we
+ * do in cs rollback
+ */
+ device_kill_open_processes(hdev);
+
hl_cb_pool_fini(hdev);
/* Release kernel context */
@@ -1176,9 +1396,8 @@ void hl_device_fini(struct hl_device *hdev)
device_early_fini(hdev);
- /* Hide device from user */
- device_destroy(hdev->dev->class, hdev->dev->devt);
- cdev_del(&hdev->cdev);
+ /* Hide devices and sysfs nodes from user */
+ device_cdev_sysfs_del(hdev);
pr_info("removed device successfully\n");
}
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index cc8168bacb24..ea2ca67fbfbf 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -24,7 +24,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
{
const struct firmware *fw;
const u64 *fw_data;
- size_t fw_size, i;
+ size_t fw_size;
int rc;
rc = request_firmware(&fw, fw_name, hdev->dev);
@@ -45,22 +45,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
fw_data = (const u64 *) fw->data;
- if ((fw->size % 8) != 0)
- fw_size -= 8;
-
- for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
- if (!(i & (0x80000 - 1))) {
- dev_dbg(hdev->dev,
- "copied so far %zu out of %zu for %s firmware",
- i, fw_size, fw_name);
- usleep_range(20, 100);
- }
-
- writeq(*fw_data, dst);
- }
-
- if ((fw->size % 8) != 0)
- writel(*(const u32 *) fw_data, dst);
+ memcpy_toio(dst, fw_data, fw_size);
out:
release_firmware(fw);
@@ -112,7 +97,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
}
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
- (tmp == ARMCP_PACKET_FENCE_VAL), 1000, timeout);
+ (tmp == ARMCP_PACKET_FENCE_VAL), 1000,
+ timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
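
For reference, a minimal standalone sketch of the simplified firmware-push pattern the hunk above converges on. Only request_firmware()/release_firmware() and memcpy_toio() are real kernel helpers here; the function and parameter names are illustrative assumptions, not driver API. memcpy_toio() handles arbitrary sizes, which is why the old 8-byte copy loop and its unaligned-tail writel() could be dropped:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>

static int example_push_fw(struct device *dev, const char *fw_name,
			   void __iomem *dst, size_t dst_size)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, fw_name, dev);
	if (rc) {
		dev_err(dev, "Failed to request firmware %s\n", fw_name);
		return rc;
	}

	if (fw->size > dst_size) {
		dev_err(dev, "Firmware %s is too big\n", fw_name);
		rc = -EINVAL;
		goto out;
	}

	/* Copy the whole blob into device I/O memory in one call */
	memcpy_toio(dst, fw->data, fw->size);

out:
	release_firmware(fw);
	return rc;
}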
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 75294ec65257..6fba14b81f90 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -9,6 +9,7 @@
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"
+#include "include/goya/goya_reg_map.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
@@ -41,8 +42,8 @@
* PQ, CQ and CP are not secured.
* PQ, CB and the data are on the SRAM/DRAM.
*
- * Since QMAN DMA is secured, KMD is parsing the DMA CB:
- * - KMD checks DMA pointer
+ * Since QMAN DMA is secured, the driver is parsing the DMA CB:
+ * - checks DMA pointer
* - WREG, MSG_PROT are not allowed.
* - MSG_LONG/SHORT are allowed.
*
@@ -55,15 +56,15 @@
* QMAN DMA: PQ, CQ and CP are secured.
* MMU is set to bypass on the Secure props register of the QMAN.
* The reasons we don't enable MMU for PQ, CQ and CP are:
- * - PQ entry is in kernel address space and KMD doesn't map it.
+ * - PQ entry is in kernel address space and the driver doesn't map it.
* - CP writes to MSIX register and to kernel address space (completion
* queue).
*
- * DMA is not secured but because CP is secured, KMD still needs to parse the
- * CB, but doesn't need to check the DMA addresses.
+ * DMA is not secured but because CP is secured, the driver still needs to parse
+ * the CB, but doesn't need to check the DMA addresses.
*
- * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
- * doesn't map memory in MMU.
+ * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
+ * the driver doesn't map memory in MMU.
*
* QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
*
@@ -335,18 +336,18 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
- prop->hw_queues_props[i].kmd_only = 0;
+ prop->hw_queues_props[i].driver_only = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
- prop->hw_queues_props[i].kmd_only = 1;
+ prop->hw_queues_props[i].driver_only = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
- prop->hw_queues_props[i].kmd_only = 0;
+ prop->hw_queues_props[i].driver_only = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -695,8 +696,8 @@ static int goya_sw_init(struct hl_device *hdev)
goto free_dma_pool;
}
- dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n",
- hdev->cpu_accessible_dma_address);
+ dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
+ &hdev->cpu_accessible_dma_address);
hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
@@ -1006,36 +1007,34 @@ int goya_init_cpu_queues(struct hl_device *hdev)
eq = &hdev->event_queue;
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
- lower_32_bits(cpu_pq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
- upper_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
+ WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+ WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
+ WREG32(mmCPU_CQ_BASE_ADDR_LOW,
lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
+ WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
+ WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
/* Used for EQ CI */
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
+ WREG32(mmCPU_EQ_CI, 0);
WREG32(mmCPU_IF_PF_PQ_PI, 0);
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
+ WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
err = hl_poll_timeout(
hdev,
- mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
+ mmCPU_PQ_INIT_STATUS,
status,
(status == PQ_INIT_STATUS_READY_FOR_HOST),
1000,
@@ -2063,6 +2062,25 @@ static void goya_disable_msix(struct hl_device *hdev)
goya->hw_cap_initialized &= ~HW_CAP_MSIX;
}
+static void goya_enable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+
+ /* Zero the lower/upper parts of the 64-bit counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+
+ /* Enable the counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+}
+
+static void goya_disable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+}
+
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
u32 wait_timeout_ms, cpu_timeout_ms;
@@ -2103,6 +2121,8 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
goya_disable_external_queues(hdev);
goya_disable_internal_queues(hdev);
+ goya_disable_timestamp(hdev);
+
if (hard_reset) {
goya_disable_msix(hdev);
goya_mmu_remove_device_cpu_mappings(hdev);
@@ -2205,12 +2225,12 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
switch (fwc) {
case FW_COMP_UBOOT:
- ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
+ ver_off = RREG32(mmUBOOT_VER_OFFSET);
dest = hdev->asic_prop.uboot_ver;
name = "U-Boot";
break;
case FW_COMP_PREBOOT:
- ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
+ ver_off = RREG32(mmPREBOOT_VER_OFFSET);
dest = hdev->asic_prop.preboot_ver;
name = "Preboot";
break;
@@ -2469,7 +2489,7 @@ static int goya_hw_init(struct hl_device *hdev)
* we need to reset the chip before doing H/W init. This register is
* cleared by the H/W upon H/W reset
*/
- WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
+ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
if (rc) {
@@ -2505,6 +2525,8 @@ static int goya_hw_init(struct hl_device *hdev)
goya_init_tpc_qmans(hdev);
+ goya_enable_timestamp(hdev);
+
/* MSI-X must be enabled before CPU queues are initialized */
rc = goya_enable_msix(hdev);
if (rc)
@@ -2729,9 +2751,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
- /* Not needed in Goya */
+ /* The QMANs are on the SRAM so need to copy to IO space */
+ memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
}
static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -2830,7 +2853,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
dev_err_ratelimited(hdev->dev,
- "Can't send KMD job on QMAN0 because the device is not idle\n");
+ "Can't send driver job on QMAN0 because the device is not idle\n");
return -EBUSY;
}
@@ -2864,7 +2887,8 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
- (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout);
+ (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
+ timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
@@ -2945,7 +2969,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
}
rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
- 1000, GOYA_TEST_QUEUE_WAIT_USEC);
+ 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
@@ -3312,9 +3336,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
int rc;
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
ctl = le32_to_cpu(user_dma_pkt->ctl);
user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3343,9 +3369,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
struct packet_lin_dma *user_dma_pkt)
{
dev_dbg(hdev->dev, "DMA packet details:\n");
- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->dst_addr));
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
/*
* WA for HW-23.
@@ -3385,7 +3413,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
dev_dbg(hdev->dev, "WREG32 packet details:\n");
dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
- dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+ dev_dbg(hdev->dev, "value == 0x%x\n",
+ le32_to_cpu(wreg_pkt->value));
if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3427,12 +3456,13 @@ static int goya_validate_cb(struct hl_device *hdev,
while (cb_parsed_length < parser->user_cb_size) {
enum packet_id pkt_id;
u16 pkt_size;
- void *user_pkt;
+ struct goya_packet *user_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
@@ -3452,7 +3482,8 @@ static int goya_validate_cb(struct hl_device *hdev,
* need to validate here as well because patch_cb() is
* not called in MMU path while this function is called
*/
- rc = goya_validate_wreg32(hdev, parser, user_pkt);
+ rc = goya_validate_wreg32(hdev,
+ parser, (struct packet_wreg32 *) user_pkt);
break;
case PACKET_WREG_BULK:
@@ -3480,10 +3511,10 @@ static int goya_validate_cb(struct hl_device *hdev,
case PACKET_LIN_DMA:
if (is_mmu)
rc = goya_validate_dma_pkt_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
else
rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
- user_pkt);
+ (struct packet_lin_dma *) user_pkt);
break;
case PACKET_MSG_LONG:
@@ -3656,15 +3687,16 @@ static int goya_patch_cb(struct hl_device *hdev,
enum packet_id pkt_id;
u16 pkt_size;
u32 new_pkt_size = 0;
- void *user_pkt, *kernel_pkt;
+ struct goya_packet *user_pkt, *kernel_pkt;
- user_pkt = (void *) (uintptr_t)
+ user_pkt = (struct goya_packet *) (uintptr_t)
(parser->user_cb->kernel_address + cb_parsed_length);
- kernel_pkt = (void *) (uintptr_t)
+ kernel_pkt = (struct goya_packet *) (uintptr_t)
(parser->patched_cb->kernel_address +
cb_patched_cur_length);
- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
PACKET_HEADER_PACKET_ID_MASK) >>
PACKET_HEADER_PACKET_ID_SHIFT);
@@ -3679,15 +3711,18 @@ static int goya_patch_cb(struct hl_device *hdev,
switch (pkt_id) {
case PACKET_LIN_DMA:
- rc = goya_patch_dma_packet(hdev, parser, user_pkt,
- kernel_pkt, &new_pkt_size);
+ rc = goya_patch_dma_packet(hdev, parser,
+ (struct packet_lin_dma *) user_pkt,
+ (struct packet_lin_dma *) kernel_pkt,
+ &new_pkt_size);
cb_patched_cur_length += new_pkt_size;
break;
case PACKET_WREG_32:
memcpy(kernel_pkt, user_pkt, pkt_size);
cb_patched_cur_length += pkt_size;
- rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
+ rc = goya_validate_wreg32(hdev, parser,
+ (struct packet_wreg32 *) kernel_pkt);
break;
case PACKET_WREG_BULK:
@@ -3936,7 +3971,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
- WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
+ WREG32(mmCPU_EQ_CI, val);
}
void goya_restore_phase_topology(struct hl_device *hdev)
@@ -4351,6 +4386,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
size_t total_pkt_size;
long result;
int rc;
+ int irq_num_entries, irq_arr_index;
+ __le32 *goya_irq_arr;
total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
irq_arr_size;
@@ -4368,8 +4405,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
if (!pkt)
return -ENOMEM;
- pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
- memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+ irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
+ pkt->length = cpu_to_le32(irq_num_entries);
+
+ /* We must perform any necessary endianness conversion on the irq
+ * array being passed to the goya hardware
+ */
+ for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
+ irq_arr_index < irq_num_entries ; irq_arr_index++)
+ goya_irq_arr[irq_arr_index] =
+ cpu_to_le32(irq_arr[irq_arr_index]);
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
@@ -4424,6 +4469,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
struct goya_device *goya = hdev->asic_specific;
goya->events_stat[event_type]++;
+ goya->events_stat_aggregate[event_type]++;
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_PCIE_IF:
@@ -4449,7 +4495,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
- case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
goya_print_irq_info(hdev, event_type, false);
hl_device_reset(hdev, true, false);
break;
@@ -4485,6 +4530,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_unmask_irq(hdev, event_type);
break;
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
@@ -4505,12 +4551,16 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
}
}
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
struct goya_device *goya = hdev->asic_specific;
- *size = (u32) sizeof(goya->events_stat);
+ if (aggregate) {
+ *size = (u32) sizeof(goya->events_stat_aggregate);
+ return goya->events_stat_aggregate;
+ }
+ *size = (u32) sizeof(goya->events_stat);
return goya->events_stat;
}
@@ -4911,6 +4961,10 @@ int goya_armcp_info_get(struct hl_device *hdev)
prop->dram_end_address = prop->dram_base_address + dram_size;
}
+ if (!strlen(prop->armcp_info.card_name))
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
return 0;
}
@@ -5024,7 +5078,7 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
- return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
+ return RREG32(mmHW_STATE);
}
static const struct hl_asic_funcs goya_funcs = {
@@ -5041,7 +5095,7 @@ static const struct hl_asic_funcs goya_funcs = {
.resume = goya_resume,
.cb_mmap = goya_cb_mmap,
.ring_doorbell = goya_ring_doorbell,
- .flush_pq_write = goya_flush_pq_write,
+ .pqe_write = goya_pqe_write,
.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
.asic_dma_free_coherent = goya_dma_free_coherent,
.get_int_queue_base = goya_get_int_queue_base,
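
As a standalone sketch of the per-entry endianness handling added to goya_unmask_irq_arr() above (cpu_to_le32() is the real kernel helper; the helper name below is an illustrative assumption):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy a CPU-endian u32 array into the little-endian layout the device
 * firmware expects. A plain memcpy() would only be correct on
 * little-endian hosts, which is why the patch replaces it with a loop.
 */
static void example_irqs_to_le32(__le32 *dst, const u32 *src,
				 unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++)
		dst[i] = cpu_to_le32(src[i]);
}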
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index f8c611883dc1..89b6574f8e4f 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -55,6 +55,8 @@
#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
+#define GOYA_DEFAULT_CARD_NAME "HL1000"
+
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
@@ -68,19 +70,19 @@
MMU_PAGE_TABLES_SIZE)
#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
MMU_DRAM_DEFAULT_PAGE_SIZE)
-#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \
+#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + \
MMU_CACHE_MNG_SIZE)
#define DRAM_BASE_ADDR_USER 0x20000000
-#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
-#error "KMD must reserve no more than 512MB"
+#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "Driver must reserve no more than 512MB"
#endif
/*
- * SRAM Memory Map for KMD
+ * SRAM Memory Map for Driver
*
- * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. It is used for
+ * Driver occupies DRIVER_SRAM_SIZE bytes from the start of SRAM. It is used for
* MME/TPC QMANs
*
*/
@@ -106,10 +108,10 @@
#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \
(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
-#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
+#define SRAM_DRIVER_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \
(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
-#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
+#if (SRAM_DRIVER_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
#error "MME/TPC QMANs SRAM space exceeds limit"
#endif
@@ -162,6 +164,7 @@ struct goya_device {
u64 ddr_bar_cur_addr;
u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
+ u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE];
u32 hw_cap_initialized;
u8 device_cpu_mmu_mappings_done;
};
@@ -177,7 +180,7 @@ int goya_late_init(struct hl_device *hdev);
void goya_late_fini(struct hl_device *hdev);
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
void goya_update_eq_ci(struct hl_device *hdev, u32 val);
void goya_restore_phase_topology(struct hl_device *hdev);
int goya_context_switch(struct hl_device *hdev, u32 asid);
@@ -215,7 +218,7 @@ int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index d7ec7ad84cc6..b4d406af1bed 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -15,6 +15,10 @@
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
+#define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET
+#define SPMU_EVENT_TYPES_OFFSET 0x400
+#define SPMU_MAX_COUNTERS 6
+
static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
[GOYA_STM_CPU] = mmCPU_STM_BASE,
[GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
@@ -226,9 +230,16 @@ static int goya_config_stm(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_stm *input;
- u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
int rc;
+ if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+ dev_err(hdev->dev, "Invalid register index in STM\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
if (params->enable) {
@@ -288,10 +299,17 @@ static int goya_config_etf(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etf *input;
- u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
u32 val;
int rc;
+ if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+ dev_err(hdev->dev, "Invalid register index in ETF\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
val = RREG32(base_reg + 0x304);
@@ -445,11 +463,18 @@ static int goya_config_etr(struct hl_device *hdev,
static int goya_config_funnel(struct hl_device *hdev,
struct hl_debug_params *params)
{
- WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
- CORESIGHT_UNLOCK);
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+ dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
- WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
- params->enable ? 0x33F : 0);
+ WREG32(base_reg, params->enable ? 0x33F : 0);
return 0;
}
@@ -458,9 +483,16 @@ static int goya_config_bmon(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_bmon *input;
- u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
u32 pcie_base = 0;
+ if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+ dev_err(hdev->dev, "Invalid register index in BMON\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+
WREG32(base_reg + 0x104, 1);
if (params->enable) {
@@ -522,7 +554,7 @@ static int goya_config_bmon(struct hl_device *hdev,
static int goya_config_spmu(struct hl_device *hdev,
struct hl_debug_params *params)
{
- u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+ u64 base_reg;
struct hl_debug_params_spmu *input = params->input;
u64 *output;
u32 output_arr_len;
@@ -531,6 +563,13 @@ static int goya_config_spmu(struct hl_device *hdev,
u32 cycle_cnt_idx;
int i;
+ if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+ dev_err(hdev->dev, "Invalid register index in SPMU\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+
if (params->enable) {
input = params->input;
@@ -539,7 +578,13 @@ static int goya_config_spmu(struct hl_device *hdev,
if (input->event_types_num < 3) {
dev_err(hdev->dev,
- "not enough values for SPMU enable\n");
+ "not enough event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ if (input->event_types_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many event types values for SPMU enable\n");
return -EINVAL;
}
@@ -547,7 +592,8 @@ static int goya_config_spmu(struct hl_device *hdev,
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < input->event_types_num ; i++)
- WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
+ WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+ input->event_types[i]);
WREG32(base_reg + 0xE04, 0x41013041);
WREG32(base_reg + 0xC00, 0x8000003F);
@@ -567,6 +613,12 @@ static int goya_config_spmu(struct hl_device *hdev,
return -EINVAL;
}
+ if (events_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many events values for SPMU disable\n");
+ return -EINVAL;
+ }
+
WREG32(base_reg + 0xE04, 0x41013040);
for (i = 0 ; i < events_num ; i++)
@@ -584,24 +636,11 @@ static int goya_config_spmu(struct hl_device *hdev,
return 0;
}
-static int goya_config_timestamp(struct hl_device *hdev,
- struct hl_debug_params *params)
-{
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
- if (params->enable) {
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
- WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
- }
-
- return 0;
-}
-
int goya_debug_coresight(struct hl_device *hdev, void *data)
{
struct hl_debug_params *params = data;
u32 val;
- int rc;
+ int rc = 0;
switch (params->op) {
case HL_DEBUG_OP_STM:
@@ -623,7 +662,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
rc = goya_config_spmu(hdev, params);
break;
case HL_DEBUG_OP_TIMESTAMP:
- rc = goya_config_timestamp(hdev, params);
+ /* Do nothing as this opcode is deprecated */
break;
default:
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 088692c852b6..a2a700c3d597 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -230,18 +230,127 @@ static ssize_t ic_clk_curr_show(struct device *dev,
return sprintf(buf, "%lu\n", value);
}
+static ssize_t pm_mng_profile_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n",
+ (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
+ (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ "unknown");
+}
+
+static ssize_t pm_mng_profile_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (hdev->compute_ctx) {
+ dev_err(hdev->dev,
+ "Can't change PM profile while compute context is opened on the device\n");
+ count = -EPERM;
+ goto unlock_mutex;
+ }
+
+ if (strncmp("auto", buf, strlen("auto")) == 0) {
+ /* Make sure we are in LOW PLL when changing modes */
+ if (hdev->pm_mng_profile == PM_MANUAL) {
+ hdev->curr_pll_profile = PLL_HIGH;
+ hl_device_set_frequency(hdev, PLL_LOW);
+ hdev->pm_mng_profile = PM_AUTO;
+ }
+ } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+ if (hdev->pm_mng_profile == PM_AUTO) {
+ /* Must release the lock because the work thread also
+ * takes this lock. But before we release it, set
+ * the mode to manual so nothing will change if a user
+ * suddenly opens the device
+ */
+ hdev->pm_mng_profile = PM_MANUAL;
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ /* Flush the current work so we can return to the user
+ * knowing that he is the only one changing frequencies
+ */
+ flush_delayed_work(&hdev->work_freq);
+
+ return count;
+ }
+ } else {
+ dev_err(hdev->dev, "value should be auto or manual\n");
+ count = -EINVAL;
+ }
+
+unlock_mutex:
+ mutex_unlock(&hdev->fpriv_list_lock);
+out:
+ return count;
+}
+
+static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", hdev->high_pll);
+}
+
+static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+ int rc;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto out;
+ }
+
+ rc = kstrtoul(buf, 0, &value);
+
+ if (rc) {
+ count = -EINVAL;
+ goto out;
+ }
+
+ hdev->high_pll = value;
+
+out:
+ return count;
+}
+
+static DEVICE_ATTR_RW(high_pll);
static DEVICE_ATTR_RW(ic_clk);
static DEVICE_ATTR_RO(ic_clk_curr);
static DEVICE_ATTR_RW(mme_clk);
static DEVICE_ATTR_RO(mme_clk_curr);
+static DEVICE_ATTR_RW(pm_mng_profile);
static DEVICE_ATTR_RW(tpc_clk);
static DEVICE_ATTR_RO(tpc_clk_curr);
static struct attribute *goya_dev_attrs[] = {
+ &dev_attr_high_pll.attr,
&dev_attr_ic_clk.attr,
&dev_attr_ic_clk_curr.attr,
&dev_attr_mme_clk.attr,
&dev_attr_mme_clk_curr.attr,
+ &dev_attr_pm_mng_profile.attr,
&dev_attr_tpc_clk.attr,
&dev_attr_tpc_clk_curr.attr,
NULL,
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 10da9940ee0d..75862be53c60 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -36,6 +36,8 @@
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
+#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
+
#define HL_MAX_QUEUES 128
#define HL_MAX_JOBS_PER_CS 64
@@ -43,6 +45,8 @@
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
+#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
+
/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -92,12 +96,12 @@ enum hl_queue_type {
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
- * @kmd_only: true if only KMD is allowed to send a job to this queue, false
- * otherwise.
+ * @driver_only: true if only the driver is allowed to send a job to this queue,
+ * false otherwise.
*/
struct hw_queue_properties {
enum hl_queue_type type;
- u8 kmd_only;
+ u8 driver_only;
};
/**
@@ -320,7 +324,7 @@ struct hl_cs_job;
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
-/* KMD <-> ArmCP shared memory size */
+/* Host <-> ArmCP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
/**
@@ -401,7 +405,7 @@ struct hl_cs_parser;
/**
* enum hl_pm_mng_profile - power management profile.
- * @PM_AUTO: internal clock is set by KMD.
+ * @PM_AUTO: internal clock is set by the Linux driver.
* @PM_MANUAL: internal clock is set by the user.
* @PM_LAST: last power management type.
*/
@@ -441,7 +445,11 @@ enum hl_pll_frequency {
* @resume: handles IP specific H/W or SW changes for resume.
* @cb_mmap: maps a CB.
* @ring_doorbell: increment PI on a given QMAN.
- * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
+ * @pqe_write: Write the PQ entry to the PQ. This is an ASIC-specific
+ * function because the PQs are located in different memory areas
+ * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
+ * writing the PQE must match the destination memory area
+ * properties.
* @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
* dma_alloc_coherent(). This is ASIC function because
* its implementation is not trivial when the driver
@@ -510,7 +518,8 @@ struct hl_asic_funcs {
int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u64 kaddress, phys_addr_t paddress, u32 size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
- void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
+ void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
+ struct hl_bd *bd);
void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
@@ -549,7 +558,8 @@ struct hl_asic_funcs {
struct hl_eq_entry *eq_entry);
void (*set_pll_profile)(struct hl_device *hdev,
enum hl_pll_frequency freq);
- void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
+ void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
+ u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
@@ -603,7 +613,7 @@ struct hl_va_range {
* descriptor (hl_vm_phys_pg_list or hl_userptr).
* @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
- * @hpriv: pointer to the private (KMD) data of the process (fd).
+ * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
* this hits 0. It is incremented on CS and CS_WAIT.
@@ -629,6 +639,7 @@ struct hl_va_range {
* execution phase before the context switch phase
* has finished.
* @asid: context's unique address space ID in the device's MMU.
+ * @handle: context's opaque handle for user
*/
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
@@ -650,6 +661,7 @@ struct hl_ctx {
atomic_t thread_ctx_switch_token;
u32 thread_ctx_switch_wait_token;
u32 asid;
+ u32 handle;
};
/**
@@ -901,23 +913,27 @@ struct hl_debug_params {
* @hdev: habanalabs device structure.
* @filp: pointer to the given file structure.
* @taskpid: current process ID.
- * @ctx: current executing context.
+ * @ctx: current executing context. TODO: remove for multiple ctx per process
* @ctx_mgr: context manager to handle multiple context for this FD.
* @cb_mgr: command buffer manager to handle multiple buffers for this FD.
* @debugfs_list: list of relevant ASIC debugfs.
+ * @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
+ * @is_control: true for control device, false otherwise
*/
struct hl_fpriv {
struct hl_device *hdev;
struct file *filp;
struct pid *taskpid;
- struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
+ struct hl_ctx *ctx;
struct hl_ctx_mgr ctx_mgr;
struct hl_cb_mgr cb_mgr;
struct list_head debugfs_list;
+ struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
+ u8 is_control;
};
@@ -1004,7 +1020,7 @@ struct hl_dbg_device_entry {
*/
/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
- * x16 cards. In extereme cases, there are hosts that can accommodate 16 cards
+ * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
*/
#define HL_MAX_MINORS 256
@@ -1036,14 +1052,18 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
(val) << REG_FIELD_SHIFT(reg, field))
+/* Timeout should be longer when working with simulator but cap the
+ * increased timeout to some maximum
+ */
#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
(val) = RREG32(addr); \
@@ -1062,25 +1082,38 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
/*
* address in this macro points always to a memory location in the
* host's (server's) memory. That location is updated asynchronously
- * either by the direct access of the device or by another core
+ * either by the direct access of the device or by another core.
+ *
+ * To work both in LE and BE architectures, we need to distinguish between the
+ * two states (device or another core updates the memory location). Therefore,
+ * if mem_written_by_device is true, the host memory being polled will be
+ * updated directly by the device. If false, the host memory being polled will
+ * be updated by host CPU. Required so host knows whether or not the memory
+ * might need to be byte-swapped before returning value to caller.
*/
-#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us) \
+#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
+ mem_written_by_device) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
/* Verify we read updates done by other cores or by device */ \
mb(); \
(val) = *((u32 *) (uintptr_t) (addr)); \
+ if (mem_written_by_device) \
+ (val) = le32_to_cpu(*(__le32 *) &(val)); \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
(val) = *((u32 *) (uintptr_t) (addr)); \
+ if (mem_written_by_device) \
+ (val) = le32_to_cpu(*(__le32 *) &(val)); \
break; \
} \
if (sleep_us) \
@@ -1093,11 +1126,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
timeout_us) \
({ \
ktime_t __timeout; \
- /* timeout should be longer when working with simulator */ \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
- __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
+ __timeout = ktime_add_us(ktime_get(),\
+ min((u64)(timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
(val) = readl(addr); \
@@ -1126,12 +1160,24 @@ struct hl_device_reset_work {
};
/**
+ * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
+ * @idle_to_busy_ts: timestamp where device changed from idle to busy.
+ * @busy_to_idle_ts: timestamp where device changed from busy to idle.
+ */
+struct hl_device_idle_busy_ts {
+ ktime_t idle_to_busy_ts;
+ ktime_t busy_to_idle_ts;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar: array of available PCIe bars.
* @rmmio: configuration area address on SRAM.
* @cdev: related char device.
- * @dev: realted kernel basic device structure.
+ * @cdev_ctrl: char device for control operations only (INFO IOCTL)
+ * @dev: related kernel basic device structure.
+ * @dev_ctrl: related kernel device structure for the control device
* @work_freq: delayed work to lower device frequency if possible.
* @work_heartbeat: delayed work for ArmCP is-alive check.
* @asic_name: ASIC specific name.
@@ -1139,25 +1185,19 @@ struct hl_device_reset_work {
* @completion_queue: array of hl_cq.
* @cq_wq: work queue of completion queues for executing work in process context
* @eq_wq: work queue of event queue for executing work in process context.
- * @kernel_ctx: KMD context structure.
+ * @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @hw_queues_mirror_list: CS mirror list for TDR.
* @hw_queues_mirror_lock: protects hw_queues_mirror_list.
* @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
* @event_queue: event queue for IRQ from ArmCP.
* @dma_pool: DMA pool for small allocations.
- * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
- * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
- * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
+ * @cpu_accessible_dma_mem: Host <-> ArmCP shared memory CPU address.
+ * @cpu_accessible_dma_address: Host <-> ArmCP shared memory DMA address.
+ * @cpu_accessible_dma_pool: Host <-> ArmCP shared memory pool.
* @asid_bitmap: holds used/available ASIDs.
* @asid_mutex: protects asid_bitmap.
- * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
- * fd_open_cnt is atomic, we need this lock to serialize
- * the open function because the driver currently supports
- * only a single process at a time. In addition, we need a
- * lock here so we can flush user processes which are opening
- * the device while we are trying to hard reset it
- * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @send_cpu_message_lock: enforces only one message in Host <-> ArmCP queue.
* @debug_lock: protects critical section of setting debug mode for device
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
@@ -1172,22 +1212,28 @@ struct hl_device_reset_work {
* @hl_debugfs: device's debugfs manager.
* @cb_pool: list of preallocated CBs.
* @cb_pool_lock: protects the CB pool.
- * @user_ctx: current user context executing.
+ * @fpriv_list: list of file private data structures. Each structure is created
+ * when a user opens the device
+ * @fpriv_list_lock: protects the fpriv_list
+ * @compute_ctx: current compute context executing.
+ * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
+ * and vice-versa
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
- * value is saved so in case of hard-reset, KMD will restore this
- * value and update the F/W after the re-initialization
+ * value is saved so in case of hard-reset, the driver will restore
+ * this value and update the F/W after the re-initialization
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
* @cs_active_cnt: number of active command submissions on this device (active
* means already in H/W queues)
- * @major: habanalabs KMD major.
+ * @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
- * @soft_reset_cnt: number of soft reset since KMD loading.
- * @hard_reset_cnt: number of hard reset since KMD loading.
+ * @soft_reset_cnt: number of soft reset since the driver was loaded.
+ * @hard_reset_cnt: number of hard reset since the driver was loaded.
+ * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
* @id: device minor.
+ * @id_control: minor of the control device
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
@@ -1201,15 +1247,18 @@ struct hl_device_reset_work {
* @mmu_enable: is MMU enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
- * @in_debug: is device under debug. This, together with fd_open_cnt, enforces
+ * @in_debug: is device under debug. This, together with fpriv_list, enforces
* that only a single user is configuring the debug infrastructure.
+ * @cdev_sysfs_created: were char devices and sysfs nodes created.
*/
struct hl_device {
struct pci_dev *pdev;
void __iomem *pcie_bar[6];
void __iomem *rmmio;
struct cdev cdev;
+ struct cdev cdev_ctrl;
struct device *dev;
+ struct device *dev_ctrl;
struct delayed_work work_freq;
struct delayed_work work_heartbeat;
char asic_name[16];
@@ -1229,8 +1278,6 @@ struct hl_device {
struct gen_pool *cpu_accessible_dma_pool;
unsigned long *asid_bitmap;
struct mutex asid_mutex;
- /* TODO: remove fd_open_cnt_lock for multiple process support */
- struct mutex fd_open_cnt_lock;
struct mutex send_cpu_message_lock;
struct mutex debug_lock;
struct asic_fixed_properties asic_prop;
@@ -1249,21 +1296,26 @@ struct hl_device {
struct list_head cb_pool;
spinlock_t cb_pool_lock;
- /* TODO: remove user_ctx for multiple process support */
- struct hl_ctx *user_ctx;
+ struct list_head fpriv_list;
+ struct mutex fpriv_list_lock;
+
+ struct hl_ctx *compute_ctx;
+
+ struct hl_device_idle_busy_ts *idle_busy_ts_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
atomic_t in_reset;
- atomic_t curr_pll_profile;
- atomic_t fd_open_cnt;
- atomic_t cs_active_cnt;
+ enum hl_pll_frequency curr_pll_profile;
+ int cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
u32 hard_reset_cnt;
+ u32 idle_busy_ts_idx;
u16 id;
+ u16 id_control;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -1276,6 +1328,7 @@ struct hl_device {
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
+ u8 cdev_sysfs_created;
/* Parameters for bring-up */
u8 mmu_enable;
@@ -1369,6 +1422,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
int hl_device_open(struct inode *inode, struct file *filp);
+int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
@@ -1422,6 +1476,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
void hl_hpriv_get(struct hl_fpriv *hpriv);
void hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct armcp_sensor *sensors_arr);
@@ -1608,6 +1663,7 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
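
A usage sketch of the reworked hl_poll_timeout_memory() macro earlier in this file, mirroring the callers updated in this series (assuming the macro keeps its usual 0 / -ETIMEDOUT return convention; pkt and timeout stand for a caller's own fence packet and timeout value):

u32 tmp;
int rc;

/* The fence lives in host memory and is written directly by the device,
 * so mem_written_by_device is true and the polled value gets
 * le32_to_cpu()-converted before the condition is evaluated. Pass false
 * when the location is updated by another host CPU core instead.
 */
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
			    (tmp == ARMCP_PACKET_FENCE_VAL), 1000,
			    timeout, true);
if (rc)
	dev_err(hdev->dev, "Timed out waiting for device fence\n");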
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 6f6dbe93f1df..8c342fb499ca 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -95,80 +95,127 @@ int hl_device_open(struct inode *inode, struct file *filp)
return -ENXIO;
}
- mutex_lock(&hdev->fd_open_cnt_lock);
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ mutex_init(&hpriv->restore_phase_mutex);
+ kref_init(&hpriv->refcount);
+ nonseekable_open(inode, filp);
+
+ hl_cb_mgr_init(&hpriv->cb_mgr);
+ hl_ctx_mgr_init(&hpriv->ctx_mgr);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ mutex_lock(&hdev->fpriv_list_lock);
if (hl_device_disabled_or_in_reset(hdev)) {
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is disabled or in reset\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EPERM;
+ rc = -EPERM;
+ goto out_err;
}
if (hdev->in_debug) {
dev_err_ratelimited(hdev->dev,
"Can't open %s because it is being debugged by another user\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EPERM;
+ rc = -EPERM;
+ goto out_err;
}
- if (atomic_read(&hdev->fd_open_cnt)) {
- dev_info_ratelimited(hdev->dev,
+ if (hdev->compute_ctx) {
+ dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
- mutex_unlock(&hdev->fd_open_cnt_lock);
- return -EBUSY;
- }
-
- atomic_inc(&hdev->fd_open_cnt);
-
- mutex_unlock(&hdev->fd_open_cnt_lock);
-
- hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
- if (!hpriv) {
- rc = -ENOMEM;
- goto close_device;
+ rc = -EBUSY;
+ goto out_err;
}
- hpriv->hdev = hdev;
- filp->private_data = hpriv;
- hpriv->filp = filp;
- mutex_init(&hpriv->restore_phase_mutex);
- kref_init(&hpriv->refcount);
- nonseekable_open(inode, filp);
-
- hl_cb_mgr_init(&hpriv->cb_mgr);
- hl_ctx_mgr_init(&hpriv->ctx_mgr);
-
rc = hl_ctx_create(hdev, hpriv);
if (rc) {
- dev_err(hdev->dev, "Failed to open FD (CTX fail)\n");
+ dev_err(hdev->dev, "Failed to create context %d\n", rc);
goto out_err;
}
- hpriv->taskpid = find_get_pid(current->pid);
-
- /*
- * Device is IDLE at this point so it is legal to change PLLs. There
- * is no need to check anything because if the PLL is already HIGH, the
- * set function will return without doing anything
+ /* Device is IDLE at this point so it is legal to change PLLs.
+ * There is no need to check anything because if the PLL is
+ * already HIGH, the set function will return without doing
+ * anything
*/
hl_device_set_frequency(hdev, PLL_HIGH);
+ list_add(&hpriv->dev_node, &hdev->fpriv_list);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
hl_debugfs_add_file(hpriv);
return 0;
out_err:
- filp->private_data = NULL;
- hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ filp->private_data = NULL;
mutex_destroy(&hpriv->restore_phase_mutex);
+ put_pid(hpriv->taskpid);
+
kfree(hpriv);
+ return rc;
+}
+
+int hl_device_open_ctrl(struct inode *inode, struct file *filp)
+{
+ struct hl_device *hdev;
+ struct hl_fpriv *hpriv;
+ int rc;
+
+ mutex_lock(&hl_devs_idr_lock);
+ hdev = idr_find(&hl_devs_idr, iminor(inode));
+ mutex_unlock(&hl_devs_idr_lock);
+
+ if (!hdev) {
+ pr_err("Couldn't find device %d:%d\n",
+ imajor(inode), iminor(inode));
+ return -ENXIO;
+ }
+
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ dev_err_ratelimited(hdev->dev_ctrl,
+ "Can't open %s because it is disabled or in reset\n",
+ dev_name(hdev->dev_ctrl));
+ rc = -EPERM;
+ goto out_err;
+ }
-close_device:
- atomic_dec(&hdev->fd_open_cnt);
+ list_add(&hpriv->dev_node, &hdev->fpriv_list);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ hpriv->is_control = true;
+ nonseekable_open(inode, filp);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&hdev->fpriv_list_lock);
+ kfree(hpriv);
return rc;
}
@@ -199,7 +246,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
enum hl_asic_type asic_type, int minor)
{
struct hl_device *hdev;
- int rc;
+ int rc, main_id, ctrl_id = 0;
*dev = NULL;
@@ -240,33 +287,34 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
mutex_lock(&hl_devs_idr_lock);
- if (minor == -1) {
- rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
+ /* Always save 2 numbers, 1 for main device and 1 for control.
+ * They must be consecutive
+ */
+ main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
GFP_KERNEL);
- } else {
- void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
- if (IS_ERR_VALUE(old_idr)) {
- rc = PTR_ERR(old_idr);
- pr_err("Error %d when trying to replace minor %d\n",
- rc, minor);
- mutex_unlock(&hl_devs_idr_lock);
- goto free_hdev;
- }
- rc = minor;
- }
+ if (main_id >= 0)
+ ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
+ main_id + 2, GFP_KERNEL);
mutex_unlock(&hl_devs_idr_lock);
- if (rc < 0) {
- if (rc == -ENOSPC) {
+ if ((main_id < 0) || (ctrl_id < 0)) {
+ if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC))
pr_err("too many devices in the system\n");
- rc = -EBUSY;
+
+ if (main_id >= 0) {
+ mutex_lock(&hl_devs_idr_lock);
+ idr_remove(&hl_devs_idr, main_id);
+ mutex_unlock(&hl_devs_idr_lock);
}
+
+ rc = -EBUSY;
goto free_hdev;
}
- hdev->id = rc;
+ hdev->id = main_id;
+ hdev->id_control = ctrl_id;
*dev = hdev;
@@ -288,6 +336,7 @@ void destroy_hdev(struct hl_device *hdev)
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
idr_remove(&hl_devs_idr, hdev->id);
+ idr_remove(&hl_devs_idr, hdev->id_control);
mutex_unlock(&hl_devs_idr_lock);
kfree(hdev);
@@ -295,8 +344,7 @@ void destroy_hdev(struct hl_device *hdev)
static int hl_pmops_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct hl_device *hdev = pci_get_drvdata(pdev);
+ struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to suspend PCI device\n");
@@ -310,8 +358,7 @@ static int hl_pmops_suspend(struct device *dev)
static int hl_pmops_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct hl_device *hdev = pci_get_drvdata(pdev);
+ struct hl_device *hdev = dev_get_drvdata(dev);
pr_debug("Going to resume PCI device\n");
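
The create_hdev() hunk above now reserves two consecutive minors per device: one for the main (compute) node and one for the control node. A minimal, self-contained sketch of that idr_alloc() pattern follows; the identifiers (my_devs_idr, MY_MAX_MINORS, my_alloc_consecutive_ids) are hypothetical, and only the allocate-then-roll-back logic mirrors the hunk.

#include <linux/idr.h>
#include <linux/mutex.h>

#define MY_MAX_MINORS	256	/* hypothetical limit */

static DEFINE_IDR(my_devs_idr);
static DEFINE_MUTEX(my_devs_idr_lock);

/* Reserve two consecutive ids: the main device and its control node. */
static int my_alloc_consecutive_ids(void *dev_priv, int *main_id, int *ctrl_id)
{
	*ctrl_id = 0;

	mutex_lock(&my_devs_idr_lock);
	*main_id = idr_alloc(&my_devs_idr, dev_priv, 0, MY_MAX_MINORS,
			     GFP_KERNEL);
	if (*main_id >= 0)
		/* [main_id + 1, main_id + 2) yields exactly main_id + 1 */
		*ctrl_id = idr_alloc(&my_devs_idr, dev_priv, *main_id + 1,
				     *main_id + 2, GFP_KERNEL);
	mutex_unlock(&my_devs_idr_lock);

	if (*main_id < 0 || *ctrl_id < 0) {
		/* Roll back the main id if only the control id failed. */
		if (*main_id >= 0) {
			mutex_lock(&my_devs_idr_lock);
			idr_remove(&my_devs_idr, *main_id);
			mutex_unlock(&my_devs_idr_lock);
		}
		return -EBUSY;
	}

	return 0;
}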
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 07127576b3e8..66d9c710073c 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -65,7 +65,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.num_of_events = prop->num_of_events;
memcpy(hw_ip.armcp_version,
prop->armcp_info.armcp_version, VERSION_MAX_LEN);
- hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
@@ -75,7 +75,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
-static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
+static int hw_events_info(struct hl_device *hdev, bool aggregate,
+ struct hl_info_args *args)
{
u32 size, max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -84,13 +85,14 @@ static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- arr = hdev->asic_funcs->get_events_stat(hdev, &size);
+ arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
-static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
+static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ struct hl_device *hdev = hpriv->hdev;
struct hl_info_dram_usage dram_usage = {0};
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -104,7 +106,9 @@ static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
prop->dram_base_address);
dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
atomic64_read(&hdev->dram_used_mem);
- dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem);
+ if (hpriv->ctx)
+ dram_usage.ctx_dram_mem =
+ atomic64_read(&hpriv->ctx->dram_phys_mem);
return copy_to_user(out, &dram_usage,
min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
@@ -141,13 +145,16 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
params->op = args->op;
if (args->input_ptr && args->input_size) {
- input = memdup_user(u64_to_user_ptr(args->input_ptr),
- args->input_size);
- if (IS_ERR(input)) {
- rc = PTR_ERR(input);
- input = NULL;
- dev_err(hdev->dev,
- "error %d when copying input debug data\n", rc);
+ input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
+ if (!input) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
+ args->input_size)) {
+ rc = -EFAULT;
+ dev_err(hdev->dev, "failed to copy input debug data\n");
goto out;
}
@@ -191,42 +198,81 @@ out:
return rc;
}
-static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_device_utilization device_util = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ if ((args->period_ms < 100) || (args->period_ms > 1000) ||
+ (args->period_ms % 100)) {
+ dev_err(hdev->dev,
+ "period %u must be between 100 - 1000 and must be divisible by 100\n",
+ args->period_ms);
+ return -EINVAL;
+ }
+
+ device_util.utilization = hl_device_utilization(hdev, args->period_ms);
+
+ return copy_to_user(out, &device_util,
+ min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
+}
+
+static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
+ struct device *dev)
{
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
int rc;
- /* We want to return device status even if it disabled or in reset */
- if (args->op == HL_INFO_DEVICE_STATUS)
+ /*
+ * Information is returned for the following opcodes even if the device
+ * is disabled or in reset.
+ */
+ switch (args->op) {
+ case HL_INFO_HW_IP_INFO:
+ return hw_ip_info(hdev, args);
+
+ case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ default:
+ break;
+ }
+
if (hl_device_disabled_or_in_reset(hdev)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_warn_ratelimited(dev,
"Device is %s. Can't execute INFO IOCTL\n",
atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
return -EBUSY;
}
switch (args->op) {
- case HL_INFO_HW_IP_INFO:
- rc = hw_ip_info(hdev, args);
- break;
-
case HL_INFO_HW_EVENTS:
- rc = hw_events_info(hdev, args);
+ rc = hw_events_info(hdev, false, args);
break;
case HL_INFO_DRAM_USAGE:
- rc = dram_usage_info(hdev, args);
+ rc = dram_usage_info(hpriv, args);
break;
case HL_INFO_HW_IDLE:
rc = hw_idle(hdev, args);
break;
+ case HL_INFO_DEVICE_UTILIZATION:
+ rc = device_utilization(hdev, args);
+ break;
+
+ case HL_INFO_HW_EVENTS_AGGREGATE:
+ rc = hw_events_info(hdev, true, args);
+ break;
+
default:
- dev_err(hdev->dev, "Invalid request %d\n", args->op);
+ dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
break;
}
@@ -234,6 +280,16 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
+static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+ return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
+}
+
+static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
+{
+ return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
+}
+
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_debug_args *args = data;
@@ -288,52 +344,45 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
-#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
+static const struct hl_ioctl_desc hl_ioctls_control[] = {
+ HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
+};
-long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
+ const struct hl_ioctl_desc *ioctl, struct device *dev)
{
struct hl_fpriv *hpriv = filep->private_data;
struct hl_device *hdev = hpriv->hdev;
- hl_ioctl_t *func;
- const struct hl_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
unsigned int usize, asize;
+ hl_ioctl_t *func;
+ u32 hl_size;
int retcode;
if (hdev->hard_reset_pending) {
- dev_crit_ratelimited(hdev->dev,
+ dev_crit_ratelimited(hdev->dev_ctrl,
"Device HARD reset pending! Please close FD\n");
return -ENODEV;
}
- if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
- u32 hl_size;
-
- ioctl = &hl_ioctls[nr];
-
- hl_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (hl_size > asize)
- asize = hl_size;
-
- cmd = ioctl->cmd;
- } else {
- dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
- task_pid_nr(current), nr);
- return -ENOTTY;
- }
-
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
- dev_dbg(hdev->dev, "no function\n");
+ dev_dbg(dev, "no function\n");
retcode = -ENOTTY;
goto out_err;
}
+ hl_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (hl_size > asize)
+ asize = hl_size;
+
+ cmd = ioctl->cmd;
+
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
@@ -363,8 +412,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
out_err:
if (retcode)
- dev_dbg(hdev->dev,
- "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);
if (kdata != stack_kdata)
@@ -372,3 +420,39 @@ out_err:
return retcode;
}
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ ioctl = &hl_ioctls[nr];
+ } else {
+ dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
+}
+
+long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct hl_fpriv *hpriv = filep->private_data;
+ struct hl_device *hdev = hpriv->hdev;
+ const struct hl_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+
+ if (nr == _IOC_NR(HL_IOCTL_INFO)) {
+ ioctl = &hl_ioctls_control[nr];
+ } else {
+ dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
+ task_pid_nr(current), nr);
+ return -ENOTTY;
+ }
+
+ return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
+}
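
hl_ioctl() and hl_ioctl_control() are thin wrappers that pick an ioctl descriptor table and a struct device for logging, then defer to the common _hl_ioctl(). The file_operations wiring that exposes them on separate character devices lives in the driver's device.c, which is not part of this excerpt; a plausible sketch, assuming the open/ioctl prototypes shown above and with the fops names invented for illustration:

#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations my_compute_fops = {
	.owner		= THIS_MODULE,
	.open		= hl_device_open,	/* full compute context */
	.unlocked_ioctl	= hl_ioctl,		/* whole hl_ioctls[] table */
	.compat_ioctl	= hl_ioctl,
};

static const struct file_operations my_control_fops = {
	.owner		= THIS_MODULE,
	.open		= hl_device_open_ctrl,	/* no compute context */
	.unlocked_ioctl	= hl_ioctl_control,	/* INFO opcode only */
	.compat_ioctl	= hl_ioctl_control,
};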
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index e3b5517897ea..55b383b2a116 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -80,9 +80,9 @@ static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
bd += hl_pi_2_offset(q->pi);
- bd->ctl = __cpu_to_le32(ctl);
- bd->len = __cpu_to_le32(len);
- bd->ptr = __cpu_to_le64(ptr);
+ bd->ctl = cpu_to_le32(ctl);
+ bd->len = cpu_to_le32(len);
+ bd->ptr = cpu_to_le64(ptr);
q->pi = hl_queue_inc_ptr(q->pi);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
@@ -249,7 +249,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
len = job->job_cb_size;
ptr = cb->bus_address;
- cq_pkt.data = __cpu_to_le32(
+ cq_pkt.data = cpu_to_le32(
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK) |
(1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
@@ -267,7 +267,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
cq_addr,
- __le32_to_cpu(cq_pkt.data),
+ le32_to_cpu(cq_pkt.data),
q->hw_queue_id);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_bd bd;
- u64 *pi, *pbd = (u64 *) &bd;
+ __le64 *pi;
bd.ctl = 0;
- bd.len = __cpu_to_le32(job->job_cb_size);
- bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
+ bd.len = cpu_to_le32(job->job_cb_size);
+ bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
- pi = (u64 *) (uintptr_t) (q->kernel_address +
+ pi = (__le64 *) (uintptr_t) (q->kernel_address +
((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
- pi[0] = pbd[0];
- pi[1] = pbd[1];
-
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
- /* Flush PQ entry write. Relevant only for specific ASICs */
- hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
+ hdev->asic_funcs->pqe_write(hdev, pi, &bd);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
@@ -368,7 +364,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
- atomic_inc(&hdev->cs_active_cnt);
+ if (!hdev->cs_active_cnt++) {
+ struct hl_device_idle_busy_ts *ts;
+
+ ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
+ ts->busy_to_idle_ts = ktime_set(0, 0);
+ ts->idle_to_busy_ts = ktime_get();
+ }
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
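
Replacing the plain atomic counter with cs_active_cnt plus the idle_busy_ts_arr timestamps is what backs the new HL_INFO_DEVICE_UTILIZATION opcode added in habanalabs_ioctl.c. The real hl_device_utilization() lives in device.c and is not shown in this excerpt; the sketch below only illustrates one plausible way to turn such idle/busy timestamp pairs into a busy percentage over a sampling window. The struct layout, names and math here are assumptions, not the driver's implementation.

#include <linux/ktime.h>
#include <linux/math64.h>

struct my_idle_busy_ts {
	ktime_t idle_to_busy_ts;	/* set when the device became busy */
	ktime_t busy_to_idle_ts;	/* zero while still busy */
};

/*
 * Approximate busy percentage over the last @period_ms, given @n recorded
 * busy intervals and the current time @now.
 */
static u32 my_utilization(const struct my_idle_busy_ts *ts, int n,
			  u32 period_ms, ktime_t now)
{
	ktime_t window_start = ktime_sub_ms(now, period_ms);
	s64 busy_ns = 0;
	int i;

	for (i = 0; i < n; i++) {
		ktime_t start = ts[i].idle_to_busy_ts;
		ktime_t end = ktime_to_ns(ts[i].busy_to_idle_ts) ?
				ts[i].busy_to_idle_ts : now;

		if (ktime_compare(end, window_start) <= 0)
			continue;	/* interval ended before the window */
		if (ktime_compare(start, window_start) < 0)
			start = window_start;

		busy_ns += ktime_to_ns(ktime_sub(end, start));
	}

	return (u32)div64_s64(busy_ns * 100,
			      (s64)period_ms * NSEC_PER_MSEC);
}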
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 77facd25c4a2..7be4bace9b4f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -26,7 +26,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
int rc, i, j;
for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
- type = __le32_to_cpu(sensors_arr[i].type);
+ type = le32_to_cpu(sensors_arr[i].type);
if ((type == 0) && (sensors_arr[i].flags == 0))
break;
@@ -58,10 +58,10 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
}
for (i = 0 ; i < arr_size ; i++) {
- type = __le32_to_cpu(sensors_arr[i].type);
+ type = le32_to_cpu(sensors_arr[i].type);
curr_arr = sensors_by_type[type];
curr_arr[sensors_by_type_next_index[type]++] =
- __le32_to_cpu(sensors_arr[i].flags);
+ le32_to_cpu(sensors_arr[i].flags);
}
channels_info = kcalloc(num_active_sensor_types + 1,
@@ -273,7 +273,7 @@ long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -299,7 +299,7 @@ long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -325,7 +325,7 @@ long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -351,7 +351,7 @@ long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -377,7 +377,7 @@ long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -403,11 +403,11 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
- pkt.value = __cpu_to_le64(value);
+ pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
@@ -421,6 +421,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
@@ -430,7 +431,8 @@ int hl_hwmon_init(struct hl_device *hdev)
hdev->hl_chip_info->ops = &hl_hwmon_ops;
hdev->hwmon_dev = hwmon_device_register_with_info(dev,
- "habanalabs", hdev, hdev->hl_chip_info, NULL);
+ prop->armcp_info.card_name, hdev,
+ hdev->hl_chip_info, NULL);
if (IS_ERR(hdev->hwmon_dev)) {
rc = PTR_ERR(hdev->hwmon_dev);
dev_err(hdev->dev,
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index 1f1e35e86d84..e4c6699a1868 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -41,33 +41,34 @@ enum pq_init_status {
/*
* ArmCP Primary Queue Packets
*
- * During normal operation, KMD needs to send various messages to ArmCP,
- * usually either to SET some value into a H/W periphery or to GET the current
- * value of some H/W periphery. For example, SET the frequency of MME/TPC and
- * GET the value of the thermal sensor.
- *
- * These messages can be initiated either by the User application or by KMD
- * itself, e.g. power management code. In either case, the communication from
- * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will
- * send a single message and poll until the message was acknowledged and the
- * results are ready (if results are needed).
- *
- * This means that only a single message can be sent at a time and KMD must
- * wait for its result before sending the next message. Having said that,
- * because these are control messages which are sent in a relatively low
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to ArmCP, usually either to SET some value into a H/W periphery or
+ * to GET the current value of some H/W periphery. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to ArmCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message was acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent in a relatively low
* frequency, this limitation seems acceptable. It's important to note that
* in case of multiple devices, messages to different devices *can* be sent
* at the same time.
*
* The message, inputs/outputs (if relevant) and fence object will be located
- * on the device DDR at an address that will be determined by KMD. During
- * device initialization phase, KMD will pass to ArmCP that address. Most of
- * the message types will contain inputs/outputs inside the message itself.
- * The common part of each message will contain the opcode of the message (its
- * type) and a field representing a fence object.
- *
- * When KMD wishes to send a message to ArmCP, it will write the message
- * contents to the device DDR, clear the fence object and then write the
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During device initialization phase, the host will pass to ArmCP that address.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to ArmCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write the
* value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
* the 484 interrupt-id to the ARM core.
*
@@ -78,12 +79,13 @@ enum pq_init_status {
* device DDR and then write to the fence object. If an error occurred, ArmCP
* will fill the rc field with the right error code.
*
- * In the meantime, KMD will poll on the fence object. Once KMD sees that the
- * fence object is signaled, it will read the results from the device DDR
- * (if relevant) and resume the code execution in KMD.
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
*
* To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
- * so the value being put by the KMD matches the value read by ArmCP
+ * so the value being put by the host's driver matches the value read by ArmCP
*
* Non-QMAN packets should be limited to values 1 through (2^8 - 1)
*
@@ -148,9 +150,9 @@ enum pq_init_status {
*
* ARMCP_PACKET_INFO_GET -
* Fetch information from the device as specified in the packet's
- * structure. KMD passes the max size it allows the ArmCP to write to
- * the structure, to prevent data corruption in case of mismatched
- * KMD/FW versions.
+ * structure. The host's driver passes the max size it allows the ArmCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
*
* ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
*
@@ -183,9 +185,9 @@ enum pq_init_status {
* ARMCP_PACKET_EEPROM_DATA_GET -
* Get EEPROM data from the ArmCP kernel. The buffer is specified in the
* addr field. The CPU will put the returned data size in the result
- * field. In addition, KMD passes the max size it allows the ArmCP to
- * write to the structure, to prevent data corruption in case of
- * mismatched KMD/FW versions.
+ * field. In addition, the host's driver passes the max size it allows the
+ * ArmCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
*
*/
@@ -231,7 +233,7 @@ struct armcp_packet {
__le32 ctl;
- __le32 fence; /* Signal to KMD that message is completed */
+ __le32 fence; /* Signal to host that message is completed */
union {
struct {/* For temperature/current/voltage/fan/pwm get/set */
@@ -310,6 +312,7 @@ struct eq_generic_event {
* ArmCP info
*/
+#define CARD_NAME_MAX_LEN 16
#define VERSION_MAX_LEN 128
#define ARMCP_MAX_SENSORS 128
@@ -318,6 +321,19 @@ struct armcp_sensor {
__le32 flags;
};
+/**
+ * struct armcp_info - Info from ArmCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: ArmCP linux kernel version.
+ * @reserved: reserved field.
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @armcp_version: ArmCP S/W version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in the HWMON subsystem on the host
+ */
struct armcp_info {
struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
@@ -328,6 +344,7 @@ struct armcp_info {
__u8 thermal_version[VERSION_MAX_LEN];
__u8 armcp_version[VERSION_MAX_LEN];
__le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
};
#endif /* ARMCP_IF_H */
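
The reworded comment block above still describes the same synchronous handshake: the host writes the message into device DDR, clears the fence, writes 484 to mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR and then polls the fence until ArmCP signals completion. The driver's actual transport code is elsewhere in the series; the following is only a schematic of that handshake, with the function name, pointer parameters and polling values invented for illustration.

#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_ARMCP_IRQ_VALUE	484	/* interrupt-id described above */

/*
 * Schematic of the handshake: @msg points to the message area in device DDR
 * (already ioremapped), @fence to its fence word, @gicd_setspi to the GIC
 * SETSPI register.  Polling interval and timeout are arbitrary.
 */
static int my_send_armcp_message(void __iomem *msg, const void *payload,
				 size_t len, void __iomem *fence,
				 void __iomem *gicd_setspi)
{
	u32 val;

	memcpy_toio(msg, payload, len);			/* 1. write message */
	writel(0, fence);				/* 2. clear fence   */
	writel(MY_ARMCP_IRQ_VALUE, gicd_setspi);	/* 3. kick ArmCP    */

	/* 4. poll until ArmCP signals the fence, or time out after 1s */
	return readl_poll_timeout(fence, val, val != 0, 100, 1000000);
}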
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 3f02a52ba4ce..43d241891e45 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -38,4 +38,6 @@
#define TPC_MAX_NUM 8
+#define MME_MAX_NUM 1
+
#endif /* GOYA_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index a14407b975e4..ef54bad20509 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -52,6 +52,19 @@ enum goya_dma_direction {
#define GOYA_PKT_CTL_MB_SHIFT 31
#define GOYA_PKT_CTL_MB_MASK 0x80000000
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before
+ * sending them off to the hardware.
+ */
+struct goya_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type
+ */
+ u8 contents[0];
+};
+
struct packet_nop {
__le32 reserved;
__le32 ctl;
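
struct goya_packet gives the command-buffer parser a generic first view of every packet: read the 8-byte header, extract the opcode, then reinterpret the packet through the matching packet_XXX structure. The walker below is a simplified illustration of that idea, assuming the struct goya_packet definition added above; the opcode mask/shift values and the per-opcode size table are stand-ins, not the driver's real definitions.

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative placement of the opcode inside the 64-bit header. */
#define MY_PKT_OPCODE_MASK	GENMASK_ULL(60, 56)
#define MY_PKT_OPCODE_SHIFT	56

/* Hypothetical per-opcode packet sizes, indexed by opcode. */
static const u16 my_packet_sizes[32];

static int my_walk_cb(const void *cb, u32 cb_size)
{
	u32 offset = 0;

	while (offset + sizeof(struct goya_packet) <= cb_size) {
		const struct goya_packet *pkt = cb + offset;
		u64 header = le64_to_cpu(pkt->header);
		u8 opcode = (header & MY_PKT_OPCODE_MASK) >>
				MY_PKT_OPCODE_SHIFT;
		u16 size = my_packet_sizes[opcode];

		if (!size || offset + size > cb_size)
			return -EINVAL;	/* unknown opcode or truncated CB */

		/* validate/patch the packet via its packet_XXX view here */

		offset += size;
	}

	return offset == cb_size ? 0 : -EINVAL;
}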
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
new file mode 100644
index 000000000000..cd89723c7f61
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_REG_MAP_H_
+#define GOYA_REG_MAP_H_
+
+/*
+ * PSOC scratch-pad registers
+ */
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmUBOOT_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+
+#endif /* GOYA_REG_MAP_H_ */
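
These aliases are consumed by the ASIC initialization/firmware-load path (not part of this diff) when the driver publishes queue locations to the embedded CPU. A representative fragment, assuming the driver's usual WREG32() register accessor and a hypothetical helper name:

static void my_publish_cpu_pq(struct hl_device *hdev,
			      dma_addr_t cpu_pq_dma_addr, u32 cpu_pq_size)
{
	/* Split the 64-bit DMA address across the two scratch-pad aliases */
	WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq_dma_addr));
	WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq_dma_addr));
	WREG32(mmCPU_PQ_LENGTH, cpu_pq_size);
}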
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index ea9f72ff456c..fac65fbd70e8 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
struct hl_cs_job *job;
bool shadow_index_valid;
u16 shadow_index;
- u32 *cq_entry;
- u32 *cq_base;
+ struct hl_cq_entry *cq_entry, *cq_base;
if (hdev->disabled) {
dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
- cq_base = (u32 *) (uintptr_t) cq->kernel_address;
+ cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
while (1) {
- bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
+ bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
+ CQ_ENTRY_READY_MASK)
>> CQ_ENTRY_READY_SHIFT);
if (!entry_ready)
break;
- cq_entry = (u32 *) &cq_base[cq->ci];
+ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
- /*
- * Make sure we read CQ entry contents after we've
+ /* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
- shadow_index_valid =
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
- shadow_index = (u16)
- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
+ shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
+ CQ_ENTRY_SHADOW_INDEX_MASK)
>> CQ_ENTRY_SHADOW_INDEX_SHIFT);
queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
queue_work(hdev->cq_wq, &job->finish_work);
}
- /*
- * Update ci of the context's queue. There is no
+ /* Update ci of the context's queue. There is no
* need to protect it with spinlock because this update is
* done only inside IRQ and there is a different IRQ per
* queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
queue->ci = hl_queue_inc_ptr(queue->ci);
/* Clear CQ entry ready bit */
- cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
+ cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
+ ~CQ_ENTRY_READY_MASK);
cq->ci = hl_cq_inc_ptr(cq->ci);
@@ -161,7 +160,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
while (1) {
bool entry_ready =
- ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
+ ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
if (!entry_ready)
@@ -195,7 +194,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
skip_irq:
/* Clear EQ entry ready bit */
eq_entry->hdr.ctl =
- __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) &
+ cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
~EQ_CTL_READY_MASK);
eq->ci = hl_eq_inc_ptr(eq->ci);
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc..365fb0cb8dff 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
dev_dbg(hdev->dev,
"page list 0x%p of asid %d is still alive\n",
phys_pg_list, ctx->asid);
+ atomic64_sub(phys_pg_list->total_size,
+ &hdev->dram_used_mem);
free_phys_pg_pack(hdev, phys_pg_list);
idr_remove(&vm->phys_pg_pack_handles, i);
}
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 25eb46d29d88..4cd622b017b9 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -21,12 +21,12 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
memset(&pkt, 0, sizeof(pkt));
if (curr)
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
else
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = __cpu_to_le32(pll_index);
+ pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, &result);
@@ -48,10 +48,10 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.pll_index = __cpu_to_le32(pll_index);
- pkt.value = __cpu_to_le64(freq);
+ pkt.pll_index = cpu_to_le32(pll_index);
+ pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, NULL);
@@ -70,7 +70,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -91,9 +91,9 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.value = __cpu_to_le64(value);
+ pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, NULL);
@@ -102,100 +102,6 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
-static ssize_t pm_mng_profile_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev))
- return -ENODEV;
-
- return sprintf(buf, "%s\n",
- (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
- (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
- "unknown");
-}
-
-static ssize_t pm_mng_profile_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev)) {
- count = -ENODEV;
- goto out;
- }
-
- mutex_lock(&hdev->fd_open_cnt_lock);
-
- if (atomic_read(&hdev->fd_open_cnt) > 0) {
- dev_err(hdev->dev,
- "Can't change PM profile while user process is opened on the device\n");
- count = -EPERM;
- goto unlock_mutex;
- }
-
- if (strncmp("auto", buf, strlen("auto")) == 0) {
- /* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_MANUAL) {
- atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
- hl_device_set_frequency(hdev, PLL_LOW);
- hdev->pm_mng_profile = PM_AUTO;
- }
- } else if (strncmp("manual", buf, strlen("manual")) == 0) {
- /* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_AUTO) {
- flush_delayed_work(&hdev->work_freq);
- hdev->pm_mng_profile = PM_MANUAL;
- }
- } else {
- dev_err(hdev->dev, "value should be auto or manual\n");
- count = -EINVAL;
- goto unlock_mutex;
- }
-
-unlock_mutex:
- mutex_unlock(&hdev->fd_open_cnt_lock);
-out:
- return count;
-}
-
-static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- if (hl_device_disabled_or_in_reset(hdev))
- return -ENODEV;
-
- return sprintf(buf, "%u\n", hdev->high_pll);
-}
-
-static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
- long value;
- int rc;
-
- if (hl_device_disabled_or_in_reset(hdev)) {
- count = -ENODEV;
- goto out;
- }
-
- rc = kstrtoul(buf, 0, &value);
-
- if (rc) {
- count = -EINVAL;
- goto out;
- }
-
- hdev->high_pll = value;
-
-out:
- return count;
-}
-
static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -351,14 +257,6 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", str);
}
-static ssize_t write_open_cnt_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct hl_device *hdev = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
-}
-
static ssize_t soft_reset_cnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -450,18 +348,15 @@ static DEVICE_ATTR_RO(device_type);
static DEVICE_ATTR_RO(fuse_ver);
static DEVICE_ATTR_WO(hard_reset);
static DEVICE_ATTR_RO(hard_reset_cnt);
-static DEVICE_ATTR_RW(high_pll);
static DEVICE_ATTR_RO(infineon_ver);
static DEVICE_ATTR_RW(max_power);
static DEVICE_ATTR_RO(pci_addr);
-static DEVICE_ATTR_RW(pm_mng_profile);
static DEVICE_ATTR_RO(preboot_btl_ver);
static DEVICE_ATTR_WO(soft_reset);
static DEVICE_ATTR_RO(soft_reset_cnt);
static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
-static DEVICE_ATTR_RO(write_open_cnt);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
@@ -477,18 +372,15 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_fuse_ver.attr,
&dev_attr_hard_reset.attr,
&dev_attr_hard_reset_cnt.attr,
- &dev_attr_high_pll.attr,
&dev_attr_infineon_ver.attr,
&dev_attr_max_power.attr,
&dev_attr_pci_addr.attr,
- &dev_attr_pm_mng_profile.attr,
&dev_attr_preboot_btl_ver.attr,
&dev_attr_soft_reset.attr,
&dev_attr_soft_reset_cnt.attr,
&dev_attr_status.attr,
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
- &dev_attr_write_open_cnt.attr,
NULL,
};
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
deleted file mode 100644
index 9d0445a567db..000000000000
--- a/drivers/misc/ioc4.c
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/* This file contains the master driver module for use by SGI IOC4 subdrivers.
- *
- * It allocates any resources shared between multiple subdevices, and
- * provides accessor functions (where needed) and the like for those
- * resources. It also provides a mechanism for the subdevice modules
- * to support loading and unloading.
- *
- * Non-shared resources (e.g. external interrupt A_INT_OUT register page
- * alias, serial port and UART registers) are handled by the subdevice
- * modules themselves.
- *
- * This is all necessary because IOC4 is not implemented as a multi-function
- * PCI device, but an amalgamation of disparate registers for several
- * types of device (ATA, serial, external interrupts). The normal
- * resource management in the kernel doesn't have quite the right interfaces
- * to handle this situation (e.g. multiple modules can't claim the same
- * PCI ID), thus this IOC4 master module.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ioc4.h>
-#include <linux/ktime.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-#include <asm/io.h>
-
-/***************
- * Definitions *
- ***************/
-
-/* Tweakable values */
-
-/* PCI bus speed detection/calibration */
-#define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */
-#define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */
-#define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */
-#define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */
-#define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */
-#define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */
-
-/************************
- * Submodule management *
- ************************/
-
-static DEFINE_MUTEX(ioc4_mutex);
-
-static LIST_HEAD(ioc4_devices);
-static LIST_HEAD(ioc4_submodules);
-
-/* Register an IOC4 submodule */
-int
-ioc4_register_submodule(struct ioc4_submodule *is)
-{
- struct ioc4_driver_data *idd;
-
- mutex_lock(&ioc4_mutex);
- list_add(&is->is_list, &ioc4_submodules);
-
- /* Initialize submodule for each IOC4 */
- if (!is->is_probe)
- goto out;
-
- list_for_each_entry(idd, &ioc4_devices, idd_list) {
- if (is->is_probe(idd)) {
- printk(KERN_WARNING
- "%s: IOC4 submodule %s probe failed "
- "for pci_dev %s",
- __func__, module_name(is->is_owner),
- pci_name(idd->idd_pdev));
- }
- }
- out:
- mutex_unlock(&ioc4_mutex);
- return 0;
-}
-
-/* Unregister an IOC4 submodule */
-void
-ioc4_unregister_submodule(struct ioc4_submodule *is)
-{
- struct ioc4_driver_data *idd;
-
- mutex_lock(&ioc4_mutex);
- list_del(&is->is_list);
-
- /* Remove submodule for each IOC4 */
- if (!is->is_remove)
- goto out;
-
- list_for_each_entry(idd, &ioc4_devices, idd_list) {
- if (is->is_remove(idd)) {
- printk(KERN_WARNING
- "%s: IOC4 submodule %s remove failed "
- "for pci_dev %s.\n",
- __func__, module_name(is->is_owner),
- pci_name(idd->idd_pdev));
- }
- }
- out:
- mutex_unlock(&ioc4_mutex);
-}
-
-/*********************
- * Device management *
- *********************/
-
-#define IOC4_CALIBRATE_LOW_LIMIT \
- (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
-#define IOC4_CALIBRATE_HIGH_LIMIT \
- (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
-#define IOC4_CALIBRATE_DEFAULT \
- (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
-
-#define IOC4_CALIBRATE_END \
- (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
-
-#define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */
-
-/* Determines external interrupt output clock period of the PCI bus an
- * IOC4 is attached to. This value can be used to determine the PCI
- * bus speed.
- *
- * IOC4 has a design feature that various internal timers are derived from
- * the PCI bus clock. This causes IOC4 device drivers to need to take the
- * bus speed into account when setting various register values (e.g. INT_OUT
- * register COUNT field, UART divisors, etc). Since this information is
- * needed by several subdrivers, it is determined by the main IOC4 driver,
- * even though the following code utilizes external interrupt registers
- * to perform the speed calculation.
- */
-static void
-ioc4_clock_calibrate(struct ioc4_driver_data *idd)
-{
- union ioc4_int_out int_out;
- union ioc4_gpcr gpcr;
- unsigned int state, last_state;
- uint64_t start, end, period;
- unsigned int count;
-
- /* Enable output */
- gpcr.raw = 0;
- gpcr.fields.dir = IOC4_GPCR_DIR_0;
- gpcr.fields.int_out_en = 1;
- writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
-
- /* Reset to power-on state */
- writel(0, &idd->idd_misc_regs->int_out.raw);
-
- /* Set up square wave */
- int_out.raw = 0;
- int_out.fields.count = IOC4_CALIBRATE_COUNT;
- int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
- int_out.fields.diag = 0;
- writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
-
- /* Check square wave period averaged over some number of cycles */
- start = ktime_get_ns();
- state = 1; /* make sure the first read isn't a rising edge */
- for (count = 0; count <= IOC4_CALIBRATE_END; count++) {
- do { /* wait for a rising edge */
- last_state = state;
- int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
- state = int_out.fields.int_out;
- } while (last_state || !state);
-
- /* discard the first few cycles */
- if (count == IOC4_CALIBRATE_DISCARD)
- start = ktime_get_ns();
- }
- end = ktime_get_ns();
-
- /* Calculation rearranged to preserve intermediate precision.
- * Logically:
- * 1. "end - start" gives us the measurement period over all
- * the square wave cycles.
- * 2. Divide by number of square wave cycles to get the period
- * of a square wave cycle.
- * 3. Divide by 2*(int_out.fields.count+1), which is the formula
- * by which the IOC4 generates the square wave, to get the
- * period of an IOC4 INT_OUT count.
- */
- period = (end - start) /
- (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
-
- /* Bounds check the result. */
- if (period > IOC4_CALIBRATE_LOW_LIMIT ||
- period < IOC4_CALIBRATE_HIGH_LIMIT) {
- printk(KERN_INFO
- "IOC4 %s: Clock calibration failed. Assuming"
- "PCI clock is %d ns.\n",
- pci_name(idd->idd_pdev),
- IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
- period = IOC4_CALIBRATE_DEFAULT;
- } else {
- u64 ns = period;
-
- do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
- printk(KERN_DEBUG
- "IOC4 %s: PCI clock is %llu ns.\n",
- pci_name(idd->idd_pdev), (unsigned long long)ns);
- }
-
- /* Remember results. We store the extint clock period rather
- * than the PCI clock period so that greater precision is
- * retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get
- * PCI clock period.
- */
- idd->count_period = period;
-}
-
-/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
- * Each brings out different combinations of IOC4 signals, thus.
- * the IOC4 subdrivers need to know to which we're attached.
- *
- * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
- * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
- * If neither is present, it's a PCI-RT.
- */
-static unsigned int
-ioc4_variant(struct ioc4_driver_data *idd)
-{
- struct pci_dev *pdev = NULL;
- int found = 0;
-
- /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
- do {
- pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
- PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
- if (pdev &&
- idd->idd_pdev->bus->number == pdev->bus->number &&
- 3 == PCI_SLOT(pdev->devfn))
- found = 1;
- } while (pdev && !found);
- if (NULL != pdev) {
- pci_dev_put(pdev);
- return IOC4_VARIANT_IO9;
- }
-
- /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
- pdev = NULL;
- do {
- pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
- PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
- if (pdev &&
- idd->idd_pdev->bus->number == pdev->bus->number &&
- 3 == PCI_SLOT(pdev->devfn))
- found = 1;
- } while (pdev && !found);
- if (NULL != pdev) {
- pci_dev_put(pdev);
- return IOC4_VARIANT_IO10;
- }
-
- /* PCI-RT: No SCSI/SATA controller will be present */
- return IOC4_VARIANT_PCI_RT;
-}
-
-static void
-ioc4_load_modules(struct work_struct *work)
-{
- request_module("sgiioc4");
-}
-
-static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
-
-/* Adds a new instance of an IOC4 card */
-static int
-ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
- struct ioc4_driver_data *idd;
- struct ioc4_submodule *is;
- uint32_t pcmd;
- int ret;
-
- /* Enable IOC4 and take ownership of it */
- if ((ret = pci_enable_device(pdev))) {
- printk(KERN_WARNING
- "%s: Failed to enable IOC4 device for pci_dev %s.\n",
- __func__, pci_name(pdev));
- goto out;
- }
- pci_set_master(pdev);
-
- /* Set up per-IOC4 data */
- idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
- if (!idd) {
- printk(KERN_WARNING
- "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
- __func__, pci_name(pdev));
- ret = -ENODEV;
- goto out_idd;
- }
- idd->idd_pdev = pdev;
- idd->idd_pci_id = pci_id;
-
- /* Map IOC4 misc registers. These are shared between subdevices
- * so the main IOC4 module manages them.
- */
- idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
- if (!idd->idd_bar0) {
- printk(KERN_WARNING
- "%s: Unable to find IOC4 misc resource "
- "for pci_dev %s.\n",
- __func__, pci_name(idd->idd_pdev));
- ret = -ENODEV;
- goto out_pci;
- }
- if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
- "ioc4_misc")) {
- printk(KERN_WARNING
- "%s: Unable to request IOC4 misc region "
- "for pci_dev %s.\n",
- __func__, pci_name(idd->idd_pdev));
- ret = -ENODEV;
- goto out_pci;
- }
- idd->idd_misc_regs = ioremap(idd->idd_bar0,
- sizeof(struct ioc4_misc_regs));
- if (!idd->idd_misc_regs) {
- printk(KERN_WARNING
- "%s: Unable to remap IOC4 misc region "
- "for pci_dev %s.\n",
- __func__, pci_name(idd->idd_pdev));
- ret = -ENODEV;
- goto out_misc_region;
- }
-
- /* Failsafe portion of per-IOC4 initialization */
-
- /* Detect card variant */
- idd->idd_variant = ioc4_variant(idd);
- printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
- idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
- idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
- idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
-
- /* Initialize IOC4 */
- pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
- pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
- pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
-
- /* Determine PCI clock */
- ioc4_clock_calibrate(idd);
-
- /* Disable/clear all interrupts. Need to do this here lest
- * one submodule request the shared IOC4 IRQ, but interrupt
- * is generated by a different subdevice.
- */
- /* Disable */
- writel(~0, &idd->idd_misc_regs->other_iec.raw);
- writel(~0, &idd->idd_misc_regs->sio_iec);
- /* Clear (i.e. acknowledge) */
- writel(~0, &idd->idd_misc_regs->other_ir.raw);
- writel(~0, &idd->idd_misc_regs->sio_ir);
-
- /* Track PCI-device specific data */
- idd->idd_serial_data = NULL;
- pci_set_drvdata(idd->idd_pdev, idd);
-
- mutex_lock(&ioc4_mutex);
- list_add_tail(&idd->idd_list, &ioc4_devices);
-
- /* Add this IOC4 to all submodules */
- list_for_each_entry(is, &ioc4_submodules, is_list) {
- if (is->is_probe && is->is_probe(idd)) {
- printk(KERN_WARNING
- "%s: IOC4 submodule 0x%s probe failed "
- "for pci_dev %s.\n",
- __func__, module_name(is->is_owner),
- pci_name(idd->idd_pdev));
- }
- }
- mutex_unlock(&ioc4_mutex);
-
- /* Request sgiioc4 IDE driver on boards that bring that functionality
- * off of IOC4. The root filesystem may be hosted on a drive connected
- * to IOC4, so we need to make sure the sgiioc4 driver is loaded as it
- * won't be picked up by modprobes due to the ioc4 module owning the
- * PCI device.
- */
- if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
- /* Request the module from a work procedure as the modprobe
- * goes out to a userland helper and that will hang if done
- * directly from ioc4_probe().
- */
- printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
- schedule_work(&ioc4_load_modules_work);
- }
-
- return 0;
-
-out_misc_region:
- release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
-out_pci:
- kfree(idd);
-out_idd:
- pci_disable_device(pdev);
-out:
- return ret;
-}
-
-/* Removes a particular instance of an IOC4 card. */
-static void
-ioc4_remove(struct pci_dev *pdev)
-{
- struct ioc4_submodule *is;
- struct ioc4_driver_data *idd;
-
- idd = pci_get_drvdata(pdev);
-
- /* Remove this IOC4 from all submodules */
- mutex_lock(&ioc4_mutex);
- list_for_each_entry(is, &ioc4_submodules, is_list) {
- if (is->is_remove && is->is_remove(idd)) {
- printk(KERN_WARNING
- "%s: IOC4 submodule 0x%s remove failed "
- "for pci_dev %s.\n",
- __func__, module_name(is->is_owner),
- pci_name(idd->idd_pdev));
- }
- }
- mutex_unlock(&ioc4_mutex);
-
- /* Release resources */
- iounmap(idd->idd_misc_regs);
- if (!idd->idd_bar0) {
- printk(KERN_WARNING
- "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
- "Device removal may be incomplete.\n",
- __func__, pci_name(idd->idd_pdev));
- }
- release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
-
- /* Disable IOC4 and relinquish */
- pci_disable_device(pdev);
-
- /* Remove and free driver data */
- mutex_lock(&ioc4_mutex);
- list_del(&idd->idd_list);
- mutex_unlock(&ioc4_mutex);
- kfree(idd);
-}
-
-static const struct pci_device_id ioc4_id_table[] = {
- {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
- PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
- {0}
-};
-
-static struct pci_driver ioc4_driver = {
- .name = "IOC4",
- .id_table = ioc4_id_table,
- .probe = ioc4_probe,
- .remove = ioc4_remove,
-};
-
-MODULE_DEVICE_TABLE(pci, ioc4_id_table);
-
-/*********************
- * Module management *
- *********************/
-
-/* Module load */
-static int __init
-ioc4_init(void)
-{
- return pci_register_driver(&ioc4_driver);
-}
-
-/* Module unload */
-static void __exit
-ioc4_exit(void)
-{
- /* Ensure ioc4_load_modules() has completed before exiting */
- flush_work(&ioc4_load_modules_work);
- pci_unregister_driver(&ioc4_driver);
-}
-
-module_init(ioc4_init);
-module_exit(ioc4_exit);
-
-MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
-MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(ioc4_register_submodule);
-EXPORT_SYMBOL(ioc4_unregister_submodule);
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index fb10eafe9bde..c70b3822013f 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -9,6 +9,7 @@ lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += cfi.o
KASAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_rodata.o := n
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 1606658b9b7e..7284a22b1a09 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -22,7 +22,7 @@ struct lkdtm_list {
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
@@ -75,7 +75,12 @@ static int warn_counter;
void lkdtm_WARNING(void)
{
- WARN(1, "Warning message trigger count: %d\n", warn_counter++);
+ WARN_ON(++warn_counter);
+}
+
+void lkdtm_WARNING_MESSAGE(void)
+{
+ WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
void lkdtm_EXCEPTION(void)
@@ -91,7 +96,7 @@ void lkdtm_LOOP(void)
void lkdtm_EXHAUST_STACK(void)
{
- pr_info("Calling function with %d frame size to depth %d ...\n",
+ pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
recursive_loop(recur_count);
pr_info("FAIL: survived without exhausting stack?!\n");
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
new file mode 100644
index 000000000000..e73ebdbfa806
--- /dev/null
+++ b/drivers/misc/lkdtm/cfi.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to Control Flow Integrity.
+ */
+#include "lkdtm.h"
+
+static int called_count;
+
+/* Function taking one argument, without a return value. */
+static noinline void lkdtm_increment_void(int *counter)
+{
+ (*counter)++;
+}
+
+/* Function taking one argument, returning int. */
+static noinline int lkdtm_increment_int(int *counter)
+{
+ (*counter)++;
+
+ return *counter;
+}
+/*
+ * This tries to call an indirect function with a mismatched prototype.
+ */
+void lkdtm_CFI_FORWARD_PROTO(void)
+{
+ /*
+ * Matches lkdtm_increment_void()'s prototype, but not
+ * lkdtm_increment_int()'s prototype.
+ */
+ void (*func)(int *);
+
+ pr_info("Calling matched prototype ...\n");
+ func = lkdtm_increment_void;
+ func(&called_count);
+
+ pr_info("Calling mismatched prototype ...\n");
+ func = (void *)lkdtm_increment_int;
+ func(&called_count);
+
+ pr_info("Fail: survived mismatched prototype function call!\n");
+}
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 66ae6b2a6950..cbc4c9045a99 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -104,6 +104,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(EXHAUST_STACK),
@@ -169,6 +170,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL),
CRASHTYPE(USERCOPY_KERNEL_DS),
CRASHTYPE(STACKLEAK_ERASING),
+ CRASHTYPE(CFI_FORWARD_PROTO),
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 6a284a87a037..ab446e0bde97 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -11,6 +11,7 @@ void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
void lkdtm_BUG(void);
void lkdtm_WARNING(void);
+void lkdtm_WARNING_MESSAGE(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
void lkdtm_EXHAUST_STACK(void);
@@ -95,4 +96,7 @@ void lkdtm_USERCOPY_KERNEL_DS(void);
/* lkdtm_stackleak.c */
void lkdtm_STACKLEAK_ERASING(void);
+/* cfi.c */
+void lkdtm_CFI_FORWARD_PROTO(void);
+
#endif
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index d74b182e19f3..77f7dff7098d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,11 @@
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+
+#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
+#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index f894d1f8a53e..7310b476323c 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -858,13 +858,6 @@ static ssize_t dev_state_show(struct device *device,
}
static DEVICE_ATTR_RO(dev_state);
-static int match_devt(struct device *dev, const void *data)
-{
- const dev_t *devt = data;
-
- return dev->devt == *devt;
-}
-
/**
* dev_set_devstate: set to new device state and notify sysfs file.
*
@@ -880,7 +873,7 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
dev->dev_state = state;
- clsdev = class_find_device(mei_class, NULL, &dev->cdev.dev, match_devt);
+ clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
if (clsdev) {
sysfs_notify(&clsdev->kobj, NULL, "dev_state");
put_device(clsdev);
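The mei change above drops an open-coded devt match callback in favour of class_find_device_by_devt(). A sketch of the equivalence, assuming a driver that holds a class pointer; the lookup helper names are illustrative, and in both cases the returned device reference must still be dropped with put_device(), as the hunk shows:

#include <linux/device.h>

/* Before: the caller supplies the match function. */
static int match_devt(struct device *dev, const void *data)
{
        const dev_t *devt = data;

        return dev->devt == *devt;
}

static struct device *lookup_old(struct class *cls, dev_t devt)
{
        return class_find_device(cls, NULL, &devt, match_devt);
}

/* After: the helper carries the devt comparison itself. */
static struct device *lookup_new(struct class *cls, dev_t devt)
{
        return class_find_device_by_devt(cls, devt);
}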
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7a2b3545a7f9..d5a92c6eadb3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
@@ -378,12 +383,11 @@ static int mei_me_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
- dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");
+ dev_dbg(device, "rpm: me: runtime_idle\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
@@ -394,13 +398,12 @@ static int mei_me_pm_runtime_idle(struct device *device)
static int mei_me_pm_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");
+ dev_dbg(device, "rpm: me: runtime suspend\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -413,7 +416,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);
+ dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);
if (ret && ret != -EAGAIN)
schedule_work(&dev->reset_work);
@@ -423,13 +426,12 @@ static int mei_me_pm_runtime_suspend(struct device *device)
static int mei_me_pm_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");
+ dev_dbg(device, "rpm: me: runtime resume\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -439,7 +441,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);
+ dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
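The runtime-PM callbacks above (and the matching ones in pci-txe.c below) stop converting the generic struct device into a pci_dev just to reach the driver data: pci_get_drvdata() is a thin wrapper around dev_get_drvdata(), so the detour adds nothing. A sketch of the two equivalent paths; the helper names are illustrative:

#include <linux/pci.h>

static void *get_drvdata_old(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);

        return pci_get_drvdata(pdev);   /* == dev_get_drvdata(&pdev->dev) */
}

static void *get_drvdata_new(struct device *device)
{
        return dev_get_drvdata(device); /* same pointer, no PCI detour */
}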
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2e37fc2e0fa8..f1c16a587495 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -276,12 +276,11 @@ static int mei_txe_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
- dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");
+ dev_dbg(device, "rpm: txe: runtime_idle\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
if (mei_write_is_idle(dev))
@@ -291,13 +290,12 @@ static int mei_txe_pm_runtime_idle(struct device *device)
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");
+ dev_dbg(device, "rpm: txe: runtime suspend\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -310,7 +308,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
/* keep irq on we are staying in D0 */
- dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
+ dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
mutex_unlock(&dev->device_lock);
@@ -322,13 +320,12 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
static int mei_txe_pm_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev;
int ret;
- dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");
+ dev_dbg(device, "rpm: txe: runtime resume\n");
- dev = pci_get_drvdata(pdev);
+ dev = dev_get_drvdata(device);
if (!dev)
return -ENODEV;
@@ -340,7 +337,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
mutex_unlock(&dev->device_lock);
- dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);
+ dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
if (ret)
schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 266ffb6f6c44..c8bff2916d3d 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -237,6 +237,9 @@ static int __init mic_probe(struct platform_device *pdev)
mdrv->dev = &pdev->dev;
snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name);
+ /* FIXME: use dma_set_mask_and_coherent() and check result */
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
mdev->mmio.pa = MIC_X100_MMIO_BASE;
mdev->mmio.len = MIC_X100_MMIO_LEN;
mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
@@ -282,18 +285,6 @@ static void mic_platform_shutdown(struct platform_device *pdev)
mic_remove(pdev);
}
-static u64 mic_dma_mask = DMA_BIT_MASK(64);
-
-static struct platform_device mic_platform_dev = {
- .name = mic_driver_name,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .dma_mask = &mic_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- },
-};
-
static struct platform_driver __refdata mic_platform_driver = {
.probe = mic_probe,
.remove = mic_remove,
@@ -303,6 +294,8 @@ static struct platform_driver __refdata mic_platform_driver = {
},
};
+static struct platform_device *mic_platform_dev;
+
static int __init mic_init(void)
{
int ret;
@@ -316,9 +309,12 @@ static int __init mic_init(void)
request_module("mic_x100_dma");
mic_init_card_debugfs();
- ret = platform_device_register(&mic_platform_dev);
+
+ mic_platform_dev = platform_device_register_simple(mic_driver_name,
+ 0, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(mic_platform_dev);
if (ret) {
- pr_err("platform_device_register ret %d\n", ret);
+ pr_err("platform_device_register_full ret %d\n", ret);
goto cleanup_debugfs;
}
ret = platform_driver_register(&mic_platform_driver);
@@ -329,7 +325,7 @@ static int __init mic_init(void)
return ret;
device_unregister:
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
cleanup_debugfs:
mic_exit_card_debugfs();
done:
@@ -339,7 +335,7 @@ done:
static void __exit mic_exit(void)
{
platform_driver_unregister(&mic_platform_driver);
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
mic_exit_card_debugfs();
}
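The mic_x100 conversion above replaces the static platform_device (with its hand-rolled dma_mask fields) by platform_device_register_simple() at init time, moving the DMA mask setup into probe. A condensed sketch of that registration pattern under the same assumptions, with error handling via PTR_ERR_OR_ZERO() as in the patch; the "example" name and the fragmentary probe function are illustrative only:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static struct platform_device *example_pdev;

static int __init example_init(void)
{
        /* Allocates and registers the device in one call; returns ERR_PTR on failure. */
        example_pdev = platform_device_register_simple("example", 0, NULL, 0);

        return PTR_ERR_OR_ZERO(example_pdev);  /* ERR_PTR() -> errno, else 0 */
}

static int example_probe(struct platform_device *pdev)
{
        /* DMA masks now set in probe; the coerce variant also forces dev->dma_mask. */
        return dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}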
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index d3837f8a5ba0..0b9dfe1cc06c 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -156,9 +156,8 @@ static inline int scif_verify_epd(struct scif_endpt *ep)
static inline int scif_anon_inode_getfile(scif_epd_t epd)
{
epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
- if (IS_ERR(epd->anon))
- return PTR_ERR(epd->anon);
- return 0;
+
+ return PTR_ERR_OR_ZERO(epd->anon);
}
static inline void scif_anon_inode_fput(scif_epd_t epd)
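The scif_anon_inode_getfile() change above collapses the usual IS_ERR()/PTR_ERR() sequence into PTR_ERR_OR_ZERO(). A sketch of the equivalence; the helper names are illustrative:

#include <linux/err.h>

/* Open-coded form. */
static int check_old(void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        return 0;
}

/* Equivalent helper: returns the errno for an ERR_PTR, otherwise 0. */
static int check_new(void *ptr)
{
        return PTR_ERR_OR_ZERO(ptr);
}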
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index bbb622c19c06..34c55a4045af 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -4,17 +4,10 @@
#
obj-$(CONFIG_SGI_XP) += xp.o
-xp-y := xp_main.o
-xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
-xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o
-xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
-xp-$(CONFIG_X86_64) += xp_uv.o
+xp-y := xp_main.o xp_uv.o
obj-$(CONFIG_SGI_XP) += xpc.o
-xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
-xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
-xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o
-xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
-xpc-$(CONFIG_X86_64) += xpc_uv.o
+xpc-y := xpc_main.o xpc_channel.o xpc_partition.o \
+ xpc_uv.o
obj-$(CONFIG_SGI_XP) += xpnet.o
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index b8069eec18cb..06469b12aced 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -24,23 +24,6 @@
#define is_uv() 0
#endif
-#if defined CONFIG_IA64
-#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
-#define is_shub() ia64_platform_is("sn2")
-#endif
-
-#ifndef is_shub1
-#define is_shub1() 0
-#endif
-
-#ifndef is_shub2
-#define is_shub2() 0
-#endif
-
-#ifndef is_shub
-#define is_shub() 0
-#endif
-
#ifdef USE_DBUG_ON
#define DBUG_ON(condition) BUG_ON(condition)
#else
@@ -360,9 +343,7 @@ extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);
extern struct device *xp;
-extern enum xp_retval xp_init_sn2(void);
extern enum xp_retval xp_init_uv(void);
-extern void xp_exit_sn2(void);
extern void xp_exit_uv(void);
#endif /* _DRIVERS_MISC_SGIXP_XP_H */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 6d7f557fd1c1..5fd94d836070 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -233,9 +233,7 @@ xp_init(void)
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
- if (is_shub())
- ret = xp_init_sn2();
- else if (is_uv())
+ if (is_uv())
ret = xp_init_uv();
else
ret = 0;
@@ -251,9 +249,7 @@ module_init(xp_init);
void __exit
xp_exit(void)
{
- if (is_shub())
- xp_exit_sn2();
- else if (is_uv())
+ if (is_uv())
xp_exit_uv();
}
diff --git a/drivers/misc/sgi-xp/xp_nofault.S b/drivers/misc/sgi-xp/xp_nofault.S
deleted file mode 100644
index e38d43319429..000000000000
--- a/drivers/misc/sgi-xp/xp_nofault.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/*
- * The xp_nofault_PIOR function takes a pointer to a remote PIO register
- * and attempts to load and consume a value from it. This function
- * will be registered as a nofault code block. In the event that the
- * PIO read fails, the MCA handler will force the error to look
- * corrected and vector to the xp_error_PIOR which will return an error.
- *
- * The definition of "consumption" and the time it takes for an MCA
- * to surface is processor implementation specific. This code
- * is sufficient on Itanium through the Montvale processor family.
- * It may need to be adjusted for future processor implementations.
- *
- * extern int xp_nofault_PIOR(void *remote_register);
- */
-
- .global xp_nofault_PIOR
-xp_nofault_PIOR:
- mov r8=r0 // Stage a success return value
- ld8.acq r9=[r32];; // PIO Read the specified register
- adds r9=1,r9;; // Add to force consumption
- srlz.i;; // Allow time for MCA to surface
- br.ret.sptk.many b0;; // Return success
-
- .global xp_error_PIOR
-xp_error_PIOR:
- mov r8=1 // Return value of 1
- br.ret.sptk.many b0;; // Return failure
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
deleted file mode 100644
index d8e463f87241..000000000000
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/*
- * Cross Partition (XP) sn2-based functions.
- *
- * Architecture specific implementation of common functions.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/sn_sal.h>
-#include "xp.h"
-
-/*
- * The export of xp_nofault_PIOR needs to happen here since it is defined
- * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
- * defined here.
- */
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
-
-u64 xp_nofault_PIOR_target;
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
-
-/*
- * Register a nofault code region which performs a cross-partition PIO read.
- * If the PIO read times out, the MCA handler will consume the error and
- * return to a kernel-provided instruction to indicate an error. This PIO read
- * exists because it is guaranteed to timeout if the destination is down
- * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
- * which unfortunately we have to work around).
- */
-static enum xp_retval
-xp_register_nofault_code_sn2(void)
-{
- int ret;
- u64 func_addr;
- u64 err_func_addr;
-
- func_addr = *(u64 *)xp_nofault_PIOR;
- err_func_addr = *(u64 *)xp_error_PIOR;
- ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
- 1, 1);
- if (ret != 0) {
- dev_err(xp, "can't register nofault code, error=%d\n", ret);
- return xpSalError;
- }
- /*
- * Setup the nofault PIO read target. (There is no special reason why
- * SH_IPI_ACCESS was selected.)
- */
- if (is_shub1())
- xp_nofault_PIOR_target = SH1_IPI_ACCESS;
- else if (is_shub2())
- xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
-
- return xpSuccess;
-}
-
-static void
-xp_unregister_nofault_code_sn2(void)
-{
- u64 func_addr = *(u64 *)xp_nofault_PIOR;
- u64 err_func_addr = *(u64 *)xp_error_PIOR;
-
- /* unregister the PIO read nofault code region */
- (void)sn_register_nofault_code(func_addr, err_func_addr,
- err_func_addr, 1, 0);
-}
-
-/*
- * Convert a virtual memory address to a physical memory address.
- */
-static unsigned long
-xp_pa_sn2(void *addr)
-{
- return __pa(addr);
-}
-
-/*
- * Convert a global physical to a socket physical address.
- */
-static unsigned long
-xp_socket_pa_sn2(unsigned long gpa)
-{
- return gpa;
-}
-
-/*
- * Wrapper for bte_copy().
- *
- * dst_pa - physical address of the destination of the transfer.
- * src_pa - physical address of the source of the transfer.
- * len - number of bytes to transfer from source to destination.
- *
- * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
- */
-static enum xp_retval
-xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
- size_t len)
-{
- bte_result_t ret;
-
- ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
- if (ret == BTE_SUCCESS)
- return xpSuccess;
-
- if (is_shub2()) {
- dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
- "0x%016lx src_pa=0x%016lx len=%ld\\n", ret, dst_pa,
- src_pa, len);
- } else {
- dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
- "src_pa=0x%016lx len=%ld\\n", ret, dst_pa, src_pa, len);
- }
-
- return xpBteCopyError;
-}
-
-static int
-xp_cpu_to_nasid_sn2(int cpuid)
-{
- return cpuid_to_nasid(cpuid);
-}
-
-static enum xp_retval
-xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
-{
- u64 nasid_array = 0;
- int ret;
-
- ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
- &nasid_array);
- if (ret != 0) {
- dev_err(xp, "sn_change_memprotect(,, "
- "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
- return xpSalError;
- }
- return xpSuccess;
-}
-
-static enum xp_retval
-xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
-{
- u64 nasid_array = 0;
- int ret;
-
- ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
- &nasid_array);
- if (ret != 0) {
- dev_err(xp, "sn_change_memprotect(,, "
- "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
- return xpSalError;
- }
- return xpSuccess;
-}
-
-enum xp_retval
-xp_init_sn2(void)
-{
- BUG_ON(!is_shub());
-
- xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
- xp_partition_id = sn_partition_id;
- xp_region_size = sn_region_size;
-
- xp_pa = xp_pa_sn2;
- xp_socket_pa = xp_socket_pa_sn2;
- xp_remote_memcpy = xp_remote_memcpy_sn2;
- xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
- xp_expand_memprotect = xp_expand_memprotect_sn2;
- xp_restrict_memprotect = xp_restrict_memprotect_sn2;
-
- return xp_register_nofault_code_sn2();
-}
-
-void
-xp_exit_sn2(void)
-{
- BUG_ON(!is_shub());
-
- xp_unregister_nofault_code_sn2();
-}
-
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index a0d093274dc0..f15a9f2ac1dd 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -17,7 +17,7 @@
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/grukservices.h"
@@ -99,7 +99,7 @@ xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
return xpBiosError;
}
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
u64 nasid_array;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
@@ -129,7 +129,7 @@ xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
return xpBiosError;
}
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
u64 nasid_array;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
@@ -151,9 +151,10 @@ xp_init_uv(void)
BUG_ON(!is_uv());
xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+#ifdef CONFIG_X86
xp_partition_id = sn_partition_id;
xp_region_size = sn_region_size;
-
+#endif
xp_pa = xp_pa_uv;
xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index b94d5f767703..71db60edff65 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -71,14 +71,10 @@
* 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids
* and xpc_mach_nasids.)
*
- * vars (ia64-sn2 only)
- * vars part (ia64-sn2 only)
- *
* Immediately following the mach_nasids mask are the XPC variables
* required by other partitions. First are those that are generic to all
* partitions (vars), followed on the next available cacheline by those
* which are partition specific (vars part). These are setup by XPC.
- * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
*
* Note: Until 'ts_jiffies' is set non-zero, the partition XPC code has not been
* initialized.
@@ -93,9 +89,6 @@ struct xpc_rsvd_page {
unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
union {
struct {
- unsigned long vars_pa; /* phys addr */
- } sn2;
- struct {
unsigned long heartbeat_gpa; /* phys addr */
unsigned long activate_gru_mq_desc_gpa; /* phys addr */
} uv;
@@ -106,84 +99,14 @@ struct xpc_rsvd_page {
#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
-/*
- * Define the structures by which XPC variables can be exported to other
- * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
- */
-
-/*
- * The following structure describes the partition generic variables
- * needed by other partitions in order to properly initialize.
- *
- * struct xpc_vars version number also applies to struct xpc_vars_part.
- * Changes to either structure and/or related functionality should be
- * reflected by incrementing either the major or minor version numbers
- * of struct xpc_vars.
- */
-struct xpc_vars_sn2 {
- u8 version;
- u64 heartbeat;
- DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
- u64 heartbeat_offline; /* if 0, heartbeat should be changing */
- int activate_IRQ_nasid;
- int activate_IRQ_phys_cpuid;
- unsigned long vars_part_pa;
- unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
- struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */
-};
-
-#define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
-
-/*
- * The following structure describes the per partition specific variables.
- *
- * An array of these structures, one per partition, will be defined. As a
- * partition becomes active XPC will copy the array entry corresponding to
- * itself from that partition. It is desirable that the size of this structure
- * evenly divides into a 128-byte cacheline, such that none of the entries in
- * this array crosses a 128-byte cacheline boundary. As it is now, each entry
- * occupies 64-bytes.
- */
-struct xpc_vars_part_sn2 {
- u64 magic;
-
- unsigned long openclose_args_pa; /* phys addr of open and close args */
- unsigned long GPs_pa; /* physical address of Get/Put values */
-
- unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
-
- int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
- int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
-
- u8 nchannels; /* #of defined channels supported */
-
- u8 reserved[23]; /* pad to a full 64 bytes */
-};
-
-/*
- * The vars_part MAGIC numbers play a part in the first contact protocol.
- *
- * MAGIC1 indicates that the per partition specific variables for a remote
- * partition have been initialized by this partition.
- *
- * MAGIC2 indicates that this partition has pulled the remote partititions
- * per partition variables that pertain to this partition.
- */
-#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
-
/* the reserved page sizes and offsets */
#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
-#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2))
#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \
XPC_RP_HEADER_SIZE))
#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \
xpc_nasid_mask_nlongs)
-#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \
- (XPC_RP_MACH_NASIDS(_rp) + \
- xpc_nasid_mask_nlongs))
/*
@@ -298,17 +221,6 @@ struct xpc_activate_mq_msg_chctl_opencomplete_uv {
#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff)
/*
- * Define a Get/Put value pair (pointers) used with a message queue.
- */
-struct xpc_gp_sn2 {
- s64 get; /* Get value */
- s64 put; /* Put value */
-};
-
-#define XPC_GP_SIZE \
- L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS)
-
-/*
* Define a structure that contains arguments associated with opening and
* closing a channel.
*/
@@ -341,30 +253,6 @@ struct xpc_fifo_head_uv {
};
/*
- * Define a sn2 styled message.
- *
- * A user-defined message resides in the payload area. The max size of the
- * payload is defined by the user via xpc_connect().
- *
- * The size of a message entry (within a message queue) must be a 128-byte
- * cacheline sized multiple in order to facilitate the BTE transfer of messages
- * from one message queue to another.
- */
-struct xpc_msg_sn2 {
- u8 flags; /* FOR XPC INTERNAL USE ONLY */
- u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
- s64 number; /* FOR XPC INTERNAL USE ONLY */
-
- u64 payload; /* user defined portion of message */
-};
-
-/* struct xpc_msg_sn2 flags */
-
-#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */
-#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */
-#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */
-
-/*
* The format of a uv XPC notify_mq GRU message is as follows:
*
* A user-defined message resides in the payload area. The max size of the
@@ -390,20 +278,6 @@ struct xpc_notify_mq_msg_uv {
unsigned long payload;
};
-/*
- * Define sn2's notify entry.
- *
- * This is used to notify a message's sender that their message was received
- * and consumed by the intended recipient.
- */
-struct xpc_notify_sn2 {
- u8 type; /* type of notification */
-
- /* the following two fields are only used if type == XPC_N_CALL */
- xpc_notify_func func; /* user's notify function */
- void *key; /* pointer to user's key */
-};
-
/* struct xpc_notify_sn2 type of notification */
#define XPC_N_CALL 0x01 /* notify function provided by user */
@@ -431,102 +305,6 @@ struct xpc_send_msg_slot_uv {
* of these structures for each potential channel connection to that partition.
*/
-/*
- * The following is sn2 only.
- *
- * Each channel structure manages two message queues (circular buffers).
- * They are allocated at the time a channel connection is made. One of
- * these message queues (local_msgqueue) holds the locally created messages
- * that are destined for the remote partition. The other of these message
- * queues (remote_msgqueue) is a locally cached copy of the remote partition's
- * own local_msgqueue.
- *
- * The following is a description of the Get/Put pointers used to manage these
- * two message queues. Consider the local_msgqueue to be on one partition
- * and the remote_msgqueue to be its cached copy on another partition. A
- * description of what each of the lettered areas contains is included.
- *
- *
- * local_msgqueue remote_msgqueue
- *
- * |/////////| |/////////|
- * w_remote_GP.get --> +---------+ |/////////|
- * | F | |/////////|
- * remote_GP.get --> +---------+ +---------+ <-- local_GP->get
- * | | | |
- * | | | E |
- * | | | |
- * | | +---------+ <-- w_local_GP.get
- * | B | |/////////|
- * | | |////D////|
- * | | |/////////|
- * | | +---------+ <-- w_remote_GP.put
- * | | |////C////|
- * local_GP->put --> +---------+ +---------+ <-- remote_GP.put
- * | | |/////////|
- * | A | |/////////|
- * | | |/////////|
- * w_local_GP.put --> +---------+ |/////////|
- * |/////////| |/////////|
- *
- *
- * ( remote_GP.[get|put] are cached copies of the remote
- * partition's local_GP->[get|put], and thus their values can
- * lag behind their counterparts on the remote partition. )
- *
- *
- * A - Messages that have been allocated, but have not yet been sent to the
- * remote partition.
- *
- * B - Messages that have been sent, but have not yet been acknowledged by the
- * remote partition as having been received.
- *
- * C - Area that needs to be prepared for the copying of sent messages, by
- * the clearing of the message flags of any previously received messages.
- *
- * D - Area into which sent messages are to be copied from the remote
- * partition's local_msgqueue and then delivered to their intended
- * recipients. [ To allow for a multi-message copy, another pointer
- * (next_msg_to_pull) has been added to keep track of the next message
- * number needing to be copied (pulled). It chases after w_remote_GP.put.
- * Any messages lying between w_local_GP.get and next_msg_to_pull have
- * been copied and are ready to be delivered. ]
- *
- * E - Messages that have been copied and delivered, but have not yet been
- * acknowledged by the recipient as having been received.
- *
- * F - Messages that have been acknowledged, but XPC has not yet notified the
- * sender that the message was received by its intended recipient.
- * This is also an area that needs to be prepared for the allocating of
- * new messages, by the clearing of the message flags of the acknowledged
- * messages.
- */
-
-struct xpc_channel_sn2 {
- struct xpc_openclose_args *local_openclose_args; /* args passed on */
- /* opening or closing of channel */
-
- void *local_msgqueue_base; /* base address of kmalloc'd space */
- struct xpc_msg_sn2 *local_msgqueue; /* local message queue */
- void *remote_msgqueue_base; /* base address of kmalloc'd space */
- struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */
- /* partition's local message queue */
- unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
- /* local message queue */
-
- struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */
-
- /* various flavors of local and remote Get/Put values */
-
- struct xpc_gp_sn2 *local_GP; /* local Get/Put values */
- struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */
- struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */
- struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */
- s64 next_msg_to_pull; /* Put value of next msg to pull */
-
- struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
-};
-
struct xpc_channel_uv {
void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */
/* gru mq descriptor */
@@ -579,7 +357,6 @@ struct xpc_channel {
wait_queue_head_t idle_wq; /* idle kthread wait queue */
union {
- struct xpc_channel_sn2 sn2;
struct xpc_channel_uv uv;
} sn;
@@ -666,43 +443,6 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
return 0;
}
-/*
- * Manage channels on a partition basis. There is one of these structures
- * for each partition (a partition will never utilize the structure that
- * represents itself).
- */
-
-struct xpc_partition_sn2 {
- unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
- int activate_IRQ_nasid; /* active partition's act/deact nasid */
- int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */
-
- unsigned long remote_vars_pa; /* phys addr of partition's vars */
- unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
- u8 remote_vars_version; /* version# of partition's vars */
-
- void *local_GPs_base; /* base address of kmalloc'd space */
- struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */
- void *remote_GPs_base; /* base address of kmalloc'd space */
- struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */
- /* Get/Put values */
- unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
- /* Get/Put values */
-
- void *local_openclose_args_base; /* base address of kmalloc'd space */
- struct xpc_openclose_args *local_openclose_args; /* local's args */
- unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
-
- int notify_IRQ_nasid; /* nasid of where to send notify IRQs */
- int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
- char notify_IRQ_owner[8]; /* notify IRQ's owner's name */
-
- struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
- struct amo *local_chctl_amo_va; /* address of chctl flags' amo */
-
- struct timer_list dropped_notify_IRQ_timer; /* dropped IRQ timer */
-};
-
struct xpc_partition_uv {
unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
@@ -774,7 +514,6 @@ struct xpc_partition {
wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
union {
- struct xpc_partition_sn2 sn2;
struct xpc_partition_uv uv;
} sn;
@@ -854,14 +593,6 @@ struct xpc_arch_operations {
#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
-/*
- * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
- * following interval #of seconds before checking for dropped notify IRQs.
- * These can occur whenever an IRQ's associated amo write doesn't complete
- * until after the IRQ was received.
- */
-#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL (0.25 * HZ)
-
/* number of seconds to wait for other partitions to disengage */
#define XPC_DISENGAGE_DEFAULT_TIMELIMIT 90
@@ -888,10 +619,6 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);
-/* found in xpc_sn2.c */
-extern int xpc_init_sn2(void);
-extern void xpc_exit_sn2(void);
-
/* found in xpc_uv.c */
extern int xpc_init_uv(void);
extern void xpc_exit_uv(void);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 83fc748a91a7..79a963105983 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -279,13 +279,6 @@ xpc_hb_checker(void *ignore)
dev_dbg(xpc_part, "checking remote heartbeats\n");
xpc_check_remote_hb();
-
- /*
- * On sn2 we need to periodically recheck to ensure no
- * IRQ/amo pairs have been missed.
- */
- if (is_shub())
- force_IRQ = 1;
}
/* check for outstanding IRQs */
@@ -1050,9 +1043,7 @@ xpc_do_exit(enum xp_retval reason)
xpc_teardown_partitions();
- if (is_shub())
- xpc_exit_sn2();
- else if (is_uv())
+ if (is_uv())
xpc_exit_uv();
}
@@ -1235,21 +1226,7 @@ xpc_init(void)
dev_set_name(xpc_part, "part");
dev_set_name(xpc_chan, "chan");
- if (is_shub()) {
- /*
- * The ia64-sn2 architecture supports at most 64 partitions.
- * And the inability to unregister remote amos restricts us
- * further to only support exactly 64 partitions on this
- * architecture, no less.
- */
- if (xp_max_npartitions != 64) {
- dev_err(xpc_part, "max #of partitions not set to 64\n");
- ret = -EINVAL;
- } else {
- ret = xpc_init_sn2();
- }
-
- } else if (is_uv()) {
+ if (is_uv()) {
ret = xpc_init_uv();
} else {
@@ -1335,9 +1312,7 @@ out_2:
xpc_teardown_partitions();
out_1:
- if (is_shub())
- xpc_exit_sn2();
- else if (is_uv())
+ if (is_uv())
xpc_exit_uv();
return ret;
}
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 782ce95d3f17..21a04bc97d40 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -93,10 +93,6 @@ xpc_get_rsvd_page_pa(int nasid)
if (ret != xpNeedMoreInfo)
break;
- /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
- if (is_shub())
- len = L1_CACHE_ALIGN(len);
-
if (len > buf_len) {
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
@@ -452,7 +448,6 @@ xpc_discovery(void)
case 32:
max_regions *= 2;
region_size = 16;
- DBUG_ON(!is_shub2());
}
}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
deleted file mode 100644
index 0ae69b9390ce..000000000000
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ /dev/null
@@ -1,2459 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/*
- * Cross Partition Communication (XPC) sn2-based functions.
- *
- * Architecture specific implementation of common functions.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uncached.h>
-#include <asm/sn/mspec.h>
-#include <asm/sn/sn_sal.h>
-#include "xpc.h"
-
-/*
- * Define the number of u64s required to represent all the C-brick nasids
- * as a bitmap. The cross-partition kernel modules deal only with
- * C-brick nasids, thus the need for bitmaps which don't account for
- * odd-numbered (non C-brick) nasids.
- */
-#define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2)
-#define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
-#define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
-
-/*
- * Memory for XPC's amo variables is allocated by the MSPEC driver. These
- * pages are located in the lowest granule. The lowest granule uses 4k pages
- * for cached references and an alternate TLB handler to never provide a
- * cacheable mapping for the entire region. This will prevent speculative
- * reading of cached copies of our lines from being issued which will cause
- * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
- * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify
- * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote
- * partitions (i.e., XPCs) consider themselves currently engaged with the
- * local XPC and 1 amo variable to request partition deactivation.
- */
-#define XPC_NOTIFY_IRQ_AMOS_SN2 0
-#define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \
- XP_MAX_NPARTITIONS_SN2)
-#define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \
- XP_NASID_MASK_WORDS_SN2)
-#define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
-
-/*
- * Buffer used to store a local copy of portions of a remote partition's
- * reserved page (either its header and part_nasids mask, or its vars).
- */
-static void *xpc_remote_copy_buffer_base_sn2;
-static char *xpc_remote_copy_buffer_sn2;
-
-static struct xpc_vars_sn2 *xpc_vars_sn2;
-static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
-
-static int
-xpc_setup_partitions_sn2(void)
-{
- /* nothing needs to be done */
- return 0;
-}
-
-static void
-xpc_teardown_partitions_sn2(void)
-{
- /* nothing needs to be done */
-}
-
-/* SH_IPI_ACCESS shub register value on startup */
-static u64 xpc_sh1_IPI_access_sn2;
-static u64 xpc_sh2_IPI_access0_sn2;
-static u64 xpc_sh2_IPI_access1_sn2;
-static u64 xpc_sh2_IPI_access2_sn2;
-static u64 xpc_sh2_IPI_access3_sn2;
-
-/*
- * Change protections to allow IPI operations.
- */
-static void
-xpc_allow_IPI_ops_sn2(void)
-{
- int node;
- int nasid;
-
- /* !!! The following should get moved into SAL. */
- if (is_shub2()) {
- xpc_sh2_IPI_access0_sn2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
- xpc_sh2_IPI_access1_sn2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
- xpc_sh2_IPI_access2_sn2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
- xpc_sh2_IPI_access3_sn2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- -1UL);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- -1UL);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- -1UL);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- -1UL);
- }
- } else {
- xpc_sh1_IPI_access_sn2 =
- (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- -1UL);
- }
- }
-}
-
-/*
- * Restrict protections to disallow IPI operations.
- */
-static void
-xpc_disallow_IPI_ops_sn2(void)
-{
- int node;
- int nasid;
-
- /* !!! The following should get moved into SAL. */
- if (is_shub2()) {
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
- xpc_sh2_IPI_access0_sn2);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
- xpc_sh2_IPI_access1_sn2);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
- xpc_sh2_IPI_access2_sn2);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
- xpc_sh2_IPI_access3_sn2);
- }
- } else {
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
- xpc_sh1_IPI_access_sn2);
- }
- }
-}
-
-/*
- * The following set of functions are used for the sending and receiving of
- * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
- * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
- * is associated with channel activity (SGI_XPC_NOTIFY).
- */
-
-static u64
-xpc_receive_IRQ_amo_sn2(struct amo *amo)
-{
- return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
-}
-
-static enum xp_retval
-xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
- int vector)
-{
- int ret = 0;
- unsigned long irq_flags;
-
- local_irq_save(irq_flags);
-
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
- sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IRQs and amos to it until the heartbeat times out.
- */
- ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-
- return (ret == 0) ? xpSuccess : xpPioReadError;
-}
-
-static struct amo *
-xpc_init_IRQ_amo_sn2(int index)
-{
- struct amo *amo = xpc_vars_sn2->amos_page + index;
-
- (void)xpc_receive_IRQ_amo_sn2(amo); /* clear amo variable */
- return amo;
-}
-
-/*
- * Functions associated with SGI_XPC_ACTIVATE IRQ.
- */
-
-/*
- * Notify the heartbeat check thread that an activate IRQ has been received.
- */
-static irqreturn_t
-xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
- xpc_activate_IRQ_rcvd++;
- spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
-
- wake_up_interruptible(&xpc_activate_IRQ_wq);
- return IRQ_HANDLED;
-}
-
-/*
- * Flag the appropriate amo variable and send an IRQ to the specified node.
- */
-static void
-xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
- int to_nasid, int to_phys_cpuid)
-{
- struct amo *amos = (struct amo *)__va(amos_page_pa +
- (XPC_ACTIVATE_IRQ_AMOS_SN2 *
- sizeof(struct amo)));
-
- (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
- BIT_MASK(from_nasid / 2), to_nasid,
- to_phys_cpuid, SGI_XPC_ACTIVATE);
-}
-
-static void
-xpc_send_local_activate_IRQ_sn2(int from_nasid)
-{
- unsigned long irq_flags;
- struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
- (XPC_ACTIVATE_IRQ_AMOS_SN2 *
- sizeof(struct amo)));
-
- /* fake the sending and receipt of an activate IRQ from remote nasid */
- FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
- FETCHOP_OR, BIT_MASK(from_nasid / 2));
-
- spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
- xpc_activate_IRQ_rcvd++;
- spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
-
- wake_up_interruptible(&xpc_activate_IRQ_wq);
-}
-
-/*
- * Functions associated with SGI_XPC_NOTIFY IRQ.
- */
-
-/*
- * Check to see if any chctl flags were sent from the specified partition.
- */
-static void
-xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
-{
- union xpc_channel_ctl_flags chctl;
- unsigned long irq_flags;
-
- chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
- local_chctl_amo_va);
- if (chctl.all_flags == 0)
- return;
-
- spin_lock_irqsave(&part->chctl_lock, irq_flags);
- part->chctl.all_flags |= chctl.all_flags;
- spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
- dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
- "0x%llx\n", XPC_PARTID(part), chctl.all_flags);
-
- xpc_wakeup_channel_mgr(part);
-}
-
-/*
- * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
- * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
- * than one partition, we use an amo structure per partition to indicate
- * whether a partition has sent an IRQ or not. If it has, then wake up the
- * associated kthread to handle it.
- *
- * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
- * running on other partitions.
- *
- * Noteworthy Arguments:
- *
- * irq - Interrupt ReQuest number. NOT USED.
- *
- * dev_id - partid of IRQ's potential sender.
- */
-static irqreturn_t
-xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
-{
- short partid = (short)(u64)dev_id;
- struct xpc_partition *part = &xpc_partitions[partid];
-
- DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);
-
- if (xpc_part_ref(part)) {
- xpc_check_for_sent_chctl_flags_sn2(part);
-
- xpc_part_deref(part);
- }
- return IRQ_HANDLED;
-}
-
-/*
- * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
- * because the write to their associated amo variable completed after the IRQ
- * was received.
- */
-static void
-xpc_check_for_dropped_notify_IRQ_sn2(struct timer_list *t)
-{
- struct xpc_partition *part =
- from_timer(part, t, sn.sn2.dropped_notify_IRQ_timer);
-
- if (xpc_part_ref(part)) {
- xpc_check_for_sent_chctl_flags_sn2(part);
-
- t->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
- add_timer(t);
- xpc_part_deref(part);
- }
-}
-
-/*
- * Send a notify IRQ to the remote partition that is associated with the
- * specified channel.
- */
-static void
-xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
- char *chctl_flag_string, unsigned long *irq_flags)
-{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- union xpc_channel_ctl_flags chctl = { 0 };
- enum xp_retval ret;
-
- if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
- chctl.flags[ch->number] = chctl_flag;
- ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
- chctl.all_flags,
- part_sn2->notify_IRQ_nasid,
- part_sn2->notify_IRQ_phys_cpuid,
- SGI_XPC_NOTIFY);
- dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
- chctl_flag_string, ch->partid, ch->number, ret);
- if (unlikely(ret != xpSuccess)) {
- if (irq_flags != NULL)
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- XPC_DEACTIVATE_PARTITION(part, ret);
- if (irq_flags != NULL)
- spin_lock_irqsave(&ch->lock, *irq_flags);
- }
- }
-}
-
-#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
- xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
-
-/*
- * Make it look like the remote partition, which is associated with the
- * specified channel, sent us a notify IRQ. This faked IRQ will be handled
- * by xpc_check_for_dropped_notify_IRQ_sn2().
- */
-static void
-xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
- char *chctl_flag_string)
-{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
- union xpc_channel_ctl_flags chctl = { 0 };
-
- chctl.flags[ch->number] = chctl_flag;
- FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
- variable), FETCHOP_OR, chctl.all_flags);
- dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
- chctl_flag_string, ch->partid, ch->number);
-}
-
-#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
- xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
-
-static void
-xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
- unsigned long *irq_flags)
-{
- struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
-
- args->reason = ch->reason;
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
-}
-
-static void
-xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
-{
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
-}
-
-static void
-xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
-{
- struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
-
- args->entry_size = ch->entry_size;
- args->local_nentries = ch->local_nentries;
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
-}
-
-static void
-xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
-{
- struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;
-
- args->remote_nentries = ch->remote_nentries;
- args->local_nentries = ch->local_nentries;
- args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
-}
-
-static void
-xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
- unsigned long *irq_flags)
-{
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
-}
-
-static void
-xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
-{
- XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
-}
-
-static void
-xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
-{
- XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
-}
-
-static enum xp_retval
-xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
- unsigned long msgqueue_pa)
-{
- ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
- return xpSuccess;
-}
-
-/*
- * This next set of functions are used to keep track of when a partition is
- * potentially engaged in accessing memory belonging to another partition.
- */
-
-static void
-xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
-{
- unsigned long irq_flags;
- struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
- (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
- sizeof(struct amo)));
-
- local_irq_save(irq_flags);
-
- /* set bit corresponding to our partid in remote partition's amo */
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
- BIT(sn_partition_id));
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IRQs and amos to it until the heartbeat times out.
- */
- (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-}
-
-static void
-xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- unsigned long irq_flags;
- struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
- (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
- sizeof(struct amo)));
-
- local_irq_save(irq_flags);
-
- /* clear bit corresponding to our partid in remote partition's amo */
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
- ~BIT(sn_partition_id));
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IRQs and amos to it until the heartbeat times out.
- */
- (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-
- /*
- * Send activate IRQ to get other side to see that we've cleared our
- * bit in their engaged partitions amo.
- */
- xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
- cnodeid_to_nasid(0),
- part_sn2->activate_IRQ_nasid,
- part_sn2->activate_IRQ_phys_cpuid);
-}
-
-static void
-xpc_assume_partition_disengaged_sn2(short partid)
-{
- struct amo *amo = xpc_vars_sn2->amos_page +
- XPC_ENGAGED_PARTITIONS_AMO_SN2;
-
- /* clear bit(s) based on partid mask in our partition's amo */
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
- ~BIT(partid));
-}
-
-static int
-xpc_partition_engaged_sn2(short partid)
-{
- struct amo *amo = xpc_vars_sn2->amos_page +
- XPC_ENGAGED_PARTITIONS_AMO_SN2;
-
- /* our partition's amo variable ANDed with partid mask */
- return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
- BIT(partid)) != 0;
-}
-
-static int
-xpc_any_partition_engaged_sn2(void)
-{
- struct amo *amo = xpc_vars_sn2->amos_page +
- XPC_ENGAGED_PARTITIONS_AMO_SN2;
-
- /* our partition's amo variable */
- return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
-}
-
-/* original protection values for each node */
-static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
-
-/*
- * Change protections to allow amo operations on non-Shub 1.1 systems.
- */
-static enum xp_retval
-xpc_allow_amo_ops_sn2(struct amo *amos_page)
-{
- enum xp_retval ret = xpSuccess;
-
- /*
- * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
- * collides with memory operations. On those systems we call
- * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
- */
- if (!enable_shub_wars_1_1())
- ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
-
- return ret;
-}
-
-/*
- * Change protections to allow amo operations on Shub 1.1 systems.
- */
-static void
-xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
-{
- int node;
- int nasid;
-
- if (!enable_shub_wars_1_1())
- return;
-
- for_each_online_node(node) {
- nasid = cnodeid_to_nasid(node);
- /* save current protection values */
- xpc_prot_vec_sn2[node] =
- (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0));
- /* open up everything */
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQLP_MMR_DIR_PRIVEC0),
- -1UL);
- HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
- SH1_MD_DQRP_MMR_DIR_PRIVEC0),
- -1UL);
- }
-}
-
-static enum xp_retval
-xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
- size_t *len)
-{
- s64 status;
- enum xp_retval ret;
-
- status = sn_partition_reserved_page_pa((u64)buf, cookie,
- (u64 *)rp_pa, (u64 *)len);
- if (status == SALRET_OK)
- ret = xpSuccess;
- else if (status == SALRET_MORE_PASSES)
- ret = xpNeedMoreInfo;
- else
- ret = xpSalError;
-
- return ret;
-}
-
-
-static int
-xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
-{
- struct amo *amos_page;
- int i;
- int ret;
-
- xpc_vars_sn2 = XPC_RP_VARS(rp);
-
- rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);
-
- /* vars_part array follows immediately after vars */
- xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
- XPC_RP_VARS_SIZE);
-
- /*
- * Before clearing xpc_vars_sn2, see if a page of amos had been
- * previously allocated. If not we'll need to allocate one and set
- * permissions so that cross-partition amos are allowed.
- *
- * The allocated amo page needs MCA reporting to remain disabled after
- * XPC has unloaded. To make this work, we keep a copy of the pointer
- * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
- * which is pointed to by the reserved page, and re-use that saved copy
- * on subsequent loads of XPC. This amo page is never freed, and its
- * memory protections are never restricted.
- */
- amos_page = xpc_vars_sn2->amos_page;
- if (amos_page == NULL) {
- amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
- if (amos_page == NULL) {
- dev_err(xpc_part, "can't allocate page of amos\n");
- return -ENOMEM;
- }
-
- /*
- * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
- * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
- */
- ret = xpc_allow_amo_ops_sn2(amos_page);
- if (ret != xpSuccess) {
- dev_err(xpc_part, "can't allow amo operations\n");
- uncached_free_page(__IA64_UNCACHED_OFFSET |
- TO_PHYS((u64)amos_page), 1);
- return -EPERM;
- }
- }
-
- /* clear xpc_vars_sn2 */
- memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));
-
- xpc_vars_sn2->version = XPC_V_VERSION;
- xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
- xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
- xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
- xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
- xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */
-
- /* clear xpc_vars_part_sn2 */
- memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
- XP_MAX_NPARTITIONS_SN2);
-
- /* initialize the activate IRQ related amo variables */
- for (i = 0; i < xpc_nasid_mask_nlongs; i++)
- (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);
-
- /* initialize the engaged remote partitions related amo variables */
- (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
- (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);
-
- return 0;
-}
-
-static int
-xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
-{
- return test_bit(partid, heartbeating_to_mask);
-}
-
-static void
-xpc_allow_hb_sn2(short partid)
-{
- DBUG_ON(xpc_vars_sn2 == NULL);
- set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
-}
-
-static void
-xpc_disallow_hb_sn2(short partid)
-{
- DBUG_ON(xpc_vars_sn2 == NULL);
- clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
-}
-
-static void
-xpc_disallow_all_hbs_sn2(void)
-{
- DBUG_ON(xpc_vars_sn2 == NULL);
- bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
-}
-
-static void
-xpc_increment_heartbeat_sn2(void)
-{
- xpc_vars_sn2->heartbeat++;
-}
-
-static void
-xpc_offline_heartbeat_sn2(void)
-{
- xpc_increment_heartbeat_sn2();
- xpc_vars_sn2->heartbeat_offline = 1;
-}
-
-static void
-xpc_online_heartbeat_sn2(void)
-{
- xpc_increment_heartbeat_sn2();
- xpc_vars_sn2->heartbeat_offline = 0;
-}
-
-static void
-xpc_heartbeat_init_sn2(void)
-{
- DBUG_ON(xpc_vars_sn2 == NULL);
-
- bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
- xpc_online_heartbeat_sn2();
-}
-
-static void
-xpc_heartbeat_exit_sn2(void)
-{
- xpc_offline_heartbeat_sn2();
-}
-
-static enum xp_retval
-xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
-{
- struct xpc_vars_sn2 *remote_vars;
- enum xp_retval ret;
-
- remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
-
- /* pull the remote vars structure that contains the heartbeat */
- ret = xp_remote_memcpy(xp_pa(remote_vars),
- part->sn.sn2.remote_vars_pa,
- XPC_RP_VARS_SIZE);
- if (ret != xpSuccess)
- return ret;
-
- dev_dbg(xpc_part, "partid=%d, heartbeat=%lld, last_heartbeat=%lld, "
- "heartbeat_offline=%lld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
- remote_vars->heartbeat, part->last_heartbeat,
- remote_vars->heartbeat_offline,
- remote_vars->heartbeating_to_mask[0]);
-
- if ((remote_vars->heartbeat == part->last_heartbeat &&
- !remote_vars->heartbeat_offline) ||
- !xpc_hb_allowed_sn2(sn_partition_id,
- remote_vars->heartbeating_to_mask)) {
- ret = xpNoHeartbeat;
- } else {
- part->last_heartbeat = remote_vars->heartbeat;
- }
-
- return ret;
-}
-
-/*
- * Get a copy of the remote partition's XPC variables from the reserved page.
- *
- * remote_vars points to a buffer that is cacheline aligned for BTE copies and
- * assumed to be of size XPC_RP_VARS_SIZE.
- */
-static enum xp_retval
-xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
- struct xpc_vars_sn2 *remote_vars)
-{
- enum xp_retval ret;
-
- if (remote_vars_pa == 0)
- return xpVarsNotSet;
-
- /* pull over the cross partition variables */
- ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
- XPC_RP_VARS_SIZE);
- if (ret != xpSuccess)
- return ret;
-
- if (XPC_VERSION_MAJOR(remote_vars->version) !=
- XPC_VERSION_MAJOR(XPC_V_VERSION)) {
- return xpBadVersion;
- }
-
- return xpSuccess;
-}
-
-static void
-xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
- unsigned long remote_rp_pa, int nasid)
-{
- xpc_send_local_activate_IRQ_sn2(nasid);
-}
-
-static void
-xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
-{
- xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
-}
-
-static void
-xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- unsigned long irq_flags;
- struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
- (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
- sizeof(struct amo)));
-
- local_irq_save(irq_flags);
-
- /* set bit corresponding to our partid in remote partition's amo */
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
- BIT(sn_partition_id));
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IRQs and amos to it until the heartbeat times out.
- */
- (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-
- /*
- * Send activate IRQ to get other side to see that we've set our
- * bit in their deactivate request amo.
- */
- xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
- cnodeid_to_nasid(0),
- part_sn2->activate_IRQ_nasid,
- part_sn2->activate_IRQ_phys_cpuid);
-}
-
-static void
-xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
-{
- unsigned long irq_flags;
- struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
- (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
- sizeof(struct amo)));
-
- local_irq_save(irq_flags);
-
- /* clear bit corresponding to our partid in remote partition's amo */
- FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
- ~BIT(sn_partition_id));
-
- /*
- * We must always use the nofault function regardless of whether we
- * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
- * didn't, we'd never know that the other partition is down and would
- * keep sending IRQs and amos to it until the heartbeat times out.
- */
- (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
- variable),
- xp_nofault_PIOR_target));
-
- local_irq_restore(irq_flags);
-}
-
-static int
-xpc_partition_deactivation_requested_sn2(short partid)
-{
- struct amo *amo = xpc_vars_sn2->amos_page +
- XPC_DEACTIVATE_REQUEST_AMO_SN2;
-
- /* our partition's amo variable ANDed with partid mask */
- return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
- BIT(partid)) != 0;
-}
-
-/*
- * Update the remote partition's info.
- */
-static void
-xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
- unsigned long *remote_rp_ts_jiffies,
- unsigned long remote_rp_pa,
- unsigned long remote_vars_pa,
- struct xpc_vars_sn2 *remote_vars)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
-
- part->remote_rp_version = remote_rp_version;
- dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
- part->remote_rp_version);
-
- part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
- dev_dbg(xpc_part, " remote_rp_ts_jiffies = 0x%016lx\n",
- part->remote_rp_ts_jiffies);
-
- part->remote_rp_pa = remote_rp_pa;
- dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
-
- part_sn2->remote_vars_pa = remote_vars_pa;
- dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
- part_sn2->remote_vars_pa);
-
- part->last_heartbeat = remote_vars->heartbeat - 1;
- dev_dbg(xpc_part, " last_heartbeat = 0x%016llx\n",
- part->last_heartbeat);
-
- part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
- dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
- part_sn2->remote_vars_part_pa);
-
- part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
- dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
- part_sn2->activate_IRQ_nasid);
-
- part_sn2->activate_IRQ_phys_cpuid =
- remote_vars->activate_IRQ_phys_cpuid;
- dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
- part_sn2->activate_IRQ_phys_cpuid);
-
- part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
- dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
- part_sn2->remote_amos_page_pa);
-
- part_sn2->remote_vars_version = remote_vars->version;
- dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
- part_sn2->remote_vars_version);
-}
-
-/*
- * Prior code has determined the nasid which generated an activate IRQ.
- * Inspect that nasid to determine if its partition needs to be activated
- * or deactivated.
- *
- * A partition is considered "awaiting activation" if our partition
- * flags indicate it is not active and it has a heartbeat. A
- * partition is considered "awaiting deactivation" if our partition
- * flags indicate it is active but it has no heartbeat or it is not
- * sending its heartbeat to us.
- *
- * To determine the heartbeat, the remote nasid must have a properly
- * initialized reserved page.
- */
-static void
-xpc_identify_activate_IRQ_req_sn2(int nasid)
-{
- struct xpc_rsvd_page *remote_rp;
- struct xpc_vars_sn2 *remote_vars;
- unsigned long remote_rp_pa;
- unsigned long remote_vars_pa;
- int remote_rp_version;
- int reactivate = 0;
- unsigned long remote_rp_ts_jiffies = 0;
- short partid;
- struct xpc_partition *part;
- struct xpc_partition_sn2 *part_sn2;
- enum xp_retval ret;
-
- /* pull over the reserved page structure */
-
- remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;
-
- ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
- if (ret != xpSuccess) {
- dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
- return;
- }
-
- remote_vars_pa = remote_rp->sn.sn2.vars_pa;
- remote_rp_version = remote_rp->version;
- remote_rp_ts_jiffies = remote_rp->ts_jiffies;
-
- partid = remote_rp->SAL_partid;
- part = &xpc_partitions[partid];
- part_sn2 = &part->sn.sn2;
-
- /* pull over the cross partition variables */
-
- remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;
-
- ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
- if (ret != xpSuccess) {
- dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
- "which sent interrupt, reason=%d\n", nasid, ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
- return;
- }
-
- part->activate_IRQ_rcvd++;
-
- dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
- "%lld:0x%lx\n", (int)nasid, (int)partid,
- part->activate_IRQ_rcvd,
- remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
-
- if (xpc_partition_disengaged(part) &&
- part->act_state == XPC_P_AS_INACTIVE) {
-
- xpc_update_partition_info_sn2(part, remote_rp_version,
- &remote_rp_ts_jiffies,
- remote_rp_pa, remote_vars_pa,
- remote_vars);
-
- if (xpc_partition_deactivation_requested_sn2(partid)) {
- /*
- * Other side is waiting on us to deactivate even though
- * we already have.
- */
- return;
- }
-
- xpc_activate_partition(part);
- return;
- }
-
- DBUG_ON(part->remote_rp_version == 0);
- DBUG_ON(part_sn2->remote_vars_version == 0);
-
- if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {
-
- /* the other side rebooted */
-
- DBUG_ON(xpc_partition_engaged_sn2(partid));
- DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));
-
- xpc_update_partition_info_sn2(part, remote_rp_version,
- &remote_rp_ts_jiffies,
- remote_rp_pa, remote_vars_pa,
- remote_vars);
- reactivate = 1;
- }
-
- if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
- /* still waiting on other side to disengage from us */
- return;
- }
-
- if (reactivate)
- XPC_DEACTIVATE_PARTITION(part, xpReactivating);
- else if (xpc_partition_deactivation_requested_sn2(partid))
- XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
-}
-
-/*
- * Loop through the activation amo variables and process any bits
- * which are set. Each bit indicates a nasid sending a partition
- * activation or deactivation request.
- *
- * Return #of IRQs detected.
- */
-int
-xpc_identify_activate_IRQ_sender_sn2(void)
-{
- int l;
- int b;
- unsigned long nasid_mask_long;
- u64 nasid; /* remote nasid */
- int n_IRQs_detected = 0;
- struct amo *act_amos;
-
- act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
-
- /* scan through activate amo variables looking for non-zero entries */
- for (l = 0; l < xpc_nasid_mask_nlongs; l++) {
-
- if (xpc_exiting)
- break;
-
- nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);
-
- b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
- if (b >= BITS_PER_LONG) {
- /* no IRQs from nasids in this amo variable */
- continue;
- }
-
- dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
- nasid_mask_long);
-
- /*
- * If this nasid has been added to the machine since
- * our partition was reset, this will retain the
- * remote nasid in our reserved pages machine mask.
- * This is used in the event of module reload.
- */
- xpc_mach_nasids[l] |= nasid_mask_long;
-
- /* locate the nasid(s) which sent interrupts */
-
- do {
- n_IRQs_detected++;
- nasid = (l * BITS_PER_LONG + b) * 2;
- dev_dbg(xpc_part, "interrupt from nasid %lld\n", nasid);
- xpc_identify_activate_IRQ_req_sn2(nasid);
-
- b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
- b + 1);
- } while (b < BITS_PER_LONG);
- }
- return n_IRQs_detected;
-}
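The loop above visits every set bit of each activate amo word with find_first_bit()/find_next_bit(). A minimal userspace sketch of the same iteration, with an illustrative handle_bit() callback standing in for xpc_identify_activate_IRQ_req_sn2():

/* Illustrative only: visit each set bit of one word, lowest bit first. */
static void for_each_set_bit_in_word(unsigned long word,
				     void (*handle_bit)(unsigned int bit))
{
	while (word != 0) {
		unsigned int bit = __builtin_ctzl(word);	/* lowest set bit */

		handle_bit(bit);
		word &= word - 1;				/* clear that bit */
	}
}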
-
-static void
-xpc_process_activate_IRQ_rcvd_sn2(void)
-{
- unsigned long irq_flags;
- int n_IRQs_expected;
- int n_IRQs_detected;
-
- spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
- n_IRQs_expected = xpc_activate_IRQ_rcvd;
- xpc_activate_IRQ_rcvd = 0;
- spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
-
- n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
- if (n_IRQs_detected < n_IRQs_expected) {
- /* retry once to help avoid missing amo */
- (void)xpc_identify_activate_IRQ_sender_sn2();
- }
-}
-
-/*
- * Setup the channel structures that are sn2 specific.
- */
-static enum xp_retval
-xpc_setup_ch_structures_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- struct xpc_channel_sn2 *ch_sn2;
- enum xp_retval retval;
- int ret;
- int cpuid;
- int ch_number;
- struct timer_list *timer;
- short partid = XPC_PARTID(part);
-
- /* allocate all the required GET/PUT values */
-
- part_sn2->local_GPs =
- xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
- &part_sn2->local_GPs_base);
- if (part_sn2->local_GPs == NULL) {
- dev_err(xpc_chan, "can't get memory for local get/put "
- "values\n");
- return xpNoMemory;
- }
-
- part_sn2->remote_GPs =
- xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
- &part_sn2->remote_GPs_base);
- if (part_sn2->remote_GPs == NULL) {
- dev_err(xpc_chan, "can't get memory for remote get/put "
- "values\n");
- retval = xpNoMemory;
- goto out_1;
- }
-
- part_sn2->remote_GPs_pa = 0;
-
- /* allocate all the required open and close args */
-
- part_sn2->local_openclose_args =
- xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
- GFP_KERNEL, &part_sn2->
- local_openclose_args_base);
- if (part_sn2->local_openclose_args == NULL) {
- dev_err(xpc_chan, "can't get memory for local connect args\n");
- retval = xpNoMemory;
- goto out_2;
- }
-
- part_sn2->remote_openclose_args_pa = 0;
-
- part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
-
- part_sn2->notify_IRQ_nasid = 0;
- part_sn2->notify_IRQ_phys_cpuid = 0;
- part_sn2->remote_chctl_amo_va = NULL;
-
- sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
- ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
- IRQF_SHARED, part_sn2->notify_IRQ_owner,
- (void *)(u64)partid);
- if (ret != 0) {
- dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
- "errno=%d\n", -ret);
- retval = xpLackOfResources;
- goto out_3;
- }
-
- /* Setup a timer to check for dropped notify IRQs */
- timer = &part_sn2->dropped_notify_IRQ_timer;
- timer_setup(timer, xpc_check_for_dropped_notify_IRQ_sn2, 0);
- timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
- add_timer(timer);
-
- for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
- ch_sn2 = &part->channels[ch_number].sn.sn2;
-
- ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
- ch_sn2->local_openclose_args =
- &part_sn2->local_openclose_args[ch_number];
-
- mutex_init(&ch_sn2->msg_to_pull_mutex);
- }
-
- /*
- * Setup the per partition specific variables required by the
- * remote partition to establish channel connections with us.
- *
- * The setting of the magic # indicates that these per partition
- * specific variables are ready to be used.
- */
- xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
- xpc_vars_part_sn2[partid].openclose_args_pa =
- xp_pa(part_sn2->local_openclose_args);
- xpc_vars_part_sn2[partid].chctl_amo_pa =
- xp_pa(part_sn2->local_chctl_amo_va);
- cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
- xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
- xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
- cpu_physical_id(cpuid);
- xpc_vars_part_sn2[partid].nchannels = part->nchannels;
- xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;
-
- return xpSuccess;
-
- /* setup of ch structures failed */
-out_3:
- kfree(part_sn2->local_openclose_args_base);
- part_sn2->local_openclose_args = NULL;
-out_2:
- kfree(part_sn2->remote_GPs_base);
- part_sn2->remote_GPs = NULL;
-out_1:
- kfree(part_sn2->local_GPs_base);
- part_sn2->local_GPs = NULL;
- return retval;
-}
-
-/*
- * Teardown the channel structures that are sn2 specific.
- */
-static void
-xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- short partid = XPC_PARTID(part);
-
- /*
- * Indicate that the variables specific to the remote partition are no
- * longer available for its use.
- */
- xpc_vars_part_sn2[partid].magic = 0;
-
- /* in case we've still got outstanding timers registered... */
- del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
- free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
-
- kfree(part_sn2->local_openclose_args_base);
- part_sn2->local_openclose_args = NULL;
- kfree(part_sn2->remote_GPs_base);
- part_sn2->remote_GPs = NULL;
- kfree(part_sn2->local_GPs_base);
- part_sn2->local_GPs = NULL;
- part_sn2->local_chctl_amo_va = NULL;
-}
-
-/*
- * Create a wrapper that hides the underlying mechanism for pulling a cacheline
- * (or multiple cachelines) from a remote partition.
- *
- * src_pa must be a cacheline aligned physical address on the remote partition.
- * dst must be a cacheline aligned virtual address on this partition.
- * cnt must be a multiple of the cacheline size
- */
-/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
-static enum xp_retval
-xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
- const unsigned long src_pa, size_t cnt)
-{
- enum xp_retval ret;
-
- DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
- DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
- DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
-
- if (part->act_state == XPC_P_AS_DEACTIVATING)
- return part->reason;
-
- ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
- if (ret != xpSuccess) {
- dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
- " ret=%d\n", XPC_PARTID(part), ret);
- }
- return ret;
-}
-
-/*
- * Pull the remote per partition specific variables from the specified
- * partition.
- */
-static enum xp_retval
-xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- u8 buffer[L1_CACHE_BYTES * 2];
- struct xpc_vars_part_sn2 *pulled_entry_cacheline =
- (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
- struct xpc_vars_part_sn2 *pulled_entry;
- unsigned long remote_entry_cacheline_pa;
- unsigned long remote_entry_pa;
- short partid = XPC_PARTID(part);
- enum xp_retval ret;
-
- /* pull the cacheline that contains the variables we're interested in */
-
- DBUG_ON(part_sn2->remote_vars_part_pa !=
- L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
- DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
-
- remote_entry_pa = part_sn2->remote_vars_part_pa +
- sn_partition_id * sizeof(struct xpc_vars_part_sn2);
-
- remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
-
- pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
- + (remote_entry_pa &
- (L1_CACHE_BYTES - 1)));
-
- ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
- remote_entry_cacheline_pa,
- L1_CACHE_BYTES);
- if (ret != xpSuccess) {
- dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
- "partition %d, ret=%d\n", partid, ret);
- return ret;
- }
-
- /* see if they've been set up yet */
-
- if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
- pulled_entry->magic != XPC_VP_MAGIC2_SN2) {
-
- if (pulled_entry->magic != 0) {
- dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
- "partition %d has bad magic value (=0x%llx)\n",
- partid, sn_partition_id, pulled_entry->magic);
- return xpBadMagic;
- }
-
- /* they've not been initialized yet */
- return xpRetry;
- }
-
- if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {
-
- /* validate the variables */
-
- if (pulled_entry->GPs_pa == 0 ||
- pulled_entry->openclose_args_pa == 0 ||
- pulled_entry->chctl_amo_pa == 0) {
-
- dev_err(xpc_chan, "partition %d's XPC vars_part for "
- "partition %d are not valid\n", partid,
- sn_partition_id);
- return xpInvalidAddress;
- }
-
- /* the variables we imported look to be valid */
-
- part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
- part_sn2->remote_openclose_args_pa =
- pulled_entry->openclose_args_pa;
- part_sn2->remote_chctl_amo_va =
- (struct amo *)__va(pulled_entry->chctl_amo_pa);
- part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
- part_sn2->notify_IRQ_phys_cpuid =
- pulled_entry->notify_IRQ_phys_cpuid;
-
- if (part->nchannels > pulled_entry->nchannels)
- part->nchannels = pulled_entry->nchannels;
-
- /* let the other side know that we've pulled their variables */
-
- xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
- }
-
- if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
- return xpRetry;
-
- return xpSuccess;
-}
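As a reading aid, the MAGIC1/MAGIC2 handshake above can be summarized as a tiny state check. The enum and helper below are a simplified restatement with illustrative names, not XPC definitions.

/* Illustrative only: each side's vars_part entry advances EMPTY -> MAGIC1
 * ("my entry is initialized") -> MAGIC2 ("and I have pulled yours"). */
enum vp_state { VP_EMPTY = 0, VP_MAGIC1, VP_MAGIC2 };

/* First contact is complete once the entry we pulled shows MAGIC2, i.e.
 * the remote side has also pulled and validated our entry; MAGIC1 means
 * "keep retrying". */
static int first_contact_done(enum vp_state pulled_remote_entry)
{
	return pulled_remote_entry == VP_MAGIC2;
}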
-
-/*
- * Establish first contact with the remote partition. This involves pulling
- * the XPC per partition variables from the remote partition and waiting for
- * the remote partition to pull ours.
- */
-static enum xp_retval
-xpc_make_first_contact_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- enum xp_retval ret;
-
- /*
- * Register the remote partition's amos with SAL so it can handle
- * and cleanup errors within that address range should the remote
- * partition go down. We don't unregister this range because it is
- * difficult to tell when outstanding writes to the remote partition
- * are finished and thus when it is safe to unregister. This should
- * not result in wasted space in the SAL xp_addr_region table because
- * we should get the same page for remote_amos_page_pa after module
- * reloads and system reboots.
- */
- if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
- PAGE_SIZE, 1) < 0) {
- dev_warn(xpc_part, "xpc_activating(%d) failed to register "
- "xp_addr region\n", XPC_PARTID(part));
-
- ret = xpPhysAddrRegFailed;
- XPC_DEACTIVATE_PARTITION(part, ret);
- return ret;
- }
-
- /*
- * Send activate IRQ to get other side to activate if they've not
- * already begun to do so.
- */
- xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
- cnodeid_to_nasid(0),
- part_sn2->activate_IRQ_nasid,
- part_sn2->activate_IRQ_phys_cpuid);
-
- while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
- if (ret != xpRetry) {
- XPC_DEACTIVATE_PARTITION(part, ret);
- return ret;
- }
-
- dev_dbg(xpc_part, "waiting to make first contact with "
- "partition %d\n", XPC_PARTID(part));
-
- /* wait a 1/4 of a second or so */
- (void)msleep_interruptible(250);
-
- if (part->act_state == XPC_P_AS_DEACTIVATING)
- return part->reason;
- }
-
- return xpSuccess;
-}
-
-/*
- * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
- */
-static u64
-xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
-{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
- unsigned long irq_flags;
- union xpc_channel_ctl_flags chctl;
- enum xp_retval ret;
-
- /*
- * See if there are any chctl flags to be handled.
- */
-
- spin_lock_irqsave(&part->chctl_lock, irq_flags);
- chctl = part->chctl;
- if (chctl.all_flags != 0)
- part->chctl.all_flags = 0;
-
- spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
- if (xpc_any_openclose_chctl_flags_set(&chctl)) {
- ret = xpc_pull_remote_cachelines_sn2(part, part->
- remote_openclose_args,
- part_sn2->
- remote_openclose_args_pa,
- XPC_OPENCLOSE_ARGS_SIZE);
- if (ret != xpSuccess) {
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- dev_dbg(xpc_chan, "failed to pull openclose args from "
- "partition %d, ret=%d\n", XPC_PARTID(part),
- ret);
-
- /* don't bother processing chctl flags anymore */
- chctl.all_flags = 0;
- }
- }
-
- if (xpc_any_msg_chctl_flags_set(&chctl)) {
- ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
- part_sn2->remote_GPs_pa,
- XPC_GP_SIZE);
- if (ret != xpSuccess) {
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- dev_dbg(xpc_chan, "failed to pull GPs from partition "
- "%d, ret=%d\n", XPC_PARTID(part), ret);
-
- /* don't bother processing chctl flags anymore */
- chctl.all_flags = 0;
- }
- }
-
- return chctl.all_flags;
-}
-
-/*
- * Allocate the local message queue and the notify queue.
- */
-static enum xp_retval
-xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- unsigned long irq_flags;
- int nentries;
- size_t nbytes;
-
- for (nentries = ch->local_nentries; nentries > 0; nentries--) {
-
- nbytes = nentries * ch->entry_size;
- ch_sn2->local_msgqueue =
- xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
- &ch_sn2->local_msgqueue_base);
- if (ch_sn2->local_msgqueue == NULL)
- continue;
-
- nbytes = nentries * sizeof(struct xpc_notify_sn2);
- ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
- if (ch_sn2->notify_queue == NULL) {
- kfree(ch_sn2->local_msgqueue_base);
- ch_sn2->local_msgqueue = NULL;
- continue;
- }
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- if (nentries < ch->local_nentries) {
- dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
- "partid=%d, channel=%d\n", nentries,
- ch->local_nentries, ch->partid, ch->number);
-
- ch->local_nentries = nentries;
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpSuccess;
- }
-
- dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
- "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpNoMemory;
-}
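The loop above is a "shrink until it fits" allocator: rather than failing outright, it retries with progressively fewer entries and reports the reduced size back through ch->local_nentries. A minimal userspace sketch of the same pattern, with illustrative names:

#include <stdlib.h>

/* Illustrative only: allocate the largest queue that fits, reporting the
 * possibly reduced entry count back to the caller. */
static void *alloc_largest_queue(size_t entry_size, int *nentries)
{
	for (int n = *nentries; n > 0; n--) {
		void *q = calloc(n, entry_size);

		if (q != NULL) {
			*nentries = n;
			return q;
		}
	}
	return NULL;
}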
-
-/*
- * Allocate the cached remote message queue.
- */
-static enum xp_retval
-xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- unsigned long irq_flags;
- int nentries;
- size_t nbytes;
-
- DBUG_ON(ch->remote_nentries <= 0);
-
- for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
-
- nbytes = nentries * ch->entry_size;
- ch_sn2->remote_msgqueue =
- xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
- remote_msgqueue_base);
- if (ch_sn2->remote_msgqueue == NULL)
- continue;
-
- spin_lock_irqsave(&ch->lock, irq_flags);
- if (nentries < ch->remote_nentries) {
- dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
- "partid=%d, channel=%d\n", nentries,
- ch->remote_nentries, ch->partid, ch->number);
-
- ch->remote_nentries = nentries;
- }
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpSuccess;
- }
-
- dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
- "partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpNoMemory;
-}
-
-/*
- * Allocate message queues and other stuff associated with a channel.
- *
- * Note: Assumes all of the channel sizes are filled in.
- */
-static enum xp_retval
-xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- enum xp_retval ret;
-
- DBUG_ON(ch->flags & XPC_C_SETUP);
-
- ret = xpc_allocate_local_msgqueue_sn2(ch);
- if (ret == xpSuccess) {
-
- ret = xpc_allocate_remote_msgqueue_sn2(ch);
- if (ret != xpSuccess) {
- kfree(ch_sn2->local_msgqueue_base);
- ch_sn2->local_msgqueue = NULL;
- kfree(ch_sn2->notify_queue);
- ch_sn2->notify_queue = NULL;
- }
- }
- return ret;
-}
-
-/*
- * Free up message queues and other stuff that were allocated for the specified
- * channel.
- */
-static void
-xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
-
- lockdep_assert_held(&ch->lock);
-
- ch_sn2->remote_msgqueue_pa = 0;
-
- ch_sn2->local_GP->get = 0;
- ch_sn2->local_GP->put = 0;
- ch_sn2->remote_GP.get = 0;
- ch_sn2->remote_GP.put = 0;
- ch_sn2->w_local_GP.get = 0;
- ch_sn2->w_local_GP.put = 0;
- ch_sn2->w_remote_GP.get = 0;
- ch_sn2->w_remote_GP.put = 0;
- ch_sn2->next_msg_to_pull = 0;
-
- if (ch->flags & XPC_C_SETUP) {
- dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
- ch->flags, ch->partid, ch->number);
-
- kfree(ch_sn2->local_msgqueue_base);
- ch_sn2->local_msgqueue = NULL;
- kfree(ch_sn2->remote_msgqueue_base);
- ch_sn2->remote_msgqueue = NULL;
- kfree(ch_sn2->notify_queue);
- ch_sn2->notify_queue = NULL;
- }
-}
-
-/*
- * Notify those who wanted to be notified upon delivery of their message.
- */
-static void
-xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
-{
- struct xpc_notify_sn2 *notify;
- u8 notify_type;
- s64 get = ch->sn.sn2.w_remote_GP.get - 1;
-
- while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
-
- notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];
-
- /*
- * See if the notify entry indicates it was associated with
- * a message whose sender wants to be notified. It is possible
- * that it is, but someone else is doing or has done the
- * notification.
- */
- notify_type = notify->type;
- if (notify_type == 0 ||
- cmpxchg(&notify->type, notify_type, 0) != notify_type) {
- continue;
- }
-
- DBUG_ON(notify_type != XPC_N_CALL);
-
- atomic_dec(&ch->n_to_notify);
-
- if (notify->func != NULL) {
- dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
- "msg_number=%lld partid=%d channel=%d\n",
- (void *)notify, get, ch->partid, ch->number);
-
- notify->func(reason, ch->partid, ch->number,
- notify->key);
-
- dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
- " msg_number=%lld partid=%d channel=%d\n",
- (void *)notify, get, ch->partid, ch->number);
- }
- }
-}
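The cmpxchg() on notify->type above ensures that only one of the competing paths (delivery notification here, disconnect handling, or the sender's own error path) acts on each notify entry. A hedged C11 sketch of that claim-once step, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: returns true for exactly one claimant; any other path
 * sees the entry already cleared (or changed) and skips the callback. */
static bool claim_notify_entry(_Atomic unsigned char *type, unsigned char seen)
{
	return atomic_compare_exchange_strong(type, &seen, 0);
}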
-
-static void
-xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
-{
- xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
-}
-
-/*
- * Clear some of the msg flags in the local message queue.
- */
-static inline void
-xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- s64 get;
-
- get = ch_sn2->w_remote_GP.get;
- do {
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
- (get % ch->local_nentries) *
- ch->entry_size);
- DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
- msg->flags = 0;
- } while (++get < ch_sn2->remote_GP.get);
-}
-
-/*
- * Clear some of the msg flags in the remote message queue.
- */
-static inline void
-xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- s64 put, remote_nentries = ch->remote_nentries;
-
- /* flags are zeroed when the buffer is allocated */
- if (ch_sn2->remote_GP.put < remote_nentries)
- return;
-
- put = max(ch_sn2->w_remote_GP.put, remote_nentries);
- do {
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
- (put % remote_nentries) *
- ch->entry_size);
- DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
- DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
- DBUG_ON(msg->number != put - remote_nentries);
- msg->flags = 0;
- } while (++put < ch_sn2->remote_GP.put);
-}
-
-static int
-xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch)
-{
- return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
-}
-
-static void
-xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
-{
- struct xpc_channel *ch = &part->channels[ch_number];
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- int npayloads_sent;
-
- ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
-
- /* See what, if anything, has changed for each connected channel */
-
- xpc_msgqueue_ref(ch);
-
- if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
- ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
- /* nothing changed since GPs were last pulled */
- xpc_msgqueue_deref(ch);
- return;
- }
-
- if (!(ch->flags & XPC_C_CONNECTED)) {
- xpc_msgqueue_deref(ch);
- return;
- }
-
- /*
- * First check to see if messages recently sent by us have been
- * received by the other side. (The remote GET value will have
- * changed since we last looked at it.)
- */
-
- if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
-
- /*
- * We need to notify any senders that want to be notified
- * that their sent messages have been received by their
- * intended recipients. We need to do this before updating
- * w_remote_GP.get so that we don't allocate the same message
- * queue entries prematurely (see xpc_allocate_msg()).
- */
- if (atomic_read(&ch->n_to_notify) > 0) {
- /*
- * Notify senders that messages sent have been
- * received and delivered by the other side.
- */
- xpc_notify_senders_sn2(ch, xpMsgDelivered,
- ch_sn2->remote_GP.get);
- }
-
- /*
- * Clear msg->flags in previously sent messages, so that
- * they're ready for xpc_allocate_msg().
- */
- xpc_clear_local_msgqueue_flags_sn2(ch);
-
- ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
-
- dev_dbg(xpc_chan, "w_remote_GP.get changed to %lld, partid=%d, "
- "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
- ch->number);
-
- /*
- * If anyone was waiting for message queue entries to become
- * available, wake them up.
- */
- if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
- wake_up(&ch->msg_allocate_wq);
- }
-
- /*
- * Now check for newly sent messages by the other side. (The remote
- * PUT value will have changed since we last looked at it.)
- */
-
- if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
- /*
- * Clear msg->flags in previously received messages, so that
- * they're ready for xpc_get_deliverable_payload_sn2().
- */
- xpc_clear_remote_msgqueue_flags_sn2(ch);
-
- smp_wmb(); /* ensure flags have been cleared before bte_copy */
- ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
-
- dev_dbg(xpc_chan, "w_remote_GP.put changed to %lld, partid=%d, "
- "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
- ch->number);
-
- npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
- if (npayloads_sent > 0) {
- dev_dbg(xpc_chan, "msgs waiting to be copied and "
- "delivered=%d, partid=%d, channel=%d\n",
- npayloads_sent, ch->partid, ch->number);
-
- if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
- xpc_activate_kthreads(ch, npayloads_sent);
- }
- }
-
- xpc_msgqueue_deref(ch);
-}
-
-static struct xpc_msg_sn2 *
-xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
-{
- struct xpc_partition *part = &xpc_partitions[ch->partid];
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- unsigned long remote_msg_pa;
- struct xpc_msg_sn2 *msg;
- u32 msg_index;
- u32 nmsgs;
- u64 msg_offset;
- enum xp_retval ret;
-
- if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
- /* we were interrupted by a signal */
- return NULL;
- }
-
- while (get >= ch_sn2->next_msg_to_pull) {
-
- /* pull as many messages as are ready and able to be pulled */
-
- msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
-
- DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
- nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
- if (msg_index + nmsgs > ch->remote_nentries) {
- /* ignore the ones that wrap the msg queue for now */
- nmsgs = ch->remote_nentries - msg_index;
- }
-
- msg_offset = msg_index * ch->entry_size;
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
- msg_offset);
- remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;
-
- ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
- nmsgs * ch->entry_size);
- if (ret != xpSuccess) {
-
- dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
- " msg %lld from partition %d, channel=%d, "
- "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
- ch->partid, ch->number, ret);
-
- XPC_DEACTIVATE_PARTITION(part, ret);
-
- mutex_unlock(&ch_sn2->msg_to_pull_mutex);
- return NULL;
- }
-
- ch_sn2->next_msg_to_pull += nmsgs;
- }
-
- mutex_unlock(&ch_sn2->msg_to_pull_mutex);
-
- /* return the message we were looking for */
- msg_offset = (get % ch->remote_nentries) * ch->entry_size;
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);
-
- return msg;
-}
-
-/*
- * Get the next deliverable message's payload.
- */
-static void *
-xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- void *payload = NULL;
- s64 get;
-
- do {
- if (ch->flags & XPC_C_DISCONNECTING)
- break;
-
- get = ch_sn2->w_local_GP.get;
- smp_rmb(); /* guarantee that .get loads before .put */
- if (get == ch_sn2->w_remote_GP.put)
- break;
-
- /* There are messages waiting to be pulled and delivered.
- * We need to try to secure one for ourselves. We'll do this
- * by trying to increment w_local_GP.get and hope that no one
- * else beats us to it. If they do, we'll simply have
- * to try again for the next one.
- */
-
- if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
- /* we got the entry referenced by get */
-
- dev_dbg(xpc_chan, "w_local_GP.get changed to %lld, "
- "partid=%d, channel=%d\n", get + 1,
- ch->partid, ch->number);
-
- /* pull the message from the remote partition */
-
- msg = xpc_pull_remote_msg_sn2(ch, get);
-
- if (msg != NULL) {
- DBUG_ON(msg->number != get);
- DBUG_ON(msg->flags & XPC_M_SN2_DONE);
- DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
-
- payload = &msg->payload;
- }
- break;
- }
-
- } while (1);
-
- return payload;
-}
-
-/*
- * Now we actually send the messages that are ready to be sent by advancing
- * the local message queue's Put value and then send a chctl msgrequest to the
- * recipient partition.
- */
-static void
-xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- s64 put = initial_put + 1;
- int send_msgrequest = 0;
-
- while (1) {
-
- while (1) {
- if (put == ch_sn2->w_local_GP.put)
- break;
-
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
- local_msgqueue + (put %
- ch->local_nentries) *
- ch->entry_size);
-
- if (!(msg->flags & XPC_M_SN2_READY))
- break;
-
- put++;
- }
-
- if (put == initial_put) {
- /* nothing's changed */
- break;
- }
-
- if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
- initial_put) {
- /* someone else beat us to it */
- DBUG_ON(ch_sn2->local_GP->put < initial_put);
- break;
- }
-
- /* we just set the new value of local_GP->put */
-
- dev_dbg(xpc_chan, "local_GP->put changed to %lld, partid=%d, "
- "channel=%d\n", put, ch->partid, ch->number);
-
- send_msgrequest = 1;
-
- /*
- * We need to ensure that the message referenced by
- * local_GP->put is not XPC_M_SN2_READY or that local_GP->put
- * equals w_local_GP.put, so we'll go have a look.
- */
- initial_put = put;
- }
-
- if (send_msgrequest)
- xpc_send_chctl_msgrequest_sn2(ch);
-}
-
-/*
- * Allocate an entry for a message from the message queue associated with the
- * specified channel.
- */
-static enum xp_retval
-xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
- struct xpc_msg_sn2 **address_of_msg)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- enum xp_retval ret;
- s64 put;
-
- /*
- * Get the next available message entry from the local message queue.
- * If none are available, we'll make sure that we grab the latest
- * GP values.
- */
- ret = xpTimeout;
-
- while (1) {
-
- put = ch_sn2->w_local_GP.put;
- smp_rmb(); /* guarantee that .put loads before .get */
- if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
-
- /* There are available message entries. We need to try
- * to secure one for ourselves. We'll do this by trying
- * to increment w_local_GP.put as long as someone else
- * doesn't beat us to it. If they do, we'll have to
- * try again.
- */
- if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
- put) {
- /* we got the entry referenced by put */
- break;
- }
- continue; /* try again */
- }
-
- /*
- * There aren't any available msg entries at this time.
- *
- * In waiting for a message entry to become available,
- * we set a timeout in case the other side is not sending
- * completion interrupts. This lets us fake a notify IRQ
- * that will cause the notify IRQ handler to fetch the latest
- * GP values as if an interrupt was sent by the other side.
- */
- if (ret == xpTimeout)
- xpc_send_chctl_local_msgrequest_sn2(ch);
-
- if (flags & XPC_NOWAIT)
- return xpNoWait;
-
- ret = xpc_allocate_msg_wait(ch);
- if (ret != xpInterrupted && ret != xpTimeout)
- return ret;
- }
-
- /* get the message's address and initialize it */
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
- (put % ch->local_nentries) *
- ch->entry_size);
-
- DBUG_ON(msg->flags != 0);
- msg->number = put;
-
- dev_dbg(xpc_chan, "w_local_GP.put changed to %lld; msg=0x%p, "
- "msg_number=%lld, partid=%d, channel=%d\n", put + 1,
- (void *)msg, msg->number, ch->partid, ch->number);
-
- *address_of_msg = msg;
- return xpSuccess;
-}
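The allocation path above reserves a message slot by atomically bumping w_local_GP.put, but only while fewer than local_nentries messages are outstanding. A standalone sketch of that reservation step, using C11 atomics rather than the kernel's cmpxchg() and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: claim the next producer slot iff the ring is not full. */
static bool claim_slot(_Atomic int64_t *put, int64_t get, int64_t nentries,
		       int64_t *slot)
{
	int64_t old = atomic_load(put);

	while (old - get < nentries) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(put, &old, old + 1)) {
			*slot = old;
			return true;
		}
	}
	return false;
}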
-
-/*
- * Common code that does the actual sending of the message by advancing the
- * local message queue's Put value and sending a chctl msgrequest to the
- * partition the message is being sent to.
- */
-static enum xp_retval
-xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
- u16 payload_size, u8 notify_type, xpc_notify_func func,
- void *key)
-{
- enum xp_retval ret = xpSuccess;
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg = msg;
- struct xpc_notify_sn2 *notify = notify;
- s64 msg_number;
- s64 put;
-
- DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
-
- if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
- return xpPayloadTooBig;
-
- xpc_msgqueue_ref(ch);
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- ret = ch->reason;
- goto out_1;
- }
- if (!(ch->flags & XPC_C_CONNECTED)) {
- ret = xpNotConnected;
- goto out_1;
- }
-
- ret = xpc_allocate_msg_sn2(ch, flags, &msg);
- if (ret != xpSuccess)
- goto out_1;
-
- msg_number = msg->number;
-
- if (notify_type != 0) {
- /*
- * Tell the remote side to send an ACK interrupt when the
- * message has been delivered.
- */
- msg->flags |= XPC_M_SN2_INTERRUPT;
-
- atomic_inc(&ch->n_to_notify);
-
- notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
- notify->func = func;
- notify->key = key;
- notify->type = notify_type;
-
- /* ??? Is a mb() needed here? */
-
- if (ch->flags & XPC_C_DISCONNECTING) {
- /*
- * An error occurred between our last error check and
- * this one. We will try to clear the type field from
- * the notify entry. If we succeed then
- * xpc_disconnect_channel() didn't already process
- * the notify entry.
- */
- if (cmpxchg(&notify->type, notify_type, 0) ==
- notify_type) {
- atomic_dec(&ch->n_to_notify);
- ret = ch->reason;
- }
- goto out_1;
- }
- }
-
- memcpy(&msg->payload, payload, payload_size);
-
- msg->flags |= XPC_M_SN2_READY;
-
- /*
- * The preceding store of msg->flags must occur before the following
- * load of local_GP->put.
- */
- smp_mb();
-
- /* see if the message is next in line to be sent, if so send it */
-
- put = ch_sn2->local_GP->put;
- if (put == msg_number)
- xpc_send_msgs_sn2(ch, put);
-
-out_1:
- xpc_msgqueue_deref(ch);
- return ret;
-}
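The smp_mb() above has to be a full barrier: a store (msg->flags) must be ordered before a later load of an unrelated location (local_GP->put), and a write-only barrier such as smp_wmb() does not forbid that store-to-load reordering. A rough C11 equivalent, with illustrative names:

#include <stdatomic.h>

/* Illustrative only: publish a flag, then read a counter another CPU may
 * have advanced; the seq_cst fence prevents store->load reordering. */
static long publish_then_check(_Atomic int *ready, _Atomic long *put)
{
	atomic_store_explicit(ready, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(put, memory_order_relaxed);
}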
-
-/*
- * Now we actually acknowledge the messages that have been delivered and ack'd
- * by advancing the cached remote message queue's Get value and, if requested,
- * send a chctl msgrequest to the message sender's partition.
- *
- * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
- * that sent the message.
- */
-static void
-xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
-{
- struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- struct xpc_msg_sn2 *msg;
- s64 get = initial_get + 1;
- int send_msgrequest = 0;
-
- while (1) {
-
- while (1) {
- if (get == ch_sn2->w_local_GP.get)
- break;
-
- msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
- remote_msgqueue + (get %
- ch->remote_nentries) *
- ch->entry_size);
-
- if (!(msg->flags & XPC_M_SN2_DONE))
- break;
-
- msg_flags |= msg->flags;
- get++;
- }
-
- if (get == initial_get) {
- /* nothing's changed */
- break;
- }
-
- if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
- initial_get) {
- /* someone else beat us to it */
- DBUG_ON(ch_sn2->local_GP->get <= initial_get);
- break;
- }
-
- /* we just set the new value of local_GP->get */
-
- dev_dbg(xpc_chan, "local_GP->get changed to %lld, partid=%d, "
- "channel=%d\n", get, ch->partid, ch->number);
-
- send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);
-
- /*
- * We need to ensure that the message referenced by
- * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get
- * equals w_local_GP.get, so we'll go have a look.
- */
- initial_get = get;
- }
-
- if (send_msgrequest)
- xpc_send_chctl_msgrequest_sn2(ch);
-}
-
-static void
-xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
-{
- struct xpc_msg_sn2 *msg;
- s64 msg_number;
- s64 get;
-
- msg = container_of(payload, struct xpc_msg_sn2, payload);
- msg_number = msg->number;
-
- dev_dbg(xpc_chan, "msg=0x%p, msg_number=%lld, partid=%d, channel=%d\n",
- (void *)msg, msg_number, ch->partid, ch->number);
-
- DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) !=
- msg_number % ch->remote_nentries);
- DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
- DBUG_ON(msg->flags & XPC_M_SN2_DONE);
-
- msg->flags |= XPC_M_SN2_DONE;
-
- /*
- * The preceding store of msg->flags must occur before the following
- * load of local_GP->get.
- */
- smp_mb();
-
- /*
- * See if this message is next in line to be acknowledged as having
- * been delivered.
- */
- get = ch->sn.sn2.local_GP->get;
- if (get == msg_number)
- xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
-}
-
-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
- .setup_partitions = xpc_setup_partitions_sn2,
- .teardown_partitions = xpc_teardown_partitions_sn2,
- .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
- .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
- .setup_rsvd_page = xpc_setup_rsvd_page_sn2,
-
- .allow_hb = xpc_allow_hb_sn2,
- .disallow_hb = xpc_disallow_hb_sn2,
- .disallow_all_hbs = xpc_disallow_all_hbs_sn2,
- .increment_heartbeat = xpc_increment_heartbeat_sn2,
- .offline_heartbeat = xpc_offline_heartbeat_sn2,
- .online_heartbeat = xpc_online_heartbeat_sn2,
- .heartbeat_init = xpc_heartbeat_init_sn2,
- .heartbeat_exit = xpc_heartbeat_exit_sn2,
- .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,
-
- .request_partition_activation =
- xpc_request_partition_activation_sn2,
- .request_partition_reactivation =
- xpc_request_partition_reactivation_sn2,
- .request_partition_deactivation =
- xpc_request_partition_deactivation_sn2,
- .cancel_partition_deactivation_request =
- xpc_cancel_partition_deactivation_request_sn2,
-
- .setup_ch_structures = xpc_setup_ch_structures_sn2,
- .teardown_ch_structures = xpc_teardown_ch_structures_sn2,
-
- .make_first_contact = xpc_make_first_contact_sn2,
-
- .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
- .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
- .send_chctl_closereply = xpc_send_chctl_closereply_sn2,
- .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
- .send_chctl_openreply = xpc_send_chctl_openreply_sn2,
- .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
- .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,
-
- .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,
-
- .setup_msg_structures = xpc_setup_msg_structures_sn2,
- .teardown_msg_structures = xpc_teardown_msg_structures_sn2,
-
- .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
- .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
- .partition_engaged = xpc_partition_engaged_sn2,
- .any_partition_engaged = xpc_any_partition_engaged_sn2,
- .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,
-
- .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
- .send_payload = xpc_send_payload_sn2,
- .get_deliverable_payload = xpc_get_deliverable_payload_sn2,
- .received_payload = xpc_received_payload_sn2,
- .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
-};
-
-int
-xpc_init_sn2(void)
-{
- int ret;
- size_t buf_size;
-
- xpc_arch_ops = xpc_arch_ops_sn2;
-
- if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
- dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
- "larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
- return -E2BIG;
- }
-
- buf_size = max(XPC_RP_VARS_SIZE,
- XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
- xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
- GFP_KERNEL,
- &xpc_remote_copy_buffer_base_sn2);
- if (xpc_remote_copy_buffer_sn2 == NULL) {
- dev_err(xpc_part, "can't get memory for remote copy buffer\n");
- return -ENOMEM;
- }
-
- /* open up protections for IPI and [potentially] amo operations */
- xpc_allow_IPI_ops_sn2();
- xpc_allow_amo_ops_shub_wars_1_1_sn2();
-
- /*
- * This is safe to do before the xpc_hb_checker thread has started
- * because the handler releases a wait queue. If an interrupt is
- * received before the thread is waiting, it will not go to sleep,
- * but rather immediately process the interrupt.
- */
- ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
- "xpc hb", NULL);
- if (ret != 0) {
- dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
- "errno=%d\n", -ret);
- xpc_disallow_IPI_ops_sn2();
- kfree(xpc_remote_copy_buffer_base_sn2);
- }
- return ret;
-}
-
-void
-xpc_exit_sn2(void)
-{
- free_irq(SGI_XPC_ACTIVATE, NULL);
- xpc_disallow_IPI_ops_sn2();
- kfree(xpc_remote_copy_buffer_base_sn2);
-}
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 0c6de97dd347..98c60f11b76b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -27,7 +27,7 @@
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
@@ -35,7 +35,7 @@
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
-#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#if defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
__u64 vector : 8,
delivery_mode : 3,
@@ -48,6 +48,8 @@ struct uv_IO_APIC_route_entry {
__reserved_2 : 15,
dest : 32;
};
+
+#define sn_partition_id 0
#endif
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
@@ -119,7 +121,7 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
mq->irq = SGI_XPC_ACTIVATE;
else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
@@ -142,7 +144,7 @@ xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
#if defined CONFIG_X86_64
uv_teardown_irq(mq->irq);
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
int mmr_pnode;
unsigned long mmr_value;
@@ -160,7 +162,7 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
-#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#if defined CONFIG_IA64_SGI_UV
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
@@ -195,7 +197,7 @@ xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
@@ -694,7 +696,7 @@ again:
if (gru_mq_desc == NULL) {
gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (gru_mq_desc == NULL) {
ret = xpNoMemory;
goto done;
@@ -794,7 +796,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
else
ret = xpBiosError;
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#elif defined CONFIG_IA64_SGI_UV
status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
if (status == SALRET_OK)
ret = xpSuccess;
@@ -1678,7 +1680,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
-static struct xpc_arch_operations xpc_arch_ops_uv = {
+static const struct xpc_arch_operations xpc_arch_ops_uv = {
.setup_partitions = xpc_setup_partitions_uv,
.teardown_partitions = xpc_teardown_partitions_uv,
.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 44d750d98bc8..f7d610a22347 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -515,7 +515,7 @@ xpnet_init(void)
{
int result;
- if (!is_shub() && !is_uv())
+ if (!is_uv())
return -ENODEV;
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
deleted file mode 100644
index ee120dcbb3e6..000000000000
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * drivers/misc/spear13xx_pcie_gadget.c
- *
- * Copyright (C) 2010 ST Microelectronics
- * Pratyush Anand<pratyush.anand@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pci_regs.h>
-#include <linux/configfs.h>
-#include <mach/pcie.h>
-#include <mach/misc_regs.h>
-
-#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
-/* In the current implementation, address translation is done using IN0 only,
- * so the IN1 start address and the IN0 end address have been kept the same.
- */
-#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
-#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
-#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
-#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
-#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
-/* Keep default BAR size as 4K */
-/* AORAM would be mapped by default */
-#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
-
-#define INT_TYPE_NO_INT 0
-#define INT_TYPE_INTX 1
-#define INT_TYPE_MSI 2
-struct spear_pcie_gadget_config {
- void __iomem *base;
- void __iomem *va_app_base;
- void __iomem *va_dbi_base;
- char int_type[10];
- ulong requested_msi;
- ulong configured_msi;
- ulong bar0_size;
- ulong bar0_rw_offset;
- void __iomem *va_bar0_address;
-};
-
-struct pcie_gadget_target {
- struct configfs_subsystem subsys;
- struct spear_pcie_gadget_config config;
-};
-
-struct pcie_gadget_target_attr {
- struct configfs_attribute attr;
- ssize_t (*show)(struct spear_pcie_gadget_config *config,
- char *buf);
- ssize_t (*store)(struct spear_pcie_gadget_config *config,
- const char *buf,
- size_t count);
-};
-
-static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
-{
- /* Enable DBI access */
- writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_armisc);
- writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_awmisc);
-
-}
-
-static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
-{
- /* disable DBI access */
- writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_armisc);
- writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
- &app_reg->slv_awmisc);
-
-}
-
-static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
- int where, int size, u32 *val)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong va_address;
-
- /* Enable DBI access */
- enable_dbi_access(app_reg);
-
- va_address = (ulong)config->va_dbi_base + (where & ~0x3);
-
- *val = readl(va_address);
-
- if (size == 1)
- *val = (*val >> (8 * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (8 * (where & 3))) & 0xffff;
-
- /* Disable DBI access */
- disable_dbi_access(app_reg);
-}
-
-static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
- int where, int size, u32 val)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong va_address;
-
- /* Enable DBI access */
- enable_dbi_access(app_reg);
-
- va_address = (ulong)config->va_dbi_base + (where & ~0x3);
-
- if (size == 4)
- writel(val, va_address);
- else if (size == 2)
- writew(val, va_address + (where & 2));
- else if (size == 1)
- writeb(val, va_address + (where & 3));
-
- /* Disable DBI access */
- disable_dbi_access(app_reg);
-}
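A hypothetical use of the two helpers above, showing the sub-word access sizes they support; the register offsets come from <linux/pci_regs.h>, which this file already includes. This snippet is not part of the original driver.

/* Hypothetical usage only. */
static void example_dbi_usage(struct spear_pcie_gadget_config *config)
{
	u32 vendor, cmd;

	spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &vendor);	/* 16-bit read */
	spear_dbi_read_reg(config, PCI_COMMAND, 2, &cmd);
	cmd |= PCI_COMMAND_MEMORY;
	spear_dbi_write_reg(config, PCI_COMMAND, 2, cmd);	/* 16-bit write */
}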
-
-#define PCI_FIND_CAP_TTL 48
-
-static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
- u32 pos, int cap, int *ttl)
-{
- u32 id;
-
- while ((*ttl)--) {
- spear_dbi_read_reg(config, pos, 1, &pos);
- if (pos < 0x40)
- break;
- pos &= ~3;
- spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos += PCI_CAP_LIST_NEXT;
- }
- return 0;
-}
-
-static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
- u32 pos, int cap)
-{
- int ttl = PCI_FIND_CAP_TTL;
-
- return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
-}
-
-static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
- u8 hdr_type)
-{
- u32 status;
-
- spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
- if (!(status & PCI_STATUS_CAP_LIST))
- return 0;
-
- switch (hdr_type) {
- case PCI_HEADER_TYPE_NORMAL:
- case PCI_HEADER_TYPE_BRIDGE:
- return PCI_CAPABILITY_LIST;
- case PCI_HEADER_TYPE_CARDBUS:
- return PCI_CB_CAPABILITY_LIST;
- default:
- return 0;
- }
-
- return 0;
-}
-
-/*
- * Tell if a device supports a given PCI capability.
- * Returns the address of the requested capability structure within the
- * device's PCI configuration space or 0 in case the device does not
- * support it. Possible values for @cap:
- *
- * %PCI_CAP_ID_PM Power Management
- * %PCI_CAP_ID_AGP Accelerated Graphics Port
- * %PCI_CAP_ID_VPD Vital Product Data
- * %PCI_CAP_ID_SLOTID Slot Identification
- * %PCI_CAP_ID_MSI Message Signalled Interrupts
- * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
- * %PCI_CAP_ID_PCIX PCI-X
- * %PCI_CAP_ID_EXP PCI Express
- */
-static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
- int cap)
-{
- u32 pos;
- u32 hdr_type;
-
- spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
-
- pos = pci_find_own_cap_start(config, hdr_type);
- if (pos)
- pos = pci_find_own_next_cap(config, pos, cap);
-
- return pos;
-}
-
-static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
-{
- return 0;
-}
-
-/*
- * configfs interfaces show/store functions
- */
-
-static struct spear_pcie_gadget_config *to_target(struct config_item *item)
-{
-	return item ?
-		&container_of(to_configfs_subsystem(to_config_group(item)),
-			struct pcie_gadget_target, subsys)->config : NULL;
-}
-
-static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
- return sprintf(buf, "UP");
- else
- return sprintf(buf, "DOWN");
-}
-
-static ssize_t pcie_gadget_link_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- if (sysfs_streq(buf, "UP"))
- writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
- &app_reg->app_ctrl_0);
- else if (sysfs_streq(buf, "DOWN"))
- writel(readl(&app_reg->app_ctrl_0)
- & ~(1 << APP_LTSSM_ENABLE_ID),
- &app_reg->app_ctrl_0);
- else
- return -EINVAL;
- return count;
-}
-
-static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
-{
- return sprintf(buf, "%s", to_target(item)->int_type);
-}
-
-static ssize_t pcie_gadget_int_type_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- u32 cap, vec, flags;
- ulong vector;
-
- if (sysfs_streq(buf, "INTA"))
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
-
- else if (sysfs_streq(buf, "MSI")) {
- vector = config->requested_msi;
- vec = 0;
- while (vector > 1) {
- vector /= 2;
- vec++;
- }
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
- cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
- spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
- flags &= ~PCI_MSI_FLAGS_QMASK;
- flags |= vec << 1;
- spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
- } else
- return -EINVAL;
-
- strcpy(config->int_type, buf);
-
- return count;
-}
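/*
 * The divide-by-two loop above computes floor(log2(requested_msi)) and
 * places it in the Multiple Message Capable field (bits 3:1) of the MSI
 * flags, so e.g. requested_msi = 8 gives vec = 3 and the endpoint
 * advertises 2^3 = 8 vectors. A minimal equivalent sketch (the helper
 * name is illustrative), relying on ilog2() from <linux/log2.h> and
 * assuming requested_msi has already been clamped to a non-zero value:
 */
static u32 spear_msi_mmc_field(ulong requested_msi)
{
	/* ilog2() rounds down, matching the divide-by-two loop above */
	return ilog2(requested_msi) << 1;
}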
-
-static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
- u32 cap, vec, flags;
- ulong vector;
-
- if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
- != (1 << CFG_MSI_EN_ID))
- vector = 0;
- else {
- cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
- spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
- flags &= ~PCI_MSI_FLAGS_QSIZE;
- vec = flags >> 4;
- vector = 1;
- while (vec--)
- vector *= 2;
- }
- config->configured_msi = vector;
-
- return sprintf(buf, "%lu", vector);
-}
-
-static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
- const char *buf, size_t count)
-{
- int ret;
-
- ret = kstrtoul(buf, 0, &to_target(item)->requested_msi);
- if (ret)
- return ret;
-
- if (to_target(item)->requested_msi > 32)
- to_target(item)->requested_msi = 32;
-
- return count;
-}
-
-static ssize_t pcie_gadget_inta_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
- ulong en;
- int ret;
-
- ret = kstrtoul(buf, 0, &en);
- if (ret)
- return ret;
-
- if (en)
- writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
- &app_reg->app_ctrl_0);
- else
- writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
- &app_reg->app_ctrl_0);
-
- return count;
-}
-
-static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong vector;
- u32 ven_msi;
- int ret;
-
- ret = kstrtoul(buf, 0, &vector);
- if (ret)
- return ret;
-
- if (!config->configured_msi)
- return -EINVAL;
-
- if (vector >= config->configured_msi)
- return -EINVAL;
-
- ven_msi = readl(&app_reg->ven_msi_1);
- ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
- ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
- ven_msi &= ~VEN_MSI_TC_MASK;
- ven_msi |= 0 << VEN_MSI_TC_ID;
- ven_msi &= ~VEN_MSI_VECTOR_MASK;
- ven_msi |= vector << VEN_MSI_VECTOR_ID;
-
- /* generating interrupt for msi vector */
- ven_msi |= VEN_MSI_REQ_EN;
- writel(ven_msi, &app_reg->ven_msi_1);
- udelay(1);
- ven_msi &= ~VEN_MSI_REQ_EN;
- writel(ven_msi, &app_reg->ven_msi_1);
-
- return count;
-}
-
-static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
-{
- u32 id;
-
- spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);
-
- return sprintf(buf, "%x", id);
-}
-
-static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong id;
- int ret;
-
- ret = kstrtoul(buf, 0, &id);
- if (ret)
- return ret;
-
- spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);
-
- return count;
-}
-
-static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
-{
- u32 id;
-
- spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);
-
- return sprintf(buf, "%x", id);
-}
-
-static ssize_t pcie_gadget_device_id_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong id;
- int ret;
-
- ret = kstrtoul(buf, 0, &id);
- if (ret)
- return ret;
-
- spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
-{
- return sprintf(buf, "%lx", to_target(item)->bar0_size);
-}
-
-static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- ulong size;
- u32 pos, pos1;
- u32 no_of_bit = 0;
- int ret;
-
- ret = kstrtoul(buf, 0, &size);
- if (ret)
- return ret;
-
- /* min bar size is 256 */
- if (size <= 0x100)
- size = 0x100;
- /* max bar size is 1MB*/
- else if (size >= 0x100000)
- size = 0x100000;
- else {
- pos = 0;
- pos1 = 0;
- while (pos < 21) {
- pos = find_next_bit((ulong *)&size, 21, pos);
- if (pos != 21)
- pos1 = pos + 1;
- pos++;
- no_of_bit++;
- }
- if (no_of_bit == 2)
- pos1--;
-
- size = 1 << pos1;
- }
- config->bar0_size = size;
- spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
-
- return count;
-}
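/*
 * The find_next_bit() walk above amounts to rounding the requested BAR0
 * size up to the next power of two (an exact power of two is kept as-is)
 * after clamping to the supported 256 byte .. 1 MB window. A minimal
 * sketch of the same result (the helper name is illustrative), using
 * clamp() and roundup_pow_of_two() from <linux/minmax.h> and
 * <linux/log2.h>:
 */
static ulong spear_bar0_round_size(ulong size)
{
	size = clamp(size, 0x100UL, 0x100000UL);	/* 256 B .. 1 MB */
	return roundup_pow_of_two(size);		/* e.g. 0x180 -> 0x200 */
}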
-
-static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
- char *buf)
-{
- struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
-
- u32 address = readl(&app_reg->pim0_mem_addr_start);
-
- return sprintf(buf, "%x", address);
-}
-
-static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
- ulong address;
- int ret;
-
- ret = kstrtoul(buf, 0, &address);
- if (ret)
- return ret;
-
- address &= ~(config->bar0_size - 1);
- if (config->va_bar0_address)
- iounmap(config->va_bar0_address);
- config->va_bar0_address = ioremap(address, config->bar0_size);
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- writel(address, &app_reg->pim0_mem_addr_start);
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
- char *buf)
-{
- return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
-}
-
-static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
- const char *buf, size_t count)
-{
- ulong offset;
- int ret;
-
- ret = kstrtoul(buf, 0, &offset);
- if (ret)
- return ret;
-
- if (offset % 4)
- return -EINVAL;
-
- to_target(item)->bar0_rw_offset = offset;
-
- return count;
-}
-
-static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- ulong data;
-
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
-
- return sprintf(buf, "%lx", data);
-}
-
-static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
- const char *buf, size_t count)
-{
- struct spear_pcie_gadget_config *config = to_target(item);
- ulong data;
- int ret;
-
- ret = kstrtoul(buf, 0, &data);
- if (ret)
- return ret;
-
- if (!config->va_bar0_address)
- return -ENOMEM;
-
- writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
-
- return count;
-}
-
-CONFIGFS_ATTR(pcie_gadget_, link);
-CONFIGFS_ATTR(pcie_gadget_, int_type);
-CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
-CONFIGFS_ATTR_WO(pcie_gadget_, inta);
-CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
-CONFIGFS_ATTR(pcie_gadget_, vendor_id);
-CONFIGFS_ATTR(pcie_gadget_, device_id);
-CONFIGFS_ATTR(pcie_gadget_, bar0_size);
-CONFIGFS_ATTR(pcie_gadget_, bar0_address);
-CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
-CONFIGFS_ATTR(pcie_gadget_, bar0_data);
-
-static struct configfs_attribute *pcie_gadget_target_attrs[] = {
- &pcie_gadget_attr_link,
- &pcie_gadget_attr_int_type,
- &pcie_gadget_attr_no_of_msi,
- &pcie_gadget_attr_inta,
- &pcie_gadget_attr_send_msi,
- &pcie_gadget_attr_vendor_id,
- &pcie_gadget_attr_device_id,
- &pcie_gadget_attr_bar0_size,
- &pcie_gadget_attr_bar0_address,
- &pcie_gadget_attr_bar0_rw_offset,
- &pcie_gadget_attr_bar0_data,
- NULL,
-};
-
-static struct config_item_type pcie_gadget_target_type = {
- .ct_attrs = pcie_gadget_target_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
-{
- struct pcie_app_reg __iomem *app_reg = config->va_app_base;
-
- /*setup registers for outbound translation */
-
- writel(config->base, &app_reg->in0_mem_addr_start);
- writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
- &app_reg->in0_mem_addr_limit);
- writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
- writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
- &app_reg->in1_mem_addr_limit);
- writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
- writel(app_reg->in_io_addr_start + IN_IO_SIZE,
- &app_reg->in_io_addr_limit);
- writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
- writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
- &app_reg->in_cfg0_addr_limit);
- writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
- writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
- &app_reg->in_cfg1_addr_limit);
- writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
- writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
- &app_reg->in_msg_addr_limit);
-
- writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
- writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
- writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
-
- /*setup registers for inbound translation */
-
- /* Keep AORAM mapped at BAR0 as default */
- config->bar0_size = INBOUND_ADDR_MASK + 1;
- spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
- spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
- config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
- config->bar0_size);
-
- writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
- writel(0, &app_reg->pim1_mem_addr_start);
- writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
-
- writel(0x0, &app_reg->pim_io_addr_start);
- writel(0x0, &app_reg->pim_io_addr_start);
- writel(0x0, &app_reg->pim_rom_addr_start);
-
- writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
- | ((u32)1 << REG_TRANSLATION_ENABLE),
- &app_reg->app_ctrl_0);
- /* disable all rx interrupts */
- writel(0, &app_reg->int_mask);
-
- /* Select INTA as default*/
- spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
-}
-
-static int spear_pcie_gadget_probe(struct platform_device *pdev)
-{
- struct resource *res0, *res1;
- unsigned int status = 0;
- int irq;
- struct clk *clk;
- static struct pcie_gadget_target *target;
- struct spear_pcie_gadget_config *config;
- struct config_item *cg_item;
- struct configfs_subsystem *subsys;
-
- target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
- if (!target) {
- dev_err(&pdev->dev, "out of memory\n");
- return -ENOMEM;
- }
-
- cg_item = &target->subsys.su_group.cg_item;
- sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
- cg_item->ci_type = &pcie_gadget_target_type;
- config = &target->config;
-
- /* get resource for application registers*/
- res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
- if (IS_ERR(config->va_app_base)) {
- dev_err(&pdev->dev, "ioremap fail\n");
- return PTR_ERR(config->va_app_base);
- }
-
- /* get resource for dbi registers*/
- res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- config->base = (void __iomem *)res1->start;
-
- config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
- if (IS_ERR(config->va_dbi_base)) {
- dev_err(&pdev->dev, "ioremap fail\n");
- return PTR_ERR(config->va_dbi_base);
- }
-
- platform_set_drvdata(pdev, target);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no update irq?\n");
- return irq;
- }
-
- status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
- 0, pdev->name, NULL);
- if (status) {
- dev_err(&pdev->dev,
- "pcie gadget interrupt IRQ%d already claimed\n", irq);
- return status;
- }
-
- /* Register configfs hooks */
- subsys = &target->subsys;
- config_group_init(&subsys->su_group);
- mutex_init(&subsys->su_mutex);
- status = configfs_register_subsystem(subsys);
- if (status)
- return status;
-
- /*
- * init basic pcie application registers
- * do not enable the clock if it is PCIE0. Ideally, all controllers
- * should be independent of each other with respect to clocks, but PCIE1
- * and PCIE2 depend on PCIE0, so the PCIE0 clk is provided during board init.
- */
- if (pdev->id == 1) {
- /*
- * Ideally CFG Clock should have been also enabled here. But
- * it is done currently during board init routine
- */
- clk = clk_get_sys("pcie1", NULL);
- if (IS_ERR(clk)) {
- pr_err("%s:couldn't get clk for pcie1\n", __func__);
- return PTR_ERR(clk);
- }
- status = clk_enable(clk);
- if (status) {
- pr_err("%s:couldn't enable clk for pcie1\n", __func__);
- return status;
- }
- } else if (pdev->id == 2) {
- /*
- * Ideally CFG Clock should have been also enabled here. But
- * it is done currently during board init routine
- */
- clk = clk_get_sys("pcie2", NULL);
- if (IS_ERR(clk)) {
- pr_err("%s:couldn't get clk for pcie2\n", __func__);
- return PTR_ERR(clk);
- }
- status = clk_enable(clk);
- if (status) {
- pr_err("%s:couldn't enable clk for pcie2\n", __func__);
- return status;
- }
- }
- spear13xx_pcie_device_init(config);
-
- return 0;
-}
-
-static int spear_pcie_gadget_remove(struct platform_device *pdev)
-{
- static struct pcie_gadget_target *target;
-
- target = platform_get_drvdata(pdev);
-
- configfs_unregister_subsystem(&target->subsys);
-
- return 0;
-}
-
-static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
-{
-}
-
-static struct platform_driver spear_pcie_gadget_driver = {
- .probe = spear_pcie_gadget_probe,
- .remove = spear_pcie_gadget_remove,
- .shutdown = spear_pcie_gadget_shutdown,
- .driver = {
- .name = "pcie-gadget-spear",
- .bus = &platform_bus_type
- },
-};
-
-module_platform_driver(spear_pcie_gadget_driver);
-
-MODULE_ALIAS("platform:pcie-gadget-spear");
-MODULE_AUTHOR("Pratyush Anand");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 8840299420e0..5e6be1527571 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
}
if (page) {
- vmballoon_mark_page_offline(page, ctl->page_size);
/* Success. Add the page to the list and continue. */
list_add(&page->lru, &ctl->pages);
continue;
@@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list,
list_for_each_entry_safe(page, tmp, page_list, lru) {
list_del(&page->lru);
- vmballoon_mark_page_online(page, page_size);
__free_pages(page, vmballoon_page_order(page_size));
}
@@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
enum vmballoon_page_size_type page_size)
{
unsigned long flags;
+ struct page *page;
if (page_size == VMW_BALLOON_4K_PAGE) {
balloon_page_list_enqueue(&b->b_dev_info, pages);
@@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
* for the balloon compaction mechanism.
*/
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+
+ list_for_each_entry(page, pages, lru) {
+ vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
+ }
+
list_splice_init(pages, &b->huge_pages);
__count_vm_events(BALLOON_INFLATE, *n_pages *
vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
@@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
/* 2MB pages */
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
+ vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
+
list_move(&page->lru, pages);
if (++i == n_req_pages)
break;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index bad89b6e0802..345addd9306d 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
if (entry->run_delayed) {
- schedule_work(&entry->work);
+ if (!schedule_work(&entry->work))
+ vmci_resource_put(resource);
} else {
entry->notify_cb(entry->client_data);
vmci_resource_put(resource);
@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
vmci_resource_get(&dbell->resource);
- schedule_work(&dbell->work);
+ if (!schedule_work(&dbell->work))
+ vmci_resource_put(&dbell->resource);
} else {
dbell->notify_cb(dbell->client_data);
}
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index f257d3812110..11835969e982 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -19,11 +19,150 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/highmem.h>
+
+#include <uapi/misc/xilinx_sdfec.h>
#define DEV_NAME_LEN 12
-static struct idr dev_idr;
-static struct mutex dev_idr_lock;
+static DEFINE_IDA(dev_nrs);
+
+/* Xilinx SDFEC Register Map */
+/* CODE_WRI_PROTECT Register */
+#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
+
+/* ACTIVE Register */
+#define XSDFEC_ACTIVE_ADDR (0x8)
+#define XSDFEC_IS_ACTIVITY_SET (0x1)
+
+/* AXIS_WIDTH Register */
+#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
+#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
+#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
+#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
+#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
+
+/* AXIS_ENABLE Register */
+#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
+#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
+#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
+#define XSDFEC_AXIS_ENABLE_MASK \
+ (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
+
+/* FEC_CODE Register */
+#define XSDFEC_FEC_CODE_ADDR (0x14)
+
+/* ORDER Register Map */
+#define XSDFEC_ORDER_ADDR (0x18)
+
+/* Interrupt Status Register */
+#define XSDFEC_ISR_ADDR (0x1C)
+/* Interrupt Status Register Bit Mask */
+#define XSDFEC_ISR_MASK (0x3F)
+
+/* Write Only - Interrupt Enable Register */
+#define XSDFEC_IER_ADDR (0x20)
+/* Write Only - Interrupt Disable Register */
+#define XSDFEC_IDR_ADDR (0x24)
+/* Read Only - Interrupt Mask Register */
+#define XSDFEC_IMR_ADDR (0x28)
+
+/* ECC Interrupt Status Register */
+#define XSDFEC_ECC_ISR_ADDR (0x2C)
+/* Single Bit Errors */
+#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
+/* PL Initialize Single Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
+/* Multi Bit Errors */
+#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
+/* PL Initialize Multi Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
+/* Multi Bit Error to Event Shift */
+#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
+/* PL Initialize Multi Bit Error to Event Shift */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
+/* ECC Interrupt Status Bit Mask */
+#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status PL Initialize Bit Mask */
+#define XSDFEC_PL_INIT_ECC_ISR_MASK \
+ (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status All Bit Mask */
+#define XSDFEC_ALL_ECC_ISR_MASK \
+ (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
+/* ECC Interrupt Status Single Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
+ (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
+/* ECC Interrupt Status Multi Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
+ (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+
+/* Write Only - ECC Interrupt Enable Register */
+#define XSDFEC_ECC_IER_ADDR (0x30)
+/* Write Only - ECC Interrupt Disable Register */
+#define XSDFEC_ECC_IDR_ADDR (0x34)
+/* Read Only - ECC Interrupt Mask Register */
+#define XSDFEC_ECC_IMR_ADDR (0x38)
+
+/* BYPASS Register */
+#define XSDFEC_BYPASS_ADDR (0x3C)
+
+/* Turbo Code Register */
+#define XSDFEC_TURBO_ADDR (0x100)
+#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
+#define XSDFEC_TURBO_SCALE_BIT_POS (8)
+#define XSDFEC_TURBO_SCALE_MAX (15)
+
+/* REG0 Register */
+#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
+#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
+#define XSDFEC_REG0_N_MIN (4)
+#define XSDFEC_REG0_N_MAX (32768)
+#define XSDFEC_REG0_N_MUL_P (256)
+#define XSDFEC_REG0_N_LSB (0)
+#define XSDFEC_REG0_K_MIN (2)
+#define XSDFEC_REG0_K_MAX (32766)
+#define XSDFEC_REG0_K_MUL_P (256)
+#define XSDFEC_REG0_K_LSB (16)
+
+/* REG1 Register */
+#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
+#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
+#define XSDFEC_REG1_PSIZE_MIN (2)
+#define XSDFEC_REG1_PSIZE_MAX (512)
+#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
+#define XSDFEC_REG1_NO_PACKING_LSB (10)
+#define XSDFEC_REG1_NM_MASK (0xFF800)
+#define XSDFEC_REG1_NM_LSB (11)
+#define XSDFEC_REG1_BYPASS_MASK (0x100000)
+
+/* REG2 Register */
+#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
+#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
+#define XSDFEC_REG2_NLAYERS_MIN (1)
+#define XSDFEC_REG2_NLAYERS_MAX (256)
+#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
+#define XSDFEC_REG2_NMQC_LSB (9)
+#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
+#define XSDFEC_REG2_NORM_TYPE_LSB (20)
+#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
+#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
+#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
+#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
+#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
+#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
+
+/* REG3 Register */
+#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
+#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
+#define XSDFEC_REG3_LA_OFF_LSB (8)
+#define XSDFEC_REG3_QC_OFF_LSB (16)
+
+#define XSDFEC_LDPC_REG_JUMP (0x10)
+#define XSDFEC_REG_WIDTH_JUMP (4)
+
+/* The maximum number of pinned pages */
+#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
/**
* struct xsdfec_clks - For managing SD-FEC clocks
@@ -49,31 +188,1043 @@ struct xsdfec_clks {
/**
* struct xsdfec_dev - Driver data for SDFEC
- * @regs: device physical base address
- * @dev: pointer to device struct
* @miscdev: Misc device handle
- * @error_data_lock: Error counter and states spinlock
* @clks: Clocks managed by the SDFEC driver
+ * @waitq: Driver wait queue
+ * @config: Configuration of the SDFEC device
* @dev_name: Device name
+ * @flags: spinlock flags
+ * @regs: device physical base address
+ * @dev: pointer to device struct
+ * @state: State of the SDFEC device
+ * @error_data_lock: Error counter and states spinlock
* @dev_id: Device ID
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ * @irq: IRQ number
+ * @state_updated: indicates State updated by interrupt handler
+ * @stats_updated: indicates Stats updated by interrupt handler
+ * @intr_enabled: indicates IRQ enabled
*
* This structure contains necessary state for SDFEC driver to operate
*/
struct xsdfec_dev {
+ struct miscdevice miscdev;
+ struct xsdfec_clks clks;
+ wait_queue_head_t waitq;
+ struct xsdfec_config config;
+ char dev_name[DEV_NAME_LEN];
+ unsigned long flags;
void __iomem *regs;
struct device *dev;
- struct miscdevice miscdev;
+ enum xsdfec_state state;
/* Spinlock to protect state_updated and stats_updated */
spinlock_t error_data_lock;
- struct xsdfec_clks clks;
- char dev_name[DEV_NAME_LEN];
int dev_id;
+ u32 isr_err_count;
+ u32 cecc_count;
+ u32 uecc_count;
+ int irq;
+ bool state_updated;
+ bool stats_updated;
+ bool intr_enabled;
};
+static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
+ u32 value)
+{
+ dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
+ iowrite32(value, xsdfec->regs + addr);
+}
+
+static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
+{
+ u32 rval;
+
+ rval = ioread32(xsdfec->regs + addr);
+ dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
+ return rval;
+}
+
+static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
+ u32 reg_offset, u32 bit_num,
+ char *config_value)
+{
+ u32 reg_val;
+ u32 bit_mask = 1 << bit_num;
+
+ reg_val = xsdfec_regread(xsdfec, reg_offset);
+ *config_value = (reg_val & bit_mask) > 0;
+}
+
+static void update_config_from_hw(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ bool sdfec_started;
+
+ /* Update the Order */
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
+ xsdfec->config.order = reg_value;
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
+ 0, /* Bit Number, maybe change to mask */
+ &xsdfec->config.bypass);
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
+ 0, /* Bit Number */
+ &xsdfec->config.code_wr_protect);
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ xsdfec->config.irq.enable_ecc_isr =
+ (reg_value & XSDFEC_ECC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
+ if (sdfec_started)
+ xsdfec->state = XSDFEC_STARTED;
+ else
+ xsdfec->state = XSDFEC_STOPPED;
+}
+
+static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_status status;
+ int err;
+
+ memset(&status, 0, sizeof(status));
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ status.state = xsdfec->state;
+ xsdfec->state_updated = false;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
+ XSDFEC_IS_ACTIVITY_SET);
+
+ err = copy_to_user(arg, &status, sizeof(status));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+
+ err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if (mask_read & XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling irq with IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disabling irq with IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC enabling ECC irq with ECC IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_ECC_ISR_MASK) ||
+ ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_PL_INIT_ECC_ISR_MASK))) {
+ dev_dbg(xsdfec->dev,
+ "SDFEC disable ECC irq with ECC IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_irq irq;
+ int err;
+ int isr_err;
+ int ecc_err;
+
+ err = copy_from_user(&irq, arg, sizeof(irq));
+ if (err)
+ return -EFAULT;
+
+ /* Setup tlast related IRQ */
+ isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
+ if (!isr_err)
+ xsdfec->config.irq.enable_isr = irq.enable_isr;
+
+ /* Setup ECC related IRQ */
+ ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
+ if (!ecc_err)
+ xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
+
+ if (isr_err < 0 || ecc_err < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_turbo turbo;
+ int err;
+ u32 turbo_write;
+
+ err = copy_from_user(&turbo, arg, sizeof(turbo));
+ if (err)
+ return -EFAULT;
+
+ if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
+ return -EINVAL;
+
+ if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
+ return -EINVAL;
+
+ /* Check to see what device tree says about the FEC codes */
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE)
+ return -EIO;
+
+ turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
+ << XSDFEC_TURBO_SCALE_BIT_POS) |
+ turbo.alg;
+ xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
+ return err;
+}
+
+static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ u32 reg_value;
+ struct xsdfec_turbo turbo_params;
+ int err;
+
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE)
+ return -EIO;
+
+ memset(&turbo_params, 0, sizeof(turbo_params));
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
+
+ turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
+ XSDFEC_TURBO_SCALE_BIT_POS;
+ turbo_params.alg = reg_value & 0x1;
+
+ err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
+ u32 offset)
+{
+ u32 wdata;
+
+ if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
+ (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
+ dev_dbg(xsdfec->dev, "N value is not in range");
+ return -EINVAL;
+ }
+ n <<= XSDFEC_REG0_N_LSB;
+
+ if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
+ (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
+ dev_dbg(xsdfec->dev, "K value is not in range");
+ return -EINVAL;
+ }
+ k = k << XSDFEC_REG0_K_LSB;
+ wdata = k | n;
+
+ if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
+ u32 no_packing, u32 nm, u32 offset)
+{
+ u32 wdata;
+
+ if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
+ dev_dbg(xsdfec->dev, "Psize is not in range");
+ return -EINVAL;
+ }
+
+ if (no_packing != 0 && no_packing != 1)
+ dev_dbg(xsdfec->dev, "No-packing bit register invalid");
+ no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
+ XSDFEC_REG1_NO_PACKING_MASK);
+
+ if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
+ dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
+ nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
+
+ wdata = nm | no_packing | psize;
+ if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
+ u32 norm_type, u32 special_qc, u32 no_final_parity,
+ u32 max_schedule, u32 offset)
+{
+ u32 wdata;
+
+ if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
+ nlayers > XSDFEC_REG2_NLAYERS_MAX) {
+ dev_dbg(xsdfec->dev, "Nlayers is not in range");
+ return -EINVAL;
+ }
+
+ if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
+ dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
+ nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
+
+ if (norm_type > 1)
+ dev_dbg(xsdfec->dev, "Norm type is invalid");
+ norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
+ XSDFEC_REG2_NORM_TYPE_MASK);
+ if (special_qc > 1)
+ dev_dbg(xsdfec->dev, "Special QC is invalid");
+ special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
+ XSDFEC_REG2_SPECIAL_QC_MASK);
+
+ if (no_final_parity > 1)
+ dev_dbg(xsdfec->dev, "No final parity check invalid");
+ no_final_parity =
+ ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
+ XSDFEC_REG2_NO_FINAL_PARITY_MASK);
+ if (max_schedule &
+ ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
+ dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
+ max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
+ XSDFEC_REG2_MAX_SCHEDULE_MASK);
+
+ wdata = (max_schedule | no_final_parity | special_qc | norm_type |
+ nmqc | nlayers);
+
+ if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
+ u16 qc_off, u32 offset)
+{
+ u32 wdata;
+
+ wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
+ (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
+ if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
+ dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
+ u32 *src_ptr, u32 len, const u32 base_addr,
+ const u32 depth)
+{
+ u32 reg = 0;
+ u32 res;
+ u32 n, i;
+ u32 *addr = NULL;
+ struct page *page[MAX_NUM_PAGES];
+
+ /*
+ * Writes that go beyond the length of the
+ * Shared Scale (SC) table should fail
+ */
+ if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
+ len > depth / XSDFEC_REG_WIDTH_JUMP ||
+ offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
+ dev_dbg(xsdfec->dev, "Write exceeds SC table length");
+ return -EINVAL;
+ }
+
+ n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
+ if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
+ n += 1;
+
+ res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
+ if (res < n) {
+ for (i = 0; i < res; i++)
+ put_page(page[i]);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n; i++) {
+ addr = kmap(page[i]);
+ do {
+ xsdfec_regwrite(xsdfec,
+ base_addr + ((offset + reg) *
+ XSDFEC_REG_WIDTH_JUMP),
+ addr[reg]);
+ reg++;
+ } while ((reg < len) &&
+ ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
+ put_page(page[i]);
+ }
+ return reg;
+}
+
+static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_ldpc_params *ldpc;
+ int ret, n;
+
+ ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
+ if (!ldpc)
+ return -ENOMEM;
+
+ if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ if (xsdfec->config.code_wr_protect) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Write Reg 0 */
+ ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 1 */
+ ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 2 */
+ ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
+ ldpc->norm_type, ldpc->special_qc,
+ ldpc->no_final_parity, ldpc->max_schedule,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 3 */
+ ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
+ ldpc->qc_off, ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Shared Codes */
+ n = ldpc->nlayers / 4;
+ if (ldpc->nlayers % 4)
+ n++;
+
+ ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
+ XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
+ XSDFEC_SC_TABLE_DEPTH);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
+ ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
+ XSDFEC_LA_TABLE_DEPTH);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
+ ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
+ XSDFEC_QC_TABLE_DEPTH);
+ if (ret > 0)
+ ret = 0;
+err_out:
+ kfree(ldpc);
+ return ret;
+}
+
+static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ bool order_invalid;
+ enum xsdfec_order order;
+ int err;
+
+ err = get_user(order, (enum xsdfec_order *)arg);
+ if (err)
+ return -EFAULT;
+
+ order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
+ (order != XSDFEC_OUT_OF_ORDER);
+ if (order_invalid)
+ return -EINVAL;
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED)
+ return -EIO;
+
+ xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
+
+ xsdfec->config.order = order;
+
+ return 0;
+}
+
+static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+ bool bypass;
+ int err;
+
+ err = get_user(bypass, arg);
+ if (err)
+ return -EFAULT;
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED)
+ return -EIO;
+
+ if (bypass)
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
+ else
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
+
+ xsdfec->config.bypass = bypass;
+
+ return 0;
+}
+
+static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+ u32 reg_value;
+ bool is_active;
+ int err;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
+ /* using a double ! operator instead of casting */
+ is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
+ err = put_user(is_active, arg);
+ if (err)
+ return -EFAULT;
+
+ return err;
+}
+
+static u32
+xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
+{
+ u32 axis_width_field = 0;
+
+ switch (axis_width_cfg) {
+ case XSDFEC_1x128b:
+ axis_width_field = 0;
+ break;
+ case XSDFEC_2x128b:
+ axis_width_field = 1;
+ break;
+ case XSDFEC_4x128b:
+ axis_width_field = 2;
+ break;
+ }
+
+ return axis_width_field;
+}
+
+static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
+ axis_word_inc_cfg)
+{
+ u32 axis_words_field = 0;
+
+ if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
+ axis_word_inc_cfg == XSDFEC_IN_BLOCK)
+ axis_words_field = 0;
+ else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
+ axis_words_field = 1;
+
+ return axis_words_field;
+}
+
+static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ u32 dout_words_field;
+ u32 dout_width_field;
+ u32 din_words_field;
+ u32 din_width_field;
+ struct xsdfec_config *config = &xsdfec->config;
+
+ /* translate config info to register values */
+ dout_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
+ dout_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->dout_width);
+ din_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->din_word_include);
+ din_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->din_width);
+
+ reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
+ reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
+ reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
+ reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
+
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
+
+ return 0;
+}
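/*
 * Worked example of the AXIS_WIDTH packing above: with din_width =
 * XSDFEC_4x128b (field 2), dout_width = XSDFEC_2x128b (field 1) and both
 * word-include settings at XSDFEC_PER_AXI_TRANSACTION (field 1), the
 * register value is (1 << 5) | (1 << 3) | (1 << 2) | 2 = 0x2e.
 */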
+
+static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
+{
+ return 0;
+}
+
+static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
+{
+ return 0;
+}
+
+static int xsdfec_start(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
+ regread &= 0x1;
+ if (regread != xsdfec->config.code) {
+ dev_dbg(xsdfec->dev,
+ "%s SDFEC HW code does not match driver code, reg %d, code %d",
+ __func__, regread, xsdfec->config.code);
+ return -EINVAL;
+ }
+
+ /* Set AXIS enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
+ XSDFEC_AXIS_ENABLE_MASK);
+ /* Done */
+ xsdfec->state = XSDFEC_STARTED;
+ return 0;
+}
+
+static int xsdfec_stop(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ if (xsdfec->state != XSDFEC_STARTED)
+ dev_dbg(xsdfec->dev, "Device not started correctly");
+ /* Disable AXIS_ENABLE Input interfaces only */
+ regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
+ /* Stop */
+ xsdfec->state = XSDFEC_STOPPED;
+ return 0;
+}
+
+static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
+{
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ xsdfec->isr_err_count = 0;
+ xsdfec->uecc_count = 0;
+ xsdfec->cecc_count = 0;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ return 0;
+}
+
+static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+ struct xsdfec_stats user_stats;
+
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ user_stats.isr_err_count = xsdfec->isr_err_count;
+ user_stats.cecc_count = xsdfec->cecc_count;
+ user_stats.uecc_count = xsdfec->uecc_count;
+ xsdfec->stats_updated = false;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ err = copy_to_user(arg, &user_stats, sizeof(user_stats));
+ if (err)
+ err = -EFAULT;
+
+ return err;
+}
+
+static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
+{
+ /* Ensure registers are aligned with core configuration */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+ xsdfec_cfg_axi_streams(xsdfec);
+ update_config_from_hw(xsdfec);
+
+ return 0;
+}
+
+static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ struct xsdfec_dev *xsdfec;
+ void __user *arg = NULL;
+ int rval = -EINVAL;
+
+ xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
+
+ /* In failed state allow only reset and get status IOCTLs */
+ if (xsdfec->state == XSDFEC_NEEDS_RESET &&
+ (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
+ cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
+ return -EPERM;
+ }
+
+ if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
+ return -ENOTTY;
+
+ /* check if ioctl argument is present and valid */
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ arg = (void __user *)data;
+ if (!arg)
+ return rval;
+ }
+
+ switch (cmd) {
+ case XSDFEC_START_DEV:
+ rval = xsdfec_start(xsdfec);
+ break;
+ case XSDFEC_STOP_DEV:
+ rval = xsdfec_stop(xsdfec);
+ break;
+ case XSDFEC_CLEAR_STATS:
+ rval = xsdfec_clear_stats(xsdfec);
+ break;
+ case XSDFEC_GET_STATS:
+ rval = xsdfec_get_stats(xsdfec, arg);
+ break;
+ case XSDFEC_GET_STATUS:
+ rval = xsdfec_get_status(xsdfec, arg);
+ break;
+ case XSDFEC_GET_CONFIG:
+ rval = xsdfec_get_config(xsdfec, arg);
+ break;
+ case XSDFEC_SET_DEFAULT_CONFIG:
+ rval = xsdfec_set_default_config(xsdfec);
+ break;
+ case XSDFEC_SET_IRQ:
+ rval = xsdfec_set_irq(xsdfec, arg);
+ break;
+ case XSDFEC_SET_TURBO:
+ rval = xsdfec_set_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_GET_TURBO:
+ rval = xsdfec_get_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_ADD_LDPC_CODE_PARAMS:
+ rval = xsdfec_add_ldpc(xsdfec, arg);
+ break;
+ case XSDFEC_SET_ORDER:
+ rval = xsdfec_set_order(xsdfec, arg);
+ break;
+ case XSDFEC_SET_BYPASS:
+ rval = xsdfec_set_bypass(xsdfec, arg);
+ break;
+ case XSDFEC_IS_ACTIVE:
+ rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
+ break;
+ default:
+ /* Should not get here */
+ break;
+ }
+ return rval;
+}
+
+#ifdef CONFIG_COMPAT
+static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long data)
+{
+ return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
+}
+#endif
+
+static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
+
+ if (!xsdfec)
+ return POLLNVAL | POLLHUP;
+
+ poll_wait(file, &xsdfec->waitq, wait);
+
+ /* XSDFEC ISR detected an error */
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ if (xsdfec->state_updated)
+ mask |= POLLIN | POLLPRI;
+
+ if (xsdfec->stats_updated)
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+ return mask;
+}
+
static const struct file_operations xsdfec_fops = {
.owner = THIS_MODULE,
+ .open = xsdfec_dev_open,
+ .release = xsdfec_dev_release,
+ .unlocked_ioctl = xsdfec_dev_ioctl,
+ .poll = xsdfec_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = xsdfec_dev_compat_ioctl,
+#endif
};
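/*
 * A minimal user-space sketch of driving the ioctl interface above,
 * assuming the misc device node shows up as /dev/xsdfec0 and that the
 * uapi header is installed as <misc/xilinx_sdfec.h>:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/xilinx_sdfec.h>
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_status st;
 *
 *	if (fd >= 0 && !ioctl(fd, XSDFEC_GET_STATUS, &st))
 *		printf("state %d active %d\n", st.state, st.activity);
 *	ioctl(fd, XSDFEC_START_DEV);
 *	ioctl(fd, XSDFEC_STOP_DEV);
 *	close(fd);
 */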
+static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
+{
+ struct device *dev = xsdfec->dev;
+ struct device_node *node = dev->of_node;
+ int rval;
+ const char *fec_code;
+ u32 din_width;
+ u32 din_word_include;
+ u32 dout_width;
+ u32 dout_word_include;
+
+ rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
+ if (rval < 0)
+ return rval;
+
+ if (!strcasecmp(fec_code, "ldpc"))
+ xsdfec->config.code = XSDFEC_LDPC_CODE;
+ else if (!strcasecmp(fec_code, "turbo"))
+ xsdfec->config.code = XSDFEC_TURBO_CODE;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
+ &din_word_include);
+ if (rval < 0)
+ return rval;
+
+ if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+ xsdfec->config.din_word_include = din_word_include;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
+ if (rval < 0)
+ return rval;
+
+ switch (din_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.din_width = din_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
+ &dout_word_include);
+ if (rval < 0)
+ return rval;
+
+ if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+ xsdfec->config.dout_word_include = dout_word_include;
+ else
+ return -EINVAL;
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
+ if (rval < 0)
+ return rval;
+
+ switch (dout_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.dout_width = dout_width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write LDPC to CODE Register */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+
+ xsdfec_cfg_axi_streams(xsdfec);
+
+ return 0;
+}
+
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+ struct xsdfec_dev *xsdfec = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 ecc_err;
+ u32 isr_err;
+ u32 uecc_count;
+ u32 cecc_count;
+ u32 isr_err_count;
+ u32 aecc_count;
+ u32 tmp;
+
+ WARN_ON(xsdfec->irq != irq);
+
+ /* Mask Interrupts */
+ xsdfec_isr_enable(xsdfec, false);
+ xsdfec_ecc_isr_enable(xsdfec, false);
+ /* Read ISR */
+ ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+ isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+ /* Clear the interrupts */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
+ xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
+
+ tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+ /* Count uncorrectable 2-bit errors */
+ uecc_count = hweight32(tmp);
+ /* Count all ECC errors */
+ aecc_count = hweight32(ecc_err);
+ /* Number of correctable 1-bit ECC error */
+ cecc_count = aecc_count - 2 * uecc_count;
+ /* Count ISR errors */
+ isr_err_count = hweight32(isr_err);
+ dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
+ uecc_count, aecc_count, cecc_count, isr_err_count);
+ dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
+ xsdfec->cecc_count, xsdfec->isr_err_count);
+
+ spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+ /* Add new errors to a 2-bits counter */
+ if (uecc_count)
+ xsdfec->uecc_count += uecc_count;
+ /* Add new errors to a 1-bits counter */
+ if (cecc_count)
+ xsdfec->cecc_count += cecc_count;
+ /* Add new errors to a ISR counter */
+ if (isr_err_count)
+ xsdfec->isr_err_count += isr_err_count;
+
+ /* Update state/stats flag */
+ if (uecc_count) {
+ if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_PL_RECONFIGURE;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ if (cecc_count)
+ xsdfec->stats_updated = true;
+
+ if (isr_err_count) {
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ xsdfec->stats_updated = true;
+ xsdfec->state_updated = true;
+ }
+
+ spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+ dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
+ xsdfec->stats_updated);
+
+ /* Enable another polling */
+ if (xsdfec->state_updated || xsdfec->stats_updated)
+ wake_up_interruptible(&xsdfec->waitq);
+ else
+ ret = IRQ_NONE;
+
+ /* Unmask Interrupts */
+ xsdfec_isr_enable(xsdfec, true);
+ xsdfec_ecc_isr_enable(xsdfec, true);
+
+ return ret;
+}
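/*
 * Worked example of the counting above, assuming each uncorrectable
 * (multi-bit) error also latches its single-bit counterpart, which is
 * what the "aecc_count - 2 * uecc_count" arithmetic relies on: for
 * ecc_err = 0x803 (bits 0, 1 and 11 set), uecc_count = 1, aecc_count = 3
 * and cecc_count = 3 - 2 * 1 = 1 correctable error.
 */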
+
static int xsdfec_clk_init(struct platform_device *pdev,
struct xsdfec_clks *clks)
{
@@ -227,19 +1378,13 @@ static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
clk_disable_unprepare(clks->axi_clk);
}
-static void xsdfec_idr_remove(struct xsdfec_dev *xsdfec)
-{
- mutex_lock(&dev_idr_lock);
- idr_remove(&dev_idr, xsdfec->dev_id);
- mutex_unlock(&dev_idr_lock);
-}
-
static int xsdfec_probe(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
struct device *dev;
struct resource *res;
int err;
+ bool irq_enabled = true;
xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
if (!xsdfec)
@@ -260,12 +1405,34 @@ static int xsdfec_probe(struct platform_device *pdev)
goto err_xsdfec_dev;
}
+ xsdfec->irq = platform_get_irq(pdev, 0);
+ if (xsdfec->irq < 0) {
+ dev_dbg(dev, "platform_get_irq failed");
+ irq_enabled = false;
+ }
+
+ err = xsdfec_parse_of(xsdfec);
+ if (err < 0)
+ goto err_xsdfec_dev;
+
+ update_config_from_hw(xsdfec);
+
/* Save driver private data */
platform_set_drvdata(pdev, xsdfec);
- mutex_lock(&dev_idr_lock);
- err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
- mutex_unlock(&dev_idr_lock);
+ if (irq_enabled) {
+ init_waitqueue_head(&xsdfec->waitq);
+ /* Register IRQ thread */
+ err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+ xsdfec_irq_thread, IRQF_ONESHOT,
+ "xilinx-sdfec16", xsdfec);
+ if (err < 0) {
+ dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+ goto err_xsdfec_dev;
+ }
+ }
+
+ err = ida_alloc(&dev_nrs, GFP_KERNEL);
if (err < 0)
goto err_xsdfec_dev;
xsdfec->dev_id = err;
@@ -278,12 +1445,12 @@ static int xsdfec_probe(struct platform_device *pdev)
err = misc_register(&xsdfec->miscdev);
if (err) {
dev_err(dev, "error:%d. Unable to register device", err);
- goto err_xsdfec_idr;
+ goto err_xsdfec_ida;
}
return 0;
-err_xsdfec_idr:
- xsdfec_idr_remove(xsdfec);
+err_xsdfec_ida:
+ ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
xsdfec_disable_all_clks(&xsdfec->clks);
return err;
@@ -295,7 +1462,7 @@ static int xsdfec_remove(struct platform_device *pdev)
xsdfec = platform_get_drvdata(pdev);
misc_deregister(&xsdfec->miscdev);
- xsdfec_idr_remove(xsdfec);
+ ida_free(&dev_nrs, xsdfec->dev_id);
xsdfec_disable_all_clks(&xsdfec->clks);
return 0;
}
@@ -321,8 +1488,6 @@ static int __init xsdfec_init(void)
{
int err;
- mutex_init(&dev_idr_lock);
- idr_init(&dev_idr);
err = platform_driver_register(&xsdfec_driver);
if (err < 0) {
pr_err("%s Unabled to register SDFEC driver", __func__);
@@ -334,7 +1499,6 @@ static int __init xsdfec_init(void)
static void __exit xsdfec_exit(void)
{
platform_driver_unregister(&xsdfec_driver);
- idr_destroy(&dev_idr);
}
module_init(xsdfec_init);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 74e4364bc9fb..09113b9ad679 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
goto out;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index e327f80ebe70..7102e2ebc614 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -10,6 +10,7 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
goto free_tag_set;
}
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ mq->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
+
mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d681e8aaca83..fe914ff5f5d6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
goto err;
}
+ /*
+ * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
+ * these bits as invalid, especially bit 7.
+ */
+ ocr &= ~0x7FFF;
+
rocr = mmc_select_voltage(host, ocr);
/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 8dd8fc32ecca..26cabd53ddc5 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -951,6 +951,8 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
+ WARN_ON(host->sdio_irqs && !mmc_card_keep_power(host));
+
/* Prevent processing of SDIO IRQs in suspended state. */
mmc_card_set_suspended(host->card);
cancel_delayed_work_sync(&host->sdio_irq_work);
@@ -1013,7 +1015,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
else if (host->caps & MMC_CAP_SDIO_IRQ)
- host->ops->enable_sdio_irq(host, 1);
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
out:
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 0bcc5e83bd1a..900871073bd7 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -27,10 +27,39 @@
#include "core.h"
#include "card.h"
+static int sdio_get_pending_irqs(struct mmc_host *host, u8 *pending)
+{
+ struct mmc_card *card = host->card;
+ int ret;
+
+ WARN_ON(!host->claimed);
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, pending);
+ if (ret) {
+ pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
+ mmc_card_id(card), ret);
+ return ret;
+ }
+
+ if (*pending && mmc_card_broken_irq_polling(card) &&
+ !(host->caps & MMC_CAP_SDIO_IRQ)) {
+ unsigned char dummy;
+
+ /* A fake interrupt could be created when we poll SDIO_CCCR_INTx
+ * register with a Marvell SD8797 card. A dummy CMD52 read to
+ * function 0 register 0xff can avoid this.
+ */
+ mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
+ }
+
+ return 0;
+}
+
static int process_sdio_pending_irqs(struct mmc_host *host)
{
struct mmc_card *card = host->card;
int i, ret, count;
+ bool sdio_irq_pending = host->sdio_irq_pending;
unsigned char pending;
struct sdio_func *func;
@@ -38,34 +67,23 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
if (mmc_card_suspended(card))
return 0;
+ /* Clear the flag to indicate that we have processed the IRQ. */
+ host->sdio_irq_pending = false;
+
/*
* Optimization, if there is only 1 function interrupt registered
* and we know an IRQ was signaled then call irq handler directly.
* Otherwise do the full probe.
*/
func = card->sdio_single_irq;
- if (func && host->sdio_irq_pending) {
+ if (func && sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
- ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
- if (ret) {
- pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
- mmc_card_id(card), ret);
+ ret = sdio_get_pending_irqs(host, &pending);
+ if (ret)
return ret;
- }
-
- if (pending && mmc_card_broken_irq_polling(card) &&
- !(host->caps & MMC_CAP_SDIO_IRQ)) {
- unsigned char dummy;
-
- /* A fake interrupt could be created when we poll SDIO_CCCR_INTx
- * register with a Marvell SD8797 card. A dummy CMD52 read to
- * function 0 register 0xff can avoid this.
- */
- mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
- }
count = 0;
for (i = 1; i <= 7; i++) {
@@ -96,9 +114,8 @@ static void sdio_run_irqs(struct mmc_host *host)
{
mmc_claim_host(host);
if (host->sdio_irqs) {
- host->sdio_irq_pending = true;
process_sdio_pending_irqs(host);
- if (host->ops->ack_sdio_irq)
+ if (!host->sdio_irq_pending)
host->ops->ack_sdio_irq(host);
}
mmc_release_host(host);
@@ -114,6 +131,7 @@ void sdio_irq_work(struct work_struct *work)
void sdio_signal_irq(struct mmc_host *host)
{
+ host->sdio_irq_pending = true;
queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
@@ -159,7 +177,6 @@ static int sdio_irq_thread(void *_host)
if (ret)
break;
ret = process_sdio_pending_irqs(host);
- host->sdio_irq_pending = false;
mmc_release_host(host);
/*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 14d89a108edd..3a52f5703286 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -154,6 +154,18 @@ config MMC_SDHCI_OF_ARASAN
If unsure, say N.
+config MMC_SDHCI_OF_ASPEED
+ tristate "SDHCI OF support for the ASPEED SDHCI controller"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF && OF_ADDRESS
+ help
+ This selects the ASPEED Secure Digital Host Controller Interface.
+
+ If you have a controller with this interface, say Y or M here. You
+ also need to enable an appropriate bus interface.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_AT91
tristate "SDHCI OF support for the Atmel SDMMC controller"
depends on MMC_SDHCI_PLTFM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 73578718f119..390ee162fe71 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o
+obj-$(CONFIG_MMC_SDHCI_OF_ASPEED) += sdhci-of-aspeed.o
obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9ee0bc0ce6d0..c26fbe5f2222 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2413,6 +2413,7 @@ static void atmci_get_cap(struct atmel_mci *host)
case 0x600:
case 0x500:
host->caps.has_odd_clk_div = 1;
+ /* Fall through */
case 0x400:
case 0x300:
host->caps.has_dma_conf_reg = 1;
@@ -2420,13 +2421,16 @@ static void atmci_get_cap(struct atmel_mci *host)
host->caps.has_cfg_reg = 1;
host->caps.has_cstor_reg = 1;
host->caps.has_highspeed = 1;
+ /* Fall through */
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
host->caps.need_notbusy_for_read_ops = 1;
+ /* Fall through */
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
+ /* Fall through */
case 0x0:
break;
default:
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 7e0d3a49c06d..148414d7f0c9 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
struct dma_chan *terminate_chan = NULL;
struct mmc_request *mrq;
- cancel_delayed_work_sync(&host->timeout_work);
+ cancel_delayed_work(&host->timeout_work);
mrq = host->mrq;
@@ -1314,7 +1314,7 @@ static int bcm2835_add_host(struct bcm2835_host *host)
}
mmc->max_segs = 128;
- mmc->max_req_size = 524288;
+ mmc->max_req_size = min_t(size_t, 524288, dma_max_mapping_size(dev));
mmc->max_seg_size = mmc->max_req_size;
mmc->max_blk_size = 1024;
mmc->max_blk_count = 65535;
@@ -1409,7 +1409,6 @@ static int bcm2835_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
- dev_err(dev, "get IRQ failed\n");
ret = -EINVAL;
goto err;
}
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index ed5cefb83768..89deb451e0ac 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
mmc->max_segs = 1;
/* DMA size field can address up to 8 MB */
- mmc->max_seg_size = 8 * 1024 * 1024;
+ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
+ dma_get_max_seg_size(host->dev));
mmc->max_req_size = mmc->max_seg_size;
/* External DMA is in 512 byte blocks */
mmc->max_blk_size = 512;
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index bc51cef47c47..83e1bad0a008 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -66,7 +66,7 @@ static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
static int dw_mci_hi3798cv200_execute_tuning(struct dw_mci_slot *slot,
u32 opcode)
{
- int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
+ static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
struct dw_mci *host = slot->host;
struct hi3798cv200_priv *priv = host->priv;
int raise_point = -1, fall_point = -1;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index faaaf52a46d2..79c55c7b4afd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if (err != -ETIMEDOUT) {
state = STATE_SENDING_DATA;
continue;
}
@@ -3461,6 +3460,10 @@ int dw_mci_runtime_resume(struct device *dev)
/* Force setup bus to guarantee available clock output */
dw_mci_setup_bus(host->slot, true);
+ /* Re-enable SDIO interrupts. */
+ if (sdio_irq_claimed(host->slot->mmc))
+ __dw_mci_enable_sdio_irq(host->slot, 1);
+
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index ffdbfaadd3f2..f816c06ef916 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -25,8 +25,6 @@
#include <asm/cacheflush.h>
-#include <asm/mach-jz4740/dma.h>
-
#define JZ_REG_MMC_STRPCL 0x00
#define JZ_REG_MMC_STATUS 0x04
#define JZ_REG_MMC_CLKRT 0x08
@@ -186,9 +184,9 @@ static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
uint32_t val)
{
if (host->version >= JZ_MMC_JZ4780)
- return writel(val, host->base + JZ_REG_MMC_IREG);
+ writel(val, host->base + JZ_REG_MMC_IREG);
else
- return writew(val, host->base + JZ_REG_MMC_IREG);
+ writew(val, host->base + JZ_REG_MMC_IREG);
}
static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
@@ -292,11 +290,9 @@ static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
if (data->flags & MMC_DATA_WRITE) {
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
- conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
} else {
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
- conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
}
sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
@@ -820,14 +816,14 @@ static irqreturn_t jz_mmc_irq(int irq, void *devid)
del_timer(&host->timeout_timer);
if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
- cmd->error = -ETIMEDOUT;
+ cmd->error = -ETIMEDOUT;
} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
- cmd->error = -EIO;
+ cmd->error = -EIO;
} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
- if (cmd->data)
- cmd->data->error = -EIO;
- cmd->error = -EIO;
+ if (cmd->data)
+ cmd->data->error = -EIO;
+ cmd->error = -EIO;
}
jz4740_mmc_set_irq_enabled(host, irq_reg, false);
@@ -969,7 +965,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
- dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
goto err_free_host;
}
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 037311db3551..e712315c7e8d 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1091,7 +1091,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
- dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
}
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2d736e416775..ba9a63db73da 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -73,7 +73,7 @@
#define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
- #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
#define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
#define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
#define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 19544b121276..66e354d51ee9 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -891,7 +891,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
/* Handle scatterlist segments one at a time, with synch for
* each 512-byte block
*/
- for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) {
+ for_each_sg(data->sg, sg, data->sg_len, n_sg) {
int status = 0;
dma_addr_t dma_addr = 0;
void *kmap_addr;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b8554bf38f72..c37e70dbe250 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1219,47 +1219,58 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
(MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
return;
- /*
- * ST Micro variant: handle busy detection.
- */
+ /* Handle busy detection on DAT0 if the variant supports it. */
if (busy_resp && host->variant->busy_detect) {
- /* We are busy with a command, return */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag))
- return;
-
/*
- * We were not busy, but we now got a busy response on
- * something that was not an error, and we double-check
- * that the special busy status bit is still set before
- * proceeding.
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while to allow it to be set, but tests indicate that it
+ * isn't needed.
*/
if (!host->busy_status &&
!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
(readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
- /* Clear the busy start IRQ */
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- /* Unmask the busy end IRQ */
writel(readl(base + MMCIMASK0) |
host->variant->busy_detect_mask,
base + MMCIMASK0);
- /*
- * Now cache the last response status code (until
- * the busy bit goes low), and return.
- */
+
host->busy_status =
status & (MCI_CMDSENT|MCI_CMDRESPEND);
return;
}
/*
- * At this point we are not busy with a command, we have
- * not received a new busy request, clear and mask the busy
- * end IRQ and fall through to process the IRQ.
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts need to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask,
+ host->base + MMCICLEAR);
+ return;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
*/
if (host->busy_status) {
@@ -1505,14 +1516,8 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
}
/*
- * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
- * enabled) in mmci_cmd_irq() function where ST Micro busy
- * detection variant is handled. Considering the HW seems to be
- * triggering the IRQ on both edges while monitoring DAT0 for
- * busy completion and that same status bit is used to monitor
- * start and end of busy detection, special care must be taken
- * to make sure that both start and end interrupts are always
- * cleared one after the other.
+ * Busy detection is managed by mmci_cmd_irq(), including
+ * clearing the corresponding IRQ.
*/
status &= readl(host->base + MMCIMASK0);
if (host->variant->busy_detect)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 33f4b6387ef7..189e42674d85 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -192,6 +192,7 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+#define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */
/* SDC_ADV_CFG0 mask */
#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
@@ -328,6 +329,7 @@ struct mt_bdma_desc {
u32 ptr;
u32 bd_data_len;
#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */
+#define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */
};
struct msdc_dma {
@@ -641,8 +643,14 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
<< 28;
}
- bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
- bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
+
+ if (host->dev_comp->support_64g) {
+ bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
+ bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
+ } else {
+ bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
+ bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
+ }
if (j == data->sg_count - 1) /* the last bd */
bd[j].bd_info |= BDMA_DESC_EOL;
@@ -1071,11 +1079,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
}
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
- if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ if (events & MSDC_INT_CMDTMO ||
+ (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
/*
* should not clear fifo/interrupt as the tune data
- * may have alreay come.
+ * may have already come when CMD19/CMD21 gets a response
+ * CRC error.
*/
msdc_reset_hw(host);
if (events & MSDC_INT_RSPCRCERR) {
@@ -1568,6 +1578,7 @@ static void msdc_init_hw(struct msdc_host *host)
/* Config SDIO device detect interrupt function */
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
@@ -2275,7 +2286,10 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
mmc->max_segs = MAX_BD_NUM;
- mmc->max_seg_size = BDMA_DESC_BUFLEN;
+ if (host->dev_comp->support_64g)
+ mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT;
+ else
+ mmc->max_seg_size = BDMA_DESC_BUFLEN;
mmc->max_blk_size = 2048;
mmc->max_req_size = 512 * 1024;
mmc->max_blk_count = mmc->max_req_size / 512;
@@ -2421,6 +2435,9 @@ static void msdc_restore_reg(struct msdc_host *host)
} else {
writel(host->save_para.pad_tune, host->base + tune_reg);
}
+
+ if (sdio_irq_claimed(host->mmc))
+ __msdc_enable_sdio_irq(host, 1);
}
static int msdc_runtime_suspend(struct device *dev)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 750604f7fac9..011b59a3602e 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1010,10 +1010,8 @@ static int mxcmci_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
if (!mmc)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b334e81c5cab..78e7e350655c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -571,7 +571,6 @@ static int mxs_mmc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
- struct resource *iores;
int ret = 0, irq_err;
struct regulator *reg_vmmc;
struct mxs_ssp *ssp;
@@ -587,8 +586,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
ssp = &host->ssp;
ssp->dev = &pdev->dev;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ssp->base = devm_ioremap_resource(&pdev->dev, iores);
+ ssp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ssp->base)) {
ret = PTR_ERR(ssp->base);
goto out_mmc_free;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 64d3b5fb7fe5..d4ada5cca2d1 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -124,7 +124,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
{
struct renesas_sdhi *priv = host_to_priv(host);
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
- int i, ret;
+ int i;
/* tested only on R-Car Gen2+ currently; may work for others */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
@@ -153,9 +153,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
}
}
- ret = clk_set_rate(priv->clk, best_freq);
+ clk_set_rate(priv->clk, best_freq);
- return ret == 0 ? best_freq : clk_get_rate(priv->clk);
+ return clk_get_rate(priv->clk);
}
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
@@ -166,10 +166,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- if (new_clock == 0)
+ if (new_clock == 0) {
+ host->mmc->actual_clock = 0;
goto out;
+ }
- clock = renesas_sdhi_clk_update(host, new_clock) / 512;
+ host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
+ clock = host->mmc->actual_clock / 512;
for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
clock <<= 1;
@@ -774,8 +777,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- pm_runtime_enable(&pdev->dev);
-
ret = renesas_sdhi_clk_enable(host);
if (ret)
goto efree;
@@ -856,8 +857,6 @@ edisclk:
efree:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
@@ -869,8 +868,6 @@ int renesas_sdhi_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
- pm_runtime_disable(&pdev->dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 1d29b822efb8..13ff023fbee9 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -68,26 +68,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
};
-/* Definitions for sampling clocks */
-static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
- {
- .clk_rate = 0,
- .tap = 0x00000300,
- },
-};
-
-static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
- TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
- .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
- MMC_CAP_CMD23,
- .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
- .bus_shift = 2,
- .scc_offset = 0x1000,
- .taps = rcar_gen3_scc_taps,
- .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
-};
-
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
@@ -102,11 +82,8 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
- { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
- { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
- { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-shmobile" },
{},
};
@@ -470,21 +447,8 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};
-/*
- * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC
- * implementation. Currently empty as all supported ES versions use
- * the internal DMAC.
- */
-static const struct soc_device_attribute gen3_soc_whitelist[] = {
- { /* sentinel */ }
-};
-
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
- if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
- !soc_device_match(gen3_soc_whitelist))
- return -ENODEV;
-
return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index ccc5f095775f..bce9c33bc4b5 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1614,7 +1614,6 @@ static int s3cmci_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
- dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b3a130a9ee23..1604f512c7bd 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
sdhci_acpi_byt_setting(&c->pdev->dev);
- return sdhci_runtime_resume_host(c->host);
+ return sdhci_runtime_resume_host(c->host, 0);
}
#endif
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 163d1cf4367e..ae0ec27dd7cc 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -337,10 +337,10 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
- size_t priv_size;
unsigned int nr_phy_params;
int ret;
struct device *dev = &pdev->dev;
+ static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
@@ -351,8 +351,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
return ret;
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
- priv_size = sizeof(*priv) + sizeof(priv->phy_params[0]) * nr_phy_params;
- host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data, priv_size);
+ host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data,
+ struct_size(priv, phy_params, nr_phy_params));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto disable_clk;
@@ -369,6 +369,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
+ sdhci_enable_v4_mode(host);
+ __sdhci_read_caps(host, &version, NULL, NULL);
sdhci_get_of_property(pdev);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index c391510e9ef4..1c988d6a2433 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1666,12 +1666,10 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
- if (!sdhci_sdio_irq_enabled(host)) {
- imx_data->actual_clock = host->mmc->actual_clock;
- esdhc_pltfm_set_clock(host, 0);
- clk_disable_unprepare(imx_data->clk_per);
- clk_disable_unprepare(imx_data->clk_ipg);
- }
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
clk_disable_unprepare(imx_data->clk_ahb);
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
@@ -1695,17 +1693,17 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
if (err)
goto remove_pm_qos_request;
- if (!sdhci_sdio_irq_enabled(host)) {
- err = clk_prepare_enable(imx_data->clk_per);
- if (err)
- goto disable_ahb_clk;
- err = clk_prepare_enable(imx_data->clk_ipg);
- if (err)
- goto disable_per_clk;
- esdhc_pltfm_set_clock(host, imx_data->actual_clock);
- }
+ err = clk_prepare_enable(imx_data->clk_per);
+ if (err)
+ goto disable_ahb_clk;
+
+ err = clk_prepare_enable(imx_data->clk_ipg);
+ if (err)
+ goto disable_per_clk;
+
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
- err = sdhci_runtime_resume_host(host);
+ err = sdhci_runtime_resume_host(host, 0);
if (err)
goto disable_ipg_clk;
@@ -1715,11 +1713,9 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
return err;
disable_ipg_clk:
- if (!sdhci_sdio_irq_enabled(host))
- clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
- if (!sdhci_sdio_irq_enabled(host))
- clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_per);
disable_ahb_clk:
clk_disable_unprepare(imx_data->clk_ahb);
remove_pm_qos_request:
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 2feb4ef32035..2b9cdcd1dd9d 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -261,8 +261,17 @@ static const struct sdhci_iproc_data bcm2835_data = {
.mmc_caps = 0x00000000,
};
+static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+ .ops = &sdhci_iproc_32only_ops,
+};
+
+static const struct sdhci_iproc_data bcm2711_data = {
+ .pdata = &sdhci_bcm2711_pltfm_data,
+};
+
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
+ { .compatible = "brcm,bcm2711-emmc2", .data = &bcm2711_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
{ .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
{ }
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9cf14b359c14..b75c82d8d6c1 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1917,8 +1917,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
- dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
- msm_host->pwr_irq);
ret = msm_host->pwr_irq;
goto clk_disable;
}
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index b12abf9b15f2..7023cbec4017 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -114,6 +114,12 @@ static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
.hiword_update = true,
};
+static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0xa0, .width = 8, .shift = 2 },
+ .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -373,6 +379,11 @@ static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
+ .soc_ctl_map = &intel_lgm_emmc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -474,6 +485,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "rockchip,rk3399-sdhci-5.1",
.data = &sdhci_arasan_rk3399_data,
},
+ {
+ .compatible = "intel,lgm-sdhci-5.1-emmc",
+ .data = &intel_lgm_emmc_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
new file mode 100644
index 000000000000..8962f6664381
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2019 ASPEED Technology Inc. */
+/* Copyright (C) 2019 IBM Corp. */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "sdhci-pltfm.h"
+
+#define ASPEED_SDC_INFO 0x00
+#define ASPEED_SDC_S1MMC8 BIT(25)
+#define ASPEED_SDC_S0MMC8 BIT(24)
+
+struct aspeed_sdc {
+ struct clk *clk;
+ struct resource *res;
+
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+struct aspeed_sdhci {
+ struct aspeed_sdc *parent;
+ u32 width_mask;
+};
+
+static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
+ struct aspeed_sdhci *sdhci,
+ bool bus8)
+{
+ u32 info;
+
+ /* Set/clear 8 bit mode */
+ spin_lock(&sdc->lock);
+ info = readl(sdc->regs + ASPEED_SDC_INFO);
+ if (bus8)
+ info |= sdhci->width_mask;
+ else
+ info &= ~sdhci->width_mask;
+ writel(info, sdc->regs + ASPEED_SDC_INFO);
+ spin_unlock(&sdc->lock);
+}
+
+static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ unsigned long parent;
+ int div;
+ u16 clk;
+
+ pltfm_host = sdhci_priv(host);
+ parent = clk_get_rate(pltfm_host->clk);
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ if (WARN_ON(clock > host->max_clk))
+ clock = host->max_clk;
+
+ for (div = 1; div < 256; div *= 2) {
+ if ((parent / div) <= clock)
+ break;
+ }
+ div >>= 1;
+
+ clk = div << SDHCI_DIVIDER_SHIFT;
+
+ sdhci_enable_clk(host, clk);
+}
+
+static unsigned int aspeed_sdhci_get_max_clock(struct sdhci_host *host)
+{
+ if (host->mmc->f_max)
+ return host->mmc->f_max;
+
+ return sdhci_pltfm_clk_get_max_clock(host);
+}
+
+static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_priv;
+ struct aspeed_sdhci *aspeed_sdhci;
+ struct aspeed_sdc *aspeed_sdc;
+ u8 ctrl;
+
+ pltfm_priv = sdhci_priv(host);
+ aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv);
+ aspeed_sdc = aspeed_sdhci->parent;
+
+ /* Set/clear 8-bit mode */
+ aspeed_sdc_configure_8bit_mode(aspeed_sdc, aspeed_sdhci,
+ width == MMC_BUS_WIDTH_8);
+
+ /* Set/clear 1 or 4 bit mode */
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static const struct sdhci_ops aspeed_sdhci_ops = {
+ .set_clock = aspeed_sdhci_set_clock,
+ .get_max_clock = aspeed_sdhci_get_max_clock,
+ .set_bus_width = aspeed_sdhci_set_bus_width,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data aspeed_sdhci_pdata = {
+ .ops = &aspeed_sdhci_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+};
+
+static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev,
+ struct resource *res)
+{
+ resource_size_t delta;
+
+ if (!res || resource_type(res) != IORESOURCE_MEM)
+ return -EINVAL;
+
+ if (res->start < dev->parent->res->start)
+ return -EINVAL;
+
+ delta = res->start - dev->parent->res->start;
+ if (delta & (0x100 - 1))
+ return -EINVAL;
+
+ return (delta / 0x100) - 1;
+}
+
+static int aspeed_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct aspeed_sdhci *dev;
+ struct sdhci_host *host;
+ struct resource *res;
+ int slot;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &aspeed_sdhci_pdata, sizeof(*dev));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ dev = sdhci_pltfm_priv(pltfm_host);
+ dev->parent = dev_get_drvdata(pdev->dev.parent);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ slot = aspeed_sdhci_calculate_slot(dev, res);
+
+ if (slot < 0)
+ return slot;
+ else if (slot >= 2)
+ return -EINVAL;
+
+ dev_info(&pdev->dev, "Configuring for slot %d\n", slot);
+ dev->width_mask = !slot ? ASPEED_SDC_S0MMC8 : ASPEED_SDC_S1MMC8;
+
+ sdhci_get_of_property(pdev);
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pltfm_host->clk))
+ return PTR_ERR(pltfm_host->clk);
+
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable SDIO clock\n");
+ goto err_pltfm_free;
+ }
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_sdhci_add;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_sdhci_add;
+
+ return 0;
+
+err_sdhci_add:
+ clk_disable_unprepare(pltfm_host->clk);
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int aspeed_sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ int dead = 0;
+
+ host = platform_get_drvdata(pdev);
+ pltfm_host = sdhci_priv(host);
+
+ sdhci_remove_host(host, dead);
+
+ clk_disable_unprepare(pltfm_host->clk);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_sdhci_of_match[] = {
+ { .compatible = "aspeed,ast2400-sdhci", },
+ { .compatible = "aspeed,ast2500-sdhci", },
+ { .compatible = "aspeed,ast2600-sdhci", },
+ { }
+};
+
+static struct platform_driver aspeed_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-aspeed",
+ .of_match_table = aspeed_sdhci_of_match,
+ },
+ .probe = aspeed_sdhci_probe,
+ .remove = aspeed_sdhci_remove,
+};
+
+static int aspeed_sdc_probe(struct platform_device *pdev)
+
+{
+ struct device_node *parent, *child;
+ struct aspeed_sdc *sdc;
+ int ret;
+
+ sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
+ if (!sdc)
+ return -ENOMEM;
+
+ spin_lock_init(&sdc->lock);
+
+ sdc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdc->clk))
+ return PTR_ERR(sdc->clk);
+
+ ret = clk_prepare_enable(sdc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable SDCLK\n");
+ return ret;
+ }
+
+ sdc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdc->regs = devm_ioremap_resource(&pdev->dev, sdc->res);
+ if (IS_ERR(sdc->regs)) {
+ ret = PTR_ERR(sdc->regs);
+ goto err_clk;
+ }
+
+ dev_set_drvdata(&pdev->dev, sdc);
+
+ parent = pdev->dev.of_node;
+ for_each_available_child_of_node(parent, child) {
+ struct platform_device *cpdev;
+
+ cpdev = of_platform_device_create(child, NULL, &pdev->dev);
+ if (!cpdev) {
+ of_node_put(child);
+ ret = -ENODEV;
+ goto err_clk;
+ }
+ }
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(sdc->clk);
+ return ret;
+}
+
+static int aspeed_sdc_remove(struct platform_device *pdev)
+{
+ struct aspeed_sdc *sdc = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(sdc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_sdc_of_match[] = {
+ { .compatible = "aspeed,ast2400-sd-controller", },
+ { .compatible = "aspeed,ast2500-sd-controller", },
+ { .compatible = "aspeed,ast2600-sd-controller", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match);
+
+static struct platform_driver aspeed_sdc_driver = {
+ .driver = {
+ .name = "sd-controller-aspeed",
+ .pm = &sdhci_pltfm_pmops,
+ .of_match_table = aspeed_sdc_of_match,
+ },
+ .probe = aspeed_sdc_probe,
+ .remove = aspeed_sdc_remove,
+};
+
+static int __init aspeed_sdc_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&aspeed_sdhci_driver);
+ if (rc < 0)
+ return rc;
+
+ rc = platform_driver_register(&aspeed_sdc_driver);
+ if (rc < 0)
+ platform_driver_unregister(&aspeed_sdhci_driver);
+
+ return rc;
+}
+module_init(aspeed_sdc_init);
+
+static void __exit aspeed_sdc_exit(void)
+{
+ platform_driver_unregister(&aspeed_sdc_driver);
+ platform_driver_unregister(&aspeed_sdhci_driver);
+}
+module_exit(aspeed_sdc_exit);
+
+MODULE_DESCRIPTION("Driver for the ASPEED SD/SDIO/SDHCI Controllers");
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e377b9bc55a4..e7d1920729fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
}
out:
- return sdhci_runtime_resume_host(host);
+ return sdhci_runtime_resume_host(host, 0);
}
#endif /* CONFIG_PM */
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ /* HS200 is broken at this moment */
+ host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
+
ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 4dd43b1adf2c..3271c2d76629 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -999,6 +999,7 @@ static struct soc_device_attribute soc_incorrect_hostver[] = {
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
+ { .family = "QorIQ LS1028A", .revision = "1.0", },
{ },
};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 4041878eb0f3..e1ca185d7328 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
err_pci_runtime_suspend:
while (--i >= 0)
- sdhci_runtime_resume_host(chip->slots[i]->host);
+ sdhci_runtime_resume_host(chip->slots[i]->host, 0);
return ret;
}
@@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
if (!slot)
continue;
- ret = sdhci_runtime_resume_host(slot->host);
+ ret = sdhci_runtime_resume_host(slot->host, 0);
if (ret)
return ret;
}
@@ -1672,6 +1672,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
@@ -1759,8 +1760,7 @@ static const struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
+ struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
@@ -1773,8 +1773,7 @@ static int sdhci_pci_suspend(struct device *dev)
static int sdhci_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
+ struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
@@ -1789,8 +1788,7 @@ static int sdhci_pci_resume(struct device *dev)
#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
+ struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
@@ -1803,8 +1801,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
static int sdhci_pci_runtime_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
+ struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 9dc4548271b4..fa8105087d68 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -11,6 +11,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include "sdhci.h"
#include "sdhci-pci.h"
@@ -51,13 +52,136 @@
#define O2_SD_VENDOR_SETTING2 0x1C8
#define O2_SD_HW_TUNING_DISABLE BIT(4)
-#define O2_PLL_WDT_CONTROL1 0x1CC
+#define O2_PLL_DLL_WDT_CONTROL1 0x1CC
#define O2_PLL_FORCE_ACTIVE BIT(18)
#define O2_PLL_LOCK_STATUS BIT(14)
#define O2_PLL_SOFT_RESET BIT(12)
+#define O2_DLL_LOCK_STATUS BIT(11)
#define O2_SD_DETECT_SETTING 0x324
+static const u32 dmdn_table[] = {0x2B1C0000,
+ 0x2C1A0000, 0x371B0000, 0x35100000};
+#define DMDN_SZ ARRAY_SIZE(dmdn_table)
+
+struct o2_host {
+ u8 dll_adjust_count;
+};
+
+static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 scratch32;
+
+ /* Wait max 50 ms */
+ timeout = ktime_add_ms(ktime_get(), 50);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
+ == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT)
+ break;
+
+ if (timedout) {
+ pr_err("%s: Card Detect debounce never finished.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+}
+
+static void sdhci_o2_enable_internal_clock(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u16 scratch;
+ u32 scratch32;
+
+ /* PLL software reset */
+ scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
+ scratch32 |= O2_PLL_SOFT_RESET;
+ sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
+ udelay(1);
+ scratch32 &= ~(O2_PLL_SOFT_RESET);
+ sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
+
+ /* PLL force active */
+ scratch32 |= O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ scratch = sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1);
+ if (scratch & O2_PLL_LOCK_STATUS)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ goto out;
+ }
+ udelay(10);
+ }
+
+ /* Wait for card detect finish */
+ udelay(1);
+ sdhci_o2_wait_card_detect_stable(host);
+
+out:
+ /* Cancel PLL force active */
+ scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
+ scratch32 &= ~O2_PLL_FORCE_ACTIVE;
+ sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
+}
+
+static int sdhci_o2_get_cd(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
+ sdhci_o2_enable_internal_clock(host);
+
+ return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+}
+
+static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
+{
+ u32 scratch_32;
+
+ pci_read_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, &scratch_32);
+
+ scratch_32 &= 0x0000FFFF;
+ scratch_32 |= value;
+
+ pci_write_config_dword(chip->pdev,
+ O2_SD_PLL_SETTING, scratch_32);
+}
+
+static u32 sdhci_o2_pll_dll_wdt_control(struct sdhci_host *host)
+{
+ return sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
+}
+
+/*
+ * This function is used to detect the DLL lock status.
+ * Since the DLL lock status bit toggles randomly at a very
+ * short interval, it needs to be polled as fast as possible.
+ * Set sleep_us to 1 microsecond.
+ */
+static int sdhci_o2_wait_dll_detect_lock(struct sdhci_host *host)
+{
+ u32 scratch32 = 0;
+
+ return readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
+ scratch32, !(scratch32 & O2_DLL_LOCK_STATUS), 1, 1000000);
+}
+
static void sdhci_o2_set_tuning_mode(struct sdhci_host *host)
{
u16 reg;
@@ -95,6 +219,83 @@ static void __sdhci_o2_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_reset_tuning(host);
}
+/*
+ * This function is used to fix the O2 DLL shift issue.
+ * It isn't necessary to detect card presence before recovery.
+ * First, it is used by the BHT eMMC card, which is embedded.
+ * Second, card presence is detected outside of the execute
+ * tuning function before recovery is called.
+ */
+static int sdhci_o2_dll_recovery(struct sdhci_host *host)
+{
+ int ret = 0;
+ u8 scratch_8 = 0;
+ u32 scratch_32 = 0;
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct sdhci_pci_chip *chip = slot->chip;
+ struct o2_host *o2_host = sdhci_pci_priv(slot);
+
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ while (o2_host->dll_adjust_count < DMDN_SZ && !ret) {
+ /* Disable clock */
+ sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL);
+
+ /* PLL software reset */
+ scratch_32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
+ scratch_32 |= O2_PLL_SOFT_RESET;
+ sdhci_writel(host, scratch_32, O2_PLL_DLL_WDT_CONTROL1);
+
+ pci_read_config_dword(chip->pdev,
+ O2_SD_FUNC_REG4,
+ &scratch_32);
+ /* Enable Base Clk setting change */
+ scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+ pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG4, scratch_32);
+ o2_pci_set_baseclk(chip, dmdn_table[o2_host->dll_adjust_count]);
+
+ /* Enable internal clock */
+ scratch_8 = SDHCI_CLOCK_INT_EN;
+ sdhci_writeb(host, scratch_8, SDHCI_CLOCK_CONTROL);
+
+ if (sdhci_o2_get_cd(host->mmc)) {
+ /*
+ * need to wait at least 5 ms for the DLL status to
+ * stabilize after enabling the internal clock
+ */
+ usleep_range(5000, 6000);
+ if (sdhci_o2_wait_dll_detect_lock(host)) {
+ scratch_8 |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writeb(host, scratch_8,
+ SDHCI_CLOCK_CONTROL);
+ ret = 1;
+ } else {
+ pr_warn("%s: DLL unlocked when dll_adjust_count is %d.\n",
+ mmc_hostname(host->mmc),
+ o2_host->dll_adjust_count);
+ }
+ } else {
+ pr_err("%s: card present detect failed.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+
+ o2_host->dll_adjust_count++;
+ }
+ if (!ret && o2_host->dll_adjust_count == DMDN_SZ)
+ pr_err("%s: DLL adjust over max times\n",
+ mmc_hostname(host->mmc));
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev,
+ O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ return ret;
+}
+
static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -109,7 +310,16 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
return -EINVAL;
-
+ /*
+ * Judge whether the tuning was caused by a DLL shift.
+ * If so, sdhci_o2_dll_recovery() should be called.
+ */
+ if (!sdhci_o2_wait_dll_detect_lock(host))
+ if (!sdhci_o2_dll_recovery(host)) {
+ pr_err("%s: o2 dll recovery failed\n",
+ mmc_hostname(host->mmc));
+ return -EINVAL;
+ }
/*
* o2 sdhci host didn't support 8bit emmc tuning
*/
@@ -136,19 +346,6 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
return 0;
}
-static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
-{
- u32 scratch_32;
- pci_read_config_dword(chip->pdev,
- O2_SD_PLL_SETTING, &scratch_32);
-
- scratch_32 &= 0x0000FFFF;
- scratch_32 |= value;
-
- pci_write_config_dword(chip->pdev,
- O2_SD_PLL_SETTING, scratch_32);
-}
-
static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
{
int ret;
@@ -284,92 +481,13 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
host->irq = pci_irq_vector(chip->pdev, 0);
}
-static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
-{
- ktime_t timeout;
- u32 scratch32;
-
- /* Wait max 50 ms */
- timeout = ktime_add_ms(ktime_get(), 50);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
- if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
- == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT)
- break;
-
- if (timedout) {
- pr_err("%s: Card Detect debounce never finished.\n",
- mmc_hostname(host->mmc));
- sdhci_dumpregs(host);
- return;
- }
- udelay(10);
- }
-}
-
-static void sdhci_o2_enable_internal_clock(struct sdhci_host *host)
-{
- ktime_t timeout;
- u16 scratch;
- u32 scratch32;
-
- /* PLL software reset */
- scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
- scratch32 |= O2_PLL_SOFT_RESET;
- sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
- udelay(1);
- scratch32 &= ~(O2_PLL_SOFT_RESET);
- sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
-
- /* PLL force active */
- scratch32 |= O2_PLL_FORCE_ACTIVE;
- sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- while (1) {
- bool timedout = ktime_after(ktime_get(), timeout);
-
- scratch = sdhci_readw(host, O2_PLL_WDT_CONTROL1);
- if (scratch & O2_PLL_LOCK_STATUS)
- break;
- if (timedout) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- sdhci_dumpregs(host);
- goto out;
- }
- udelay(10);
- }
-
- /* Wait for card detect finish */
- udelay(1);
- sdhci_o2_wait_card_detect_stable(host);
-
-out:
- /* Cancel PLL force active */
- scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1);
- scratch32 &= ~O2_PLL_FORCE_ACTIVE;
- sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1);
-}
-
-static int sdhci_o2_get_cd(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
-
- sdhci_o2_enable_internal_clock(host);
-
- return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-}
-
static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk)
{
/* Enable internal clock */
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+ sdhci_o2_enable_internal_clock(host);
if (sdhci_o2_get_cd(host->mmc)) {
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -395,12 +513,14 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
+ struct o2_host *o2_host = sdhci_pci_priv(slot);
u32 reg, caps;
int ret;
chip = slot->chip;
host = slot->host;
+ o2_host->dll_adjust_count = 0;
caps = sdhci_readl(host, SDHCI_CAPABILITIES);
/*
@@ -432,7 +552,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
mmc_hostname(host->mmc));
host->flags &= ~SDHCI_SIGNALING_330;
host->flags |= SDHCI_SIGNALING_180;
- host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
host->mmc->caps2 |= MMC_CAP2_NO_SD;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
pci_write_config_dword(chip->pdev,
@@ -682,9 +801,11 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
const struct sdhci_pci_fixes sdhci_o2 = {
.probe = sdhci_pci_o2_probe,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
.probe_slot = sdhci_pci_o2_probe_slot,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_o2_resume,
#endif
.ops = &sdhci_pci_o2_ops,
+ .priv_size = sizeof(struct o2_host),
};
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index cdd15f357d01..1abc9d47a4c0 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -54,6 +54,7 @@
#define PCI_DEVICE_ID_INTEL_EHL_SD 0x4b48
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
+#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d268b3b8850a..328b132bbe57 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -118,12 +118,10 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
size_t priv_size)
{
struct sdhci_host *host;
- struct resource *iomem;
void __iomem *ioaddr;
int irq, ret;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr)) {
ret = PTR_ERR(ioaddr);
goto err;
@@ -131,7 +129,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ number\n");
ret = irq;
goto err;
}
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 3ddecf479295..e55037ceda73 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
- return sdhci_runtime_resume_host(host);
+ return sdhci_runtime_resume_host(host, 0);
}
#endif
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 8e4a8ba33f05..51e096f27388 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -490,10 +490,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq specified\n");
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
if (IS_ERR(host)) {
@@ -611,6 +609,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
switch (pdata->max_width) {
case 8:
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ /* Fall through */
case 4:
host->mmc->caps |= MMC_CAP_4_BIT_DATA;
break;
@@ -745,7 +744,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
clk_prepare_enable(busclk);
if (ourhost->cur_clk >= 0)
clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
- ret = sdhci_runtime_resume_host(host);
+ ret = sdhci_runtime_resume_host(host, 0);
return ret;
}
#endif
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6ee340a3fb3a..d07b9793380f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
u32 div, val, mask;
- div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
- sdhci_enable_clk(host, clk);
+ div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+ div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ sdhci_enable_clk(host, div);
/* enable auto gate sdhc_enable_auto_gate */
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
return 1 << 31;
}
+static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
+{
+ return 0;
+}
+
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
.set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
.hw_reset = sdhci_sprd_hw_reset,
.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
+ .get_ro = sdhci_sprd_get_ro,
};
static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
}
static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MISSING_CAPS,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
- SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
+ SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_sprd_ops,
};
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sdhci_enable_v4_mode(host);
+ /*
+ * Supply the existing CAPS, but clear the UHS-I modes. This
+ * will allow these modes to be specified only by device
+ * tree properties through mmc_of_parse().
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
@@ -624,6 +644,7 @@ err_cleanup_host:
sdhci_cleanup_host(host);
pm_runtime_disable:
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
@@ -695,7 +716,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
if (ret)
goto clk_disable;
- sdhci_runtime_resume_host(host);
+ sdhci_runtime_resume_host(host, 1);
return 0;
clk_disable:
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f4d4761cf20a..02d8f524bb9e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
}
}
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
+{
+ /*
+ * Write-enable shall be assumed if GPIO is missing in a board's
+ * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
+ * Tegra.
+ */
+ return mmc_gpio_get_ro(host->mmc);
+}
+
static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
};
static const struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
};
static const struct sdhci_ops tegra114_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
};
static const struct sdhci_ops tegra210_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_w = tegra210_sdhci_writew,
.write_l = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
};
static const struct sdhci_ops tegra186_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.set_clock = tegra_sdhci_set_clock,
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 8a18f14cf842..1dea1ba66f7b 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev)
priv->restore_needed = false;
}
- ret = sdhci_runtime_resume_host(host);
+ ret = sdhci_runtime_resume_host(host, 0);
if (ret)
goto out;
return 0;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 59acf8e3331e..4b297f397326 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -668,10 +668,10 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
/* 32-bit and 64-bit descriptors have these members in same position */
dma_desc->cmd = cpu_to_le16(cmd);
dma_desc->len = cpu_to_le16(len);
- dma_desc->addr_lo = cpu_to_le32((u32)addr);
+ dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
if (host->flags & SDHCI_USE_64_BIT_DMA)
- dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
+ dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
*desc += host->desc_sz;
}
@@ -816,6 +816,13 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
}
}
+static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
+{
+ sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
+}
+
static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
if (host->bounce_buffer)
@@ -826,13 +833,10 @@ static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
- if (host->v4_mode) {
- sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
- if (host->flags & SDHCI_USE_64_BIT_DMA)
- sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
- } else {
+ if (host->v4_mode)
+ sdhci_set_adma_addr(host, addr);
+ else
sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
- }
}
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
@@ -1095,12 +1099,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->flags &= ~SDHCI_REQ_USE_DMA;
} else if (host->flags & SDHCI_USE_ADMA) {
sdhci_adma_table_pre(host, data, sg_cnt);
-
- sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
- if (host->flags & SDHCI_USE_64_BIT_DMA)
- sdhci_writel(host,
- (u64)host->adma_addr >> 32,
- SDHCI_ADMA_ADDRESS_HI);
+ sdhci_set_adma_addr(host, host->adma_addr);
} else {
WARN_ON(sg_cnt != 1);
sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
@@ -1636,8 +1635,8 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
+ /* Wait max 150 ms */
+ timeout = ktime_add_ms(ktime_get(), 150);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
@@ -1653,6 +1652,29 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
udelay(10);
}
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
+ clk |= SDHCI_CLOCK_PLL_EN;
+ clk &= ~SDHCI_CLOCK_INT_STABLE;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 150 ms */
+ timeout = ktime_add_ms(ktime_get(), 150);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: PLL clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+ }
+
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
@@ -1849,7 +1871,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- else if (timing == MMC_TIMING_UHS_SDR25)
+ else if (timing == MMC_TIMING_SD_HS ||
+ timing == MMC_TIMING_MMC_HS ||
+ timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_UHS_SDR50)
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
@@ -2120,11 +2144,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
pm_runtime_get_noresume(host->mmc->parent);
spin_lock_irqsave(&host->lock, flags);
- if (enable)
- host->flags |= SDHCI_SDIO_IRQ_ENABLED;
- else
- host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
-
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
@@ -2139,8 +2158,7 @@ static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
- sdhci_enable_sdio_irq_nolock(host, true);
+ sdhci_enable_sdio_irq_nolock(host, true);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2305,7 +2323,7 @@ void sdhci_reset_tuning(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
-static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
+void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
sdhci_reset_tuning(host);
@@ -2316,6 +2334,7 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
mmc_abort_tuning(host->mmc, opcode);
}
+EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
/*
* We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
@@ -3024,7 +3043,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
spin_lock(&host->lock);
- if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
+ if (host->runtime_suspended) {
spin_unlock(&host->lock);
return IRQ_NONE;
}
@@ -3320,7 +3339,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
-int sdhci_runtime_resume_host(struct sdhci_host *host)
+int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
{
struct mmc_host *mmc = host->mmc;
unsigned long flags;
@@ -3331,7 +3350,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
host->ops->enable_dma(host);
}
- sdhci_init(host, 0);
+ sdhci_init(host, soft_reset);
if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
mmc->ios.power_mode != MMC_POWER_OFF) {
@@ -3358,7 +3377,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
host->runtime_suspended = false;
/* Enable SDIO IRQ */
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ if (sdio_irq_claimed(mmc))
sdhci_enable_sdio_irq_nolock(host, true);
/* Enable Card Detection */
@@ -3565,7 +3584,8 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
return ret;
}
-void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
+ const u32 *caps, const u32 *caps1)
{
u16 v;
u64 dt_caps_mask = 0;
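
Side note on the sdhci.c hunks above: they factor the ADMA descriptor-table address programming into a sdhci_set_adma_addr() helper and replace the open-coded (u32)/(u64 >> 32) casts with the kernel's lower_32_bits()/upper_32_bits() helpers. A minimal user-space sketch of what those two helpers compute is below; the macro bodies are stand-ins for illustration only, and the address value is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in definitions for illustration; the kernel provides these macros. */
    #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
    #define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

    int main(void)
    {
        uint64_t adma_addr = 0x000000012345a000ull; /* hypothetical descriptor table address */

        /* The low word always goes to SDHCI_ADMA_ADDRESS; the high word is only
         * written to SDHCI_ADMA_ADDRESS_HI when 64-bit DMA is in use. */
        printf("ADMA lo = 0x%08x\n", (unsigned)lower_32_bits(adma_addr));
        printf("ADMA hi = 0x%08x\n", (unsigned)upper_32_bits(adma_addr));
        return 0;
    }

The helpers avoid depending on the width of dma_addr_t on 32-bit configurations, which is why the patch prefers them over the raw casts.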
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 89fd96596a1f..a29c4cd2d92e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -114,6 +114,7 @@
#define SDHCI_DIV_HI_MASK 0x300
#define SDHCI_PROG_CLOCK_MODE 0x0020
#define SDHCI_CLOCK_CARD_EN 0x0004
+#define SDHCI_CLOCK_PLL_EN 0x0008
#define SDHCI_CLOCK_INT_STABLE 0x0002
#define SDHCI_CLOCK_INT_EN 0x0001
@@ -511,7 +512,6 @@ struct sdhci_host {
#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
-#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */
@@ -738,8 +738,8 @@ static inline void *sdhci_priv(struct sdhci_host *host)
}
void sdhci_card_detect(struct sdhci_host *host);
-void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps,
- u32 *caps1);
+void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
+ const u32 *caps, const u32 *caps1);
int sdhci_setup_host(struct sdhci_host *host);
void sdhci_cleanup_host(struct sdhci_host *host);
int __sdhci_add_host(struct sdhci_host *host);
@@ -752,11 +752,6 @@ static inline void sdhci_read_caps(struct sdhci_host *host)
__sdhci_read_caps(host, NULL, NULL, NULL);
}
-static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
-{
- return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
-}
-
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
@@ -781,7 +776,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
-int sdhci_runtime_resume_host(struct sdhci_host *host);
+int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
#endif
void sdhci_cqe_enable(struct mmc_host *mmc);
@@ -796,5 +791,6 @@ void sdhci_start_tuning(struct sdhci_host *host);
void sdhci_end_tuning(struct sdhci_host *host);
void sdhci_reset_tuning(struct sdhci_host *host);
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
+void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode);
#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index e369cbf1ff02..f8b939e63e02 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -119,10 +119,8 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
u32 reg = 0;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "%s: no irq specified\n", __func__);
+ if (irq < 0)
return irq;
- }
host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8539e10784b4..93e83ad25976 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev)
host->mmc->f_max = pdata->hclk;
host->mmc->f_min = pdata->hclk / 512;
- pm_runtime_enable(&pdev->dev);
-
ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -193,7 +191,6 @@ host_remove:
tmio_mmc_host_remove(host);
host_free:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
cell_disable:
if (cell->disable)
cell->disable(pdev);
@@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev)
if (cell->disable)
cell->disable(pdev);
- pm_runtime_disable(&pdev->dev);
-
return 0;
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c5ba13fae399..2f0b092d6dcc 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -163,6 +163,7 @@ struct tmio_mmc_host {
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
+ bool runtime_synced;
bool sdio_irq_enabled;
/* Mandatory callback */
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 2cb3f951c3e2..9b6e1001e77c 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-/**
- * tmio_mmc_host_probe() - Common probe for all implementations
- * @_host: Host to probe
- *
- * Perform tasks common to all implementations probe functions.
- *
- * The caller should have called pm_runtime_enable() prior to calling
- * the common probe function.
- */
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
@@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
ret = mmc_add_host(mmc);
if (ret)
goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
+ pm_runtime_put(&pdev->dev);
return 0;
remove_host:
+ pm_runtime_put_noidle(&pdev->dev);
tmio_mmc_host_remove(_host);
return ret;
}
@@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
struct platform_device *pdev = host->pdev;
struct mmc_host *mmc = host->mmc;
+ pm_runtime_get_sync(&pdev->dev);
+
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
- if (!host->native_hotplug)
- pm_runtime_get_sync(&pdev->dev);
-
dev_pm_qos_hide_latency_limit(&pdev->dev);
mmc_remove_host(mmc);
@@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
tmio_mmc_release_dma(host);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (host->native_hotplug)
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
@@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
+ if (!host->runtime_synced) {
+ host->runtime_synced = true;
+ return 0;
+ }
+
tmio_mmc_clk_enable(host);
tmio_mmc_hw_reset(host->mmc);
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 49aad9a79c18..0c72ec5546c3 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -557,10 +557,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
int irq, ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get IRQ number");
+ if (irq < 0)
return irq;
- }
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -631,7 +629,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
host->clk_disable = uniphier_sd_clk_disable;
host->set_clock = uniphier_sd_set_clock;
- pm_runtime_enable(&pdev->dev);
ret = uniphier_sd_clk_enable(host);
if (ret)
goto free_host;
@@ -653,7 +650,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
free_host:
tmio_mmc_host_free(host);
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -664,7 +660,6 @@ static int uniphier_sd_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
uniphier_sd_clk_disable(host);
- pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index cff6bbd226f5..a4d8968d133d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -1,5 +1,6 @@
menuconfig MTD_HYPERBUS
tristate "HyperBus support"
+ depends on HAS_IOMEM
select MTD_CFI
select MTD_MAP_BANK_WIDTH_2
select MTD_CFI_AMDSTD
@@ -14,8 +15,9 @@ if MTD_HYPERBUS
config HBMC_AM654
tristate "HyperBus controller driver for AM65x SoC"
+ depends on ARM64 || COMPILE_TEST
select MULTIPLEXER
- select MUX_MMIO
+ imply MUX_MMIO
help
This is the driver for HyperBus controller on TI's AM65x and
other SoCs
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
+ /* Fall through */
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index a1f8fe1abb10..e082d632fb74 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
+ /* fall through */
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 1622d3145587..8ca9fad6e6ad 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
return MICRON_ON_DIE_UNSUPPORTED;
+ /*
+ * It seems that there are devices which do not support ECC officially.
+ * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports
+ * enabling the ECC feature but don't reflect that to the READ_ID table.
+ * So we have to guarantee that we disable the ECC feature directly
+ * after we did the READ_ID table command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- if (!(id[4] & MICRON_ID_ECC_ENABLED))
- return MICRON_ON_DIE_UNSUPPORTED;
-
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 03cc788511d5..654bdc41fc99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
default:
/* Kept only for backward compatibility purpose. */
params->quad_enable = spansion_quad_enable;
- if (nor->clear_sr_bp)
- nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
break;
}
@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
int err;
if (nor->clear_sr_bp) {
+ if (nor->quad_enable == spansion_quad_enable)
+ nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
+
err = nor->clear_sr_bp(nor);
if (err) {
dev_err(nor->dev,
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index d1271c1ee23c..1fb22388e7e0 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -405,17 +405,12 @@ int mux_control_deselect(struct mux_control *mux)
}
EXPORT_SYMBOL_GPL(mux_control_deselect);
-static int of_dev_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
struct device *dev;
- dev = class_find_device(&mux_class, NULL, np, of_dev_node_match);
+ dev = class_find_device_by_of_node(&mux_class, np);
return dev ? to_mux_chip(dev) : NULL;
}
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 11c5bad95226..14a5fb378145 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -363,10 +363,13 @@ static int __init arcrimi_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 3: /* Node ID */
node = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 28510e33924f..cd27fdc1059b 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -197,16 +197,22 @@ static int __init com20020isa_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_info("Too many arguments\n");
+ /* Fall through */
case 6: /* Timeout */
timeout = ints[6];
+ /* Fall through */
case 5: /* CKP value */
clockp = ints[5];
+ /* Fall through */
case 4: /* Backplane flag */
backplane = ints[4];
+ /* Fall through */
case 3: /* Node ID */
node = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 2c546013a980..186bbf87bc84 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -363,8 +363,10 @@ static int __init com90io_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index ca4a57c30bf8..bd75d06ad7df 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -693,10 +693,13 @@ static int __init com90xx_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 3: /* Mem address */
shmem = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9b7016abca2f..931d9d935686 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
@@ -2196,6 +2198,15 @@ static void bond_miimon_commit(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
+ /* For 802.3ad mode, check current slave speed and
+ * duplex again in case its port was disabled after
+ * invalid speed/duplex reporting but recovered before
+ * link monitoring could make a decision on the actual
+ * link status
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ slave->link == BOND_LINK_UP)
+ bond_3ad_adapter_speed_duplex_changed(slave);
continue;
case BOND_LINK_UP:
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 1d4075903971..c8e1a04ba384 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -898,7 +898,8 @@ static void at91_irq_err_state(struct net_device *dev,
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
- case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ /* fall through */
+ case CAN_STATE_ERROR_WARNING:
/*
* from: ERROR_ACTIVE, ERROR_WARNING
* to : ERROR_PASSIVE, BUS_OFF
@@ -947,7 +948,8 @@ static void at91_irq_err_state(struct net_device *dev,
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
- case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ /* fall through */
+ case CAN_STATE_ERROR_WARNING:
reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
reg_ier = AT91_IRQ_ERRP;
break;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b6b93a2d93a5..483d270664cc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev)
return -EINVAL;
dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index f2fe344593d5..fcec8bcb53d6 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -400,9 +400,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
priv->write(reg_mcr, &regs->mcr);
}
-static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int ackval;
u32 reg_mcr;
reg_mcr = priv->read(&regs->mcr);
@@ -412,20 +413,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
/* enable stop request */
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+
+ /* get stop acknowledgment */
+ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+ ackval, ackval & (1 << priv->stm.ack_bit),
+ 0, FLEXCAN_TIMEOUT_US))
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
+static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int ackval;
u32 reg_mcr;
/* remove stop request */
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
+ /* get stop acknowledgment */
+ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+ ackval, !(ackval & (1 << priv->stm.ack_bit)),
+ 0, FLEXCAN_TIMEOUT_US))
+ return -ETIMEDOUT;
+
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
+
+ return 0;
}
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -1437,10 +1455,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->stm.gpr = syscon_node_to_regmap(gpr_np);
- of_node_put(gpr_np);
if (IS_ERR(priv->stm.gpr)) {
dev_dbg(&pdev->dev, "could not find gpr regmap\n");
- return PTR_ERR(priv->stm.gpr);
+ ret = PTR_ERR(priv->stm.gpr);
+ goto out_put_node;
}
priv->stm.req_gpr = out_val[1];
@@ -1455,7 +1473,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
- return 0;
+out_put_node:
+ of_node_put(gpr_np);
+ return ret;
}
static const struct of_device_id flexcan_of_match[] = {
@@ -1612,7 +1632,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
*/
if (device_may_wakeup(device)) {
enable_irq_wake(dev->irq);
- flexcan_enter_stop_mode(priv);
+ err = flexcan_enter_stop_mode(priv);
+ if (err)
+ return err;
} else {
err = flexcan_chip_disable(priv);
if (err)
@@ -1662,10 +1684,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
if (netif_running(dev) && device_may_wakeup(device)) {
flexcan_enable_wakeup_irq(priv, false);
- flexcan_exit_stop_mode(priv);
+ err = flexcan_exit_stop_mode(priv);
+ if (err)
+ return err;
}
return 0;
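
Side note on the flexcan hunks above: the stop-mode entry/exit helpers now return int and poll a GPR acknowledge bit with regmap_read_poll_timeout(), propagating -ETIMEDOUT to suspend/resume. A compact user-space sketch of the same bounded-poll idea follows; the register read is simulated and everything except the FLEXCAN_TIMEOUT_US name and the ack-bit concept is illustrative.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLEXCAN_TIMEOUT_US 50 /* poll budget, in microseconds */

    /* Simulated register read: pretend the ack bit (bit 0) sets after a few polls. */
    static unsigned int read_ack_gpr(void)
    {
        static int calls;
        return (++calls >= 3) ? 0x1 : 0x0;
    }

    /* Poll until the ack bit matches the wanted state or the budget is spent,
     * mirroring what regmap_read_poll_timeout() does for the driver. */
    static int poll_ack(bool want_set)
    {
        for (int waited = 0; waited <= FLEXCAN_TIMEOUT_US; waited++) {
            unsigned int val = read_ack_gpr();
            bool set = val & 0x1;

            if (set == want_set)
                return 0;
            /* a real poll would sleep here between reads */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("enter stop mode: %d\n", poll_ack(true));
        return 0;
    }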
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 7f6a3b971da9..13b10cbf236a 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -660,7 +660,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd)
pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
PCIEFD_REG_CAN_CLK_SEL);
- /* fallthough */
+ /* fall through */
case CANFD_CLK_SEL_80MHZ:
priv->ucan.can.clock.freq = 80 * 1000 * 1000;
break;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 05410008aa6b..de34a4b82d4a 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* All packets processed */
if (num_pkts < quota) {
- napi_complete_done(napi, num_pkts);
- /* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
- RCANFD_RFCC_RFIE);
+ if (napi_complete_done(napi, num_pkts)) {
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ }
}
return num_pkts;
}
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 185c7f7d38a4..5e0d5e8101c8 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 44e99e3d7134..12358f06d194 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
return regulator_disable(reg);
}
-static void mcp251x_open_clean(struct net_device *net)
-{
- struct mcp251x_priv *priv = netdev_priv(net);
- struct spi_device *spi = priv->spi;
-
- free_irq(spi->irq, priv);
- mcp251x_hw_sleep(spi);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
-}
-
static int mcp251x_stop(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
@@ -860,7 +849,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF)
priv->can.can_stats.error_warning++;
- case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ /* fall through */
+ case CAN_STATE_ERROR_WARNING:
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF)
priv->can.can_stats.error_passive++;
@@ -940,37 +930,43 @@ static int mcp251x_open(struct net_device *net)
flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
- goto open_unlock;
+ goto out_close;
}
priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clean;
+ }
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
ret = mcp251x_hw_reset(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_setup(net, spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_set_normal_mode(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
+ mutex_unlock(&priv->mcp_lock);
-open_unlock:
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+out_clean:
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
+out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
mutex_unlock(&priv->mcp_lock);
return ret;
}
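
Side note on the mcp251x_open() rework above: it drops the mcp251x_open_clean() helper in favour of ordered goto labels, so each failure point unwinds exactly the resources acquired so far, and the new workqueue allocation failure is handled. A small stand-alone sketch of that staged-unwind pattern is below; the resource names are made up for illustration.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for resources acquired in order during open(). */
    static int *grab_irq(void) { return malloc(sizeof(int)); }
    static int *grab_wq(void)  { return malloc(sizeof(int)); }
    static int  hw_setup(void) { return 0; } /* pretend hardware setup succeeds */

    static int open_device(void)
    {
        int *irq, *wq;
        int ret;

        irq = grab_irq();
        if (!irq)
            return -ENOMEM;

        wq = grab_wq();
        if (!wq) {
            ret = -ENOMEM;
            goto out_free_irq;
        }

        ret = hw_setup();
        if (ret)
            goto out_free_wq; /* later failures unwind everything above */

        return 0;

    out_free_wq:
        free(wq);
    out_free_irq:
        free(irq);
        return ret;
    }

    int main(void)
    {
        printf("open_device() = %d\n", open_device());
        return 0;
    }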
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 15ce5ad1d632..617da295b6c1 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- /* else: fall through */
+ /* fall through */
case CAN_STATE_ERROR_WARNING:
if (n & PCAN_USB_ERROR_BUS_HEAVY) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 458154c9b482..65dce642b86b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
dev->state &= ~PCAN_USB_STATE_STARTED;
netif_stop_queue(netdev);
+ close_candev(netdev);
+
+ dev->can.state = CAN_STATE_STOPPED;
+
/* unlink all pending urbs and free used memory */
peak_usb_unlink_all_urbs(dev);
if (dev->adapter->dev_stop)
dev->adapter->dev_stop(dev);
- close_candev(netdev);
-
- dev->can.state = CAN_STATE_STOPPED;
-
/* can set bus off now */
if (dev->adapter->dev_set_bus) {
int err = dev->adapter->dev_set_bus(dev, 0);
@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 34761c3a6286..47cc1ff5b88e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
goto err_out;
/* allocate command buffer once for all for the interface */
- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
GFP_KERNEL);
if (!pdev->cmd_buffer_addr)
goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 178bb7cff0c1..53cb2f72bdd0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
u8 *buffer;
int err;
- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3811fdbda13e..28c963a21dac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
state->interface != PHY_INTERFACE_MODE_INTERNAL &&
state->interface != PHY_INTERFACE_MODE_MOCA) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- dev_err(ds->dev,
- "Unsupported interface: %d\n", state->interface);
+ if (port != core_readl(priv, CORE_IMP0_PRT_ID))
+ dev_err(ds->dev,
+ "Unsupported interface: %d for port %d\n",
+ state->interface, port);
return;
}
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
u32 id_mode_dis = 0, port_mode;
u32 reg, offset;
+ if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ return;
+
if (priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
else
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 5a9e27b337a8..098b01e4ed1a 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9897" },
{ .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9563" },
+ { .compatible = "microchip,ksz8563" },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index ee7096d8af07..72ec250b9540 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
{ \
+ .name = #width, \
.val_bits = (width), \
.reg_stride = (width) / 8, \
.reg_bits = (regbits) + (regalign), \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 6b17cd961d06..d0a97eb73a37 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -27,7 +27,6 @@
#include <linux/platform_data/mv88e6xxx.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
-#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>
@@ -430,7 +429,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
return 0;
/* Port's MAC control must not be changed unless the link is down */
- err = chip->info->ops->port_set_link(chip, port, 0);
+ err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
if (err)
return err;
@@ -482,30 +481,6 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
return port < chip->info->num_internal_phys;
}
-/* We expect the switch to perform auto negotiation if there is a real
- * phy. However, in the case of a fixed link phy, we force the port
- * settings from the fixed link settings.
- */
-static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (!phy_is_pseudo_fixed_link(phydev) &&
- mv88e6xxx_phy_is_internal(ds, port))
- return;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
- phydev->duplex, phydev->pause,
- phydev->interface);
- mv88e6xxx_reg_unlock(chip);
-
- if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
-}
-
static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
@@ -2721,6 +2696,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
+ of_node_put(child);
return err;
}
}
@@ -4638,7 +4614,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
- .adjust_link = mv88e6xxx_adjust_link,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_link_state,
.phylink_mac_config = mv88e6xxx_mac_config,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 232e8cc96f6d..16f15c93a102 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <john@phrozen.org>
*/
@@ -583,8 +583,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
- if (err)
+ if (err) {
+ of_node_put(port);
+ of_node_put(ports);
return err;
+ }
if (!dsa_is_user_port(priv->ds, reg))
continue;
@@ -595,6 +598,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
internal_mdio_mask |= BIT(reg);
}
+ of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
@@ -935,6 +939,8 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
qca8k_port_set_status(priv, port, 1);
priv->port_sts[port].enabled = 1;
+ phy_support_asym_pause(phy);
+
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 6bfb1696a6f2..9988c9d18567 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -277,6 +277,18 @@ sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
}
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
static void
sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -477,7 +489,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
/* SJA1105E/T: First generation */
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_L2_LOOKUP] = {
- .entry_packing = sja1105et_l2_lookup_entry_packing,
+ .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105et_l2_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 32bf3a7cc3b6..df976b259e43 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -218,7 +218,7 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
/* This selects between Independent VLAN Learning (IVL) and
* Shared VLAN Learning (SVL)
*/
- .shared_learn = false,
+ .shared_learn = true,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -625,6 +625,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
+ of_node_put(child);
return -ENODEV;
}
@@ -634,6 +635,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
+ of_node_put(child);
return -ENODEV;
}
ports[index].phy_mode = phy_mode;
@@ -643,6 +645,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
+ of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1089,8 +1092,13 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
+ if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+ l2_lookup.mask_vlanid = 0;
+ l2_lookup.mask_iotag = 0;
+ }
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1147,8 +1155,13 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
+ if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+ l2_lookup.mask_vlanid = 0;
+ l2_lookup.mask_iotag = 0;
+ }
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1178,60 +1191,31 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- u16 rx_vid, tx_vid;
- int rc, i;
- if (dsa_port_is_vlan_filtering(&ds->ports[port]))
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
-
- /* Since we make use of VLANs even when the bridge core doesn't tell us
- * to, translate these FDB entries into the correct dsa_8021q ones.
- * The basic idea (also repeats for removal below) is:
- * - Each of the other front-panel ports needs to be able to forward a
- * pvid-tagged (aka tagged with their rx_vid) frame that matches this
- * DMAC.
- * - The CPU port (aka the tx_vid of this port) needs to be able to
- * send a frame matching this DMAC to the specified port.
- * For a better picture see net/dsa/tag_8021q.c.
+ /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
+ * so the switch still does some VLAN processing internally.
+ * But Shared VLAN Learning (SVL) is also active, and it will take
+ * care of autonomous forwarding between the unique pvid's of each
+ * port. Here we just make sure that users can't add duplicate FDB
+ * entries when in this mode - the actual VID doesn't matter except
+ * for what gets printed in 'bridge fdb show'. In the case of zero,
+ * no VID gets printed at all.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (i == port)
- continue;
- if (i == dsa_upstream_port(priv->ds, port))
- continue;
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ vid = 0;
- rx_vid = dsa_8021q_rx_vid(ds, i);
- rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid);
- if (rc < 0)
- return rc;
- }
- tx_vid = dsa_8021q_tx_vid(ds, port);
- return priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- u16 rx_vid, tx_vid;
- int rc, i;
- if (dsa_port_is_vlan_filtering(&ds->ports[port]))
- return priv->info->fdb_del_cmd(ds, port, addr, vid);
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ vid = 0;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (i == port)
- continue;
- if (i == dsa_upstream_port(priv->ds, port))
- continue;
-
- rx_vid = dsa_8021q_rx_vid(ds, i);
- rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid);
- if (rc < 0)
- return rc;
- }
- tx_vid = dsa_8021q_tx_vid(ds, port);
- return priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
@@ -1239,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
- u16 rx_vid, tx_vid;
int i;
- rx_vid = dsa_8021q_rx_vid(ds, port);
- tx_vid = dsa_8021q_tx_vid(ds, port);
-
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1270,39 +1250,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS
- * bit, so it doesn't tell us whether a FDB entry is static
- * or not.
- * But, of course, we can find out - we're the ones who added
- * it in the first place.
- */
- if (priv->info->device_id == SJA1105E_DEVICE_ID ||
- priv->info->device_id == SJA1105T_DEVICE_ID) {
- int match;
-
- match = sja1105_find_static_fdb_entry(priv, port,
- &l2_lookup);
- l2_lookup.lockeds = (match >= 0);
- }
-
- /* We need to hide the dsa_8021q VLANs from the user. This
- * basically means hiding the duplicates and only showing
- * the pvid that is supposed to be active in standalone and
- * non-vlan_filtering modes (aka 1).
- * - For statically added FDB entries (bridge fdb add), we
- * can convert the TX VID (coming from the CPU port) into the
- * pvid and ignore the RX VIDs of the other ports.
- * - For dynamically learned FDB entries, a single entry with
- * no duplicates is learned - that which has the real port's
- * pvid, aka RX VID.
- */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
- if (l2_lookup.vlanid == tx_vid ||
- l2_lookup.vlanid == rx_vid)
- l2_lookup.vlanid = 1;
- else
- continue;
- }
+ /* We need to hide the dsa_8021q VLANs from the user. */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
return 0;
@@ -1594,6 +1544,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
*/
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -1622,6 +1573,28 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
+ /* VLAN filtering => independent VLAN learning.
+ * No VLAN filtering => shared VLAN learning.
+ *
+ * In shared VLAN learning mode, untagged traffic still gets
+ * pvid-tagged, and the FDB table gets populated with entries
+ * containing the "real" (pvid or from VLAN tag) VLAN ID.
+ * However the switch performs a masked L2 lookup in the FDB,
+ * effectively only looking up a frame's DMAC (and not VID) for the
+ * forwarding decision.
+ *
+ * This is extremely convenient for us, because in modes with
+ * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
+ * each front panel port. This is good for identification but breaks
+ * learning badly - the VID of the learnt FDB entry is unique, aka
+ * no frames coming from any other port are going to have it. So
+ * for forwarding purposes, this is as though learning was broken
+ * (all frames get flooded).
+ */
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+ l2_lookup_params->shared_learn = !enabled;
+
rc = sja1105_static_config_reload(priv);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1751,6 +1724,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
cancel_work_sync(&priv->tagger_data.rxtstamp_work);
skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ sja1105_ptp_clock_unregister(priv);
+ sja1105_static_config_free(&priv->static_config);
}
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
@@ -2208,9 +2183,7 @@ static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
- sja1105_ptp_clock_unregister(priv);
dsa_unregister_switch(priv->ds);
- sja1105_static_config_free(&priv->static_config);
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d19cfdf681af..d8e8dd59f3d1 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -369,16 +369,15 @@ int sja1105_ptp_clock_register(struct sja1105_private *priv)
.mult = SJA1105_CC_MULT,
};
mutex_init(&priv->ptp_lock);
- INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
-
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
-
priv->ptp_caps = sja1105_ptp_caps;
priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
if (IS_ERR_OR_NULL(priv->clock))
return PTR_ERR(priv->clock);
+ INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
+ schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+
return sja1105_ptp_reset(priv);
}
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2a3e2450968e..a9478577b495 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -12,8 +12,8 @@ config NET_VENDOR_8390
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Western Digital cards. If you say Y, you will be
- asked for your specific card in the following questions.
+ the questions about National Semiconductor 8390 cards. If you say Y,
+ you will be asked for your specific card in the following questions.
if NET_VENDOR_8390
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 93a2d4deb27c..dc9dee55976b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -151,7 +151,6 @@ config NET_NETX
To compile this driver as a module, choose M here. The module
will be called netx-eth.
-source "drivers/net/ethernet/nuvoton/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
source "drivers/net/ethernet/oki-semi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fb9155cffcff..4bc3c95562bf 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,7 +65,6 @@ obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
-obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index ea34bcb868b5..edbb4b3604c7 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2362,7 +2362,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Allocate memory for the TCB's (Transmit Control Block) */
tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
- GFP_ATOMIC | GFP_DMA);
+ GFP_KERNEL | GFP_DMA);
if (!tx_ring->tcb_ring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3434730a7699..0537df06a9b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -860,7 +860,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_clk_disable_unprepare;
}
- db->phy_node = of_parse_phandle(np, "phy", 0);
+ db->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!db->phy_node)
+ db->phy_node = of_parse_phandle(np, "phy", 0);
if (!db->phy_node) {
dev_err(&pdev->dev, "no associated PHY\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index de4950d2022e..9f965cdfff5c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -14,7 +14,7 @@ config NET_VENDOR_AMD
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
+ the kernel: saying N will just cause the configurator to skip all
the questions regarding AMD chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b41f23679a08..7ce9c69e9c44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
ret = xgbe_platform_init();
if (ret)
- return ret;
+ goto err_platform_init;
ret = xgbe_pci_init();
if (ret)
- return ret;
+ goto err_pci_init;
return 0;
+
+err_pci_init:
+ xgbe_platform_exit();
+err_platform_init:
+ unregister_netdevice_notifier(&xgbe_netdev_notifier);
+ return ret;
}
static void __exit xgbe_mod_exit(void)
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index fde7ae33e302..f78b9c841296 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -11,8 +11,8 @@ config NET_VENDOR_APPLE
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about IBM devices. If you say Y, you will be asked for
+ kernel: saying N will just cause the configurator to skip all the
+ questions about Apple devices. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 440690b18734..aee827f07c16 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
break;
}
- if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ if (rule && rule->type == aq_rx_filter_vlan &&
+ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
struct ethtool_rxnfc cmd;
cmd.fs.location = rule->aq_fsp.location;
@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
!(aq_nic->packet_filter & IFF_PROMISC));
aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 100722ad5c2d..b4a0fb281e69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
if (err < 0)
goto err_exit;
+ err = aq_filters_vlans_update(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e1392766e21e..8f66e7817811 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self)
self->aq_nic_cfg.link_irq_vec);
err = request_threaded_irq(irqvec, NULL,
aq_linkstate_threaded_isr,
- IRQF_SHARED,
+ IRQF_SHARED | IRQF_ONESHOT,
self->ndev->name, self);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 715685aa48c3..28892b8acd0e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
}
}
+err_exit:
if (!was_tx_cleaned)
work_done = budget;
@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
1U << self->aq_ring_param.vec_idx);
}
}
-err_exit:
+
return work_done;
}
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 8b69d0d7e726..6703960c7cf5 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1141,7 +1141,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
ring_size * AG71XX_DESC_SIZE,
- &tx->descs_dma, GFP_ATOMIC);
+ &tx->descs_dma, GFP_KERNEL);
if (!tx->descs_cpu) {
kfree(tx->buf);
tx->buf = NULL;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e9017caf024d..e24f5d2b6afe 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -14,9 +14,9 @@ config NET_VENDOR_BROADCOM
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
- the questions regarding AMD chipsets. If you say Y, you will be asked
- for your specific chipset/driver in the following questions.
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding Broadcom chipsets. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
if NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9c5cea8db16..9483553ce444 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -992,7 +992,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_priv *priv =
container_of(napi, struct bcm_sysport_priv, napi);
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
unsigned int work_done = 0;
work_done = bcm_sysport_desc_rx(priv, budget);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 656ed80647f0..d10b421ed1f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
sw_cons = txdata->tx_pkt_cons;
+ /* Ensure subsequent loads occur after hw_cons */
+ smp_rmb();
+
while (sw_cons != hw_cons) {
u16 pkt_cons;
@@ -1931,8 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) %
- (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3055,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* if VF indicate to PF this function is going down (PF will delete sp
* elements and clear initializations
*/
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_clear_vlan_info(bp);
bnx2x_vfpf_close_vf(bp);
- else if (unload_mode != UNLOAD_RECOVERY)
+ } else if (unload_mode != UNLOAD_RECOVERY) {
/* if this is a normal/close unload need to clean up chip*/
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
- else {
+ } else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c2f6e44e9a3f..8b08cb18e363 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
/**
* bnx2x_sp_event - handle ramrods completion.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2cc14db8f0ec..192ff8d5da32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
unsigned long ramrod_flags = 0, vlan_flags = 0;
- struct bnx2x_vlan_entry *vlan;
int rc;
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
if (rc)
return rc;
- /* Mark that hw forgot all entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
+ bnx2x_clear_vlan_info(bp);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7134d2c3eb1c..8dce4069472b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
if (bnapi->events & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bnapi->events & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
bnapi->events = 0;
}
@@ -2136,7 +2136,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
}
}
if (bp->flags & BNXT_FLAG_DIM) {
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
@@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
+ bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
int i, rc = 0;
u32 type;
@@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (rc)
goto err_out;
bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ /* If we have agg rings, post agg buffers first. */
+ if (!agg_rings)
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (agg_rings) {
type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
ring->fw_ring_id);
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
@@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
bnxt_hwrm_vnic_set_rss(bp, i, false);
}
-static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
- bool irq_re_init)
+static void bnxt_clear_vnic(struct bnxt *bp)
{
- if (bp->vnic_info) {
- bnxt_hwrm_clear_vnic_filter(bp);
+ if (!bp->vnic_info)
+ return;
+
+ bnxt_hwrm_clear_vnic_filter(bp);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
/* clear all RSS setting before free vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
- /* before free the vnic, undo the vnic tpa settings */
- if (bp->flags & BNXT_FLAG_TPA)
- bnxt_set_tpa(bp, false);
- bnxt_hwrm_vnic_free(bp);
}
+ /* before free the vnic, undo the vnic tpa settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ bnxt_hwrm_vnic_ctx_free(bp);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ bnxt_clear_vnic(bp);
bnxt_hwrm_ring_free(bp, close_path);
bnxt_hwrm_ring_grp_free(bp);
if (irq_re_init) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 549c90d3e465..c05d663212b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
if (idx)
req->dimensions = cpu_to_le16(1);
- if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
memcpy(data_addr, buf, bytesize);
-
- rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message_silent(bp, msg, msg_len,
+ HWRM_CMD_TIMEOUT);
+ }
if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
memcpy(buf, data_addr, bytesize);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c7ee63d69679..8445a0cce849 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err)
- goto flash_pkg_exit;
-
- if (resp->error_code) {
+ if (hwrm_err) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
- if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ if (resp->error_code && error_code ==
+ NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
hwrm_err = _hwrm_send_message(bp, &install,
sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
- if (hwrm_err)
- goto flash_pkg_exit;
}
+ if (hwrm_err)
+ goto flash_pkg_exit;
}
if (resp->result) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 6fe4a7174271..dd621f6bd127 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
- flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+ flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
}
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
goto free_node;
bnxt_tc_set_src_fid(bp, flow, src_fid);
-
- if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
- bnxt_tc_set_flow_dir(bp, flow, src_fid);
+ bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -EOPNOTSUPP;
@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
* 2. 15th bit of flow_handle must specify the flow
* direction (TX/RX).
*/
- if (flow_node->flow.dir == BNXT_DIR_RX)
+ if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index ffec57d1a5ec..4f05305052f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
__be16 inner_vlan_tci;
__be16 ether_type;
u8 num_vlans;
+ u8 dir;
+#define BNXT_DIR_RX 1
+#define BNXT_DIR_TX 0
};
struct bnxt_tc_l3_key {
@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
- u8 dir;
-#define BNXT_DIR_RX 1
-#define BNXT_DIR_TX 0
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a2b57807453b..b22196880d6d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.set_coalesce = bcmgenet_set_coalesce,
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* Power down the unimac, based on mode. */
@@ -1895,7 +1896,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
struct bcmgenet_rx_ring *ring = container_of(napi,
struct bcmgenet_rx_ring, napi);
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
unsigned int work_done;
work_done = bcmgenet_desc_rx(ring, budget);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5ca17e62dc3e..35b59b5edf0f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,emac", .data = &emac_config },
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
- { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
+ { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 73632b843749..b821c9e1604c 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
#include "cavium_ptp.h"
-#define DRV_NAME "Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 032224178b64..6dd65f9b347c 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct,
}
oct->num_iqs++;
- if (oct->fn_list.enable_io_queues(oct))
+ if (oct->fn_list.enable_io_queues(oct)) {
+ octeon_delete_instr_queue(oct, iq_no);
return 1;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index ad22554857bf..acb016834f04 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1381,24 +1381,18 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- int ret;
+ u8 *addr;
- ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
- "mac-address", mac, ETH_ALEN);
- if (ret)
- goto out;
-
- if (!is_valid_ether_addr(mac)) {
+ addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
+ if (!addr) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
dev_info(dev, "MAC address set to: %pM\n", mac);
- memcpy(dst, mac, ETH_ALEN);
-out:
- return ret;
+ ether_addr_copy(dst, mac);
+ return 0;
}
/* Currently only sets the MAC address. */
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
index 20c09cc4b323..60aa45b375b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb/my3126.c
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -94,7 +94,7 @@ static int my3126_interrupt_handler(struct cphy *cphy)
return cphy_cause_link_change;
}
-static void my3216_poll(struct work_struct *work)
+static void my3126_poll(struct work_struct *work)
{
struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(struct net_device *dev,
return NULL;
cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
- INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
+ INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
cphy->bmsr = 0;
return cphy;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 1e82b9efe447..58f89f6a040f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
- goto out_free_adapter;
+ goto out_free_adapter_nofail;
}
adapter->pdev = pdev;
@@ -3397,6 +3397,9 @@ out_free_dev:
if (adapter->port[i])
free_netdev(adapter->port[i]);
+out_free_adapter_nofail:
+ kfree_skb(adapter->nofail_skb);
+
out_free_adapter:
kfree(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 02959035ed3f..d692251ee252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
- if (err)
+ if (err) {
+ kvfree(t);
return err;
+ }
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
kvfree(t);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 67202b6f352e..4311ad9c84b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5561,7 +5561,6 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
char name[IFNAMSIZ];
u32 devcap2;
u16 flags;
- int pos;
/* If we want to instantiate Virtual Functions, then our
* parent bridge's PCI-E needs to support Alternative Routing
@@ -5569,9 +5568,8 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
* and above.
*/
pbridge = pdev->bus->self;
- pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
- pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
- pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+ pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
+ pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
!(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 312599c6b35a..e447976bdd3e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
- spin_lock_init(&new->lock);
+ if (new)
+ spin_lock_init(&new->lock);
return new;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 9dd5ed9a2965..f7fc553356f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7309,7 +7309,6 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
} else {
unsigned int pack_align;
unsigned int ingpad, ingpack;
- unsigned int pcie_cap;
/* T5 introduced the separation of the Free List Padding and
* Packing Boundaries. Thus, we can select a smaller Padding
@@ -7334,8 +7333,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* multiple of the Maximum Payload Size.
*/
pack_align = fl_align;
- pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
+ if (pci_is_pcie(adap->pdev)) {
unsigned int mps, mps_log;
u16 devctl;
@@ -7343,9 +7341,8 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
* [bits 7:5] encodes sizes as powers of 2 starting at
* 128 bytes.
*/
- pci_read_config_word(adap->pdev,
- pcie_cap + PCI_EXP_DEVCTL,
- &devctl);
+ pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
+ &devctl);
mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
mps = 1 << mps_log;
if (mps > pack_align)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ef5d61d57597..323976c811e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- spin_lock(&adapter->mcc_cq_lock);
+ spin_lock_bh(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
- spin_unlock(&adapter->mcc_cq_lock);
+ spin_unlock_bh(&adapter->mcc_cq_lock);
return status;
}
@@ -581,9 +581,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
- local_bh_disable();
status = be_process_mcc(adapter);
- local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b7a246b33599..4d8e40ac66d2 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4698,8 +4698,13 @@ int be_update_queues(struct be_adapter *adapter)
int status;
if (netif_running(netdev)) {
+ /* be_tx_timeout() must not run concurrently with this
+ * function, synchronize with an already-running dev_watchdog
+ */
+ netif_tx_lock_bh(netdev);
/* device cannot transmit now, avoid dev_watchdog timeouts */
netif_carrier_off(netdev);
+ netif_tx_unlock_bh(netdev);
be_close(netdev);
}
@@ -5625,9 +5630,7 @@ static void be_worker(struct work_struct *work)
* mcc completions
*/
if (!netif_running(adapter->netdev)) {
- local_bh_disable();
be_process_mcc(adapter);
- local_bh_enable();
goto reschedule;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 133acca0bf31..092da2d90026 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -167,7 +167,7 @@ struct nps_enet_priv {
};
/**
- * nps_reg_set - Sets ENET register with provided value.
+ * nps_enet_reg_set - Sets ENET register with provided value.
* @priv: Pointer to EZchip ENET private data structure.
* @reg: Register offset from base address.
* @value: Value to set in register.
@@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
}
/**
- * nps_reg_get - Gets value of specified ENET register.
+ * nps_enet_reg_get - Gets value of specified ENET register.
* @priv: Pointer to EZchip ENET private data structure.
* @reg: Register offset from base address.
*
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index ed0d010c7cf2..04a59db03f2b 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
physical function (PF) devices, managing ENETC Ports at a privileged
@@ -12,6 +13,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
virtual function (VF) devices enabled by the ENETC PF driver.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 2fd2586e42bf..bc594892507a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
if (n != 1) {
err = -EPERM;
- goto err_irq;
+ goto err_irq_vectors;
}
ptp_qoriq->irq = pci_irq_vector(pdev, 0);
@@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
err_no_clock:
free_irq(ptp_qoriq->irq, ptp_qoriq);
err_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_vectors:
iounmap(base);
err_ioremap:
kfree(ptp_qoriq);
@@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
+ pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
pci_release_mem_regions(pdev);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index e80fedb27cee..210749bf1eac 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2439,9 +2439,6 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* buffers when not using jumbo frames.
* Must be large enough to accommodate the network MTU, but small enough
* to avoid wasting skb memory.
- *
- * Could be overridden once, at boot-time, via the
- * fm_set_max_frm() callback.
*/
static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
module_param(fsl_fm_max_frm, int, 0);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92372dc43be8..ebc37e256922 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -31,9 +31,6 @@
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
dma_addr_t bus; /* the bus for the desc_ring */
- u32 cnt; /* free-running total number of completed packets */
- u32 fill_cnt; /* free-running total number of descriptors posted */
- u32 mask; /* masks the cnt to the size of the ring */
u8 seqno; /* the next expected seqno for this desc*/
};
@@ -60,8 +57,6 @@ struct gve_rx_data_queue {
dma_addr_t data_bus; /* dma mapping of the slots */
struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
- u32 mask; /* masks the cnt to the size of the ring */
- u32 cnt; /* free-running total number of completed packets */
};
struct gve_priv;
@@ -73,6 +68,9 @@ struct gve_rx_ring {
struct gve_rx_data_queue data;
u64 rbytes; /* free-running bytes received */
u64 rpackets; /* free-running packets received */
+ u32 cnt; /* free-running total number of completed packets */
+ u32 fill_cnt; /* free-running total number of descs and buffs posted */
+ u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 26540b856541..d8fa816f4473 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -138,8 +138,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
struct gve_rx_ring *rx = &priv->rx[ring];
- data[i++] = rx->desc.cnt;
- data[i++] = rx->desc.fill_cnt;
+ data[i++] = rx->cnt;
+ data[i++] = rx->fill_cnt;
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
u64_stats_fetch_begin(&priv->tx[ring].statss);
s->tx_packets += priv->tx[ring].pkt_done;
s->tx_bytes += priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 1914b8350da7..59564ac99d2a 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -37,7 +37,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->data.qpl = NULL;
kvfree(rx->data.page_info);
- slots = rx->data.mask + 1;
+ slots = rx->mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
@@ -64,7 +64,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
/* Allocate one page per Rx queue slot. Each page is split into two
* packet buffers, when possible we "page flip" between the two.
*/
- slots = rx->data.mask + 1;
+ slots = rx->mask + 1;
rx->data.page_info = kvzalloc(slots *
sizeof(*rx->data.page_info), GFP_KERNEL);
@@ -111,7 +111,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->q_num = idx;
slots = priv->rx_pages_per_qpl;
- rx->data.mask = slots - 1;
+ rx->mask = slots - 1;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -125,7 +125,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
err = -ENOMEM;
goto abort_with_slots;
}
- rx->desc.fill_cnt = filled_pages;
+ rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
dma_wmb();
@@ -156,8 +156,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->desc.mask = slots - 1;
- rx->desc.cnt = 0;
+ rx->mask = slots - 1;
+ rx->cnt = 0;
rx->desc.seqno = 1;
gve_rx_add_to_block(priv, idx);
@@ -213,7 +213,7 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
u32 db_idx = be32_to_cpu(rx->q_resources->db_index);
- iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]);
+ iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}
static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
@@ -273,7 +273,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
}
static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
- netdev_features_t feat)
+ netdev_features_t feat, u32 idx)
{
struct gve_rx_slot_page_info *page_info;
struct gve_priv *priv = rx->gve;
@@ -282,14 +282,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
struct sk_buff *skb;
int pagecount;
u16 len;
- u32 idx;
/* drop this packet */
if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
return true;
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
- idx = rx->data.cnt & rx->data.mask;
page_info = &rx->data.page_info[idx];
/* gvnic can only receive into registered segments. If the buffer
@@ -340,8 +338,6 @@ have_skb:
if (!skb)
return true;
- rx->data.cnt++;
-
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
if (rx_desc->csum)
@@ -370,7 +366,7 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx)
__be16 flags_seq;
u32 next_idx;
- next_idx = rx->desc.cnt & rx->desc.mask;
+ next_idx = rx->cnt & rx->mask;
desc = rx->desc.desc_ring + next_idx;
flags_seq = desc->flags_seq;
@@ -385,8 +381,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
{
struct gve_priv *priv = rx->gve;
struct gve_rx_desc *desc;
- u32 cnt = rx->desc.cnt;
- u32 idx = cnt & rx->desc.mask;
+ u32 cnt = rx->cnt;
+ u32 idx = cnt & rx->mask;
u32 work_done = 0;
u64 bytes = 0;
@@ -401,10 +397,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
- if (!gve_rx(rx, desc, feat))
+ if (!gve_rx(rx, desc, feat, idx))
gve_schedule_reset(priv);
cnt++;
- idx = cnt & rx->desc.mask;
+ idx = cnt & rx->mask;
desc = rx->desc.desc_ring + idx;
rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
work_done++;
@@ -417,8 +413,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->rpackets += work_done;
rx->rbytes += bytes;
u64_stats_update_end(&rx->statss);
- rx->desc.cnt = cnt;
- rx->desc.fill_cnt += work_done;
+ rx->cnt = cnt;
+ rx->fill_cnt += work_done;
/* restock desc ring slots */
dma_wmb(); /* Ensure descs are visible before ringing doorbell */
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d60452845539..c84167447abe 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -220,6 +220,7 @@ struct hip04_priv {
unsigned int reg_inten;
struct napi_struct napi;
+ struct device *dev;
struct net_device *ndev;
struct tx_desc *tx_desc;
@@ -248,7 +249,7 @@ struct hip04_priv {
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
- return (head - tail) % (TX_DESC_NUM - 1);
+ return (head - tail) % TX_DESC_NUM;
}
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -465,7 +466,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
}
if (priv->tx_phys[tx_tail]) {
- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
priv->tx_skb[tx_tail]->len,
DMA_TO_DEVICE);
priv->tx_phys[tx_tail] = 0;
@@ -516,8 +517,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys)) {
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -585,6 +586,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
u16 len;
u32 err;
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+
while (cnt && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
@@ -593,7 +597,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
goto refill;
}
- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[priv->rx_head] = 0;
@@ -622,9 +626,9 @@ refill:
buf = netdev_alloc_frag(priv->rx_buf_size);
if (!buf)
goto done;
- phys = dma_map_single(&ndev->dev, buf,
+ phys = dma_map_single(priv->dev, buf,
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
goto done;
priv->rx_buf[priv->rx_head] = buf;
priv->rx_phys[priv->rx_head] = phys;
@@ -645,8 +649,7 @@ refill:
}
napi_complete_done(napi, rx);
done:
- /* clean up tx descriptors and start a new timer if necessary */
- tx_remaining = hip04_tx_reclaim(ndev, false);
+ /* start a new timer if necessary */
if (rx < budget && tx_remaining)
hip04_start_tx_timer(priv);
@@ -728,9 +731,9 @@ static int hip04_mac_open(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
dma_addr_t phys;
- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
return -EIO;
priv->rx_phys[i] = phys;
@@ -764,7 +767,7 @@ static int hip04_mac_stop(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
if (priv->rx_phys[i]) {
- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[i] = 0;
}
@@ -907,6 +910,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->dev = d;
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index bb6586d0e5af..ed3829ae4ef1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -754,17 +754,11 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
return (void *)misc_op;
}
-static int hns_dsaf_dev_match(struct device *dev, const void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
struct
platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
{
struct device *dev;
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_dsaf_dev_match);
+ dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
return dev ? to_platform_device(dev) : NULL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 8ad5292eebbe..75329ab775a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -43,7 +43,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
- HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0a7243825e7b..85a3b062371d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
.reset_level = HNAE3_GLOBAL_RESET },
{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
.reset_level = HNAE3_GLOBAL_RESET },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a38ac7cfe16b..690b9990215c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -304,7 +304,7 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+ HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index f60b80bd605e..6a96987bd8f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -204,7 +204,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
- case HLCGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -307,7 +307,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
- case HLCGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_VLAN_INFO:
state = le16_to_cpu(msg_q[1]);
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 4138a8480347..cca71ba7a74a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3251,7 +3251,7 @@ static int ehea_mem_notifier(struct notifier_block *nb,
switch (action) {
case MEM_CANCEL_OFFLINE:
pr_info("memory offlining canceled");
- /* Fall through: re-add canceled memory block */
+ /* Fall through - re-add canceled memory block */
case MEM_ONLINE:
pr_info("memory is going online");
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d654c234aaf7..c5be4ebd8437 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
- unsigned int *mcastFilterSize_p;
+ __be32 *mcastFilterSize_p;
long ret;
unsigned long ret_attr;
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
- VETH_MCAST_FILTER_SIZE, NULL);
+ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE,
+ NULL);
if (!mcastFilterSize_p) {
dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
"attribute\n");
@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..5cb55ea671e3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
(u64)tx_buff->indir_dma,
(u64)num_entries);
+ dma_unmap_single(dev, tx_buff->indir_dma,
+ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1981,6 +1983,13 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
while (rwi) {
+ if (adapter->state == VNIC_REMOVING ||
+ adapter->state == VNIC_REMOVED) {
+ kfree(rwi);
+ rc = EBUSY;
+ break;
+ }
+
if (adapter->force_reset_recovery) {
adapter->force_reset_recovery = false;
rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2788,7 +2797,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
- u8 *first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2826,6 @@ restart_loop:
txbuff->data_dma[j] = 0;
}
- /* if sub_crq was sent indirectly */
- first = &txbuff->indir_arr[0].generic.first;
- if (*first == IBMVNIC_CRQ_CMD) {
- dma_unmap_single(dev, txbuff->indir_dma,
- sizeof(txbuff->indir_arr),
- DMA_TO_DEVICE);
- *first = 0;
- }
if (txbuff->last_frag) {
dev_kfree_skb_any(txbuff->skb);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 93f3b4e6185b..aa9323e55406 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3912,13 +3912,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
+ if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
- pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+ pcie_capability_read_word(adapter->pdev, reg, value);
return IGC_SUCCESS;
}
@@ -3926,13 +3924,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
+ if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
- pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+ pcie_capability_write_word(adapter->pdev, reg, *value);
return IGC_SUCCESS;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbaf712d6529..51c696b6bf64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock.h>
+#include <net/xfrm.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -2621,7 +2622,7 @@ adjust_by_size:
/* 16K ints/sec to 9.2K ints/sec */
avg_wire_size *= 15;
avg_wire_size += 11452;
- } else if (avg_wire_size <= 1980) {
+ } else if (avg_wire_size < 1968) {
/* 9.2K ints/sec to 8K ints/sec */
avg_wire_size *= 5;
avg_wire_size += 22420;
@@ -2654,6 +2655,8 @@ adjust_by_size:
case IXGBE_LINK_SPEED_2_5GB_FULL:
case IXGBE_LINK_SPEED_1GB_FULL:
case IXGBE_LINK_SPEED_10_FULL:
+ if (avg_wire_size > 8064)
+ avg_wire_size = 8064;
itr += DIV_ROUND_UP(avg_wire_size,
IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -7897,11 +7900,8 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
- rtnl_lock();
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
unregister_netdev(adapter->netdev);
- rtnl_unlock();
- }
ixgbe_service_event_complete(adapter);
return;
}
@@ -8698,7 +8698,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC
- if (secpath_exists(skb) &&
+ if (xfrm_offload(skb) &&
!ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 6b609553329f..a3b6d8c89127 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -633,19 +633,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- u32 i = tx_ring->next_to_clean, xsk_frames = 0;
- unsigned int budget = q_vector->tx.work_limit;
struct xdp_umem *umem = tx_ring->xsk_umem;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
- bool xmit_done;
+ u32 xsk_frames = 0;
- tx_bi = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC(tx_ring, i);
- i -= tx_ring->count;
+ tx_bi = &tx_ring->tx_buffer_info[ntc];
+ tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
- do {
+ while (ntc != ntu) {
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
@@ -661,22 +659,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
tx_bi++;
tx_desc++;
- i++;
- if (unlikely(!i)) {
- i -= tx_ring->count;
+ ntc++;
+ if (unlikely(ntc == tx_ring->count)) {
+ ntc = 0;
tx_bi = tx_ring->tx_buffer_info;
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
}
/* issue prefetch for next Tx descriptor */
prefetch(tx_desc);
+ }
- /* update budget accounting */
- budget--;
- } while (likely(budget));
-
- i += tx_ring->count;
- tx_ring->next_to_clean = i;
+ tx_ring->next_to_clean = ntc;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.bytes += total_bytes;
@@ -688,8 +682,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
- xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
- return budget > 0 && xmit_done;
+ return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d2b41f9f87f8..72872d6ca80c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
+#include <net/xfrm.h>
#include "ixgbevf.h"
@@ -4161,7 +4162,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
- if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index f660cc2b8258..0b9e851f3da4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -319,20 +319,33 @@ static int orion_mdio_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->smi_busy_wait);
- for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
- dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
- if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ if (pdev->dev.of_node) {
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
+
+ if (!IS_ERR(of_clk_get(pdev->dev.of_node,
+ ARRAY_SIZE(dev->clk))))
+ dev_warn(&pdev->dev,
+ "unsupported number of clocks, limiting to the first "
+ __stringify(ARRAY_SIZE(dev->clk)) "\n");
+ } else {
+ dev->clk[0] = clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_clk;
}
- if (IS_ERR(dev->clk[i]))
- break;
- clk_prepare_enable(dev->clk[i]);
+ if (!IS_ERR(dev->clk[0]))
+ clk_prepare_enable(dev->clk[0]);
}
- if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk))))
- dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first "
- __stringify(ARRAY_SIZE(dev->clk)) "\n");
dev->err_interrupt = platform_get_irq(pdev, 0);
if (dev->err_interrupt > 0 &&
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index c51f1d5b550b..ccdd47f3b8fb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -811,6 +811,26 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
return 0;
}
+static void mvpp2_set_hw_csum(struct mvpp2_port *port,
+ enum mvpp2_bm_pool_log_num new_long_pool)
+{
+ const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Update L4 checksum when jumbo enable/disable on port.
+ * Only port 0 supports hardware checksum offload due to
+ * the Tx FIFO size limitation.
+ * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
+ * has 7 bits, so the maximum L3 offset is 128.
+ */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ port->dev->features &= ~csums;
+ port->dev->hw_features &= ~csums;
+ } else {
+ port->dev->features |= csums;
+ port->dev->hw_features |= csums;
+ }
+}
+
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -843,15 +863,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
/* Add port to new short & long pool */
mvpp2_swf_bm_pool_init(port);
- /* Update L4 checksum when jumbo enable/disable on port */
- if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
- } else {
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- }
+ mvpp2_set_hw_csum(port, new_long_pool);
}
dev->mtu = mtu;
@@ -3700,6 +3712,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
+ bool running = netif_running(dev);
int err;
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3708,40 +3721,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
- if (!netif_running(dev)) {
- err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- return 0;
- }
-
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
+ if (running)
+ mvpp2_stop_dev(port);
err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
+ if (err) {
+ netdev_err(dev, "failed to change MTU\n");
+ /* Reconfigure BM to the original MTU */
+ mvpp2_bm_update_mtu(dev, dev->mtu);
+ } else {
port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- goto out_start;
}
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
-
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
+ if (running) {
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ }
- return 0;
-log_error:
- netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -4739,9 +4736,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
else
ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
if (old_ctrl0 != ctrl0)
writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
@@ -5208,10 +5205,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->features |= NETIF_F_NTUPLE;
}
- if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- }
+ mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -5759,9 +5753,6 @@ static int mvpp2_remove(struct platform_device *pdev)
mvpp2_dbgfs_cleanup(priv);
- flush_workqueue(priv->stats_queue);
- destroy_workqueue(priv->stats_queue);
-
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5770,6 +5761,8 @@ static int mvpp2_remove(struct platform_device *pdev)
i++;
}
+ destroy_workqueue(priv->stats_queue);
+
for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f518312ffe69..e0363870f3a5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,20 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
},
},
+ {
+ .ident = "ASUS P6T",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
+ {
+ .ident = "ASUS P6X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 263cd0909fe0..1f7fff81f24d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -9,7 +9,6 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
- depends on NET_VENDOR_MEDIATEK
select PHYLIB
---help---
This driver supports the gigabit ethernet MACs in the
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6c01314e87b0..db3552f2d087 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
- goto rss_err;
+ goto qp_alloc_err;
}
rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1241,6 +1241,7 @@ indir_err:
MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
kfree(rss_map->indir_qp);
rss_map->indir_qp = NULL;
rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1f6e16d5ea6b..309470ec0219 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_dev_port(dev, i, &port_cap)) {
mlx4_err(dev,
- "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+ "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
} else if ((dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_DEFAULT) &&
(port_cap.dmfs_optimized_state ==
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 5bb6a26ea267..50862275544e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
struct mlx5_interface *intf;
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
+ list_for_each_entry_reverse(intf, &intf_list, list)
mlx5_remove_device(intf, priv);
list_del(&priv->dev_list);
mutex_unlock(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 79d93d6c7d7a..65bec19a438f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -159,7 +159,7 @@ do { \
enum mlx5e_rq_group {
MLX5E_RQ_GROUP_REGULAR,
MLX5E_RQ_GROUP_XSK,
- MLX5E_NUM_RQ_GROUPS /* Keep last. */
+#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
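
With the sentinel value gone, each netdev profile states how many RQ groups it uses via the token-pasting macro. A small standalone illustration of the arithmetic (plain C, compiles on its own):

    #include <stdio.h>

    enum mlx5e_rq_group {
        MLX5E_RQ_GROUP_REGULAR,
        MLX5E_RQ_GROUP_XSK,
    };
    #define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)

    int main(void)
    {
        /* REGULAR is 0 and XSK is 1, so the counts come out as 1 and 2. */
        printf("REGULAR-only profile: %d group(s)\n", MLX5E_NUM_RQ_GROUPS(REGULAR));
        printf("XSK-capable profile:  %d group(s)\n", MLX5E_NUM_RQ_GROUPS(XSK));
        return 0;
    }

Later hunks in this patch use exactly these two forms: the NIC profile gets MLX5E_NUM_RQ_GROUPS(XSK), while the representor, IPoIB and IPoIB-pkey profiles get MLX5E_NUM_RQ_GROUPS(REGULAR), so their netdevs stop reserving RX queues for an XSK group they never create.
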
@@ -182,18 +182,15 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
-/* Use this function to get max num channels after netdev was created */
-static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
-{
- return min_t(unsigned int,
- netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS,
- netdev->num_tx_queues);
-}
-
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ union {
+ struct {
+ struct mlx5_wqe_eth_seg eth;
+ struct mlx5_wqe_data_seg data[0];
+ };
+ u8 tls_progress_params_ctx[0];
+ };
};
struct mlx5e_rx_wqe_ll {
@@ -830,6 +827,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ u16 max_nch;
u8 max_opened_tc;
struct hwtstamp_config tstamp;
u16 q_counter;
@@ -871,6 +869,7 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
int max_tc;
+ u8 rq_groups;
};
void mlx5e_build_ptys2ethtool_map(void);
@@ -1106,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index bd882b5ee9a7..3a615d663d84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -66,9 +66,10 @@ static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
*group = qid / nch;
}
-static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid)
+static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
+ struct mlx5e_params *params, u64 qid)
{
- return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS;
+ return qid < params->num_channels * profile->rq_groups;
}
/* Parameter calculations */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index d5e5afbdca6d..f777994f3005 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
};
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size)
+ const u32 **arr, u32 *size,
+ bool force_legacy)
{
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
const u32 *table;
@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
speed = table[i];
@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5e_port_eth_proto eproto;
+ bool force_legacy = false;
bool ext;
int err;
@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
goto out;
-
- *speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
+ if (ext && !eproto.admin) {
+ force_legacy = true;
+ err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
+ if (err)
+ goto out;
+ }
+ *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
if (!(*speed))
err = -EINVAL;
@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, table[i]);
@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
return 0;
}
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy)
{
u32 link_modes = 0;
const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
for (i = 0; i < max_size; ++i) {
if (table[i] == speed)
link_modes |= MLX5E_PROT_MASK(i);
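
The force_legacy plumbing above exists mainly for the fallback in mlx5e_port_linkspeed(): on a device that advertises extended PTYS support but has nothing set in the extended admin mask (presumably because the link was configured through the legacy fields), the operational speed has to be decoded against the legacy speed table. A condensed reconstruction of the resulting function; the initialisation of `ext` is not visible in these hunks and is assumed to read the ptys_extended_ethernet capability:

    int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
    {
        struct mlx5e_port_eth_proto eproto;
        bool force_legacy = false;
        bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); /* assumed */
        int err;

        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
            goto out;
        if (ext && !eproto.admin) {
            /* Extended capability present but unused: re-query and decode
             * the operational speed with the legacy table instead. */
            force_legacy = true;
            err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
            if (err)
                goto out;
        }
        *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
        if (!(*speed))
            err = -EINVAL;
    out:
        return err;
    }
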
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 70f536ec51c4..4a7f4497692b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext);
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index f3d98748b211..c7f86453c638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
u8 state;
int err;
- if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- return 0;
-
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
sq->sqn, err);
- return err;
+ goto out;
}
- if (state != MLX5_SQC_STATE_ERR) {
- netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
- return -EINVAL;
- }
+ if (state != MLX5_SQC_STATE_ERR)
+ goto out;
mlx5e_tx_disable_queue(sq->txq);
err = mlx5e_wait_for_sq_flush(sq);
if (err)
- return err;
+ goto out;
/* At this point, no new packets will arrive from the stack as TXQ is
* marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
err = mlx5e_sq_to_ready(sq, state);
if (err)
- return err;
+ goto out;
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
mlx5e_activate_txqsq(sq);
return 0;
+out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ return err;
}
static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc0..7f78c004d12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
+
+ spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
+ spin_unlock(&c->xskicosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 407da83474ef..b7298f9ee3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,12 +11,14 @@
#include "accel/tls.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+ (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
+ MLX5_ST_SZ_BYTES(tls_static_params))
#define MLX5E_KTLS_STATIC_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
- (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+ (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
+ MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
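
Because the TLS parameter contexts now sit in unions that overlay the eth/data segments of the WQE (see the mlx5e_tx_wqe change earlier in this series), sizing these control WQEs with sizeof() would count the overlaid bytes twice; offsetof() gives only the bytes that precede the context. A standalone sketch of the difference, using simplified stand-in layouts rather than the real mlx5 structures (the zero-length arrays rely on the same GNU C extension the kernel uses):

    #include <stdio.h>
    #include <stddef.h>

    struct ctrl_seg { unsigned char b[16]; };
    struct eth_seg  { unsigned char b[16]; };
    struct data_seg { unsigned char b[16]; };

    /* Simplified stand-in for the reworked mlx5e_tx_wqe layout. */
    struct tx_wqe {
        struct ctrl_seg ctrl;
        union {
            struct {
                struct eth_seg  eth;
                struct data_seg data[0];
            };
            unsigned char tls_progress_params_ctx[0];
        };
    };

    int main(void)
    {
        /* The progress-params context starts right after ctrl (16 bytes in),
         * while sizeof() also includes the overlaid eth segment (32 bytes). */
        printf("offsetof = %zu, sizeof = %zu\n",
               offsetof(struct tx_wqe, tls_progress_params_ctx),
               sizeof(struct tx_wqe));
        return 0;
    }
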
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index ea032f54197e..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- cseg->imm = cpu_to_be32(priv_tx->tisn);
+ cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
- MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
+ MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
MLX5_SET(tls_progress_params, ctx, record_tracker_state,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
PROGRESS_PARAMS_DS_CNT);
cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- fill_progress_params_ctx(wqe->data, priv_tx);
+ fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
u16 pi, u8 num_wqebbs,
- skb_frag_t *resync_dump_frag)
+ skb_frag_t *resync_dump_frag,
+ u32 num_bytes)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
wi->skb = NULL;
wi->num_wqebbs = num_wqebbs;
wi->resync_dump_frag = resync_dump_frag;
+ wi->num_bytes = num_bytes;
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
+struct mlx5e_dump_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_data_seg data;
+};
+
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
skb_frag_t *frag, u32 tisn, bool first)
{
struct mlx5_wqe_ctrl_seg *cseg;
- struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
- struct mlx5e_tx_wqe *wqe;
+ struct mlx5e_dump_wqe *wqe;
dma_addr_t dma_addr = 0;
- u16 ds_cnt, ds_cnt_inl;
u8 num_wqebbs;
- u16 pi, ihs;
+ u16 ds_cnt;
int fsz;
-
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- ds_cnt += 1; /* one frag */
+ u16 pi;
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
cseg = &wqe->ctrl;
- eseg = &wqe->eth;
- dseg = wqe->data;
+ dseg = &wqe->data;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- cseg->imm = cpu_to_be32(tisn);
+ cseg->tisn = cpu_to_be32(tisn << 8);
cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
- dseg += ds_cnt_inl;
-
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- tx_fill_wi(sq, pi, num_wqebbs, frag);
+ tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
sq->pc += num_wqebbs;
WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- tx_fill_wi(sq, pi, 1, NULL);
+ tx_fill_wi(sq, pi, 1, NULL, 0);
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
@@ -412,7 +408,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
goto out;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
+ if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
@@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
priv_tx->expected_seq = seq + datalen;
cseg = &(*wqe)->ctrl;
- cseg->imm = cpu_to_be32(priv_tx->tisn);
+ cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 8657e0f26995..2c75b2752f58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
return &arfs_t->rules_hash[bucket_idx];
}
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) ?
- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
u8 ip_proto, __be16 etype)
{
@@ -602,31 +596,9 @@ out:
arfs_may_expire_flow(priv);
}
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->dest;
- return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->source;
- return ((struct udphdr *)transport_header)->source;
-}
-
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_table *arfs_t,
- const struct sk_buff *skb,
+ const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
INIT_WORK(&rule->arfs_work, arfs_handle_work);
tuple = &rule->tuple;
- tuple->etype = skb->protocol;
+ tuple->etype = fk->basic.n_proto;
+ tuple->ip_proto = fk->basic.ip_proto;
if (tuple->etype == htons(ETH_P_IP)) {
- tuple->src_ipv4 = ip_hdr(skb)->saddr;
- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ tuple->src_ipv4 = fk->addrs.v4addrs.src;
+ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
} else {
- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
sizeof(struct in6_addr));
- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
sizeof(struct in6_addr));
}
- tuple->ip_proto = arfs_get_ip_proto(skb);
- tuple->src_port = arfs_get_src_port(skb);
- tuple->dst_port = arfs_get_dst_port(skb);
+ tuple->src_port = fk->ports.src;
+ tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
return rule;
}
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
- const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
- if (tuple->etype == htons(ETH_P_IP) &&
- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
- return true;
- if (tuple->etype == htons(ETH_P_IPV6) &&
- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
- sizeof(struct in6_addr))) &&
- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
- sizeof(struct in6_addr))))
- return true;
+ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+ return false;
+ if (tuple->etype != fk->basic.n_proto)
+ return false;
+ if (tuple->etype == htons(ETH_P_IP))
+ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+ if (tuple->etype == htons(ETH_P_IPV6))
+ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
return false;
}
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
- const struct sk_buff *skb)
+ const struct flow_keys *fk)
{
struct arfs_rule *arfs_rule;
struct hlist_head *head;
- __be16 src_port = arfs_get_src_port(skb);
- __be16 dst_port = arfs_get_dst_port(skb);
- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
hlist_for_each_entry(arfs_rule, head, hlist) {
- if (arfs_rule->tuple.src_port == src_port &&
- arfs_rule->tuple.dst_port == dst_port &&
- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ if (arfs_cmp(&arfs_rule->tuple, fk))
return arfs_rule;
- }
}
return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
+ struct flow_keys fk;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
if (!arfs_t)
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
- arfs_rule = arfs_find_rule(arfs_t, skb);
+ arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
arfs_rule->rxq = rxq_index;
} else {
- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
- rxq_index, flow_id);
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
if (!arfs_rule) {
spin_unlock_bh(&arfs->arfs_lock);
return -ENOMEM;
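
With this rework the aRFS path derives the whole tuple from a single flow-dissector pass instead of reading the IP and transport headers by hand, so it no longer relies on skb header offsets having been set up on the receive path. A trimmed reconstruction of the resulting entry point (only the dissection and protocol checks are shown; the table lookup and rule handling are unchanged apart from taking &fk):

    int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                            u16 rxq_index, u32 flow_id)
    {
        struct flow_keys fk;

        /* One pass fills n_proto, ip_proto, the addresses and the ports. */
        if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
            return -EPROTONOSUPPORT;

        if (fk.basic.n_proto != htons(ETH_P_IP) &&
            fk.basic.n_proto != htons(ETH_P_IPV6))
            return -EPROTONOSUPPORT;

        if (skb->encapsulation)
            return -EPROTONOSUPPORT;

        /* ... arfs_get_table(), arfs_find_rule()/arfs_alloc_rule() keyed
         * on &fk, exactly as in the hunks above ... */
        return 0;
    }
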
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 126ec4181286..20e628c907e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -391,7 +391,7 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
{
mutex_lock(&priv->state_lock);
- ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
+ ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
if (priv->xsk.refcnt) {
/* The upper half are XSK queues. */
@@ -785,7 +785,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
}
static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper,
+ u32 eth_proto_oper, bool force_legacy,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -795,7 +795,7 @@ static void get_speed_duplex(struct net_device *netdev,
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
+ speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
speed = SPEED_UNKNOWN;
goto out;
@@ -914,8 +914,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
/* Fields: eth_proto_admin and ext_eth_proto_admin are
* mutually exclusive. Hence try reading legacy advertising
* when extended advertising is zero.
- * admin_ext indicates how eth_proto_admin should be
- * interpreted
+ * admin_ext indicates which proto_admin (ext vs. legacy)
+ * should be read and interpreted
*/
admin_ext = ext;
if (ext && !eth_proto_admin) {
@@ -924,7 +924,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
admin_ext = false;
}
- eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
@@ -939,7 +939,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
+ get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
+ link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1016,45 +1017,77 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
+static bool ext_link_mode_requested(const unsigned long *adver)
+{
+#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
+ int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+
+ bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
+ return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static bool ext_speed_requested(u32 speed)
+{
+#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
+ return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
+}
+
+static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
+{
+ bool ext_link_mode = ext_link_mode_requested(adver);
+ bool ext_speed = ext_speed_requested(speed);
+
+ return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
+}
+
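
Taken together, these helpers decide between the legacy and extended PTYS layouts from the user request alone: with autoneg enabled the advertised link-mode bitmap is checked against the first extended-only mode (50000baseKR_Full), and with autoneg disabled the forced speed is compared against 100G, the fastest legacy speed. A few worked cases, written as comments because the inputs come straight from ethtool (illustrative only):

    /* autoneg = AUTONEG_ENABLE, advertising only 25000baseCR_Full
     *   -> no advertised bit at or above 50000baseKR_Full
     *   -> ext_requested() == false, the legacy PTYS fields are programmed
     *
     * autoneg = AUTONEG_ENABLE, advertising 200000baseCR4_Full
     *   -> the bit is above the threshold
     *   -> ext_requested() == true, extended PTYS is used
     *
     * autoneg = AUTONEG_DISABLE, base.speed = 100000
     *   -> 100000 is not greater than MLX5E_MAX_PTYS_LEGACY_SPEED
     *   -> legacy PTYS is used
     *
     * autoneg = AUTONEG_DISABLE, base.speed = 200000
     *   -> greater than 100G, extended PTYS is required; if the device lacks
     *      ptys_extended_ethernet the request now fails with -EOPNOTSUPP
     *      (see the check added below).
     */
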
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_port_eth_proto eproto;
+ const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
- bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
+ u8 autoneg;
u32 speed;
+ bool ext;
int err;
u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
-#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
+ adver = link_ksettings->link_modes.advertising;
+ autoneg = link_ksettings->base.autoneg;
+ speed = link_ksettings->base.speed;
- ext_requested = !!(link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT ||
- link_ksettings->link_modes.advertising[1]);
+ ext = ext_requested(autoneg, adver, speed);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ext_requested &= ext_supported;
+ if (!ext_supported && ext)
+ return -EOPNOTSUPP;
- speed = link_ksettings->base.speed;
- ethtool2ptys_adver_func = ext_requested ?
- mlx5e_ethtool2ptys_ext_adver_link :
+ ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
- link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
- ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) :
- mlx5e_port_speed2linkmodes(mdev, speed);
+ link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+ mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
@@ -1067,14 +1100,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
&an_disable_admin);
- an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+ an_disable = autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
mlx5_toggle_port_link(mdev);
out:
@@ -1313,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EOPNOTSUPP;
+
if (pauseparam->autoneg)
return -EINVAL;
@@ -1654,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+ struct ethtool_flash *flash)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->netdev;
+ const struct firmware *fw;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&fw, flash->data, &dev->dev);
+ if (err)
+ return err;
+
+ dev_hold(dev);
+ rtnl_unlock();
+
+ err = mlx5_firmware_flash(mdev, fw, NULL);
+ release_firmware(fw);
+
+ rtnl_lock();
+ dev_put(dev);
+ return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
@@ -1936,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .flash_device = mlx5e_flash_device,
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ea3a490b569a..94304abc49e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -611,7 +611,8 @@ static int validate_flow(struct mlx5e_priv *priv,
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
- if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie))
+ if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
+ fs->ring_cookie))
return -EINVAL;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47eea6b3a1c3..9d5f6e56188f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -331,12 +331,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
- struct mlx5e_wqe_frag_info next_frag, *prev;
+ struct mlx5e_wqe_frag_info next_frag = {};
+ struct mlx5e_wqe_frag_info *prev = NULL;
int i;
next_frag.di = &rq->wqe.di[0];
- next_frag.offset = 0;
- prev = NULL;
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -1322,7 +1321,6 @@ err_free_txqsq:
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
- clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
@@ -1677,10 +1675,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
- int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ int err, tc;
for (tc = 0; tc < params->num_tc; tc++) {
- int txq_ix = c->ix + tc * max_nch;
+ int txq_ix = c->ix + tc * priv->max_nch;
err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
params, &cparam->sq, &c->sq[tc], tc);
@@ -2438,11 +2436,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int err;
int ix;
- for (ix = 0; ix < max_nch; ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
if (unlikely(err))
goto err_destroy_rqts;
@@ -2460,10 +2457,9 @@ err_destroy_rqts:
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}
@@ -2557,7 +2553,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
struct mlx5e_redirect_rqt_param direct_rrp = {
.is_rss = false,
{
@@ -2758,7 +2754,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
goto free_in;
}
- for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -2858,12 +2854,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, tc;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
for (tc = 0; tc < priv->profile->max_tc; tc++)
- priv->channel_tc2txq[i][tc] = i + tc * max_nch;
+ priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
}
static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
@@ -2884,7 +2879,7 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
int num_txqs = priv->channels.num * priv->channels.params.num_tc;
- int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS;
+ int num_rxqs = priv->channels.num * priv->profile->rq_groups;
struct net_device *netdev = priv->netdev;
mlx5e_netdev_set_tcs(netdev);
@@ -3306,7 +3301,6 @@ err_destroy_inner_tirs:
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
@@ -3319,7 +3313,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
if (!in)
return -ENOMEM;
- for (ix = 0; ix < max_nch; ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
memset(in, 0, inlen);
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3358,10 +3352,9 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}
@@ -3487,7 +3480,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
int i;
- for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -4960,8 +4953,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
return err;
mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev),
- netdev->mtu);
+ priv->max_nch, netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -5164,6 +5156,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
};
/* mlx5e generic netdev management API (move to en_common.c) */
@@ -5181,6 +5174,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
mutex_init(&priv->state_lock);
@@ -5218,7 +5212,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
- nch * MLX5E_NUM_RQ_GROUPS);
+ nch * profile->rq_groups);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7245d287633d..d0684fdb69e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -735,8 +735,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- block_cb = flow_block_cb_alloc(f->net,
- mlx5e_rep_indr_setup_block_cb,
+ block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
indr_priv, indr_priv,
mlx5e_rep_indr_tc_block_unbind);
if (IS_ERR(block_cb)) {
@@ -753,7 +752,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (!indr_priv)
return -ENOENT;
- block_cb = flow_block_cb_lookup(f,
+ block_cb = flow_block_cb_lookup(f->block,
mlx5e_rep_indr_setup_block_cb,
indr_priv);
if (!block_cb)
@@ -1702,6 +1701,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1719,6 +1719,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 539b4d3656da..57f9f346d213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -172,7 +172,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
memset(s, 0, sizeof(*s));
- for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -1395,7 +1395,7 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ int max_nch = priv->max_nch;
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -1409,8 +1409,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
bool is_xsk = priv->xsk.ever_used;
+ int max_nch = priv->max_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1452,8 +1452,8 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
bool is_xsk = priv->xsk.ever_used;
+ int max_nch = priv->max_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cc096f6011d9..00b2d4a86159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1230,13 +1230,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
- u64 bytes, packets, lastuse = 0;
struct mlx5e_tc_flow *flow;
struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
struct neighbour *n;
+ u64 lastuse;
if (m_neigh->family == AF_INET)
tbl = &arp_tbl;
@@ -1256,7 +1256,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
encaps[efi->index]);
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
counter = mlx5e_tc_get_counter(flow);
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
break;
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
- u8 *match_level, u8 *tunnel_match_level)
+ u8 *inner_match_level, u8 *outer_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
+ u8 *match_level;
- *match_level = MLX5_MATCH_NONE;
+ match_level = outer_match_level;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mlx5e_get_tc_tun(filter_dev)) {
- if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev,
+ outer_match_level))
return -EOPNOTSUPP;
- /* In decap flow, header pointers should point to the inner
+ /* At this point, header pointers should point to the inner
* headers, outer headers were already set by parse_tunnel_attr
*/
+ match_level = inner_match_level;
headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
spec);
headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
+ u8 inner_match_level, outer_match_level, non_tunnel_match_level;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
struct mlx5_eswitch_rep *rep;
int err;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+ inner_match_level = MLX5_MATCH_NONE;
+ outer_match_level = MLX5_MATCH_NONE;
+
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+ &outer_match_level);
+ non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+ outer_match_level : inner_match_level;
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
- esw->offloads.inline_mode < match_level)) {
+ esw->offloads.inline_mode < non_tunnel_match_level)) {
NL_SET_ERR_MSG_MOD(extack,
"Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
- match_level, esw->offloads.inline_mode);
+ non_tunnel_match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- flow->esw_attr->match_level = match_level;
- flow->esw_attr->tunnel_match_level = tunnel_match_level;
+ flow->esw_attr->inner_match_level = inner_match_level;
+ flow->esw_attr->outer_match_level = outer_match_level;
} else {
- flow->nic_attr->match_level = match_level;
+ flow->nic_attr->match_level = non_tunnel_match_level;
}
return err;
@@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
esw_attr->parse_attr = parse_attr;
esw_attr->chain = f->common.chain_index;
- esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+ esw_attr->prio = f->common.prio;
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c50b6f0769c8..49b06b256c92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
return;
@@ -61,7 +61,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a38e8a3c7c9a..04685dbb280c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
- u8 match_level;
- u8 tunnel_match_level;
+ u8 inner_match_level;
+ u8 outer_match_level;
struct mlx5_fc *counter;
u32 chain;
u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 089ae4d48a82..0323fd078271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
- if (attr->tunnel_match_level != MLX5_MATCH_NONE)
- spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- if (attr->match_level != MLX5_MATCH_NONE)
- spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- } else if (attr->match_level != MLX5_MATCH_NONE) {
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- }
+ if (attr->inner_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
- if (attr->match_level != MLX5_MATCH_NONE)
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c48c382f926f..c1252d6be0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -68,7 +68,7 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
- FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
};
enum fs_flow_table_op_mod {
@@ -275,7 +275,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
)
#endif
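
The BUILD_BUG_ON_ZERO() arm at the end of the capability macro is what enforces the FS_FT_MAX_TYPE bump made here: the ladder must name every table type up to the maximum, otherwise the constant expression stops compiling. A standalone miniature of the pattern (BUILD_BUG_ON_ZERO re-declared locally the way the kernel defines it; relies on common GCC/Clang extensions):

    /* Evaluates to 0 when e is false at compile time, otherwise it becomes a
     * negative-width bit-field and the build fails. */
    #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

    enum ft_type { FT_A, FT_B, FT_MAX_TYPE = FT_B };

    #define FT_CAP(type)                         \
        ((type) == FT_A ? 1 :                    \
         (type) == FT_B ? 2 :                    \
         BUILD_BUG_ON_ZERO(FT_B != FT_MAX_TYPE))

    int main(void)
    {
        /* Fine today; add FT_C and bump FT_MAX_TYPE without extending
         * FT_CAP() and the last arm turns into a compile-time error. */
        return FT_CAP(FT_A) ? 0 : 1;
    }
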
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b3762123a69c..1834d9f3aa1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -369,6 +369,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
}
EXPORT_SYMBOL(mlx5_fc_query);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+ return counter->cache.lastuse;
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
data_size = crdump_size - offset;
else
data_size = MLX5_CR_DUMP_CHUNK_SIZE;
- err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
+ err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
+ data_size);
if (err)
goto free_data;
}
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
+ fatal_error = check_fatal_sensors(dev);
+
+ if (fatal_error && !health->fatal_error) {
+ mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+ dev->priv.health.fatal_error = fatal_error;
+ print_health_info(dev);
+ mlx5_trigger_health_work(dev);
+ goto out;
+ }
+
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
if (health->synd && health->synd != prev_synd)
queue_work(health->wq, &health->report_work);
- fatal_error = check_fatal_sensors(dev);
-
- if (fatal_error && !health->fatal_error) {
- mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
- dev->priv.health.fatal_error = fatal_error;
- print_health_info(dev);
- mlx5_trigger_health_work(dev);
- }
-
out:
mod_timer(&health->timer, get_next_poll_jiffies());
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ebd81f6b556e..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
return mlx5e_ethtool_get_ts_info(priv, info);
}
+static int mlx5i_flash_device(struct net_device *netdev,
+ struct ethtool_flash *flash)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ return mlx5e_ethtool_flash_device(priv, flash);
+}
+
enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_1X = 1 << 0,
MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_ethtool_stats = mlx5i_get_ethtool_stats,
.get_ringparam = mlx5i_get_ringparam,
.set_ringparam = mlx5i_set_ringparam,
+ .flash_device = mlx5i_flash_device,
.get_channels = mlx5i_get_channels,
.set_channels = mlx5i_set_channels,
.get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 6bfaaab362dc..1a2560e3bf7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -88,8 +88,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
netdev->mtu = netdev->max_mtu;
mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev),
- netdev->mtu);
+ priv->max_nch, netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -118,11 +117,10 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < max_nch; i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -436,6 +434,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
/* mlx5i netdev NDOs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 6e56fa769d2e..c5a491e22e55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -355,6 +355,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea9ee88491e5..ea1d4d26ece0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
case 128:
general_obj_key_size =
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+ key_p += sz_bytes;
break;
case 256:
general_obj_key_size =
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4d34d42b3b0e..eda9c23e87b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1604,14 +1604,14 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
bool register_block = false;
int err;
- block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_lookup(f->block,
+ mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb) {
acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
if (!acl_block)
return -ENOMEM;
- block_cb = flow_block_cb_alloc(f->net,
- mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp, acl_block,
mlxsw_sp_tc_block_flower_release);
if (IS_ERR(block_cb)) {
@@ -1657,7 +1657,8 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_cb *block_cb;
int err;
- block_cb = flow_block_cb_lookup(f, mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_lookup(f->block,
+ mlxsw_sp_setup_tc_block_cb_flower,
mlxsw_sp);
if (!block_cb)
return;
@@ -1680,7 +1681,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
- tc_setup_cb_t *cb;
+ flow_setup_cb_t *cb;
bool ingress;
int err;
@@ -1702,7 +1703,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
&mlxsw_sp_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(f->net, cb, mlxsw_sp_port,
+ block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
mlxsw_sp_port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@@ -1718,7 +1719,7 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
f, ingress);
- block_cb = flow_block_cb_lookup(f, cb, mlxsw_sp_port);
+ block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
if (!block_cb)
return -ENOENT;
@@ -6329,7 +6330,7 @@ static int __init mlxsw_sp_module_init(void)
return 0;
err_sp2_pci_driver_register:
- mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
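
The spectrum.c hunks above follow the flow offload API change: flow_block_cb_lookup() is now keyed on f->block rather than on the offload request, and flow_block_cb_alloc() no longer takes a net argument. A standalone sketch of the lookup side, matching a registered entry by (block, cb, cb_ident); the demo_* structures are illustrative, not the kernel's:

    /* Standalone sketch: find a callback entry by (block, cb, cb_ident). */
    #include <stdio.h>
    #include <stddef.h>

    struct demo_block { int id; };

    typedef int (*demo_cb_t)(void *priv);

    struct demo_block_cb {
            struct demo_block *block;
            demo_cb_t cb;
            void *cb_ident;
            struct demo_block_cb *next;
    };

    static struct demo_block_cb *
    demo_block_cb_lookup(struct demo_block_cb *list, struct demo_block *block,
                         demo_cb_t cb, void *cb_ident)
    {
            struct demo_block_cb *e;

            for (e = list; e; e = e->next)
                    if (e->block == block && e->cb == cb && e->cb_ident == cb_ident)
                            return e;
            return NULL;
    }

    static int demo_cb(void *priv) { (void)priv; return 0; }

    int main(void)
    {
            struct demo_block blk = { .id = 1 };
            int ident;
            struct demo_block_cb entry = {
                    .block = &blk, .cb = demo_cb, .cb_ident = &ident, .next = NULL,
            };

            printf("found: %s\n",
                   demo_block_cb_lookup(&entry, &blk, demo_cb, &ident) ? "yes" : "no");
            return 0;
    }
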
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 131f62ce9297..6664119fb0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -951,4 +951,8 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_nve_vxlan.c */
+int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index e8ac90564dbe..84a87d059333 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority >> 16;
+ rulei->priority = priority;
}
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 1537f70bc26d..888ba4300bcc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752
+#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
+#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 1df164a4b06d..17f334b46c40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -775,6 +775,7 @@ static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
ops->fini(nve);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
nve->tunnel_index);
+ memset(&nve->config, 0, sizeof(nve->config));
}
nve->num_nve_tunnels--;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 0035640156a1..12f664f42f21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -29,6 +29,7 @@ struct mlxsw_sp_nve {
unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
u32 tunnel_index;
u16 ul_rif_index; /* Reserved for Spectrum */
+ unsigned int inc_parsing_depth_refs;
};
struct mlxsw_sp_nve_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 93ccd9fc2266..05517c7feaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -103,9 +103,9 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->udp_dport = cfg->dst_port;
}
-static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
- unsigned int parsing_depth,
- __be16 udp_dport)
+static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int parsing_depth,
+ __be16 udp_dport)
{
char mprs_pl[MLXSW_REG_MPRS_LEN];
@@ -113,6 +113,56 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
}
+static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ?
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH :
+ MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH;
+
+ return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport);
+}
+
+static int
+__mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ int err;
+
+ mlxsw_sp->nve->inc_parsing_depth_refs++;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
+ if (err)
+ goto err_nve_parsing_set;
+ return 0;
+
+err_nve_parsing_set:
+ mlxsw_sp->nve->inc_parsing_depth_refs--;
+ return err;
+}
+
+static void
+__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ mlxsw_sp->nve->inc_parsing_depth_refs--;
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
+}
+
+int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp)
+{
+ __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
+
+ return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport);
+}
+
+void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp)
+{
+ __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
+
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport);
+}
+
static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
const struct mlxsw_sp_nve_config *config)
@@ -176,9 +226,7 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
int err;
- err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
- config->udp_dport);
+ err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
if (err)
return err;
@@ -203,8 +251,7 @@ err_promote_decap:
err_rtdp_set:
mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
return err;
}
@@ -216,8 +263,7 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
config->ul_proto, &config->ul_sip);
mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
static int
@@ -320,9 +366,7 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
int err;
- err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
- config->udp_dport);
+ err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
if (err)
return err;
@@ -348,8 +392,7 @@ err_promote_decap:
err_rtdp_set:
mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
return err;
}
@@ -361,8 +404,7 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
config->ul_proto, &config->ul_sip);
mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
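
The spectrum_nve_vxlan.c changes above replace fixed parsing-depth writes with a get/put reference count: the applied depth is derived from whether inc_parsing_depth_refs is nonzero, and a failed apply rolls the count back. A minimal standalone sketch of that pattern, with hypothetical demo_* names and made-up depth values:

    /* Standalone sketch: refcount-driven choice between two settings. */
    #include <stdio.h>

    #define DEMO_DEFAULT_DEPTH   96
    #define DEMO_INCREASED_DEPTH 128

    struct demo_ctx {
            unsigned int refs;      /* users that need the increased setting */
            int depth;              /* currently applied setting */
    };

    /* Pretend to program the hardware; always succeeds in this sketch. */
    static int demo_apply(struct demo_ctx *ctx)
    {
            ctx->depth = ctx->refs ? DEMO_INCREASED_DEPTH : DEMO_DEFAULT_DEPTH;
            return 0;
    }

    static int demo_depth_get(struct demo_ctx *ctx)
    {
            int err;

            ctx->refs++;
            err = demo_apply(ctx);
            if (err)
                    ctx->refs--;    /* roll back on failure */
            return err;
    }

    static void demo_depth_put(struct demo_ctx *ctx)
    {
            ctx->refs--;
            demo_apply(ctx);
    }

    int main(void)
    {
            struct demo_ctx ctx = { .refs = 0, .depth = DEMO_DEFAULT_DEPTH };

            demo_depth_get(&ctx);   /* e.g. a VXLAN tunnel comes up */
            demo_depth_get(&ctx);   /* e.g. PTP timestamping is enabled */
            demo_depth_put(&ctx);   /* tunnel goes away, depth stays increased */
            printf("refs=%u depth=%d\n", ctx.refs, ctx.depth);
            demo_depth_put(&ctx);
            printf("refs=%u depth=%d\n", ctx.refs, ctx.depth);
            return 0;
    }
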
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index bd9c2bc2d5d6..38bb1cfe4e8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -29,7 +29,7 @@
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
- struct rhashtable unmatched_ht;
+ struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
@@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key {
struct mlxsw_sp1_ptp_unmatched {
struct mlxsw_sp1_ptp_key key;
- struct rhash_head ht_node;
+ struct rhlist_head ht_node;
struct rcu_head rcu;
struct sk_buff *skb;
u64 timestamp;
@@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
* error.
*/
-static struct mlxsw_sp1_ptp_unmatched *
+static int
mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb,
@@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp1_ptp_unmatched *conflict;
+ int err;
unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
if (!unmatched)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
- conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
- if (conflict)
+ err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ if (err)
kfree(unmatched);
- return conflict;
+ return err;
}
static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp1_ptp_key key)
+ struct mlxsw_sp1_ptp_key key, int *p_length)
{
- return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
+ struct rhlist_head *tmp, *list;
+ int length = 0;
+
+ list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ mlxsw_sp1_ptp_unmatched_ht_params);
+ rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
+ last = unmatched;
+ length++;
+ }
+
+ *p_length = length;
+ return last;
}
static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
}
/* This function is called in the following scenarios:
@@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
- struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict;
+ struct mlxsw_sp1_ptp_unmatched *unmatched;
+ int length;
int err;
rcu_read_lock();
- unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
-
spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
- if (unmatched) {
- /* There was an unmatched entry when we looked, but it may have
- * been removed before we took the lock.
- */
- err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
- if (err)
- unmatched = NULL;
- }
-
- if (!unmatched) {
- /* We have no unmatched entry, but one may have been added after
- * we looked, but before we took the lock.
- */
- unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
- skb, timestamp);
- if (IS_ERR(unmatched)) {
- if (skb)
- mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
- key.local_port,
- key.ingress, NULL);
- unmatched = NULL;
- } else if (unmatched) {
- /* Save just told us, under lock, that the entry is
- * there, so this has to work.
- */
- err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
- unmatched);
- WARN_ON_ONCE(err);
- }
- }
-
- /* If unmatched is non-NULL here, it comes either from the lookup, or
- * from the save attempt above. In either case the entry was removed
- * from the hash table. If unmatched is NULL, a new unmatched entry was
- * added to the hash table, and there was no conflict.
- */
-
+ unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
unmatched->skb = skb;
} else if (timestamp && unmatched && unmatched->skb) {
unmatched->timestamp = timestamp;
- } else if (unmatched) {
- /* unmatched holds an older entry of the same type: either an
- * skb if we are handling skb, or a timestamp if we are handling
- * timestamp. We can't match that up, so save what we have.
+ } else {
+ /* Either there is no entry to match, or one that is there is
+ * incompatible.
*/
- conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
- skb, timestamp);
- if (IS_ERR(conflict)) {
- if (skb)
- mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
- key.local_port,
- key.ingress, NULL);
- } else {
- /* Above, we removed an object with this key from the
- * hash table, under lock, so conflict can not be a
- * valid pointer.
- */
- WARN_ON_ONCE(conflict);
- }
+ if (length < 100)
+ err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+ skb, timestamp);
+ else
+ err = -E2BIG;
+ if (err && skb)
+ mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+ key.local_port,
+ key.ingress, NULL);
+ unmatched = NULL;
+ }
+
+ if (unmatched) {
+ err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+ WARN_ON_ONCE(err);
}
spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
@@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
local_bh_disable();
spin_lock(&ptp_state->unmatched_lock);
- err = rhashtable_remove_fast(&ptp_state->unmatched_ht,
- &unmatched->ht_node,
- mlxsw_sp1_ptp_unmatched_ht_params);
+ err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
+ mlxsw_sp1_ptp_unmatched_ht_params);
spin_unlock(&ptp_state->unmatched_lock);
if (err)
@@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
- rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter);
+ rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
rhashtable_walk_start(&iter);
while ((obj = rhashtable_walk_next(&iter))) {
if (IS_ERR(obj))
@@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
spin_lock_init(&ptp_state->unmatched_lock);
- err = rhashtable_init(&ptp_state->unmatched_ht,
- &mlxsw_sp1_ptp_unmatched_ht_params);
+ err = rhltable_init(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_ht_params);
if (err)
goto err_hashtable_init;
@@ -891,7 +863,7 @@ err_fifo_clr:
err_mtptpt1_set:
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
err_mtptpt_set:
- rhashtable_destroy(&ptp_state->unmatched_ht);
+ rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
@@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
- rhashtable_free_and_destroy(&ptp_state->unmatched_ht,
- &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
+ rhltable_free_and_destroy(&ptp_state->unmatched_ht,
+ &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
}
@@ -979,6 +951,9 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
+ u16 orig_ing_types = 0;
+ u16 orig_egr_types = 0;
+ int err;
int i;
/* MTPPPC configures timestamping globally, not per port. Find the
@@ -986,12 +961,26 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
*/
for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
tmp = mlxsw_sp->ports[i];
+ if (tmp) {
+ orig_ing_types |= tmp->ptp.ing_types;
+ orig_egr_types |= tmp->ptp.egr_types;
+ }
if (tmp && tmp != mlxsw_sp_port) {
ing_types |= tmp->ptp.ing_types;
egr_types |= tmp->ptp.egr_types;
}
}
+ if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
+ err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
+ return err;
+ }
+ }
+ if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
+ mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp);
+
return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
ing_types, egr_types);
}
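
The spectrum_ptp.c conversion above moves the unmatched table from rhashtable to rhltable so several entries can share one key: lookup walks the chain and returns the last entry together with its length, and a save is refused once 100 entries accumulate for a key. A standalone single-bucket sketch of those semantics; the names and the cap come from my reading of the hunk, not from the rhltable API:

    /* Standalone sketch: duplicate-key list with a per-key cap. */
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MAX_PER_KEY 100

    struct demo_unmatched {
            int payload;
            struct demo_unmatched *next;
    };

    /* One bucket is enough for the sketch; a real table hashes the key. */
    static struct demo_unmatched *bucket;

    static struct demo_unmatched *demo_lookup_last(int *p_length)
    {
            struct demo_unmatched *e, *last = NULL;
            int length = 0;

            for (e = bucket; e; e = e->next) {
                    last = e;
                    length++;
            }
            *p_length = length;
            return last;
    }

    static int demo_save(int payload)
    {
            struct demo_unmatched *e;
            int length;

            demo_lookup_last(&length);
            if (length >= DEMO_MAX_PER_KEY)
                    return -1;      /* too many duplicates, drop instead */

            e = calloc(1, sizeof(*e));
            if (!e)
                    return -1;
            e->payload = payload;
            e->next = bucket;
            bucket = e;
            return 0;
    }

    int main(void)
    {
            int length;

            demo_save(1);
            demo_save(2);
            demo_lookup_last(&length);
            printf("entries for key: %d\n", length);
            return 0;
    }
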
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 90a8c6bead56..b9c4d48e28e4 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -6,8 +6,7 @@
config NET_VENDOR_MICREL
bool "Micrel devices"
default y
- depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \
- (ARM && ARCH_KS8695)
+ depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,14 +17,6 @@ config NET_VENDOR_MICREL
if NET_VENDOR_MICREL
-config ARM_KS8695_ETHER
- tristate "KS8695 Ethernet support"
- depends on ARM && ARCH_KS8695
- select MII
- ---help---
- If you wish to compile a kernel for the KS8695 and want to
- use the internal ethernet then you should answer Y to this.
-
config KS8842
tristate "Micrel KSZ8841/42 with generic bus interface"
depends on HAS_IOMEM && DMA_ENGINE
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index 848fc1c5a5dc..6d8ac5527aef 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -3,7 +3,6 @@
# Makefile for the Micrel network device drivers.
#
-obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
deleted file mode 100644
index 1390ef5323a2..000000000000
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ /dev/null
@@ -1,1632 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Micrel KS8695 (Centaur) Ethernet.
- *
- * Copyright 2008 Simtec Electronics
- * Daniel Silverstone <dsilvers@simtec.co.uk>
- * Vincent Sanders <vince@simtec.co.uk>
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/interrupt.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <asm/irq.h>
-
-#include <mach/regs-switch.h>
-#include <mach/regs-misc.h>
-#include <asm/mach/irq.h>
-#include <mach/regs-irq.h>
-
-#include "ks8695net.h"
-
-#define MODULENAME "ks8695_ether"
-#define MODULEVERSION "1.02"
-
-/*
- * Transmit and device reset timeout, default 5 seconds.
- */
-static int watchdog = 5000;
-
-/* Hardware structures */
-
-/**
- * struct rx_ring_desc - Receive descriptor ring element
- * @status: The status of the descriptor element (E.g. who owns it)
- * @length: The number of bytes in the block pointed to by data_ptr
- * @data_ptr: The physical address of the data block to receive into
- * @next_desc: The physical address of the next descriptor element.
- */
-struct rx_ring_desc {
- __le32 status;
- __le32 length;
- __le32 data_ptr;
- __le32 next_desc;
-};
-
-/**
- * struct tx_ring_desc - Transmit descriptor ring element
- * @owner: Who owns the descriptor
- * @status: The number of bytes in the block pointed to by data_ptr
- * @data_ptr: The physical address of the data block to receive into
- * @next_desc: The physical address of the next descriptor element.
- */
-struct tx_ring_desc {
- __le32 owner;
- __le32 status;
- __le32 data_ptr;
- __le32 next_desc;
-};
-
-/**
- * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
- * @skb: The buffer in the ring
- * @dma_ptr: The mapped DMA pointer of the buffer
- * @length: The number of bytes mapped to dma_ptr
- */
-struct ks8695_skbuff {
- struct sk_buff *skb;
- dma_addr_t dma_ptr;
- u32 length;
-};
-
-/* Private device structure */
-
-#define MAX_TX_DESC 8
-#define MAX_TX_DESC_MASK 0x7
-#define MAX_RX_DESC 16
-#define MAX_RX_DESC_MASK 0xf
-
-/*napi_weight have better more than rx DMA buffers*/
-#define NAPI_WEIGHT 64
-
-#define MAX_RXBUF_SIZE 0x700
-
-#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
-#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
-#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
-
-/**
- * enum ks8695_dtype - Device type
- * @KS8695_DTYPE_WAN: This device is a WAN interface
- * @KS8695_DTYPE_LAN: This device is a LAN interface
- * @KS8695_DTYPE_HPNA: This device is an HPNA interface
- */
-enum ks8695_dtype {
- KS8695_DTYPE_WAN,
- KS8695_DTYPE_LAN,
- KS8695_DTYPE_HPNA,
-};
-
-/**
- * struct ks8695_priv - Private data for the KS8695 Ethernet
- * @in_suspend: Flag to indicate if we're suspending/resuming
- * @ndev: The net_device for this interface
- * @dev: The platform device object for this interface
- * @dtype: The type of this device
- * @io_regs: The ioremapped registers for this interface
- * @napi : Add support NAPI for Rx
- * @rx_irq_name: The textual name of the RX IRQ from the platform data
- * @tx_irq_name: The textual name of the TX IRQ from the platform data
- * @link_irq_name: The textual name of the link IRQ from the
- * platform data if available
- * @rx_irq: The IRQ number for the RX IRQ
- * @tx_irq: The IRQ number for the TX IRQ
- * @link_irq: The IRQ number for the link IRQ if available
- * @regs_req: The resource request for the registers region
- * @phyiface_req: The resource request for the phy/switch region
- * if available
- * @phyiface_regs: The ioremapped registers for the phy/switch if available
- * @ring_base: The base pointer of the dma coherent memory for the rings
- * @ring_base_dma: The DMA mapped equivalent of ring_base
- * @tx_ring: The pointer in ring_base of the TX ring
- * @tx_ring_used: The number of slots in the TX ring which are occupied
- * @tx_ring_next_slot: The next slot to fill in the TX ring
- * @tx_ring_dma: The DMA mapped equivalent of tx_ring
- * @tx_buffers: The sk_buff mappings for the TX ring
- * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables
- * @rx_ring: The pointer in ring_base of the RX ring
- * @rx_ring_dma: The DMA mapped equivalent of rx_ring
- * @rx_buffers: The sk_buff mappings for the RX ring
- * @next_rx_desc_read: The next RX descriptor to read from on IRQ
- * @rx_lock: A lock to protect Rx irq function
- * @msg_enable: The flags for which messages to emit
- */
-struct ks8695_priv {
- int in_suspend;
- struct net_device *ndev;
- struct device *dev;
- enum ks8695_dtype dtype;
- void __iomem *io_regs;
-
- struct napi_struct napi;
-
- const char *rx_irq_name, *tx_irq_name, *link_irq_name;
- int rx_irq, tx_irq, link_irq;
-
- struct resource *regs_req, *phyiface_req;
- void __iomem *phyiface_regs;
-
- void *ring_base;
- dma_addr_t ring_base_dma;
-
- struct tx_ring_desc *tx_ring;
- int tx_ring_used;
- int tx_ring_next_slot;
- dma_addr_t tx_ring_dma;
- struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
- spinlock_t txq_lock;
-
- struct rx_ring_desc *rx_ring;
- dma_addr_t rx_ring_dma;
- struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
- int next_rx_desc_read;
- spinlock_t rx_lock;
-
- int msg_enable;
-};
-
-/* Register access */
-
-/**
- * ks8695_readreg - Read from a KS8695 ethernet register
- * @ksp: The device to read from
- * @reg: The register to read
- */
-static inline u32
-ks8695_readreg(struct ks8695_priv *ksp, int reg)
-{
- return readl(ksp->io_regs + reg);
-}
-
-/**
- * ks8695_writereg - Write to a KS8695 ethernet register
- * @ksp: The device to write to
- * @reg: The register to write
- * @value: The value to write to the register
- */
-static inline void
-ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
-{
- writel(value, ksp->io_regs + reg);
-}
-
-/* Utility functions */
-
-/**
- * ks8695_port_type - Retrieve port-type as user-friendly string
- * @ksp: The device to return the type for
- *
- * Returns a string indicating which of the WAN, LAN or HPNA
- * ports this device is likely to represent.
- */
-static const char *
-ks8695_port_type(struct ks8695_priv *ksp)
-{
- switch (ksp->dtype) {
- case KS8695_DTYPE_LAN:
- return "LAN";
- case KS8695_DTYPE_WAN:
- return "WAN";
- case KS8695_DTYPE_HPNA:
- return "HPNA";
- }
-
- return "UNKNOWN";
-}
-
-/**
- * ks8695_update_mac - Update the MAC registers in the device
- * @ksp: The device to update
- *
- * Updates the MAC registers in the KS8695 device from the address in the
- * net_device structure associated with this interface.
- */
-static void
-ks8695_update_mac(struct ks8695_priv *ksp)
-{
- /* Update the HW with the MAC from the net_device */
- struct net_device *ndev = ksp->ndev;
- u32 machigh, maclow;
-
- maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
- (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
- machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
-
- ks8695_writereg(ksp, KS8695_MAL, maclow);
- ks8695_writereg(ksp, KS8695_MAH, machigh);
-
-}
-
-/**
- * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
- * @ksp: The device to refill
- *
- * Iterates the RX ring of the device looking for empty slots.
- * For each empty slot, we allocate and map a new SKB and give it
- * to the hardware.
- * This can be called from interrupt context safely.
- */
-static void
-ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
-{
- /* Run around the RX ring, filling in any missing sk_buff's */
- int buff_n;
-
- for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
- if (!ksp->rx_buffers[buff_n].skb) {
- struct sk_buff *skb =
- netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
- dma_addr_t mapping;
-
- ksp->rx_buffers[buff_n].skb = skb;
- if (skb == NULL) {
- /* Failed to allocate one, perhaps
- * we'll try again later.
- */
- break;
- }
-
- mapping = dma_map_single(ksp->dev, skb->data,
- MAX_RXBUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
- /* Failed to DMA map this SKB, try later */
- dev_kfree_skb_irq(skb);
- ksp->rx_buffers[buff_n].skb = NULL;
- break;
- }
- ksp->rx_buffers[buff_n].dma_ptr = mapping;
- ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
-
- /* Record this into the DMA ring */
- ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
- ksp->rx_ring[buff_n].length =
- cpu_to_le32(MAX_RXBUF_SIZE);
-
- wmb();
-
- /* And give ownership over to the hardware */
- ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
- }
- }
-}
-
-/* Maximum number of multicast addresses which the KS8695 HW supports */
-#define KS8695_NR_ADDRESSES 16
-
-/**
- * ks8695_init_partial_multicast - Init the mcast addr registers
- * @ksp: The device to initialise
- * @addr: The multicast address list to use
- * @nr_addr: The number of addresses in the list
- *
- * This routine is a helper for ks8695_set_multicast - it writes
- * the additional-address registers in the KS8695 ethernet device
- * and cleans up any others left behind.
- */
-static void
-ks8695_init_partial_multicast(struct ks8695_priv *ksp,
- struct net_device *ndev)
-{
- u32 low, high;
- int i;
- struct netdev_hw_addr *ha;
-
- i = 0;
- netdev_for_each_mc_addr(ha, ndev) {
- /* Ran out of space in chip? */
- BUG_ON(i == KS8695_NR_ADDRESSES);
-
- low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
- (ha->addr[4] << 8) | (ha->addr[5]);
- high = (ha->addr[0] << 8) | (ha->addr[1]);
-
- ks8695_writereg(ksp, KS8695_AAL_(i), low);
- ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
- i++;
- }
-
- /* Clear the remaining Additional Station Addresses */
- for (; i < KS8695_NR_ADDRESSES; i++) {
- ks8695_writereg(ksp, KS8695_AAL_(i), 0);
- ks8695_writereg(ksp, KS8695_AAH_(i), 0);
- }
-}
-
-/* Interrupt handling */
-
-/**
- * ks8695_tx_irq - Transmit IRQ handler
- * @irq: The IRQ which went off (ignored)
- * @dev_id: The net_device for the interrupt
- *
- * Process the TX ring, clearing out any transmitted slots.
- * Allows the net_device to pass us new packets once slots are
- * freed.
- */
-static irqreturn_t
-ks8695_tx_irq(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct ks8695_priv *ksp = netdev_priv(ndev);
- int buff_n;
-
- for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
- if (ksp->tx_buffers[buff_n].skb &&
- !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
- rmb();
- /* An SKB which is not owned by HW is present */
- /* Update the stats for the net_device */
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;
-
- /* Free the packet from the ring */
- ksp->tx_ring[buff_n].data_ptr = 0;
-
- /* Free the sk_buff */
- dma_unmap_single(ksp->dev,
- ksp->tx_buffers[buff_n].dma_ptr,
- ksp->tx_buffers[buff_n].length,
- DMA_TO_DEVICE);
- dev_consume_skb_irq(ksp->tx_buffers[buff_n].skb);
- ksp->tx_buffers[buff_n].skb = NULL;
- ksp->tx_ring_used--;
- }
- }
-
- netif_wake_queue(ndev);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
- * @ksp: Private data for the KS8695 Ethernet
- *
- * For KS8695 document:
- * Interrupt Enable Register (offset 0xE204)
- * Bit29 : WAN MAC Receive Interrupt Enable
- * Bit16 : LAN MAC Receive Interrupt Enable
- * Interrupt Status Register (Offset 0xF208)
- * Bit29: WAN MAC Receive Status
- * Bit16: LAN MAC Receive Status
- * So, this Rx interrupt enable/status bit number is equal
- * as Rx IRQ number.
- */
-static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
-{
- return ksp->rx_irq;
-}
-
-/**
- * ks8695_rx_irq - Receive IRQ handler
- * @irq: The IRQ which went off (ignored)
- * @dev_id: The net_device for the interrupt
- *
- * Inform NAPI that packet reception needs to be scheduled
- */
-
-static irqreturn_t
-ks8695_rx_irq(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- spin_lock(&ksp->rx_lock);
-
- if (napi_schedule_prep(&ksp->napi)) {
- unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
- unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
- /*disable rx interrupt*/
- status &= ~mask_bit;
- writel(status , KS8695_IRQ_VA + KS8695_INTEN);
- __napi_schedule(&ksp->napi);
- }
-
- spin_unlock(&ksp->rx_lock);
- return IRQ_HANDLED;
-}
-
-/**
- * ks8695_rx - Receive packets called by NAPI poll method
- * @ksp: Private data for the KS8695 Ethernet
- * @budget: Number of packets allowed to process
- */
-static int ks8695_rx(struct ks8695_priv *ksp, int budget)
-{
- struct net_device *ndev = ksp->ndev;
- struct sk_buff *skb;
- int buff_n;
- u32 flags;
- int pktlen;
- int received = 0;
-
- buff_n = ksp->next_rx_desc_read;
- while (received < budget
- && ksp->rx_buffers[buff_n].skb
- && (!(ksp->rx_ring[buff_n].status &
- cpu_to_le32(RDES_OWN)))) {
- rmb();
- flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
-
- /* Found an SKB which we own, this means we
- * received a packet
- */
- if ((flags & (RDES_FS | RDES_LS)) !=
- (RDES_FS | RDES_LS)) {
- /* This packet is not the first and
- * the last segment. Therefore it is
- * a "spanning" packet and we can't
- * handle it
- */
- goto rx_failure;
- }
-
- if (flags & (RDES_ES | RDES_RE)) {
- /* It's an error packet */
- ndev->stats.rx_errors++;
- if (flags & RDES_TL)
- ndev->stats.rx_length_errors++;
- if (flags & RDES_RF)
- ndev->stats.rx_length_errors++;
- if (flags & RDES_CE)
- ndev->stats.rx_crc_errors++;
- if (flags & RDES_RE)
- ndev->stats.rx_missed_errors++;
-
- goto rx_failure;
- }
-
- pktlen = flags & RDES_FLEN;
- pktlen -= 4; /* Drop the CRC */
-
- /* Retrieve the sk_buff */
- skb = ksp->rx_buffers[buff_n].skb;
-
- /* Clear it from the ring */
- ksp->rx_buffers[buff_n].skb = NULL;
- ksp->rx_ring[buff_n].data_ptr = 0;
-
- /* Unmap the SKB */
- dma_unmap_single(ksp->dev,
- ksp->rx_buffers[buff_n].dma_ptr,
- ksp->rx_buffers[buff_n].length,
- DMA_FROM_DEVICE);
-
- /* Relinquish the SKB to the network layer */
- skb_put(skb, pktlen);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&ksp->napi, skb);
-
- /* Record stats */
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += pktlen;
- goto rx_finished;
-
-rx_failure:
- /* This ring entry is an error, but we can
- * re-use the skb
- */
- /* Give the ring entry back to the hardware */
- ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
-rx_finished:
- received++;
- buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
- }
-
- /* And note which RX descriptor we last did */
- ksp->next_rx_desc_read = buff_n;
-
- /* And refill the buffers */
- ks8695_refill_rxbuffers(ksp);
-
- /* Kick the RX DMA engine, in case it became suspended */
- ks8695_writereg(ksp, KS8695_DRSC, 0);
-
- return received;
-}
-
-
-/**
- * ks8695_poll - Receive packet by NAPI poll method
- * @ksp: Private data for the KS8695 Ethernet
- * @budget: The remaining number packets for network subsystem
- *
- * Invoked by the network core when it requests for new
- * packets from the driver
- */
-static int ks8695_poll(struct napi_struct *napi, int budget)
-{
- struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
- unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
- unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
- int work_done;
-
- work_done = ks8695_rx(ksp, budget);
-
- if (work_done < budget && napi_complete_done(napi, work_done)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ksp->rx_lock, flags);
- /* enable rx interrupt */
- writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
- spin_unlock_irqrestore(&ksp->rx_lock, flags);
- }
- return work_done;
-}
-
-/**
- * ks8695_link_irq - Link change IRQ handler
- * @irq: The IRQ which went off (ignored)
- * @dev_id: The net_device for the interrupt
- *
- * The WAN interface can generate an IRQ when the link changes,
- * report this to the net layer and the user.
- */
-static irqreturn_t
-ks8695_link_irq(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
-
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- if (ctrl & WMC_WLS) {
- netif_carrier_on(ndev);
- if (netif_msg_link(ksp))
- dev_info(ksp->dev,
- "%s: Link is now up (10%sMbps/%s-duplex)\n",
- ndev->name,
- (ctrl & WMC_WSS) ? "0" : "",
- (ctrl & WMC_WDS) ? "Full" : "Half");
- } else {
- netif_carrier_off(ndev);
- if (netif_msg_link(ksp))
- dev_info(ksp->dev, "%s: Link is now down.\n",
- ndev->name);
- }
-
- return IRQ_HANDLED;
-}
-
-
-/* KS8695 Device functions */
-
-/**
- * ks8695_reset - Reset a KS8695 ethernet interface
- * @ksp: The interface to reset
- *
- * Perform an engine reset of the interface and re-program it
- * with sensible defaults.
- */
-static void
-ks8695_reset(struct ks8695_priv *ksp)
-{
- int reset_timeout = watchdog;
- /* Issue the reset via the TX DMA control register */
- ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
- while (reset_timeout--) {
- if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
- break;
- msleep(1);
- }
-
- if (reset_timeout < 0) {
- dev_crit(ksp->dev,
- "Timeout waiting for DMA engines to reset\n");
- /* And blithely carry on */
- }
-
- /* Definitely wait long enough before attempting to program
- * the engines
- */
- msleep(10);
-
- /* RX: unicast and broadcast */
- ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
- /* TX: pad and add CRC */
- ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
-}
-
-/**
- * ks8695_shutdown - Shut down a KS8695 ethernet interface
- * @ksp: The interface to shut down
- *
- * This disables packet RX/TX, cleans up IRQs, drains the rings,
- * and basically places the interface into a clean shutdown
- * state.
- */
-static void
-ks8695_shutdown(struct ks8695_priv *ksp)
-{
- u32 ctrl;
- int buff_n;
-
- /* Disable packet transmission */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
-
- /* Disable packet reception */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
-
- /* Release the IRQs */
- free_irq(ksp->rx_irq, ksp->ndev);
- free_irq(ksp->tx_irq, ksp->ndev);
- if (ksp->link_irq != -1)
- free_irq(ksp->link_irq, ksp->ndev);
-
- /* Throw away any pending TX packets */
- for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
- if (ksp->tx_buffers[buff_n].skb) {
- /* Remove this SKB from the TX ring */
- ksp->tx_ring[buff_n].owner = 0;
- ksp->tx_ring[buff_n].status = 0;
- ksp->tx_ring[buff_n].data_ptr = 0;
-
- /* Unmap and bin this SKB */
- dma_unmap_single(ksp->dev,
- ksp->tx_buffers[buff_n].dma_ptr,
- ksp->tx_buffers[buff_n].length,
- DMA_TO_DEVICE);
- dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
- ksp->tx_buffers[buff_n].skb = NULL;
- }
- }
-
- /* Purge the RX buffers */
- for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
- if (ksp->rx_buffers[buff_n].skb) {
- /* Remove the SKB from the RX ring */
- ksp->rx_ring[buff_n].status = 0;
- ksp->rx_ring[buff_n].data_ptr = 0;
-
- /* Unmap and bin the SKB */
- dma_unmap_single(ksp->dev,
- ksp->rx_buffers[buff_n].dma_ptr,
- ksp->rx_buffers[buff_n].length,
- DMA_FROM_DEVICE);
- dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
- ksp->rx_buffers[buff_n].skb = NULL;
- }
- }
-}
-
-
-/**
- * ks8695_setup_irq - IRQ setup helper function
- * @irq: The IRQ number to claim
- * @irq_name: The name to give the IRQ claimant
- * @handler: The function to call to handle the IRQ
- * @ndev: The net_device to pass in as the dev_id argument to the handler
- *
- * Return 0 on success.
- */
-static int
-ks8695_setup_irq(int irq, const char *irq_name,
- irq_handler_t handler, struct net_device *ndev)
-{
- int ret;
-
- ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
-
- if (ret) {
- dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ks8695_init_net - Initialise a KS8695 ethernet interface
- * @ksp: The interface to initialise
- *
- * This routine fills the RX ring, initialises the DMA engines,
- * allocates the IRQs and then starts the packet TX and RX
- * engines.
- */
-static int
-ks8695_init_net(struct ks8695_priv *ksp)
-{
- int ret;
- u32 ctrl;
-
- ks8695_refill_rxbuffers(ksp);
-
- /* Initialise the DMA engines */
- ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
- ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
-
- /* Request the IRQs */
- ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
- ks8695_rx_irq, ksp->ndev);
- if (ret)
- return ret;
- ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
- ks8695_tx_irq, ksp->ndev);
- if (ret)
- return ret;
- if (ksp->link_irq != -1) {
- ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
- ks8695_link_irq, ksp->ndev);
- if (ret)
- return ret;
- }
-
- /* Set up the ring indices */
- ksp->next_rx_desc_read = 0;
- ksp->tx_ring_next_slot = 0;
- ksp->tx_ring_used = 0;
-
- /* Bring up transmission */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- /* Enable packet transmission */
- ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
-
- /* Bring up the reception */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- /* Enable packet reception */
- ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
- /* And start the DMA engine */
- ks8695_writereg(ksp, KS8695_DRSC, 0);
-
- /* All done */
- return 0;
-}
-
-/**
- * ks8695_release_device - HW resource release for KS8695 e-net
- * @ksp: The device to be freed
- *
- * This unallocates io memory regions, dma-coherent regions etc
- * which were allocated in ks8695_probe.
- */
-static void
-ks8695_release_device(struct ks8695_priv *ksp)
-{
- /* Unmap the registers */
- iounmap(ksp->io_regs);
- if (ksp->phyiface_regs)
- iounmap(ksp->phyiface_regs);
-
- /* And release the request */
- release_resource(ksp->regs_req);
- kfree(ksp->regs_req);
- if (ksp->phyiface_req) {
- release_resource(ksp->phyiface_req);
- kfree(ksp->phyiface_req);
- }
-
- /* Free the ring buffers */
- dma_free_coherent(ksp->dev, RING_DMA_SIZE,
- ksp->ring_base, ksp->ring_base_dma);
-}
-
-/* Ethtool support */
-
-/**
- * ks8695_get_msglevel - Get the messages enabled for emission
- * @ndev: The network device to read from
- */
-static u32
-ks8695_get_msglevel(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- return ksp->msg_enable;
-}
-
-/**
- * ks8695_set_msglevel - Set the messages enabled for emission
- * @ndev: The network device to configure
- * @value: The messages to set for emission
- */
-static void
-ks8695_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- ksp->msg_enable = value;
-}
-
-/**
- * ks8695_wan_get_link_ksettings - Get device-specific settings.
- * @ndev: The network device to read settings from
- * @cmd: The ethtool structure to read into
- */
-static int
-ks8695_wan_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
- u32 supported, advertising;
-
- /* All ports on the KS8695 support these... */
- supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
- SUPPORTED_TP | SUPPORTED_MII);
-
- advertising = ADVERTISED_TP | ADVERTISED_MII;
- cmd->base.port = PORT_MII;
- supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
- cmd->base.phy_address = 0;
-
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- if ((ctrl & WMC_WAND) == 0) {
- /* auto-negotiation is enabled */
- advertising |= ADVERTISED_Autoneg;
- if (ctrl & WMC_WANA100F)
- advertising |= ADVERTISED_100baseT_Full;
- if (ctrl & WMC_WANA100H)
- advertising |= ADVERTISED_100baseT_Half;
- if (ctrl & WMC_WANA10F)
- advertising |= ADVERTISED_10baseT_Full;
- if (ctrl & WMC_WANA10H)
- advertising |= ADVERTISED_10baseT_Half;
- if (ctrl & WMC_WANAP)
- advertising |= ADVERTISED_Pause;
- cmd->base.autoneg = AUTONEG_ENABLE;
-
- cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
- cmd->base.duplex = (ctrl & WMC_WDS) ?
- DUPLEX_FULL : DUPLEX_HALF;
- } else {
- /* auto-negotiation is disabled */
- cmd->base.autoneg = AUTONEG_DISABLE;
-
- cmd->base.speed = (ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10;
- cmd->base.duplex = (ctrl & WMC_WANFF) ?
- DUPLEX_FULL : DUPLEX_HALF;
- }
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-/**
- * ks8695_wan_set_link_ksettings - Set device-specific settings.
- * @ndev: The network device to configure
- * @cmd: The settings to configure
- */
-static int
-ks8695_wan_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
-
- if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100))
- return -EINVAL;
- if ((cmd->base.duplex != DUPLEX_HALF) &&
- (cmd->base.duplex != DUPLEX_FULL))
- return -EINVAL;
- if (cmd->base.port != PORT_MII)
- return -EINVAL;
- if ((cmd->base.autoneg != AUTONEG_DISABLE) &&
- (cmd->base.autoneg != AUTONEG_ENABLE))
- return -EINVAL;
-
- if (cmd->base.autoneg == AUTONEG_ENABLE) {
- if ((advertising & (ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full)) == 0)
- return -EINVAL;
-
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
- WMC_WANA10F | WMC_WANA10H);
- if (advertising & ADVERTISED_100baseT_Full)
- ctrl |= WMC_WANA100F;
- if (advertising & ADVERTISED_100baseT_Half)
- ctrl |= WMC_WANA100H;
- if (advertising & ADVERTISED_10baseT_Full)
- ctrl |= WMC_WANA10F;
- if (advertising & ADVERTISED_10baseT_Half)
- ctrl |= WMC_WANA10H;
-
- /* force a re-negotiation */
- ctrl |= WMC_WANR;
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- } else {
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* disable auto-negotiation */
- ctrl |= WMC_WAND;
- ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
- if (cmd->base.speed == SPEED_100)
- ctrl |= WMC_WANF100;
- if (cmd->base.duplex == DUPLEX_FULL)
- ctrl |= WMC_WANFF;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- }
-
- return 0;
-}
-
-/**
- * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
- * @ndev: The network device to restart autoneotiation on
- */
-static int
-ks8695_wan_nwayreset(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
-
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- if ((ctrl & WMC_WAND) == 0)
- writel(ctrl | WMC_WANR,
- ksp->phyiface_regs + KS8695_WMC);
- else
- /* auto-negotiation not enabled */
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
- * @ndev: The device to retrieve settings from
- * @param: The structure to fill out with the information
- */
-static void
-ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
-
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* advertise Pause */
- param->autoneg = (ctrl & WMC_WANAP);
-
- /* current Rx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- param->rx_pause = (ctrl & DRXC_RFCE);
-
- /* current Tx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- param->tx_pause = (ctrl & DTXC_TFCE);
-}
-
-/**
- * ks8695_get_drvinfo - Retrieve driver information
- * @ndev: The network device to retrieve info about
- * @info: The info structure to fill out.
- */
-static void
-ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, MODULEVERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
- sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops ks8695_ethtool_ops = {
- .get_msglevel = ks8695_get_msglevel,
- .set_msglevel = ks8695_set_msglevel,
- .get_drvinfo = ks8695_get_drvinfo,
-};
-
-static const struct ethtool_ops ks8695_wan_ethtool_ops = {
- .get_msglevel = ks8695_get_msglevel,
- .set_msglevel = ks8695_set_msglevel,
- .nway_reset = ks8695_wan_nwayreset,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = ks8695_wan_get_pause,
- .get_drvinfo = ks8695_get_drvinfo,
- .get_link_ksettings = ks8695_wan_get_link_ksettings,
- .set_link_ksettings = ks8695_wan_set_link_ksettings,
-};
-
-/* Network device interface functions */
-
-/**
- * ks8695_set_mac - Update MAC in net dev and HW
- * @ndev: The network device to update
- * @addr: The new MAC address to set
- */
-static int
-ks8695_set_mac(struct net_device *ndev, void *addr)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- struct sockaddr *address = addr;
-
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
-
- ks8695_update_mac(ksp);
-
- dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
- ndev->name, ndev->dev_addr);
-
- return 0;
-}
-
-/**
- * ks8695_set_multicast - Set up the multicast behaviour of the interface
- * @ndev: The net_device to configure
- *
- * This routine, called by the net layer, configures promiscuity
- * and multicast reception behaviour for the interface.
- */
-static void
-ks8695_set_multicast(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
-
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
-
- if (ndev->flags & IFF_PROMISC) {
- /* enable promiscuous mode */
- ctrl |= DRXC_RA;
- } else if (ndev->flags & ~IFF_PROMISC) {
- /* disable promiscuous mode */
- ctrl &= ~DRXC_RA;
- }
-
- if (ndev->flags & IFF_ALLMULTI) {
- /* enable all multicast mode */
- ctrl |= DRXC_RM;
- } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
- /* more specific multicast addresses than can be
- * handled in hardware
- */
- ctrl |= DRXC_RM;
- } else {
- /* enable specific multicasts */
- ctrl &= ~DRXC_RM;
- ks8695_init_partial_multicast(ksp, ndev);
- }
-
- ks8695_writereg(ksp, KS8695_DRXC, ctrl);
-}
-
-/**
- * ks8695_timeout - Handle a network tx/rx timeout.
- * @ndev: The net_device which timed out.
- *
- * A network transaction timed out, reset the device.
- */
-static void
-ks8695_timeout(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- netif_stop_queue(ndev);
- ks8695_shutdown(ksp);
-
- ks8695_reset(ksp);
-
- ks8695_update_mac(ksp);
-
- /* We ignore the return from this since it managed to init
- * before it probably will be okay to init again.
- */
- ks8695_init_net(ksp);
-
- /* Reconfigure promiscuity etc */
- ks8695_set_multicast(ndev);
-
- /* And start the TX queue once more */
- netif_start_queue(ndev);
-}
-
-/**
- * ks8695_start_xmit - Start a packet transmission
- * @skb: The packet to transmit
- * @ndev: The network device to send the packet on
- *
- * This routine, called by the net layer, takes ownership of the
- * sk_buff and adds it to the TX ring. It then kicks the TX DMA
- * engine to ensure transmission begins.
- */
-static netdev_tx_t
-ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- int buff_n;
- dma_addr_t dmap;
-
- spin_lock_irq(&ksp->txq_lock);
-
- if (ksp->tx_ring_used == MAX_TX_DESC) {
- /* Somehow we got entered when we have no room */
- spin_unlock_irq(&ksp->txq_lock);
- return NETDEV_TX_BUSY;
- }
-
- buff_n = ksp->tx_ring_next_slot;
-
- BUG_ON(ksp->tx_buffers[buff_n].skb);
-
- dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
- /* Failed to DMA map this SKB, give it back for now */
- spin_unlock_irq(&ksp->txq_lock);
- dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
- "transmission, trying later\n", ndev->name);
- return NETDEV_TX_BUSY;
- }
-
- ksp->tx_buffers[buff_n].dma_ptr = dmap;
- /* Mapped okay, store the buffer pointer and length for later */
- ksp->tx_buffers[buff_n].skb = skb;
- ksp->tx_buffers[buff_n].length = skb->len;
-
- /* Fill out the TX descriptor */
- ksp->tx_ring[buff_n].data_ptr =
- cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
- ksp->tx_ring[buff_n].status =
- cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
- (skb->len & TDES_TBS));
-
- wmb();
-
- /* Hand it over to the hardware */
- ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
-
- if (++ksp->tx_ring_used == MAX_TX_DESC)
- netif_stop_queue(ndev);
-
- /* Kick the TX DMA in case it decided to go IDLE */
- ks8695_writereg(ksp, KS8695_DTSC, 0);
-
- /* And update the next ring slot */
- ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
-
- spin_unlock_irq(&ksp->txq_lock);
- return NETDEV_TX_OK;
-}
-
-/**
- * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
- * @ndev: The net_device to stop
- *
- * This disables the TX queue and cleans up a KS8695 ethernet
- * device.
- */
-static int
-ks8695_stop(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- netif_stop_queue(ndev);
- napi_disable(&ksp->napi);
-
- ks8695_shutdown(ksp);
-
- return 0;
-}
-
-/**
- * ks8695_open - Open (bring up) a KS8695 ethernet interface
- * @ndev: The net_device to open
- *
- * This resets, configures the MAC, initialises the RX ring and
- * DMA engines and starts the TX queue for a KS8695 ethernet
- * device.
- */
-static int
-ks8695_open(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- int ret;
-
- ks8695_reset(ksp);
-
- ks8695_update_mac(ksp);
-
- ret = ks8695_init_net(ksp);
- if (ret) {
- ks8695_shutdown(ksp);
- return ret;
- }
-
- napi_enable(&ksp->napi);
- netif_start_queue(ndev);
-
- return 0;
-}
-
-/* Platform device driver */
-
-/**
- * ks8695_init_switch - Init LAN switch to known good defaults.
- * @ksp: The device to initialise
- *
- * This initialises the LAN switch in the KS8695 to a known-good
- * set of defaults.
- */
-static void
-ks8695_init_switch(struct ks8695_priv *ksp)
-{
- u32 ctrl;
-
- /* Default value for SEC0 according to datasheet */
- ctrl = 0x40819e00;
-
- /* LED0 = Speed LED1 = Link/Activity */
- ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
- ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
-
- /* Enable Switch */
- ctrl |= SEC0_ENABLE;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
-
- /* Defaults for SEC1 */
- writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
-}
-
-/**
- * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
- * @ksp: The device to initialise
- *
- * This initialises a KS8695's WAN phy to sensible values for
- * autonegotiation etc.
- */
-static void
-ks8695_init_wan_phy(struct ks8695_priv *ksp)
-{
- u32 ctrl;
-
- /* Support auto-negotiation */
- ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
- WMC_WANA10F | WMC_WANA10H);
-
- /* LED0 = Activity , LED1 = Link */
- ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
-
- /* Restart Auto-negotiation */
- ctrl |= WMC_WANR;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-
- writel(0, ksp->phyiface_regs + KS8695_WPPM);
- writel(0, ksp->phyiface_regs + KS8695_PPS);
-}
-
-static const struct net_device_ops ks8695_netdev_ops = {
- .ndo_open = ks8695_open,
- .ndo_stop = ks8695_stop,
- .ndo_start_xmit = ks8695_start_xmit,
- .ndo_tx_timeout = ks8695_timeout,
- .ndo_set_mac_address = ks8695_set_mac,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = ks8695_set_multicast,
-};
-
-/**
- * ks8695_probe - Probe and initialise a KS8695 ethernet interface
- * @pdev: The platform device to probe
- *
- * Initialise a KS8695 ethernet device from platform data.
- *
- * This driver requires at least one IORESOURCE_MEM for the
- * registers and two IORESOURCE_IRQ for the RX and TX IRQs
- * respectively. It can optionally take an additional
- * IORESOURCE_MEM for the switch or phy in the case of the lan or
- * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
- * port.
- */
-static int
-ks8695_probe(struct platform_device *pdev)
-{
- struct ks8695_priv *ksp;
- struct net_device *ndev;
- struct resource *regs_res, *phyiface_res;
- struct resource *rxirq_res, *txirq_res, *linkirq_res;
- int ret = 0;
- int buff_n;
- bool inv_mac_addr = false;
- u32 machigh, maclow;
-
- /* Initialise a net_device */
- ndev = alloc_etherdev(sizeof(struct ks8695_priv));
- if (!ndev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- dev_dbg(&pdev->dev, "ks8695_probe() called\n");
-
- /* Configure our private structure a little */
- ksp = netdev_priv(ndev);
-
- ksp->dev = &pdev->dev;
- ksp->ndev = ndev;
- ksp->msg_enable = NETIF_MSG_LINK;
-
- /* Retrieve resources */
- regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
-
- if (!(regs_res && rxirq_res && txirq_res)) {
- dev_err(ksp->dev, "insufficient resources\n");
- ret = -ENOENT;
- goto failure;
- }
-
- ksp->regs_req = request_mem_region(regs_res->start,
- resource_size(regs_res),
- pdev->name);
-
- if (!ksp->regs_req) {
- dev_err(ksp->dev, "cannot claim register space\n");
- ret = -EIO;
- goto failure;
- }
-
- ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
-
- if (!ksp->io_regs) {
- dev_err(ksp->dev, "failed to ioremap registers\n");
- ret = -EINVAL;
- goto failure;
- }
-
- if (phyiface_res) {
- ksp->phyiface_req =
- request_mem_region(phyiface_res->start,
- resource_size(phyiface_res),
- phyiface_res->name);
-
- if (!ksp->phyiface_req) {
- dev_err(ksp->dev,
- "cannot claim switch register space\n");
- ret = -EIO;
- goto failure;
- }
-
- ksp->phyiface_regs = ioremap(phyiface_res->start,
- resource_size(phyiface_res));
-
- if (!ksp->phyiface_regs) {
- dev_err(ksp->dev,
- "failed to ioremap switch registers\n");
- ret = -EINVAL;
- goto failure;
- }
- }
-
- ksp->rx_irq = rxirq_res->start;
- ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
- ksp->tx_irq = txirq_res->start;
- ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
- ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
- ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
- linkirq_res->name : "Ethernet Link";
-
- /* driver system setup */
- ndev->netdev_ops = &ks8695_netdev_ops;
- ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
-
- netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
-
- /* Retrieve the default MAC addr from the chip. */
- /* The bootloader should have left it in there for us. */
-
- machigh = ks8695_readreg(ksp, KS8695_MAH);
- maclow = ks8695_readreg(ksp, KS8695_MAL);
-
- ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
- ndev->dev_addr[1] = machigh & 0xFF;
- ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
- ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
- ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
- ndev->dev_addr[5] = maclow & 0xFF;
-
- if (!is_valid_ether_addr(ndev->dev_addr))
- inv_mac_addr = true;
-
- /* In order to be efficient memory-wise, we allocate both
- * rings in one go.
- */
- ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
- &ksp->ring_base_dma, GFP_KERNEL);
- if (!ksp->ring_base) {
- ret = -ENOMEM;
- goto failure;
- }
-
- /* Specify the TX DMA ring buffer */
- ksp->tx_ring = ksp->ring_base;
- ksp->tx_ring_dma = ksp->ring_base_dma;
-
- /* And initialise the queue's lock */
- spin_lock_init(&ksp->txq_lock);
- spin_lock_init(&ksp->rx_lock);
-
- /* Specify the RX DMA ring buffer */
- ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
- ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;
-
- /* Zero the descriptor rings */
- memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
- memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
-
- /* Build the rings */
- for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
- ksp->tx_ring[buff_n].next_desc =
- cpu_to_le32(ksp->tx_ring_dma +
- (sizeof(struct tx_ring_desc) *
- ((buff_n + 1) & MAX_TX_DESC_MASK)));
- }
-
- for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
- ksp->rx_ring[buff_n].next_desc =
- cpu_to_le32(ksp->rx_ring_dma +
- (sizeof(struct rx_ring_desc) *
- ((buff_n + 1) & MAX_RX_DESC_MASK)));
- }
-
- /* Initialise the port (physically) */
- if (ksp->phyiface_regs && ksp->link_irq == -1) {
- ks8695_init_switch(ksp);
- ksp->dtype = KS8695_DTYPE_LAN;
- ndev->ethtool_ops = &ks8695_ethtool_ops;
- } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
- ks8695_init_wan_phy(ksp);
- ksp->dtype = KS8695_DTYPE_WAN;
- ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
- } else {
- /* No initialisation since HPNA does not have a PHY */
- ksp->dtype = KS8695_DTYPE_HPNA;
- ndev->ethtool_ops = &ks8695_ethtool_ops;
- }
-
- /* And bring up the net_device with the net core */
- platform_set_drvdata(pdev, ndev);
- ret = register_netdev(ndev);
-
- if (ret == 0) {
- if (inv_mac_addr)
- dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
- ndev->name);
- dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
- ks8695_port_type(ksp), ndev->dev_addr);
- } else {
- /* Report the failure to register the net_device */
- dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
- goto failure;
- }
-
- /* All is well */
- return 0;
-
- /* Error exit path */
-failure:
- ks8695_release_device(ksp);
- free_netdev(ndev);
-
- return ret;
-}
-
-/**
- * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
- * @pdev: The device to suspend
- * @state: The suspend state
- *
- * This routine detaches and shuts down a KS8695 ethernet device.
- */
-static int
-ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- ksp->in_suspend = 1;
-
- if (netif_running(ndev)) {
- netif_device_detach(ndev);
- ks8695_shutdown(ksp);
- }
-
- return 0;
-}
-
-/**
- * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
- * @pdev: The device to resume
- *
- * This routine re-initialises and re-attaches a KS8695 ethernet
- * device.
- */
-static int
-ks8695_drv_resume(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- if (netif_running(ndev)) {
- ks8695_reset(ksp);
- ks8695_init_net(ksp);
- ks8695_set_multicast(ndev);
- netif_device_attach(ndev);
- }
-
- ksp->in_suspend = 0;
-
- return 0;
-}
-
-/**
- * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
- * @pdev: The platform device to remove
- *
- * This unregisters and releases a KS8695 ethernet device.
- */
-static int
-ks8695_drv_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ks8695_priv *ksp = netdev_priv(ndev);
-
- netif_napi_del(&ksp->napi);
-
- unregister_netdev(ndev);
- ks8695_release_device(ksp);
- free_netdev(ndev);
-
- dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
-}
-
-static struct platform_driver ks8695_driver = {
- .driver = {
- .name = MODULENAME,
- },
- .probe = ks8695_probe,
- .remove = ks8695_drv_remove,
- .suspend = ks8695_drv_suspend,
- .resume = ks8695_drv_resume,
-};
-
-module_platform_driver(ks8695_driver);
-
-MODULE_AUTHOR("Simtec Electronics");
-MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" MODULENAME);
-
-module_param(watchdog, int, 0400);
-MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
diff --git a/drivers/net/ethernet/micrel/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h
deleted file mode 100644
index b18fad4ad5fd..000000000000
--- a/drivers/net/ethernet/micrel/ks8695net.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Micrel KS8695 (Centaur) Ethernet.
- *
- * Copyright 2008 Simtec Electronics
- * Daniel Silverstone <dsilvers@simtec.co.uk>
- * Vincent Sanders <vince@simtec.co.uk>
- */
-
-#ifndef KS8695NET_H
-#define KS8695NET_H
-
-/* Receive descriptor flags */
-#define RDES_OWN (1 << 31) /* Ownership */
-#define RDES_FS (1 << 30) /* First Descriptor */
-#define RDES_LS (1 << 29) /* Last Descriptor */
-#define RDES_IPE (1 << 28) /* IP Checksum error */
-#define RDES_TCPE (1 << 27) /* TCP Checksum error */
-#define RDES_UDPE (1 << 26) /* UDP Checksum error */
-#define RDES_ES (1 << 25) /* Error summary */
-#define RDES_MF (1 << 24) /* Multicast Frame */
-#define RDES_RE (1 << 19) /* MII Error reported */
-#define RDES_TL (1 << 18) /* Frame too Long */
-#define RDES_RF (1 << 17) /* Runt Frame */
-#define RDES_CE (1 << 16) /* CRC error */
-#define RDES_FT (1 << 15) /* Frame Type */
-#define RDES_FLEN (0x7ff) /* Frame Length */
-
-#define RDES_RER (1 << 25) /* Receive End of Ring */
-#define RDES_RBS (0x7ff) /* Receive Buffer Size */
-
-/* Transmit descriptor flags */
-
-#define TDES_OWN (1 << 31) /* Ownership */
-
-#define TDES_IC (1 << 31) /* Interrupt on Completion */
-#define TDES_FS (1 << 30) /* First Segment */
-#define TDES_LS (1 << 29) /* Last Segment */
-#define TDES_IPCKG (1 << 28) /* IP Checksum generate */
-#define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */
-#define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */
-#define TDES_TER (1 << 25) /* Transmit End of Ring */
-#define TDES_TBS (0x7ff) /* Transmit Buffer Size */
-
-/*
- * Network controller register offsets
- */
-#define KS8695_DTXC (0x00) /* DMA Transmit Control */
-#define KS8695_DRXC (0x04) /* DMA Receive Control */
-#define KS8695_DTSC (0x08) /* DMA Transmit Start Command */
-#define KS8695_DRSC (0x0c) /* DMA Receive Start Command */
-#define KS8695_TDLB (0x10) /* Transmit Descriptor List
- * Base Address
- */
-#define KS8695_RDLB (0x14) /* Receive Descriptor List
- * Base Address
- */
-#define KS8695_MAL (0x18) /* MAC Station Address Low */
-#define KS8695_MAH (0x1c) /* MAC Station Address High */
-#define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional
- * Station Address
- * (0..15) Low
- */
-#define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional
- * Station Address
- * (0..15) High
- */
-
-
-/* DMA Transmit Control Register */
-#define DTXC_TRST (1 << 31) /* Soft Reset */
-#define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */
-#define DTXC_TUCG (1 << 18) /* Transmit UDP
- * Checksum Generate
- */
-#define DTXC_TTCG (1 << 17) /* Transmit TCP
- * Checksum Generate
- */
-#define DTXC_TICG (1 << 16) /* Transmit IP
- * Checksum Generate
- */
-#define DTXC_TFCE (1 << 9) /* Transmit Flow
- * Control Enable
- */
-#define DTXC_TLB (1 << 8) /* Loopback mode */
-#define DTXC_TEP (1 << 2) /* Transmit Enable Padding */
-#define DTXC_TAC (1 << 1) /* Transmit Add CRC */
-#define DTXC_TE (1 << 0) /* TX Enable */
-
-/* DMA Receive Control Register */
-#define DRXC_RBS (0x3f << 24) /* Receive Burst Size */
-#define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */
-#define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */
-#define DRXC_RICG (1 << 16) /* Receive IP Checksum check */
-#define DRXC_RFCE (1 << 9) /* Receive Flow Control
- * Enable
- */
-#define DRXC_RB (1 << 6) /* Receive Broadcast */
-#define DRXC_RM (1 << 5) /* Receive Multicast */
-#define DRXC_RU (1 << 4) /* Receive Unicast */
-#define DRXC_RERR (1 << 3) /* Receive Error Frame */
-#define DRXC_RA (1 << 2) /* Receive All */
-#define DRXC_RE (1 << 0) /* RX Enable */
-
-/* Additional Station Address High */
-#define AAH_E (1 << 31) /* Address Enabled */
-
-#endif /* KS8695NET_H */
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b71e4ecbe469..6932e615d4b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1818,6 +1818,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
ocelot_ace_deinit();
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
break;
case OCELOT_ACL_ACTION_TRAP:
VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x0);
+ VCAP_ACT_SET(MASK_MODE, 0x1);
VCAP_ACT_SET(POLICE_ENA, 0x0);
VCAP_ACT_SET(POLICE_IDX, 0x0);
VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7aaddc09c185..b894bc0c9c16 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -13,12 +13,6 @@ struct ocelot_port_block {
struct ocelot_port *port;
};
-static u16 get_prio(u32 prio)
-{
- /* prio starts from 0x1000 while the ids starts from 0 */
- return prio >> 16;
-}
-
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
struct ocelot_ace_rule *rule)
{
@@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
}
finished_key_parsing:
- ocelot_rule->prio = get_prio(f->common.prio);
+ ocelot_rule->prio = f->common.prio;
ocelot_rule->id = f->cookie;
return ocelot_flower_parse_action(f, ocelot_rule);
}
@@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
struct ocelot_ace_rule rule;
int ret;
- rule.prio = get_prio(f->common.prio);
+ rule.prio = f->common.prio;
rule.port = port_block->port;
rule.id = f->cookie;
@@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
struct ocelot_ace_rule rule;
int ret;
- rule.prio = get_prio(f->common.prio);
+ rule.prio = f->common.prio;
rule.port = port_block->port;
rule.id = f->cookie;
ret = ocelot_ace_rule_stats_update(&rule);
@@ -316,15 +310,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port,
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
return -EOPNOTSUPP;
- block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
- port);
+ block_cb = flow_block_cb_lookup(f->block,
+ ocelot_setup_tc_block_cb_flower, port);
if (!block_cb) {
port_block = ocelot_port_block_create(port);
if (!port_block)
return -ENOMEM;
- block_cb = flow_block_cb_alloc(f->net,
- ocelot_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
port, port_block,
ocelot_tc_block_unbind);
if (IS_ERR(block_cb)) {
@@ -351,8 +344,8 @@ void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port,
{
struct flow_block_cb *block_cb;
- block_cb = flow_block_cb_lookup(f, ocelot_setup_tc_block_cb_flower,
- port);
+ block_cb = flow_block_cb_lookup(f->block,
+ ocelot_setup_tc_block_cb_flower, port);
if (!block_cb)
return;
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index 9e6464ffae5d..16a6db71ca5e 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -134,7 +134,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
- tc_setup_cb_t *cb;
+ flow_setup_cb_t *cb;
int err;
netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n",
@@ -156,7 +156,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(f->net, cb, port, port, NULL);
+ block_cb = flow_block_cb_alloc(cb, port, port, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@@ -169,7 +169,7 @@ static int ocelot_setup_tc_block(struct ocelot_port *port,
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
- block_cb = flow_block_cb_lookup(f, cb, port);
+ block_cb = flow_block_cb_lookup(f->block, cb, port);
if (!block_cb)
return -ENOENT;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index d8b7fba96d58..337b0cbfd153 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* setup (if available). */
status = myri10ge_request_irq(mgp);
if (status != 0)
- goto abort_with_firmware;
+ goto abort_with_slices;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d0a01e8f000a..b339125b2f09 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (!laddr) {
- printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
- dev_kfree_skb(skb);
- return NETDEV_TX_BUSY;
+ pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4054b70d7719..5afcb3c4c2ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
bool clr_gpr, lmem_step step)
{
s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
- bool first = true, last;
+ bool first = true, narrow_ld, last;
bool needs_inc = false;
swreg stack_off_reg;
u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
needs_inc = true;
}
+
+ narrow_ld = clr_gpr && size < 8;
+
if (lm3) {
+ unsigned int nop_cnt;
+
emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
- /* For size < 4 one slot will be filled by zeroing of upper. */
- wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+ /* For size < 4 one slot will be filled by zeroing of upper,
+ * but be careful, that zeroing could be eliminated by zext
+ * optimization.
+ */
+ nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+ wrp_nops(nfp_prog, nop_cnt);
}
- if (clr_gpr && size < 8)
+ if (narrow_ld)
wrp_zext(nfp_prog, meta, gpr);
while (size) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index d5bbe3d6048b..05981b54eaab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
type = cmsg_hdr->type;
switch (type) {
- case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
- nfp_flower_cmsg_portreify_rx(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
struct nfp_flower_priv *priv = app->priv;
struct sk_buff_head *skb_head;
- if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
- type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+ if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
skb_head = &priv->cmsg_skbs_high;
else
skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
+ } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+ /* Handle REIFY acks outside wq to prevent RTNL conflict. */
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ dev_consume_skb_any(skb);
} else {
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index faa8ba012a37..457bdc60f3ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1318,8 +1318,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
&nfp_block_cb_list))
return -EBUSY;
- block_cb = flow_block_cb_alloc(f->net,
- nfp_flower_setup_tc_block_cb,
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
repr, repr, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
@@ -1328,7 +1327,8 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
- block_cb = flow_block_cb_lookup(f, nfp_flower_setup_tc_block_cb,
+ block_cb = flow_block_cb_lookup(f->block,
+ nfp_flower_setup_tc_block_cb,
repr);
if (!block_cb)
return -ENOENT;
@@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
struct flow_block_cb *block_cb;
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
- !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
- nfp_flower_internal_port_can_offload(app, netdev)))
+ if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !nfp_flower_internal_port_can_offload(app, netdev)) ||
+ (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
case FLOW_BLOCK_BIND:
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (cb_priv &&
+ flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+ cb_priv,
+ &nfp_block_cb_list))
+ return -EBUSY;
+
cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
if (!cb_priv)
return -ENOMEM;
@@ -1424,8 +1432,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- block_cb = flow_block_cb_alloc(f->net,
- nfp_flower_setup_indr_block_cb,
+ block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
cb_priv, cb_priv,
nfp_flower_setup_indr_tc_release);
if (IS_ERR(block_cb)) {
@@ -1442,7 +1449,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
if (!cb_priv)
return -ENOENT;
- block_cb = flow_block_cb_lookup(f,
+ block_cb = flow_block_cb_lookup(f->block,
nfp_flower_setup_indr_block_cb,
cb_priv);
if (!block_cb)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 86e968cd5ffd..124a43dc136a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (flow->common.prio != (1 << 16)) {
+ if (flow->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a7a80f4b722a..f0ee982eb1b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
flow.daddr = *(__be32 *)n->primary_key;
- /* Only concerned with route changes for representors. */
- if (!nfp_netdev_is_nfp_repr(n->dev))
- return NOTIFY_DONE;
-
app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
app = app_priv->app;
+ if (!nfp_netdev_is_nfp_repr(n->dev) &&
+ !nfp_flower_internal_port_can_offload(app, n->dev))
+ return NOTIFY_DONE;
+
/* Only concerned with changes to routes already added to NFP. */
if (!nfp_tun_has_route(app, flow.daddr))
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d9cbe84ac6ad..1b840ee47339 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -444,12 +444,12 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
- data = nfp_pr_et(data, "rx_tls_decrypted");
+ data = nfp_pr_et(data, "rx_tls_decrypted_packets");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
- data = nfp_pr_et(data, "tx_tls_encrypted");
+ data = nfp_pr_et(data, "tx_tls_encrypted_packets");
data = nfp_pr_et(data, "tx_tls_ooo");
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index 70b1a03c0953..01229190132d 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_NI
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about National Instrument devices.
+ the questions about National Instruments devices.
If you say Y, you will be asked for your specific device in the
following questions.
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
deleted file mode 100644
index 325e26c549f8..000000000000
--- a/drivers/net/ethernet/nuvoton/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Nuvoton network device configuration
-#
-
-config NET_VENDOR_NUVOTON
- bool "Nuvoton devices"
- default y
- depends on ARM && ARCH_W90X900
- ---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Nuvoton cards. If you say Y, you will be asked
- for your specific card in the following questions.
-
-if NET_VENDOR_NUVOTON
-
-config W90P910_ETH
- tristate "Nuvoton w90p910 Ethernet support"
- depends on ARM && ARCH_W90X900
- select PHYLIB
- select MII
- ---help---
- Say Y here if you want to use built-in Ethernet ports
- on w90p910 processor.
-
-endif # NET_VENDOR_NUVOTON
diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile
deleted file mode 100644
index 66f6e728d54b..000000000000
--- a/drivers/net/ethernet/nuvoton/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Nuvoton network device drivers.
-#
-
-obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
deleted file mode 100644
index 3d73970b3a2e..000000000000
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2008-2009 Nuvoton technology corporation.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/mii.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-
-#define DRV_MODULE_NAME "w90p910-emc"
-#define DRV_MODULE_VERSION "0.1"
-
-/* Ethernet MAC Registers */
-#define REG_CAMCMR 0x00
-#define REG_CAMEN 0x04
-#define REG_CAMM_BASE 0x08
-#define REG_CAML_BASE 0x0c
-#define REG_TXDLSA 0x88
-#define REG_RXDLSA 0x8C
-#define REG_MCMDR 0x90
-#define REG_MIID 0x94
-#define REG_MIIDA 0x98
-#define REG_FFTCR 0x9C
-#define REG_TSDR 0xa0
-#define REG_RSDR 0xa4
-#define REG_DMARFC 0xa8
-#define REG_MIEN 0xac
-#define REG_MISTA 0xb0
-#define REG_CTXDSA 0xcc
-#define REG_CTXBSA 0xd0
-#define REG_CRXDSA 0xd4
-#define REG_CRXBSA 0xd8
-
-/* mac controller bit */
-#define MCMDR_RXON 0x01
-#define MCMDR_ACP (0x01 << 3)
-#define MCMDR_SPCRC (0x01 << 5)
-#define MCMDR_TXON (0x01 << 8)
-#define MCMDR_FDUP (0x01 << 18)
-#define MCMDR_ENMDC (0x01 << 19)
-#define MCMDR_OPMOD (0x01 << 20)
-#define SWR (0x01 << 24)
-
-/* cam command register */
-#define CAMCMR_AUP 0x01
-#define CAMCMR_AMP (0x01 << 1)
-#define CAMCMR_ABP (0x01 << 2)
-#define CAMCMR_CCAM (0x01 << 3)
-#define CAMCMR_ECMP (0x01 << 4)
-#define CAM0EN 0x01
-
-/* mac mii controller bit */
-#define MDCCR (0x0a << 20)
-#define PHYAD (0x01 << 8)
-#define PHYWR (0x01 << 16)
-#define PHYBUSY (0x01 << 17)
-#define PHYPRESP (0x01 << 18)
-#define CAM_ENTRY_SIZE 0x08
-
-/* rx and tx status */
-#define TXDS_TXCP (0x01 << 19)
-#define RXDS_CRCE (0x01 << 17)
-#define RXDS_PTLE (0x01 << 19)
-#define RXDS_RXGD (0x01 << 20)
-#define RXDS_ALIE (0x01 << 21)
-#define RXDS_RP (0x01 << 22)
-
-/* mac interrupt status */
-#define MISTA_EXDEF (0x01 << 19)
-#define MISTA_TXBERR (0x01 << 24)
-#define MISTA_TDU (0x01 << 23)
-#define MISTA_RDU (0x01 << 10)
-#define MISTA_RXBERR (0x01 << 11)
-
-#define ENSTART 0x01
-#define ENRXINTR 0x01
-#define ENRXGD (0x01 << 4)
-#define ENRXBERR (0x01 << 11)
-#define ENTXINTR (0x01 << 16)
-#define ENTXCP (0x01 << 18)
-#define ENTXABT (0x01 << 21)
-#define ENTXBERR (0x01 << 24)
-#define ENMDC (0x01 << 19)
-#define PHYBUSY (0x01 << 17)
-#define MDCCR_VAL 0xa00000
-
-/* rx and tx owner bit */
-#define RX_OWEN_DMA (0x01 << 31)
-#define RX_OWEN_CPU (~(0x03 << 30))
-#define TX_OWEN_DMA (0x01 << 31)
-#define TX_OWEN_CPU (~(0x01 << 31))
-
-/* tx frame desc controller bit */
-#define MACTXINTEN 0x04
-#define CRCMODE 0x02
-#define PADDINGMODE 0x01
-
-/* fftcr controller bit */
-#define TXTHD (0x03 << 8)
-#define BLENGTH (0x01 << 20)
-
-/* global setting for driver */
-#define RX_DESC_SIZE 50
-#define TX_DESC_SIZE 10
-#define MAX_RBUFF_SZ 0x600
-#define MAX_TBUFF_SZ 0x600
-#define TX_TIMEOUT (HZ/2)
-#define DELAY 1000
-#define CAM0 0x0
-
-static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
-
-struct w90p910_rxbd {
- unsigned int sl;
- unsigned int buffer;
- unsigned int reserved;
- unsigned int next;
-};
-
-struct w90p910_txbd {
- unsigned int mode;
- unsigned int buffer;
- unsigned int sl;
- unsigned int next;
-};
-
-struct recv_pdesc {
- struct w90p910_rxbd desclist[RX_DESC_SIZE];
- char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
-};
-
-struct tran_pdesc {
- struct w90p910_txbd desclist[TX_DESC_SIZE];
- char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
-};
-
-struct w90p910_ether {
- struct recv_pdesc *rdesc;
- struct tran_pdesc *tdesc;
- dma_addr_t rdesc_phys;
- dma_addr_t tdesc_phys;
- struct platform_device *pdev;
- struct resource *res;
- struct sk_buff *skb;
- struct clk *clk;
- struct clk *rmiiclk;
- struct mii_if_info mii;
- struct timer_list check_timer;
- void __iomem *reg;
- int rxirq;
- int txirq;
- unsigned int cur_tx;
- unsigned int cur_rx;
- unsigned int finish_tx;
- unsigned int rx_packets;
- unsigned int rx_bytes;
- unsigned int start_tx_ptr;
- unsigned int start_rx_ptr;
- unsigned int linkflag;
-};
-
-static void update_linkspeed_register(struct net_device *dev,
- unsigned int speed, unsigned int duplex)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = __raw_readl(ether->reg + REG_MCMDR);
-
- if (speed == SPEED_100) {
- /* 100 full/half duplex */
- if (duplex == DUPLEX_FULL) {
- val |= (MCMDR_OPMOD | MCMDR_FDUP);
- } else {
- val |= MCMDR_OPMOD;
- val &= ~MCMDR_FDUP;
- }
- } else {
- /* 10 full/half duplex */
- if (duplex == DUPLEX_FULL) {
- val |= MCMDR_FDUP;
- val &= ~MCMDR_OPMOD;
- } else {
- val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
- }
- }
-
- __raw_writel(val, ether->reg + REG_MCMDR);
-}
-
-static void update_linkspeed(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct platform_device *pdev;
- unsigned int bmsr, bmcr, lpa, speed, duplex;
-
- pdev = ether->pdev;
-
- if (!mii_link_ok(&ether->mii)) {
- ether->linkflag = 0x0;
- netif_carrier_off(dev);
- dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
- return;
- }
-
- if (ether->linkflag == 1)
- return;
-
- bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
- bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);
-
- if (bmcr & BMCR_ANENABLE) {
- if (!(bmsr & BMSR_ANEGCOMPLETE))
- return;
-
- lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);
-
- if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
- speed = SPEED_100;
- else
- speed = SPEED_10;
-
- if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
- duplex = DUPLEX_FULL;
- else
- duplex = DUPLEX_HALF;
-
- } else {
- speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- update_linkspeed_register(dev, speed, duplex);
-
- dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
- (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
- ether->linkflag = 0x01;
-
- netif_carrier_on(dev);
-}
-
-static void w90p910_check_link(struct timer_list *t)
-{
- struct w90p910_ether *ether = from_timer(ether, t, check_timer);
- struct net_device *dev = ether->mii.dev;
-
- update_linkspeed(dev);
- mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
-}
-
-static void w90p910_write_cam(struct net_device *dev,
- unsigned int x, unsigned char *pval)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int msw, lsw;
-
- msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
-
- lsw = (pval[4] << 24) | (pval[5] << 16);
-
- __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
- __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
-}
-
-static int w90p910_init_desc(struct net_device *dev)
-{
- struct w90p910_ether *ether;
- struct w90p910_txbd *tdesc;
- struct w90p910_rxbd *rdesc;
- struct platform_device *pdev;
- unsigned int i;
-
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- &ether->tdesc_phys, GFP_KERNEL);
- if (!ether->tdesc)
- return -ENOMEM;
-
- ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
- &ether->rdesc_phys, GFP_KERNEL);
- if (!ether->rdesc) {
- dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- ether->tdesc, ether->tdesc_phys);
- return -ENOMEM;
- }
-
- for (i = 0; i < TX_DESC_SIZE; i++) {
- unsigned int offset;
-
- tdesc = &(ether->tdesc->desclist[i]);
-
- if (i == TX_DESC_SIZE - 1)
- offset = offsetof(struct tran_pdesc, desclist[0]);
- else
- offset = offsetof(struct tran_pdesc, desclist[i + 1]);
-
- tdesc->next = ether->tdesc_phys + offset;
- tdesc->buffer = ether->tdesc_phys +
- offsetof(struct tran_pdesc, tran_buf[i]);
- tdesc->sl = 0;
- tdesc->mode = 0;
- }
-
- ether->start_tx_ptr = ether->tdesc_phys;
-
- for (i = 0; i < RX_DESC_SIZE; i++) {
- unsigned int offset;
-
- rdesc = &(ether->rdesc->desclist[i]);
-
- if (i == RX_DESC_SIZE - 1)
- offset = offsetof(struct recv_pdesc, desclist[0]);
- else
- offset = offsetof(struct recv_pdesc, desclist[i + 1]);
-
- rdesc->next = ether->rdesc_phys + offset;
- rdesc->sl = RX_OWEN_DMA;
- rdesc->buffer = ether->rdesc_phys +
- offsetof(struct recv_pdesc, recv_buf[i]);
- }
-
- ether->start_rx_ptr = ether->rdesc_phys;
-
- return 0;
-}
-
-static void w90p910_set_fifo_threshold(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = TXTHD | BLENGTH;
- __raw_writel(val, ether->reg + REG_FFTCR);
-}
-
-static void w90p910_return_default_idle(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = __raw_readl(ether->reg + REG_MCMDR);
- val |= SWR;
- __raw_writel(val, ether->reg + REG_MCMDR);
-}
-
-static void w90p910_trigger_rx(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- __raw_writel(ENSTART, ether->reg + REG_RSDR);
-}
-
-static void w90p910_trigger_tx(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- __raw_writel(ENSTART, ether->reg + REG_TSDR);
-}
-
-static void w90p910_enable_mac_interrupt(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
- val |= ENTXBERR | ENRXBERR | ENTXABT;
-
- __raw_writel(val, ether->reg + REG_MIEN);
-}
-
-static void w90p910_get_and_clear_int(struct net_device *dev,
- unsigned int *val)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- *val = __raw_readl(ether->reg + REG_MISTA);
- __raw_writel(*val, ether->reg + REG_MISTA);
-}
-
-static void w90p910_set_global_maccmd(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = __raw_readl(ether->reg + REG_MCMDR);
- val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
- __raw_writel(val, ether->reg + REG_MCMDR);
-}
-
-static void w90p910_enable_cam(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- w90p910_write_cam(dev, CAM0, dev->dev_addr);
-
- val = __raw_readl(ether->reg + REG_CAMEN);
- val |= CAM0EN;
- __raw_writel(val, ether->reg + REG_CAMEN);
-}
-
-static void w90p910_enable_cam_command(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
- __raw_writel(val, ether->reg + REG_CAMCMR);
-}
-
-static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = __raw_readl(ether->reg + REG_MCMDR);
-
- if (enable)
- val |= MCMDR_TXON;
- else
- val &= ~MCMDR_TXON;
-
- __raw_writel(val, ether->reg + REG_MCMDR);
-}
-
-static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- unsigned int val;
-
- val = __raw_readl(ether->reg + REG_MCMDR);
-
- if (enable)
- val |= MCMDR_RXON;
- else
- val &= ~MCMDR_RXON;
-
- __raw_writel(val, ether->reg + REG_MCMDR);
-}
-
-static void w90p910_set_curdest(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
- __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
-}
-
-static void w90p910_reset_mac(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- w90p910_enable_tx(dev, 0);
- w90p910_enable_rx(dev, 0);
- w90p910_set_fifo_threshold(dev);
- w90p910_return_default_idle(dev);
-
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
-
- w90p910_init_desc(dev);
-
- netif_trans_update(dev); /* prevent tx timeout */
- ether->cur_tx = 0x0;
- ether->finish_tx = 0x0;
- ether->cur_rx = 0x0;
-
- w90p910_set_curdest(dev);
- w90p910_enable_cam(dev);
- w90p910_enable_cam_command(dev);
- w90p910_enable_mac_interrupt(dev);
- w90p910_enable_tx(dev, 1);
- w90p910_enable_rx(dev, 1);
- w90p910_trigger_tx(dev);
- w90p910_trigger_rx(dev);
-
- netif_trans_update(dev); /* prevent tx timeout */
-
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-}
-
-static void w90p910_mdio_write(struct net_device *dev,
- int phy_id, int reg, int data)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct platform_device *pdev;
- unsigned int val, i;
-
- pdev = ether->pdev;
-
- __raw_writel(data, ether->reg + REG_MIID);
-
- val = (phy_id << 0x08) | reg;
- val |= PHYBUSY | PHYWR | MDCCR_VAL;
- __raw_writel(val, ether->reg + REG_MIIDA);
-
- for (i = 0; i < DELAY; i++) {
- if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
- break;
- }
-
- if (i == DELAY)
- dev_warn(&pdev->dev, "mdio write timed out\n");
-}
-
-static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct platform_device *pdev;
- unsigned int val, i, data;
-
- pdev = ether->pdev;
-
- val = (phy_id << 0x08) | reg;
- val |= PHYBUSY | MDCCR_VAL;
- __raw_writel(val, ether->reg + REG_MIIDA);
-
- for (i = 0; i < DELAY; i++) {
- if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
- break;
- }
-
- if (i == DELAY) {
- dev_warn(&pdev->dev, "mdio read timed out\n");
- data = 0xffff;
- } else {
- data = __raw_readl(ether->reg + REG_MIID);
- }
-
- return data;
-}
-
-static int w90p910_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *address = addr;
-
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
- w90p910_write_cam(dev, CAM0, dev->dev_addr);
-
- return 0;
-}
-
-static int w90p910_ether_close(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct platform_device *pdev;
-
- pdev = ether->pdev;
-
- dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
- ether->rdesc, ether->rdesc_phys);
- dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- ether->tdesc, ether->tdesc_phys);
-
- netif_stop_queue(dev);
-
- del_timer_sync(&ether->check_timer);
- clk_disable(ether->rmiiclk);
- clk_disable(ether->clk);
-
- free_irq(ether->txirq, dev);
- free_irq(ether->rxirq, dev);
-
- return 0;
-}
-
-static int w90p910_send_frame(struct net_device *dev,
- unsigned char *data, int length)
-{
- struct w90p910_ether *ether;
- struct w90p910_txbd *txbd;
- struct platform_device *pdev;
- unsigned char *buffer;
-
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- txbd = &ether->tdesc->desclist[ether->cur_tx];
- buffer = ether->tdesc->tran_buf[ether->cur_tx];
-
- if (length > 1514) {
- dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
- length = 1514;
- }
-
- txbd->sl = length & 0xFFFF;
-
- memcpy(buffer, data, length);
-
- txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
-
- w90p910_enable_tx(dev, 1);
-
- w90p910_trigger_tx(dev);
-
- if (++ether->cur_tx >= TX_DESC_SIZE)
- ether->cur_tx = 0;
-
- txbd = &ether->tdesc->desclist[ether->cur_tx];
-
- if (txbd->mode & TX_OWEN_DMA)
- netif_stop_queue(dev);
-
- return 0;
-}
-
-static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
- ether->skb = skb;
- dev_consume_skb_irq(skb);
- return 0;
- }
- return -EAGAIN;
-}
-
-static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
-{
- struct w90p910_ether *ether;
- struct w90p910_txbd *txbd;
- struct platform_device *pdev;
- struct net_device *dev;
- unsigned int cur_entry, entry, status;
-
- dev = dev_id;
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- w90p910_get_and_clear_int(dev, &status);
-
- cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
-
- entry = ether->tdesc_phys +
- offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
-
- while (entry != cur_entry) {
- txbd = &ether->tdesc->desclist[ether->finish_tx];
-
- if (++ether->finish_tx >= TX_DESC_SIZE)
- ether->finish_tx = 0;
-
- if (txbd->sl & TXDS_TXCP) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += txbd->sl & 0xFFFF;
- } else {
- dev->stats.tx_errors++;
- }
-
- txbd->sl = 0x0;
- txbd->mode = 0x0;
-
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- entry = ether->tdesc_phys +
- offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
- }
-
- if (status & MISTA_EXDEF) {
- dev_err(&pdev->dev, "emc defer exceed interrupt\n");
- } else if (status & MISTA_TXBERR) {
- dev_err(&pdev->dev, "emc bus error interrupt\n");
- w90p910_reset_mac(dev);
- } else if (status & MISTA_TDU) {
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
- }
-
- return IRQ_HANDLED;
-}
-
-static void netdev_rx(struct net_device *dev)
-{
- struct w90p910_ether *ether;
- struct w90p910_rxbd *rxbd;
- struct platform_device *pdev;
- struct sk_buff *skb;
- unsigned char *data;
- unsigned int length, status, val, entry;
-
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- rxbd = &ether->rdesc->desclist[ether->cur_rx];
-
- do {
- val = __raw_readl(ether->reg + REG_CRXDSA);
-
- entry = ether->rdesc_phys +
- offsetof(struct recv_pdesc, desclist[ether->cur_rx]);
-
- if (val == entry)
- break;
-
- status = rxbd->sl;
- length = status & 0xFFFF;
-
- if (status & RXDS_RXGD) {
- data = ether->rdesc->recv_buf[ether->cur_rx];
- skb = netdev_alloc_skb(dev, length + 2);
- if (!skb) {
- dev->stats.rx_dropped++;
- return;
- }
-
- skb_reserve(skb, 2);
- skb_put(skb, length);
- skb_copy_to_linear_data(skb, data, length);
- skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += length;
- netif_rx(skb);
- } else {
- dev->stats.rx_errors++;
-
- if (status & RXDS_RP) {
- dev_err(&pdev->dev, "rx runt err\n");
- dev->stats.rx_length_errors++;
- } else if (status & RXDS_CRCE) {
- dev_err(&pdev->dev, "rx crc err\n");
- dev->stats.rx_crc_errors++;
- } else if (status & RXDS_ALIE) {
- dev_err(&pdev->dev, "rx alignment err\n");
- dev->stats.rx_frame_errors++;
- } else if (status & RXDS_PTLE) {
- dev_err(&pdev->dev, "rx longer err\n");
- dev->stats.rx_over_errors++;
- }
- }
-
- rxbd->sl = RX_OWEN_DMA;
- rxbd->reserved = 0x0;
-
- if (++ether->cur_rx >= RX_DESC_SIZE)
- ether->cur_rx = 0;
-
- rxbd = &ether->rdesc->desclist[ether->cur_rx];
-
- } while (1);
-}
-
-static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev;
- struct w90p910_ether *ether;
- struct platform_device *pdev;
- unsigned int status;
-
- dev = dev_id;
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- w90p910_get_and_clear_int(dev, &status);
-
- if (status & MISTA_RDU) {
- netdev_rx(dev);
- w90p910_trigger_rx(dev);
-
- return IRQ_HANDLED;
- } else if (status & MISTA_RXBERR) {
- dev_err(&pdev->dev, "emc rx bus error\n");
- w90p910_reset_mac(dev);
- }
-
- netdev_rx(dev);
- return IRQ_HANDLED;
-}
-
-static int w90p910_ether_open(struct net_device *dev)
-{
- struct w90p910_ether *ether;
- struct platform_device *pdev;
-
- ether = netdev_priv(dev);
- pdev = ether->pdev;
-
- w90p910_reset_mac(dev);
- w90p910_set_fifo_threshold(dev);
- w90p910_set_curdest(dev);
- w90p910_enable_cam(dev);
- w90p910_enable_cam_command(dev);
- w90p910_enable_mac_interrupt(dev);
- w90p910_set_global_maccmd(dev);
- w90p910_enable_rx(dev, 1);
-
- clk_enable(ether->rmiiclk);
- clk_enable(ether->clk);
-
- ether->rx_packets = 0x0;
- ether->rx_bytes = 0x0;
-
- if (request_irq(ether->txirq, w90p910_tx_interrupt,
- 0x0, pdev->name, dev)) {
- dev_err(&pdev->dev, "register irq tx failed\n");
- return -EAGAIN;
- }
-
- if (request_irq(ether->rxirq, w90p910_rx_interrupt,
- 0x0, pdev->name, dev)) {
- dev_err(&pdev->dev, "register irq rx failed\n");
- free_irq(ether->txirq, dev);
- return -EAGAIN;
- }
-
- mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
- netif_start_queue(dev);
- w90p910_trigger_rx(dev);
-
- dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
-
- return 0;
-}
-
-static void w90p910_ether_set_multicast_list(struct net_device *dev)
-{
- struct w90p910_ether *ether;
- unsigned int rx_mode;
-
- ether = netdev_priv(dev);
-
- if (dev->flags & IFF_PROMISC)
- rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
- rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else
- rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
- __raw_writel(rx_mode, ether->reg + REG_CAMCMR);
-}
-
-static int w90p910_ether_ioctl(struct net_device *dev,
- struct ifreq *ifr, int cmd)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
-}
-
-static void w90p910_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-}
-
-static int w90p910_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- mii_ethtool_get_link_ksettings(&ether->mii, cmd);
-
- return 0;
-}
-
-static int w90p910_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- return mii_ethtool_set_link_ksettings(&ether->mii, cmd);
-}
-
-static int w90p910_nway_reset(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- return mii_nway_restart(&ether->mii);
-}
-
-static u32 w90p910_get_link(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- return mii_link_ok(&ether->mii);
-}
-
-static const struct ethtool_ops w90p910_ether_ethtool_ops = {
- .get_drvinfo = w90p910_get_drvinfo,
- .nway_reset = w90p910_nway_reset,
- .get_link = w90p910_get_link,
- .get_link_ksettings = w90p910_get_link_ksettings,
- .set_link_ksettings = w90p910_set_link_ksettings,
-};
-
-static const struct net_device_ops w90p910_ether_netdev_ops = {
- .ndo_open = w90p910_ether_open,
- .ndo_stop = w90p910_ether_close,
- .ndo_start_xmit = w90p910_ether_start_xmit,
- .ndo_set_rx_mode = w90p910_ether_set_multicast_list,
- .ndo_set_mac_address = w90p910_set_mac_address,
- .ndo_do_ioctl = w90p910_ether_ioctl,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void get_mac_address(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
- struct platform_device *pdev;
- char addr[ETH_ALEN];
-
- pdev = ether->pdev;
-
- addr[0] = 0x00;
- addr[1] = 0x02;
- addr[2] = 0xac;
- addr[3] = 0x55;
- addr[4] = 0x88;
- addr[5] = 0xa8;
-
- if (is_valid_ether_addr(addr))
- memcpy(dev->dev_addr, &addr, ETH_ALEN);
- else
- dev_err(&pdev->dev, "invalid mac address\n");
-}
-
-static int w90p910_ether_setup(struct net_device *dev)
-{
- struct w90p910_ether *ether = netdev_priv(dev);
-
- dev->netdev_ops = &w90p910_ether_netdev_ops;
- dev->ethtool_ops = &w90p910_ether_ethtool_ops;
-
- dev->tx_queue_len = 16;
- dev->dma = 0x0;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- get_mac_address(dev);
-
- ether->cur_tx = 0x0;
- ether->cur_rx = 0x0;
- ether->finish_tx = 0x0;
- ether->linkflag = 0x0;
- ether->mii.phy_id = 0x01;
- ether->mii.phy_id_mask = 0x1f;
- ether->mii.reg_num_mask = 0x1f;
- ether->mii.dev = dev;
- ether->mii.mdio_read = w90p910_mdio_read;
- ether->mii.mdio_write = w90p910_mdio_write;
-
- timer_setup(&ether->check_timer, w90p910_check_link, 0);
-
- return 0;
-}
-
-static int w90p910_ether_probe(struct platform_device *pdev)
-{
- struct w90p910_ether *ether;
- struct net_device *dev;
- int error;
-
- dev = alloc_etherdev(sizeof(struct w90p910_ether));
- if (!dev)
- return -ENOMEM;
-
- ether = netdev_priv(dev);
-
- ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ether->res == NULL) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- error = -ENXIO;
- goto failed_free;
- }
-
- if (!request_mem_region(ether->res->start,
- resource_size(ether->res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto failed_free;
- }
-
- ether->reg = ioremap(ether->res->start, resource_size(ether->res));
- if (ether->reg == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto failed_free_mem;
- }
-
- ether->txirq = platform_get_irq(pdev, 0);
- if (ether->txirq < 0) {
- dev_err(&pdev->dev, "failed to get ether tx irq\n");
- error = -ENXIO;
- goto failed_free_io;
- }
-
- ether->rxirq = platform_get_irq(pdev, 1);
- if (ether->rxirq < 0) {
- dev_err(&pdev->dev, "failed to get ether rx irq\n");
- error = -ENXIO;
- goto failed_free_io;
- }
-
- platform_set_drvdata(pdev, dev);
-
- ether->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ether->clk)) {
- dev_err(&pdev->dev, "failed to get ether clock\n");
- error = PTR_ERR(ether->clk);
- goto failed_free_io;
- }
-
- ether->rmiiclk = clk_get(&pdev->dev, "RMII");
- if (IS_ERR(ether->rmiiclk)) {
- dev_err(&pdev->dev, "failed to get ether clock\n");
- error = PTR_ERR(ether->rmiiclk);
- goto failed_put_clk;
- }
-
- ether->pdev = pdev;
-
- w90p910_ether_setup(dev);
-
- error = register_netdev(dev);
- if (error != 0) {
- dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
- error = -ENODEV;
- goto failed_put_rmiiclk;
- }
-
- return 0;
-failed_put_rmiiclk:
- clk_put(ether->rmiiclk);
-failed_put_clk:
- clk_put(ether->clk);
-failed_free_io:
- iounmap(ether->reg);
-failed_free_mem:
- release_mem_region(ether->res->start, resource_size(ether->res));
-failed_free:
- free_netdev(dev);
- return error;
-}
-
-static int w90p910_ether_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct w90p910_ether *ether = netdev_priv(dev);
-
- unregister_netdev(dev);
-
- clk_put(ether->rmiiclk);
- clk_put(ether->clk);
-
- iounmap(ether->reg);
- release_mem_region(ether->res->start, resource_size(ether->res));
-
- del_timer_sync(&ether->check_timer);
-
- free_netdev(dev);
- return 0;
-}
-
-static struct platform_driver w90p910_ether_driver = {
- .probe = w90p910_ether_probe,
- .remove = w90p910_ether_remove,
- .driver = {
- .name = "nuc900-emc",
- },
-};
-
-module_platform_driver(w90p910_ether_driver);
-
-MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 MAC driver!");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:nuc900-emc");
-
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b327b29f5d57..a6b4bfae4684 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -713,6 +713,21 @@ struct nv_skb_map {
struct nv_skb_map *next_tx_ctx;
};
+struct nv_txrx_stats {
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+ __this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+ __this_cpu_add(np->txrx_stats->member, (count))
+
/*
* SMP locking:
* All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {
/* RX software stats */
struct u64_stats_sync swstats_rx_syncp;
- u64 stat_rx_packets;
- u64 stat_rx_bytes; /* not always available in HW */
- u64 stat_rx_missed_errors;
- u64 stat_rx_dropped;
+ struct nv_txrx_stats __percpu *txrx_stats;
/* media detection workaround.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@ struct fe_priv {
/* TX software stats */
struct u64_stats_sync swstats_tx_syncp;
- u64 stat_tx_packets; /* not always available in HW */
- u64 stat_tx_bytes;
- u64 stat_tx_dropped;
/* msi/msi-x fields */
u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
}
}
+static void nv_get_stats(int cpu, struct fe_priv *np,
+ struct rtnl_link_stats64 *storage)
+{
+ struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+ unsigned int syncp_start;
+ u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+ u64 tx_packets, tx_bytes, tx_dropped;
+
+ do {
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+ rx_packets = src->stat_rx_packets;
+ rx_bytes = src->stat_rx_bytes;
+ rx_dropped = src->stat_rx_dropped;
+ rx_missed_errors = src->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+ storage->rx_packets += rx_packets;
+ storage->rx_bytes += rx_bytes;
+ storage->rx_dropped += rx_dropped;
+ storage->rx_missed_errors += rx_missed_errors;
+
+ do {
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+ tx_packets = src->stat_tx_packets;
+ tx_bytes = src->stat_tx_bytes;
+ tx_dropped = src->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+ storage->tx_packets += tx_packets;
+ storage->tx_bytes += tx_bytes;
+ storage->tx_dropped += tx_dropped;
+}
+
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
__releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- unsigned int syncp_start;
+ int cpu;
/*
* Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
*/
/* software stats */
- do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
- storage->rx_packets = np->stat_rx_packets;
- storage->rx_bytes = np->stat_rx_bytes;
- storage->rx_dropped = np->stat_rx_dropped;
- storage->rx_missed_errors = np->stat_rx_missed_errors;
- } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
- do {
- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
- storage->tx_packets = np->stat_tx_packets;
- storage->tx_bytes = np->stat_tx_bytes;
- storage->tx_dropped = np->stat_tx_dropped;
- } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+ for_each_online_cpu(cpu)
+ nv_get_stats(cpu, np, storage);
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_dropped++;
+ nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_dropped++;
+ nv_txrx_stats_inc(stat_rx_dropped);
u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
}
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
}
if (nv_release_txskb(np, &np->tx_skb[i])) {
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
}
np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_dropped++;
+ nv_txrx_stats_inc(stat_tx_dropped);
u64_stats_update_end(&np->swstats_tx_syncp);
return NETDEV_TX_OK;
}
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_legacybackoff_reseed(dev);
}
} else {
+ unsigned int len;
+
u64_stats_update_begin(&np->swstats_tx_syncp);
- np->stat_tx_packets++;
- np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ nv_txrx_stats_inc(stat_tx_packets);
+ len = np->get_tx_ctx->skb->len;
+ nv_txrx_stats_add(stat_tx_bytes, len);
u64_stats_update_end(&np->swstats_tx_syncp);
}
@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
}
}
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ nv_txrx_stats_inc(stat_rx_missed_errors);
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
+}
+
static int nv_rx_process(struct net_device *dev, int limit)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME) {
- u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_missed_errors++;
- u64_stats_update_end(&np->swstats_rx_syncp);
- }
+ rx_missing_handler(flags, np);
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_packets++;
- np->stat_rx_bytes += len;
+ nv_txrx_stats_inc(stat_rx_packets);
+ nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
- np->stat_rx_packets++;
- np->stat_rx_bytes += len;
+ nv_txrx_stats_inc(stat_rx_packets);
+ nv_txrx_stats_add(stat_rx_bytes, len);
u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
SET_NETDEV_DEV(dev, &pci_dev->dev);
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
+ np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+ if (!np->txrx_stats) {
+ pr_err("np->txrx_stats, alloc memory error.\n");
+ err = -ENOMEM;
+ goto out_alloc_percpu;
+ }
timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
out_disable:
pci_disable_device(pci_dev);
out_free:
+ free_percpu(np->txrx_stats);
+out_alloc_percpu:
free_netdev(dev);
out:
return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
static void nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ free_percpu(np->txrx_stats);
unregister_netdev(dev);
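The forcedeth hunks above move the software counters into per-CPU storage and read them back with the u64_stats_sync fetch/retry loop. A minimal sketch of that general pattern follows, with hypothetical struct and function names (the driver's own nv_txrx_stats_inc()/nv_txrx_stats_add() helpers are defined earlier in the patch and not reproduced here):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* allocated with alloc_percpu(struct demo_stats), freed with free_percpu() */
struct demo_stats {
        u64 rx_packets;
        struct u64_stats_sync syncp;
};

/* writer side: only touches this CPU's copy, no lock needed */
static void demo_count_rx(struct demo_stats __percpu *stats)
{
        struct demo_stats *s = this_cpu_ptr(stats);

        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        u64_stats_update_end(&s->syncp);
}

/* reader side: sum every CPU, retrying if a writer was active */
static u64 demo_read_rx(struct demo_stats __percpu *stats)
{
        u64 total = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct demo_stats *s = per_cpu_ptr(stats, cpu);
                unsigned int start;
                u64 packets;

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        packets = s->rx_packets;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                total += packets;
        }
        return total;
}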
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
index 261f107e2be0..418afb84c84b 100644
--- a/drivers/net/ethernet/nxp/Kconfig
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config LPC_ENET
tristate "NXP ethernet MAC on LPC devices"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
help
Say Y or M here if you want to use the NXP ethernet MAC included on
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index f7e11f1b0426..141571e2ec11 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -14,14 +14,12 @@
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-
-#include <mach/board.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
+#include <linux/soc/nxp/lpc32xx-misc.h>
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
@@ -1237,16 +1235,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
dma_addr_t dma_handle;
struct resource *res;
int irq, ret;
- u32 tmp;
/* Setup network interface for RMII or MII mode */
- tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
- tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
- if (lpc_phy_interface_mode(dev) == PHY_INTERFACE_MODE_MII)
- tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
- else
- tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
- __raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);
+ lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev));
/* Get platform resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1311,19 +1302,18 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Get size of DMA buffers/descriptors region */
pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
- pldat->dma_buff_base_v = 0;
if (use_iram_for_net(dev)) {
- dma_handle = LPC32XX_IRAM_BASE;
- if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
- pldat->dma_buff_base_v =
- io_p2v(LPC32XX_IRAM_BASE);
- else
+ if (pldat->dma_buff_size >
+ lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) {
+ pldat->dma_buff_base_v = NULL;
+ pldat->dma_buff_size = 0;
netdev_err(ndev,
"IRAM not big enough for net buffers, using SDRAM instead.\n");
+ }
}
- if (pldat->dma_buff_base_v == 0) {
+ if (pldat->dma_buff_base_v == NULL) {
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto err_out_free_irq;
@@ -1344,13 +1334,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->dma_buff_base_p = dma_handle;
netdev_dbg(ndev, "IO address space :%pR\n", res);
- netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
+ netdev_dbg(ndev, "IO address size :%zd\n",
+ (size_t)resource_size(res));
netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
- netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
- netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
- pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
+ netdev_dbg(ndev, "DMA buffer P address :%pad\n",
+ &pldat->dma_buff_base_p);
netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
pldat->dma_buff_base_v);
@@ -1397,8 +1388,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto err_out_unregister_netdev;
- netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
- res->start, ndev->irq);
+ netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
+ (unsigned long)res->start, ndev->irq);
device_init_wakeup(dev, 1);
device_set_wakeup_enable(dev, 0);
@@ -1409,7 +1400,7 @@ err_out_unregister_netdev:
unregister_netdev(ndev);
err_out_dma_unmap:
if (!use_iram_for_net(dev) ||
- pldat->dma_buff_size > lpc32xx_return_iram_size())
+ pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
dma_free_coherent(dev, pldat->dma_buff_size,
pldat->dma_buff_base_v,
pldat->dma_buff_base_p);
@@ -1436,7 +1427,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
unregister_netdev(ndev);
if (!use_iram_for_net(&pldat->pdev->dev) ||
- pldat->dma_buff_size > lpc32xx_return_iram_size())
+ pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
pldat->dma_buff_base_v,
pldat->dma_buff_base_p);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8161e308e64b..ead3750b4489 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Packet engine device configuration
+# Packet Engines device configuration
#
config NET_VENDOR_PACKET_ENGINES
- bool "Packet Engine devices"
+ bool "Packet Engines devices"
default y
depends on PCI
---help---
@@ -12,7 +12,7 @@ config NET_VENDOR_PACKET_ENGINES
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about packet engine devices. If you say Y, you will
+ the questions about Packet Engines devices. If you say Y, you will
be asked for your specific card in the following questions.
if NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile
index 1553c9cfc254..cf054b796d11 100644
--- a/drivers/net/ethernet/packetengines/Makefile
+++ b/drivers/net/ethernet/packetengines/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Makefile for the Packet Engine network device drivers.
+# Makefile for the Packet Engines network device drivers.
#
obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 4e8118a08654..9f5113639eaf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strncpy(bit_name,
+ strlcpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 829dd60ab937..1efff7f68ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ goto err4;
}
}
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err4:
+ qed_ll2_dealloc_if(cdev);
err3:
qed_hw_stop(cdev);
err2:
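The qed_slowpath_start() fix is the usual label-per-stage unwind: a failure after the LL2 interface was acquired now jumps to a new err4 label that releases it before falling through to the older labels. Illustration only, with hypothetical helpers:

static int demo_start(struct demo_dev *dev)
{
        int rc;

        rc = demo_hw_init(dev);        /* hypothetical stage 1 */
        if (rc)
                return rc;

        rc = demo_ll2_alloc(dev);      /* hypothetical stage 2 */
        if (rc)
                goto err_hw;

        rc = demo_send_version(dev);   /* hypothetical stage 3 */
        if (rc)
                goto err_ll2;

        return 0;

err_ll2:
        demo_ll2_dealloc(dev);         /* undo stage 2 */
err_hw:
        demo_hw_stop(dev);             /* undo stage 1 */
        return rc;
}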
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index f900fde448db..158ac0738911 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
/* Vendor specific information */
dev->vendor_id = cdev->vendor_id;
dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
+ dev->hw_ver = cdev->chip_rev;
dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
@@ -530,9 +530,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
/* Check atomic operations support in PCI configuration space. */
- pci_read_config_dword(cdev->pdev,
- cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
- &pci_status_control);
+ pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
+ &pci_status_control);
if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
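qed_rdma_init_devinfo() now uses the pcie_capability_*() accessors instead of computing pdev->pcie_cap + PCI_EXP_DEVCTL2 by hand; the accessor locates the PCIe capability itself and returns an error when it is absent. A small usage sketch (the wrapper function is hypothetical):

#include <linux/pci.h>

static bool demo_ltr_enabled(struct pci_dev *pdev)
{
        u32 devctl2 = 0;
        int rc;

        /* reads PCI_EXP_DEVCTL2 from the PCIe capability, if present */
        rc = pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &devctl2);
        if (rc)
                return false;

        return !!(devctl2 & PCI_EXP_DEVCTL2_LTR_EN);
}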
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 60189923737a..21d38167f961 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -206,9 +206,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
- ul_header->udp_ip4_ind = 1;
+ ul_header->udp_ind = 1;
else
- ul_header->udp_ip4_ind = 0;
+ ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -239,6 +239,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
@@ -246,7 +247,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
- ul_header->udp_ip4_ind = 0;
+
+ if (ip6h->nexthdr == IPPROTO_UDP)
+ ul_header->udp_ind = 1;
+ else
+ ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -419,7 +424,7 @@ sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
- ul_header->udp_ip4_ind = 0;
+ ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0637c6752a78..bae0074ab9aa 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3251,9 +3251,9 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
ret = phy_read_paged(tp->phydev, 0x0a46, 0x13);
if (ret & BIT(8))
- phy_modify_paged(tp->phydev, 0x0c41, 0x12, 0, BIT(1));
+ phy_modify_paged(tp->phydev, 0x0c41, 0x15, 0, BIT(1));
else
- phy_modify_paged(tp->phydev, 0x0c41, 0x12, BIT(1), 0);
+ phy_modify_paged(tp->phydev, 0x0c41, 0x15, BIT(1), 0);
/* Enable PHY auto speed down */
phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
skb = napi_alloc_skb(&tp->napi, pkt_size);
if (skb)
skb_copy_to_linear_data(skb, data, pkt_size);
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
return skb;
}
@@ -6136,10 +6137,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (ret)
return ret;
- if (tp->supports_gmii)
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- else
+ if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
@@ -6589,13 +6587,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
+ /* fall through */
+ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
flags = PCI_IRQ_LEGACY;
- } else {
+ break;
+ default:
flags = PCI_IRQ_ALL_TYPES;
+ break;
}
return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
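In rtl8169_try_rx_copy() the added dma_sync_single_for_device() hands ownership of the receive buffer back to the device once the CPU has copied the frame out, which matters on non-coherent platforms where the earlier sync-for-CPU left the buffer CPU-owned. A generic copy-break sketch, assuming the buffer stays mapped DMA_FROM_DEVICE and using hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/*
 * Copy a small received frame into a fresh skb and return the DMA
 * buffer to the device so it can be reused for the next packet.
 */
static struct sk_buff *demo_rx_copybreak(struct napi_struct *napi,
                                         struct device *dev, void *data,
                                         dma_addr_t addr, unsigned int len)
{
        struct sk_buff *skb;

        dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

        skb = napi_alloc_skb(napi, len);
        if (skb)
                skb_copy_to_linear_data(skb, data, len);

        /* give the buffer back to the device before it is re-posted */
        dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);

        return skb;
}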
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ef8f08931fe8..6cacd5e893ac 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
*
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
kfree(ts_skb);
if (tag == tfa_tag) {
skb_tstamp_tx(skb, &shhwtstamps);
+ dev_consume_skb_any(skb);
break;
+ } else {
+ dev_kfree_skb_any(skb);
}
}
ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
goto unmap;
}
- ts_skb->skb = skb;
+ ts_skb->skb = skb_get(skb);
ts_skb->tag = priv->ts_skb_tag++;
priv->ts_skb_tag &= 0x3ff;
list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
/* Clear the timestamp list */
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
kfree(ts_skb);
}
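The ravb timestamp fix pins each queued skb with skb_get() so the list owns its own reference: the matching entry is released with dev_consume_skb_any(), non-matching entries with dev_kfree_skb_any(), and anything left at ravb_close() with kfree_skb(). A generic sketch of that ownership scheme, using hypothetical types:

#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_ts_skb {
        struct list_head list;
        struct sk_buff *skb;
};

/* queue: take our own reference so TX completion may free its copy */
static int demo_ts_queue(struct list_head *head, struct sk_buff *skb)
{
        struct demo_ts_skb *ts;

        ts = kmalloc(sizeof(*ts), GFP_ATOMIC);
        if (!ts)
                return -ENOMEM;

        ts->skb = skb_get(skb);
        list_add_tail(&ts->list, head);
        return 0;
}

/* teardown: every queued entry still owns one reference, drop it */
static void demo_ts_flush(struct list_head *head)
{
        struct demo_ts_skb *ts, *tmp;

        list_for_each_entry_safe(ts, tmp, head, list) {
                list_del(&ts->list);
                kfree_skb(ts->skb);
                kfree(ts);
        }
}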
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 079f459c73a5..2c5d3f5b84dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2208,10 +2208,12 @@ static int rocker_router_fib_event(struct notifier_block *nb,
if (fen_info->fi->fib_nh_is_v6) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ kfree(fib_work);
return notifier_from_errno(-EINVAL);
}
if (fen_info->fi->nh) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ kfree(fib_work);
return notifier_from_errno(-EINVAL);
}
}
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 027938017579..e92a178a76df 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_SAMSUNG
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
+ the kernel: saying N will just cause the configurator to skip all
the questions about Samsung chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 7a5e6c5abb57..276c7cae7cee 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
printk(KERN_ERR "Sgiseeq: Cannot register net device, "
"aborting.\n");
err = -ENODEV;
- goto err_out_free_page;
+ goto err_out_free_attrs;
}
printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
return 0;
-err_out_free_page:
- free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
err_out_free_dev:
free_netdev(dev);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index bd14803545de..8d88e4083456 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -712,6 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev)
/* Found an external PHY */
break;
}
+ /* Else, fall through */
default:
/* Internal media only */
SMC_GET_PHY_ID1(lp, 1, id1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 4644b2aeeba1..e2e469c37a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
int ret;
struct device *dev = &bsp_priv->pdev->dev;
- if (!ldo) {
- dev_err(dev, "no regulator found\n");
- return -1;
- }
+ if (!ldo)
+ return 0;
if (enable) {
ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4083019c547a..f97a4096f8fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
int ret;
u32 reg, val;
- regmap_field_read(gmac->regmap_field, &val);
+ ret = regmap_field_read(gmac->regmap_field, &val);
+ if (ret) {
+ dev_err(priv->device, "Fail to read from regmap field.\n");
+ return ret;
+ }
+
reg = gmac->variant->default_syscon_value;
if (reg != val)
dev_warn(priv->device,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 01c2e2d83e76..fc9954e4a772 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 7f86dffb264d..3174b701aa90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -44,11 +44,13 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
#define XGMAC_FILTER_HMC BIT(2)
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_MAX_HASH_TABLE 8
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -99,11 +101,12 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
-#define XGMAC_ADDR0_HIGH 0x00000300
+#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
+#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
#define XGMAC_DCS GENMASK(19, 16)
#define XGMAC_DCS_SHIFT 16
-#define XGMAC_ADDR0_LOW 0x00000304
+#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 0a32c96a7854..85c68b7ee8c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -4,6 +4,8 @@
* stmmac XGMAC support.
*/
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
#include "stmmac.h"
#include "dwxgmac2.h"
@@ -106,6 +108,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 value, reg;
reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +173,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
u32 value, reg;
reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_QxMDMACH(queue);
@@ -278,10 +284,10 @@ static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
u32 value;
value = (addr[5] << 8) | addr[4];
- writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH);
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(value, ioaddr + XGMAC_ADDR0_LOW);
+ writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}
static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
@@ -291,8 +297,8 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
u32 hi_addr, lo_addr;
/* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH);
- lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW);
+ hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
+ lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
/* Extract the MAC address from the high and low words */
addr[0] = lo_addr & 0xff;
@@ -303,19 +309,82 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
+ int mcbitslog2)
+{
+ int numhashregs, regs;
+
+ switch (mcbitslog2) {
+ case 6:
+ numhashregs = 2;
+ break;
+ case 7:
+ numhashregs = 4;
+ break;
+ case 8:
+ numhashregs = 8;
+ break;
+ default:
+ return;
+ }
+
+ for (regs = 0; regs < numhashregs; regs++)
+ writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
+}
+
static void dwxgmac2_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 value = XGMAC_FILTER_RA;
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ u32 mc_filter[8];
+ int i;
+
+ value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
+ value |= XGMAC_FILTER_HPF;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
if (dev->flags & IFF_PROMISC) {
- value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF;
+ value |= XGMAC_FILTER_PR;
+ value |= XGMAC_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
value |= XGMAC_FILTER_PM;
- writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0));
- writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1));
+
+ for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ value |= XGMAC_FILTER_HMC;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
+ (32 - mcbitslog2));
+ mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
+ }
+ }
+
+ dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
+ value |= XGMAC_FILTER_PR;
+ } else {
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwxgmac2_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+
+ for ( ; reg < XGMAC_ADDR_MAX; reg++) {
+ writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
+ writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
+ }
}
writel(value, ioaddr + XGMAC_PACKET_FILTER);
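dwxgmac2_set_filter() above hashes each multicast address with CRC-32, bit-reverses the result and keeps the top mcbitslog2 bits as the filter-bin index; bit (index & 0x1f) of hash register (index >> 5) is then set. A standalone sketch of just that index computation:

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/if_ether.h>

/*
 * Hash a 6-byte MAC address into one of 2^mcbitslog2 filter bins,
 * the same way the patched set_filter routine expects it.
 */
static unsigned int demo_mc_hash_bin(const u8 *addr, int mcbitslog2)
{
        u32 crc = bitrev32(~crc32_le(~0, addr, ETH_ALEN));

        return crc >> (32 - mcbitslog2);
}

/* caller then does: mc_filter[bin >> 5] |= 1 << (bin & 0x1f); */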
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c7c9e5f162e6..fd54c7c87485 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -814,20 +814,15 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mac_supported, 10baseT_Full);
phylink_set(mac_supported, 100baseT_Half);
phylink_set(mac_supported, 100baseT_Full);
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
phylink_set(mac_supported, Autoneg);
phylink_set(mac_supported, Pause);
phylink_set(mac_supported, Asym_Pause);
phylink_set_port_modes(mac_supported);
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4 ||
- priv->plat->has_xgmac) {
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
- }
-
/* Cut down 1G if asked to */
if ((max_speed > 0) && (max_speed < 1000)) {
phylink_set(mask, 1000baseT_Full);
@@ -1295,6 +1290,8 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
+ stmmac_clear_rx_descriptors(priv, queue);
+
for (i = 0; i < DMA_RX_SIZE; i++) {
struct dma_desc *p;
@@ -1312,8 +1309,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
rx_q->cur_rx = 0;
rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
- stmmac_clear_rx_descriptors(priv, queue);
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
@@ -1555,9 +1550,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
}
- rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE,
- sizeof(*rx_q->buf_pool),
- GFP_KERNEL);
+ rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
if (!rx_q->buf_pool)
goto err_dma;
@@ -1608,15 +1602,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*tx_q->tx_skbuff_dma),
- GFP_KERNEL);
+ tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
goto err_dma;
- tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
+ tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
if (!tx_q->tx_skbuff)
goto err_dma;
@@ -3277,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- int dirty = stmmac_rx_dirty(priv, queue);
+ int len, dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
struct dma_desc *p;
@@ -3297,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
}
buf->addr = page_pool_get_dma_addr(buf->page);
+
+ /* Sync whole allocation to device. This will invalidate old
+ * data.
+ */
+ dma_sync_single_for_device(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
+
stmmac_set_desc_addr(priv, p, buf->addr);
stmmac_refill_desc3(priv, rx_q, p);
@@ -3431,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
skb_copy_to_linear_data(skb, page_address(buf->page),
frame_len);
skb_put(skb, frame_len);
- dma_sync_single_for_device(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
if (netif_msg_pktdata(priv)) {
netdev_dbg(priv->dev, "frame received (%dbytes)",
@@ -4319,8 +4320,9 @@ int stmmac_dvr_probe(struct device *device,
NAPI_POLL_WEIGHT);
}
if (queue < priv->plat->tx_queues_to_use) {
- netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 73fc2524372e..154daf4d1072 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -370,6 +370,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
return ERR_PTR(-ENOMEM);
*mac = of_get_mac_address(np);
+ if (IS_ERR(*mac)) {
+ if (PTR_ERR(*mac) == -EPROBE_DEFER)
+ return ERR_CAST(*mac);
+
+ *mac = NULL;
+ }
+
plat->interface = of_get_phy_mode(np);
/* Some wrapper drivers still rely on phy_node. Let's save it while
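of_get_mac_address() can return ERR_PTR(-EPROBE_DEFER) when the MAC address lives in an nvmem cell that has not probed yet; the stmmac_platform hunk propagates that case and quietly falls back (a random address is generated later) for any other error. Roughly, as a hedged sketch of the caller side with a hypothetical wrapper:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/err.h>

static int demo_get_dt_mac(struct device_node *np, const void **mac)
{
        *mac = of_get_mac_address(np);
        if (IS_ERR(*mac)) {
                if (PTR_ERR(*mac) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;   /* try again later */
                *mac = NULL;                    /* no usable address in DT */
        }
        return 0;
}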
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18af9813..6c305b6ecad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
entry = &priv->tc_entries[i];
if (!entry->in_use && !first && free)
first = entry;
- if (entry->handle == loc && !free)
+ if ((entry->handle == loc) && !free && !entry->is_frag)
dup = entry;
}
@@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv,
struct stmmac_tc_entry *entry, *frag = NULL;
struct tc_u32_sel *sel = cls->knode.sel;
u32 off, data, mask, real_off, rem;
- u32 prio = cls->common.prio;
+ u32 prio = cls->common.prio << 16;
int ret;
/* Only 1 match per entry */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 32a89744972d..a46b8b2e44e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!cpsw)
return -ENOMEM;
+ platform_set_drvdata(pdev, cpsw);
cpsw->dev = dev;
mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_cpts;
}
- platform_set_drvdata(pdev, cpsw);
priv = netdev_priv(ndev);
priv->cpsw = cpsw;
priv->ndev = ndev;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5b196ebfed49..0f346761a2b2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -788,6 +788,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
+ /* Fall through */
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 8479a440527b..12466a72cefc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
pci_unmap_single(lp->pci_dev,
lp->rx_skbs[cur_bd].skb_dma,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622..c62f474b6d08 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
static void tsi108_stat_carry(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
u32 carry1, carry2;
- spin_lock_irq(&data->misclock);
+ spin_lock_irqsave(&data->misclock, flags);
carry1 = TSI_READ(TSI108_STAT_CARRY1);
carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
TSI108_STAT_TXPAUSEDROP_CARRY,
&data->tx_pause_drop);
- spin_unlock_irq(&data->misclock);
+ spin_unlock_irqrestore(&data->misclock, flags);
}
/* Read a stat counter atomically with respect to carries.
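tsi108_stat_carry() switches to spin_lock_irqsave() because it can be reached from contexts where interrupts are already disabled; the _irqsave/_irqrestore pair preserves the caller's interrupt state instead of unconditionally re-enabling IRQs on unlock. Pattern sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_update_counters(void)
{
        unsigned long flags;

        /* safe regardless of whether IRQs were enabled on entry */
        spin_lock_irqsave(&demo_lock, flags);
        /* ... read and fold hardware carry registers ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}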
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 2f354ba029a6..cd0a8f46e7c6 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -13,7 +13,7 @@ config NET_VENDOR_XSCALE
Note that the answer to this question does not directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about XSacle IXP devices. If you say Y, you will be
+ the questions about XScale IXP devices. If you say Y, you will be
asked for your specific card in the following questions.
if NET_VENDOR_XSCALE
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 331c16d30d5d..23281aeeb222 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)
sp->dev->stats.rx_bytes += count;
- if ((skb = dev_alloc_skb(count)) == NULL)
+ if ((skb = dev_alloc_skb(count + 1)) == NULL)
goto out_mem;
- ptr = skb_put(skb, count);
+ ptr = skb_put(skb, count + 1);
*ptr++ = cmd; /* KISS command */
memcpy(ptr, sp->cooked_buf + 1, count);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index daab2c07d891..9303aeb2595f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -500,8 +500,9 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
}
break;
}
+ /* fall through */
- default: /* fall through */
+ default:
if (bc->hdlctx.calibrate <= 0)
return 0;
i = min_t(int, cnt, bc->hdlctx.calibrate);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index afdcc5664ea6..e8fce6d715ef 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net,
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
- rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@@ -1240,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+ struct netvsc_device *nvdev;
struct netvsc_vf_pcpu_stats vf_tot;
int i;
+ rcu_read_lock();
+
+ nvdev = rcu_dereference(ndev_ctx->nvdev);
if (!nvdev)
- return;
+ goto out;
netdev_stats_to_stats64(t, &net->stats);
@@ -1284,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
t->rx_packets += packets;
t->multicast += multicast;
}
+out:
+ rcu_read_unlock();
}
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
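netvsc_get_stats64() may run without the RTNL held, so the patch wraps the nvdev lookup and the per-channel walk in an explicit rcu_read_lock()/rcu_read_unlock() pair and uses plain rcu_dereference(). The shape of that read-side section, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>

struct demo_lower;

struct demo_ctx {
        struct demo_lower __rcu *lower;
};

static void demo_get_stats64(struct demo_ctx *ctx,
                             struct rtnl_link_stats64 *t)
{
        struct demo_lower *lower;

        rcu_read_lock();
        lower = rcu_dereference(ctx->lower);
        if (!lower)
                goto out;

        /* ... accumulate per-channel counters from 'lower' into 't' ... */
out:
        rcu_read_unlock();
}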
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
err = hwsim_subscribe_all_others(phy);
if (err < 0) {
mutex_unlock(&hwsim_phys_lock);
- goto err_reg;
+ goto err_subscribe;
}
}
list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
return idx;
+err_subscribe:
+ ieee802154_unregister_hw(phy->hw);
err_reg:
kfree(pib);
err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
return 0;
platform_drv:
- genl_unregister_family(&hwsim_genl_family);
-platform_dev:
platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+ genl_unregister_family(&hwsim_genl_family);
return rc;
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index c5c417a3c0ce..bcc40a236624 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
debugfs_remove_recursive(nsim_dev_port->ddir);
}
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+ return &init_net;
+}
+
static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
}
static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB_RULES, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
}
static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
}
static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
{
- struct nsim_dev *nsim_dev = priv;
+ struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB_RULES, false);
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
static int nsim_dev_resources_register(struct devlink *devlink)
{
- struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct devlink_resource_size_params params = {
.size_max = (u64)-1,
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
+ struct net *net = nsim_devlink_net(devlink);
int err;
u64 n;
@@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV4_FIB,
NSIM_RESOURCE_IPV4, &params);
@@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV4, &params);
@@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6, &params);
@@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(nsim_dev->fib_data,
- NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_IPV6, &params);
@@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB,
nsim_dev_ipv4_fib_resource_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB_RULES,
nsim_dev_ipv4_fib_rules_res_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB,
nsim_dev_ipv6_fib_resource_occ_get,
- nsim_dev);
+ net);
devlink_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES,
nsim_dev_ipv6_fib_rules_res_occ_get,
- nsim_dev);
+ net);
out:
return err;
}
@@ -199,11 +196,11 @@ out:
static int nsim_dev_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
- struct nsim_dev *nsim_dev = devlink_priv(devlink);
enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
};
+ struct net *net = nsim_devlink_net(devlink);
int i;
for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
@@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink,
err = devlink_resource_size_get(devlink, res_ids[i], &val);
if (!err) {
- err = nsim_fib_set_max(nsim_dev->fib_data,
- res_ids[i], val, extack);
+ err = nsim_fib_set_max(net, res_ids[i], val, extack);
if (err)
return err;
}
@@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
- nsim_dev->fib_data = nsim_fib_create();
- if (IS_ERR(nsim_dev->fib_data)) {
- err = PTR_ERR(nsim_dev->fib_data);
- goto err_devlink_free;
- }
-
err = nsim_dev_resources_register(devlink);
if (err)
- goto err_fib_destroy;
+ goto err_devlink_free;
err = devlink_register(devlink, &nsim_bus_dev->dev);
if (err)
@@ -315,8 +305,6 @@ err_dl_unregister:
devlink_unregister(devlink);
err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
-err_fib_destroy:
- nsim_fib_destroy(nsim_dev->fib_data);
err_devlink_free:
devlink_free(devlink);
return ERR_PTR(err);
@@ -330,7 +318,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
nsim_dev_debugfs_exit(nsim_dev);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
- nsim_fib_destroy(nsim_dev->fib_data);
mutex_destroy(&nsim_dev->port_list_lock);
devlink_free(devlink);
}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 8c57ba747772..f61d094746c0 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,6 +18,7 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
+#include <net/netns/generic.h>
#include "netdevsim.h"
@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
- struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, bool max)
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack)
{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
int err = 0;
@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct nsim_fib_data *data,
- struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
- fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(data, info,
- event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(data, info,
- event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
- fib_nb);
+ struct nsim_fib_data *data;
+ struct net *net;
+
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ data = net_generic(net, nsim_fib_net_id);
+
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+ }
+ rcu_read_unlock();
}
-struct nsim_fib_data *nsim_fib_create(void)
-{
- struct nsim_fib_data *data;
- int err;
+static struct notifier_block nsim_fib_nb = {
+ .notifier_call = nsim_fib_event_nb,
+};
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+ struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
data->ipv4.fib.max = (u64)-1;
data->ipv4.rules.max = (u64)-1;
@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
data->ipv6.fib.max = (u64)-1;
data->ipv6.rules.max = (u64)-1;
- data->fib_nb.notifier_call = nsim_fib_event_nb;
- err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
- if (err) {
- pr_err("Failed to register fib notifier\n");
- goto err_out;
- }
+ return 0;
+}
- return data;
+static struct pernet_operations nsim_fib_net_ops = {
+ .init = nsim_fib_netns_init,
+ .id = &nsim_fib_net_id,
+ .size = sizeof(struct nsim_fib_data),
+};
-err_out:
- kfree(data);
- return ERR_PTR(err);
+void nsim_fib_exit(void)
+{
+ unregister_pernet_subsys(&nsim_fib_net_ops);
+ unregister_fib_notifier(&nsim_fib_nb);
}
-void nsim_fib_destroy(struct nsim_fib_data *data)
+int nsim_fib_init(void)
{
- unregister_fib_notifier(&data->fib_nb);
- kfree(data);
+ int err;
+
+ err = register_pernet_subsys(&nsim_fib_net_ops);
+ if (err < 0) {
+ pr_err("Failed to register pernet subsystem\n");
+ goto err_out;
+ }
+
+ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+ if (err < 0) {
+ pr_err("Failed to register fib notifier\n");
+ goto err_out;
+ }
+
+err_out:
+ return err;
}
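The netdevsim rework keeps the fib accounting per network namespace: register_pernet_subsys() with .id and .size makes the core allocate and zero one private blob per netns, and net_generic(net, id) fetches it wherever a struct net is at hand. A compact, self-contained sketch of that mechanism (data and names are hypothetical):

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_net_data {
        u64 entries;
};

static unsigned int demo_net_id;

static int __net_init demo_netns_init(struct net *net)
{
        struct demo_net_data *data = net_generic(net, demo_net_id);

        data->entries = 0;      /* storage is preallocated and zeroed */
        return 0;
}

static struct pernet_operations demo_net_ops = {
        .init = demo_netns_init,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net_data),
};

static int __init demo_init(void)
{
        return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");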
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0740940f41b1..55f57f76d01b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -357,12 +357,18 @@ static int __init nsim_module_init(void)
if (err)
goto err_dev_exit;
- err = rtnl_link_register(&nsim_link_ops);
+ err = nsim_fib_init();
if (err)
goto err_bus_exit;
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_fib_exit;
+
return 0;
+err_fib_exit:
+ nsim_fib_exit();
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
@@ -373,6 +379,7 @@ err_dev_exit:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
+ nsim_fib_exit();
nsim_bus_exit();
nsim_dev_exit();
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 79c05af2a7c0..9404637d34b7 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
unsigned int port_index);
-struct nsim_fib_data *nsim_fib_create(void);
-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
- enum nsim_resource_id res_id, u64 val,
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 222ccd9ecfce..6ad8b1c63c34 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev)
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
* value before reset.
- *
- * So let's first disable the RX and TX delays in PHY and enable
- * them based on the mode selected (this also takes care of RGMII
- * mode where we expect delays to be disabled)
*/
-
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- /* If RGMII_ID or RGMII_RXID are specified enable RX delay,
- * otherwise keep it disabled
- */
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
ret = at803x_enable_rx_delay(phydev);
- if (ret < 0)
- return ret;
- }
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- /* If RGMII_ID or RGMII_TXID are specified enable TX delay,
- * otherwise keep it disabled
- */
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
ret = at803x_enable_tx_delay(phydev);
- }
+ else
+ ret = at803x_disable_tx_delay(phydev);
return ret;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 3ffe46df249e..7c5265fd2b94 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -216,8 +216,10 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
if (IS_ERR(gpiod)) {
if (PTR_ERR(gpiod) == -EPROBE_DEFER)
return gpiod;
- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
- fixed_link_node);
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+ fixed_link_node);
gpiod = NULL;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd04fe762056..ce940871331e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -262,11 +262,6 @@ static struct class mdio_bus_class = {
};
#if IS_ENABLED(CONFIG_OF_MDIO)
-/* Helper function for of_mdio_find_bus */
-static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
-{
- return dev->of_node == mdio_bus_np;
-}
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
* @mdio_bus_np: Pointer to the mii_bus.
@@ -287,9 +282,7 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
if (!mdio_bus_np)
return NULL;
- d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np,
- of_mdio_bus_match);
-
+ d = class_find_device_by_of_node(&mdio_bus_class, mdio_bus_np);
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 28676af97b42..645d354ffb48 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -2226,8 +2226,8 @@ static int vsc8514_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2251,8 +2251,8 @@ static int vsc8574_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2281,8 +2281,8 @@ static int vsc8584_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2311,8 +2311,8 @@ static int vsc85xx_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index b9d4145781ca..7935593debb1 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
int val, devad;
bool link = true;
+ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (val < 0)
+ return val;
+
+ /* Autoneg is being started, therefore disregard current
+ * link status and report link as down.
+ */
+ if (val & MDIO_AN_CTRL1_RESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+ }
+
while (mmd_mask && link) {
devad = __ffs(mmd_mask);
mmd_mask &= ~BIT(devad);
@@ -509,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_status);
+/**
+ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we force a configuration.
+ */
+int genphy_c45_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
+
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef7aa738e0dc..6b0f89369b46 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev)
* allowed to call genphy_config_aneg()
*/
if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
- return -EOPNOTSUPP;
+ return genphy_c45_config_aneg(phydev);
return genphy_config_aneg(phydev);
}
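
Note on the two hunks above: a PHY that only implements clause 45 (bit 0 of devices_in_package clear, i.e. no clause-22 register set) previously made phy_config_aneg() fail with -EOPNOTSUPP; it now falls back to the new genphy_c45_config_aneg(). A minimal stand-alone sketch of that decision, using simplified stand-in types rather than the kernel's struct phy_device:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; the field names mirror
 * the diff, the types do not. */
struct phy_model {
        bool is_c45;
        unsigned int devices_in_package;        /* c45_ids.devices_in_package */
};

static int c45_config_aneg(struct phy_model *p) { (void)p; puts("C45 aneg"); return 0; }
static int c22_config_aneg(struct phy_model *p) { (void)p; puts("C22 aneg"); return 0; }

/* Mirrors the updated phy_config_aneg() fallback. */
static int config_aneg(struct phy_model *p)
{
        if (p->is_c45 && !(p->devices_in_package & 1))  /* no clause-22 regs */
                return c45_config_aneg(p);              /* was: return -EOPNOTSUPP */
        return c22_config_aneg(p);
}

int main(void)
{
        struct phy_model c45_only = { .is_c45 = true, .devices_in_package = 0x80 };
        struct phy_model legacy = { .is_c45 = false, .devices_in_package = 0x01 };

        config_aneg(&c45_only); /* prints "C45 aneg" */
        config_aneg(&legacy);   /* prints "C22 aneg" */
        return 0;
}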
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6b5cb87f3866..27ebc2c6c2d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
*/
int genphy_update_link(struct phy_device *phydev)
{
- int status;
+ int status = 0, bmcr;
+
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ /* Autoneg is being started, therefore disregard BMSR value and
+ * report link as down.
+ */
+ if (bmcr & BMCR_ANRESTART)
+ goto done;
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
@@ -1774,6 +1784,12 @@ done:
phydev->link = status & BMSR_LSTATUS ? 1 : 0;
phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
+ /* Consider the case that autoneg was started and "aneg complete"
+ * bit has been reset, but "link up" bit not yet.
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = 0;
+
return 0;
}
EXPORT_SYMBOL(genphy_update_link);
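
The two genphy_update_link() hunks above encode two rules: ignore BMSR while an autoneg restart is still pending in BMCR, and report the link as down while autoneg is enabled but not yet complete. A small stand-alone model of those decisions; plain ints stand in for the MDIO reads, and the bit values are the standard MII ones from include/uapi/linux/mii.h:

#include <stdbool.h>
#include <stdio.h>

#define BMCR_ANRESTART    0x0200
#define BMSR_LSTATUS      0x0004
#define BMSR_ANEGCOMPLETE 0x0020

/* Models the updated genphy_update_link() on already-read register values. */
static bool link_up(int bmcr, int bmsr, bool autoneg_enabled)
{
        /* Autoneg restart still pending: report link down regardless of BMSR. */
        if (bmcr & BMCR_ANRESTART)
                return false;

        bool link = bmsr & BMSR_LSTATUS;
        bool aneg_done = bmsr & BMSR_ANEGCOMPLETE;

        /* "aneg complete" already cleared but "link up" not yet: treat as down. */
        if (autoneg_enabled && !aneg_done)
                return false;

        return link;
}

int main(void)
{
        printf("%d\n", link_up(BMCR_ANRESTART, BMSR_LSTATUS, true));                /* 0 */
        printf("%d\n", link_up(0, BMSR_LSTATUS, true));                             /* 0 */
        printf("%d\n", link_up(0, BMSR_LSTATUS | BMSR_ANEGCOMPLETE, true));         /* 1 */
        return 0;
}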
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index b86a4b2116f8..59a94e07e7c5 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
if (!phy->last_triggered)
led_trigger_event(&phy->led_link_trigger->trigger,
LED_FULL);
+ else
+ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
led_trigger_event(&plt->trigger, LED_FULL);
phy->last_triggered = plt;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5d0af041b8f9..a5a57ca94c1a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -216,6 +216,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
+ phylink_set(pl->supported, Pause);
+ phylink_set(pl->supported, Asym_Pause);
if (s) {
__set_bit(s->bit, pl->supported);
} else {
@@ -374,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
* Local device Link partner
* Pause AsymDir Pause AsymDir Result
* 1 X 1 X TX+RX
- * 0 1 1 1 RX
- * 1 1 0 1 TX
+ * 0 1 1 1 TX
+ * 1 1 0 1 RX
*/
static void phylink_resolve_flow(struct phylink *pl,
struct phylink_link_state *state)
@@ -396,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
else if (pause & MLO_PAUSE_ASYM)
new_pause = state->pause & MLO_PAUSE_SYM ?
- MLO_PAUSE_RX : MLO_PAUSE_TX;
+ MLO_PAUSE_TX : MLO_PAUSE_RX;
} else {
new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
}
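
The corrected table and the swapped MLO_PAUSE_TX/MLO_PAUSE_RX line above follow the 802.3 flow-control resolution: a station that advertises AsymDir only transmits pause frames when the partner advertises Pause, and honours received pause frames in the opposite case. A stand-alone model of that resolution; the flag values here are illustrative, not the kernel's MLO_PAUSE_* constants:

#include <stdio.h>

#define PAUSE_SYM  0x1  /* Pause bit advertised */
#define PAUSE_ASYM 0x2  /* AsymDir bit advertised */
#define PAUSE_TX   0x4  /* transmit pause frames */
#define PAUSE_RX   0x8  /* honour received pause frames */

/* Models the corrected phylink_resolve_flow(): resolve local vs. partner
 * advertisement into the local TX/RX pause enables. */
static int resolve_flow(int local, int partner)
{
        if ((local & PAUSE_SYM) && (partner & PAUSE_SYM))
                return PAUSE_TX | PAUSE_RX;
        if (local & PAUSE_ASYM)
                return (partner & PAUSE_SYM) ? PAUSE_TX : PAUSE_RX;
        return 0;
}

int main(void)
{
        /* Local Pause=0 AsymDir=1, partner Pause=1 AsymDir=1 -> local sends pause (TX). */
        printf("%s\n", resolve_flow(PAUSE_ASYM, PAUSE_SYM | PAUSE_ASYM) == PAUSE_TX ? "TX" : "?");
        /* Local Pause=1 AsymDir=1, partner Pause=0 AsymDir=1 -> local honours pause (RX). */
        printf("%s\n", resolve_flow(PAUSE_SYM | PAUSE_ASYM, PAUSE_ASYM) == PAUSE_RX ? "RX" : "?");
        return 0;
}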
@@ -990,10 +992,10 @@ void phylink_start(struct phylink *pl)
}
if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
mod_timer(&pl->link_poll, jiffies + HZ);
- if (pl->sfp_bus)
- sfp_upstream_start(pl->sfp_bus);
if (pl->phydev)
phy_start(pl->phydev);
+ if (pl->sfp_bus)
+ sfp_upstream_start(pl->sfp_bus);
}
EXPORT_SYMBOL_GPL(phylink_start);
@@ -1010,10 +1012,10 @@ void phylink_stop(struct phylink *pl)
{
ASSERT_RTNL();
- if (pl->phydev)
- phy_stop(pl->phydev);
if (pl->sfp_bus)
sfp_upstream_stop(pl->sfp_bus);
+ if (pl->phydev)
+ phy_stop(pl->phydev);
del_timer_sync(&pl->link_poll);
if (pl->link_irq) {
free_irq(pl->link_irq, pl);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 2d816aadea79..e36c04c26866 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -517,7 +517,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
static void sfp_hwmon_to_rx_power(long *value)
{
- *value = DIV_ROUND_CLOSEST(*value, 100);
+ *value = DIV_ROUND_CLOSEST(*value, 10);
}
static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1d902ecb4aa8..a44dd3c8af63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1115,6 +1115,9 @@ static const struct proto_ops pppoe_ops = {
.recvmsg = pppoe_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppoe_proto = {
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 5ef422a43d70..08364f10a43f 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
@@ -98,6 +99,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(pppox_ioctl);
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == PPPOEIOCSFWD32)
+ cmd = PPPOEIOCSFWD;
+
+ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
static int pppox_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index a8e52c8e4128..734de7de03f7 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -623,6 +623,9 @@ static const struct proto_ops pptp_ops = {
.recvmsg = sock_no_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppox_pptp_proto = {
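
The pppoe/pppox/pptp hunks exist because PPPOEIOCSFWD encodes sizeof(size_t) in its ioctl number, so 32-bit userspace on a 64-bit kernel issues a different command value and needs pppox_compat_ioctl() to map it back (and to translate the pointer argument with compat_ptr()). A small sketch that just prints the two command values; it assumes PPPOEIOCSFWD is built from _IOW(0xB1, 0, size_t) as in include/uapi/linux/if_pppox.h, with uint32_t standing in for compat_size_t:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define PPPOEIOCSFWD_NATIVE _IOW(0xB1, 0, size_t)    /* 64-bit size_t on a 64-bit build */
#define PPPOEIOCSFWD_COMPAT _IOW(0xB1, 0, uint32_t)  /* what a 32-bit process encodes */

int main(void)
{
        printf("native: 0x%lx\n", (unsigned long)PPPOEIOCSFWD_NATIVE);
        printf("compat: 0x%lx\n", (unsigned long)PPPOEIOCSFWD_COMPAT);
        return 0;
}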
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index abfa0da9bbd2..e8089def5a46 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
team->dev->hard_header_len = max_hard_header_len;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d443597bd04..aab0be40d443 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi, bool napi_frags)
+ bool skip_filter, bool napi, bool napi_frags,
+ bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
* initialized tfile; otherwise we risk using half-initialized
* object.
*/
- rcu_assign_pointer(tfile->tun, tun);
+ if (publish_tun)
+ rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
tun_set_real_num_queues(tun);
@@ -1599,7 +1601,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
return true;
}
-static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
+static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
+ struct page_frag *alloc_frag, char *buf,
int buflen, int len, int pad)
{
struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1612,7 @@ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
skb_reserve(skb, pad);
skb_put(skb, len);
+ skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
@@ -1686,7 +1690,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
*/
if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
+ pad);
}
*skb_xdp = 0;
@@ -1723,7 +1728,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
rcu_read_unlock();
local_bh_enable();
- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
err_xdp:
put_page(alloc_frag->page);
@@ -2727,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS);
+ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
if (err < 0)
return err;
@@ -2826,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS);
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
+ /* free_netdev() won't check refcnt, to avoid race
+ * with dev_put() we need to publish tun after registration.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
@@ -2975,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
if (ret < 0)
goto unlock;
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
- tun->flags & IFF_NAPI_FRAGS);
+ tun->flags & IFF_NAPI_FRAGS, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
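
The tun hunks above delay publishing tfile->tun until register_netdevice() has succeeded, so no reader can pick up (and take references on) a device that free_netdev() would then tear down without checking the refcount. A userspace analogue of that publish-after-init ordering, using a C11 release store where the kernel uses rcu_assign_pointer(); all names here are invented for the sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
        int registered;         /* set only once "registration" has succeeded */
};

/* Readers discover the device through this pointer (analogue of tfile->tun). */
static _Atomic(struct dev *) published;

static int register_dev(struct dev *d)
{
        d->registered = 1;      /* pretend registration succeeded */
        return 0;
}

int main(void)
{
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
                return 1;

        /* Publish only after full initialisation/registration, with release
         * ordering, so a reader never sees a half-set-up object; this is the
         * role rcu_assign_pointer() plays in the fix. */
        if (register_dev(d) == 0)
                atomic_store_explicit(&published, d, memory_order_release);

        struct dev *seen = atomic_load_explicit(&published, memory_order_acquire);
        printf("registered: %d\n", seen ? seen->registered : -1);
        free(d);
        return 0;
}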
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8458e88c18e9..32f53de5b1fe 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
- if (rndis && header.usb_cdc_acm_descriptor &&
+ /* Communication class functions with bmCapabilities are not
+ * RNDIS. But some Wireless class RNDIS functions use
+ * bmCapabilities for their own purpose. The failsafe is
+ * therefore applied only to Communication class RNDIS
+ * functions. The rndis test is redundant, but a cheap
+ * optimization.
+ */
+ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+ header.usb_cdc_acm_descriptor &&
header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 5519248a791e..32b08b18e120 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
}
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto err;
}
/* enable ethernet mode (?) */
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d62b6706a537..fc5895f85cee 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
usb_buf, 24);
if (status != 0)
- return status;
+ goto out;
memcpy(usb_buf, init_msg_2, 12);
status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
usb_buf, 28);
if (status != 0)
- return status;
+ goto out;
memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
kfree(usb_buf);
return status;
}
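
The kalmia hunk above is a plain single-exit cleanup fix: early failures of the init packets used to return directly and leak usb_buf, and now fall through to a label that always frees it. A minimal model of the idiom; send_init_packet() is a stub standing in for kalmia_send_init_packet():

#include <stdlib.h>
#include <string.h>

static int send_init_packet(unsigned char *buf, size_t len)
{
        (void)buf; (void)len;
        return 0;       /* 0 on success, negative on error */
}

static int init_and_get_addr(unsigned char *addr_out)
{
        int status;
        unsigned char *buf = calloc(1, 24);

        if (!buf)
                return -12;     /* -ENOMEM */

        status = send_init_packet(buf, 24);
        if (status != 0)
                goto out;       /* single exit: buf is still freed */

        status = send_init_packet(buf, 28);
        if (status != 0)
                goto out;

        memcpy(addr_out, buf + 10, 6);  /* ETH_ALEN */
out:
        free(buf);
        return status;
}

int main(void)
{
        unsigned char addr[6];

        return init_and_get_addr(addr) ? 1 : 0;
}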
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3d92ea6fcc02..f033fee225a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out3;
+ goto out4;
}
usb_set_intfdata(intf, dev);
@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_phy_init(dev);
if (ret < 0)
- goto out4;
+ goto out5;
return 0;
-out4:
+out5:
unregister_netdev(netdev);
+out4:
+ usb_free_urb(dev->urb_intr);
out3:
lan78xx_unbind(dev, intf);
out2:
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6d25dea5ad4b..f7d117d80cfb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
- __u8 tmp;
+ __u8 tmp = 0;
__le16 retdatai;
int ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 69e0a2acfcb0..b6dc5d714b5e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1295,6 +1295,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 39e0768d734d..04137ac373b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -50,7 +50,7 @@
#define PLA_TEREDO_WAKE_BASE 0xc0c4
#define PLA_MAR 0xcd00
#define PLA_BACKUP 0xd000
-#define PAL_BDC_CR 0xd1a0
+#define PLA_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
#define PLA_SUSPEND_FLAG 0xd38a
@@ -274,7 +274,7 @@
#define TEREDO_RS_EVENT_MASK 0x00fe
#define OOB_TEREDO_EN 0x0001
-/* PAL_BDC_CR */
+/* PLA_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
/* PLA_EFUSE_CMD */
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
+ if (ret < 0)
+ memset(data, 0xff, size);
+ else
+ memcpy(data, tmp, size);
- memcpy(data, tmp, size);
kfree(tmp);
return ret;
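
The get_registers() hunk above stops copying the bounce buffer into the caller's buffer when the USB control read fails, and poisons the output with 0xff instead, so register parsers see an all-ones pattern rather than stale or uninitialised data. A small stand-alone model of that error path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the USB control read; returns bytes read or a negative error. */
static int control_read(uint8_t *tmp, size_t size, int fail)
{
        if (fail)
                return -5;      /* -EIO */
        memset(tmp, 0xab, size);
        return (int)size;
}

/* Models the fixed get_registers(): never hand back stale bounce-buffer
 * contents; poison the output with 0xff on failure instead. */
static int get_regs(void *data, size_t size, int fail)
{
        uint8_t tmp[64];
        int ret = control_read(tmp, size, fail);

        if (ret < 0)
                memset(data, 0xff, size);
        else
                memcpy(data, tmp, size);
        return ret;
}

int main(void)
{
        uint8_t out[4];

        get_regs(out, sizeof(out), 1);
        printf("on error: %02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0;
}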
@@ -3191,9 +3194,9 @@ static void r8152b_enter_oob(struct r8152 *tp)
rtl_rx_vlan_en(tp, true);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
ocp_data |= ALDPS_PROXY_MODE;
- ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
@@ -3577,9 +3580,9 @@ static void r8153_enter_oob(struct r8152 *tp)
rtl_rx_vlan_en(tp, true);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
ocp_data |= ALDPS_PROXY_MODE;
- ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
@@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
- if (!test_bit(RTL8152_UNPLUG, &tp->flags))
- napi_disable(&tp->napi);
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf,
return 0;
out1:
- netif_napi_del(&tp->napi);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
@@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
if (tp) {
rtl_set_unplug(tp);
- netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f3de0ac8b0b..ba98e0971b84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
}
- if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+ if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 54edf8956a25..6e84328bdd40 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct ipv6hdr *iph;
struct net *net = dev_net(skb->dev);
- struct flowi6 fl6 = {
- /* needed to match OIF rule */
- .flowi6_oif = dev->ifindex,
- .flowi6_iif = LOOPBACK_IFINDEX,
- .daddr = iph->daddr,
- .saddr = iph->saddr,
- .flowlabel = ip6_flowinfo(iph),
- .flowi6_mark = skb->mark,
- .flowi6_proto = iph->nexthdr,
- .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
- };
+ struct flowi6 fl6;
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ goto err;
+
+ iph = ipv6_hdr(skb);
+
+ memset(&fl6, 0, sizeof(fl6));
+ /* needed to match OIF rule */
+ fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_iif = LOOPBACK_IFINDEX;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = ip6_flowinfo(iph);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = iph->nexthdr;
+ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
goto err;
@@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
- struct iphdr *ip4h = ip_hdr(skb);
+ struct iphdr *ip4h;
int ret = NET_XMIT_DROP;
- struct flowi4 fl4 = {
- /* needed to match OIF rule */
- .flowi4_oif = vrf_dev->ifindex,
- .flowi4_iif = LOOPBACK_IFINDEX,
- .flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
- .flowi4_proto = ip4h->protocol,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
- };
+ struct flowi4 fl4;
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+
+ memset(&fl4, 0, sizeof(fl4));
+ /* needed to match OIF rule */
+ fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_iif = LOOPBACK_IFINDEX;
+ fl4.flowi4_tos = RT_TOS(ip4h->tos);
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_proto = ip4h->protocol;
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
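
Both vrf hunks above apply the same rule: do not dereference the L3 header until pskb_may_pull() has confirmed that the Ethernet plus IPv4/IPv6 header bytes are actually present. A userspace model of the validate-before-parse pattern; the header struct here is a local sketch, not the kernel's struct iphdr:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

/* Minimal IPv4 header layout, used only for this sketch. */
struct ipv4_hdr {
        uint8_t  ver_ihl;
        uint8_t  tos;
        uint16_t tot_len;
        uint16_t id;
        uint16_t frag_off;
        uint8_t  ttl;
        uint8_t  protocol;
        uint16_t check;
        uint32_t saddr;
        uint32_t daddr;
};

/* Models vrf_process_v4_outbound(): refuse to parse unless the frame is long
 * enough for Ethernet + IPv4 headers (the pskb_may_pull() check). */
static bool parse_outbound(const uint8_t *frame, size_t len, struct ipv4_hdr *out)
{
        if (len < ETH_HLEN + sizeof(struct ipv4_hdr))
                return false;   /* the "goto err" / drop case */

        memcpy(out, frame + ETH_HLEN, sizeof(*out));
        return true;
}

int main(void)
{
        uint8_t runt[10] = {0};
        uint8_t full[ETH_HLEN + sizeof(struct ipv4_hdr)] = {0};
        struct ipv4_hdr h;

        printf("runt parsed: %d\n", parse_outbound(runt, sizeof(runt), &h)); /* 0 */
        printf("full parsed: %d\n", parse_outbound(full, sizeof(full), &h)); /* 1 */
        return 0;
}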
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index d74349628db2..0e6a51525d91 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
- lmc_trace(dev, "lmc_runnin_reset_out");
+ lmc_trace(dev, "lmc_running_reset_out");
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index a9ac3f37b904..e2e679a01b65 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -413,6 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
case SDLA_RET_NO_BUFS:
if (cmd == SDLA_INFORMATION_WRITE)
break;
+ /* Else, fall through */
default:
netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168bb734..489cba9b284d 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
}
result = i2400m_barker_db_add(barker);
if (result < 0)
- goto error_add;
+ goto error_parse_add;
}
kfree(options_orig);
}
return 0;
+error_parse_add:
error_parse:
+ kfree(options_orig);
error_add:
kfree(i2400m_barker_db);
return result;
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 6642bcb27761..8efb493ceec2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
"%d\n", result);
result = 0;
error_cmd:
+ kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d436cc51dfd1..2fb4258941a5 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -177,6 +177,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_cfg
},
{
@@ -184,6 +185,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_cfg
},
{
@@ -192,6 +194,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
.doit = wil_rf_sector_get_selected
},
{
@@ -200,6 +203,7 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .policy = wil_rf_sector_policy,
.doit = wil_rf_sector_set_selected
},
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index f6500899fc14..d07e7c7355d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -112,6 +112,7 @@ const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .policy = VENDOR_CMD_RAW_DATA,
.doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
},
};
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 1f500cddb3a7..55b713255b8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index d55312ef58c9..9b0bb89599fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -776,7 +776,6 @@ struct iwl_rss_config_cmd {
u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
-#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
@@ -812,10 +811,12 @@ struct iwl_rxq_sync_notification {
*
* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
*/
enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
+ IWL_MVM_RXQ_NSSN_SYNC,
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e411ac98290d..4d81776f576d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -2438,17 +2438,19 @@ static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt,
{
u32 img_name_len = le32_to_cpu(dbg_info->img_name_len);
u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len);
- const char err_str[] =
- "WRT: ext=%d. Invalid %s name length %d, expected %d\n";
if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) {
- IWL_WARN(fwrt, err_str, ext, "image", img_name_len,
+ IWL_WARN(fwrt,
+ "WRT: ext=%d. Invalid image name length %d, expected %d\n",
+ ext, img_name_len,
IWL_FW_INI_MAX_IMG_NAME_LEN);
return;
}
if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) {
- IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len,
+ IWL_WARN(fwrt,
+ "WRT: ext=%d. Invalid debug cfg name length %d, expected %d\n",
+ ext, dbg_cfg_name_len,
IWL_FW_INI_MAX_DBG_CFG_NAME_LEN);
return;
}
@@ -2775,8 +2777,6 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_ucode_tlv *tlv = iter;
void *ini_tlv = (void *)tlv->data;
u32 type = le32_to_cpu(tlv->type);
- const char invalid_ap_str[] =
- "WRT: ext=%d. Invalid apply point %d for %s\n";
switch (type) {
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
@@ -2786,8 +2786,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv;
if (pnt != IWL_FW_INI_APPLY_EARLY) {
- IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
- "buffer allocation");
+ IWL_ERR(fwrt,
+ "WRT: ext=%d. Invalid apply point %d for buffer allocation\n",
+ ext, pnt);
goto next;
}
@@ -2797,8 +2798,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
}
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
- IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
- "host command");
+ IWL_ERR(fwrt,
+ "WRT: ext=%d. Invalid apply point %d for host command\n",
+ ext, pnt);
goto next;
}
iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 1c1bf1b281cd..6c04f8223aff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 57d09049e615..38672dd5aae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1640,6 +1640,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
+ iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1660,8 +1662,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_fw_dbg_free(drv->trans);
#endif
+ iwl_fw_dbg_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1d608e9e9101..5de54d1559dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -755,7 +755,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
for (i = 0; i < n_profiles; i++) {
/* the tables start at element 3 */
- static int pos = 3;
+ int pos = 3;
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
@@ -880,6 +880,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 36, 29 and
+ * 17.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
struct iwl_geo_tx_power_profiles_resp *resp;
@@ -909,6 +925,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
.data = { data },
};
+ if (!iwl_mvm_sar_geo_support(mvm))
+ return -EOPNOTSUPP;
+
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -934,13 +953,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
- /*
- * This command is not supported on earlier firmware versions.
- * Unfortunately, we don't have a TLV API flag to rely on, so
- * rely on the major version which is in the first byte of
- * ucode_ver.
- */
- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+ if (!iwl_mvm_sar_geo_support(mvm))
return 0;
ret = iwl_mvm_sar_get_wgds_table(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cpu_to_le32(vif->bss_conf.use_short_slot ?
MAC_FLG_SHORT_SLOT : 0);
- cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+ cmd->filter_flags = 0;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
/* We need the dtim_period to set the MAC as associated */
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 ap_sta_id = mvmvif->ap_sta_id;
u32 dtim_offs;
/*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
dtim_offs);
ctxt_sta->is_assoc = cpu_to_le32(1);
+
+ /*
+ * allow multicast data frames only as long as the station is
+ * authorized, i.e., GTK keys are already installed (if needed)
+ */
+ if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct iwl_mvm_sta *mvmsta =
+ iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvmsta->sta_state ==
+ IEEE80211_STA_AUTHORIZED)
+ cmd.filter_flags |=
+ cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+ }
+
+ rcu_read_unlock();
+ }
} else {
ctxt_sta->is_assoc = cpu_to_le32(0);
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
MAC_FILTER_IN_CONTROL_AND_MGMT |
MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST |
- MAC_FILTER_IN_CRC32);
+ MAC_FILTER_IN_CRC32 |
+ MAC_FILTER_ACCEPT_GRP);
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
/* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
- MAC_FILTER_IN_PROBE_REQUEST);
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_ACCEPT_GRP);
/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 55cd49ccbf0b..a7bc00d1296f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
},
};
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
- enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
ieee80211_hw_set(hw, STA_MMPDU_TXQ);
- ieee80211_hw_set(hw, TX_AMSDU);
+ /*
+ * On older devices, enabling TX A-MSDU occasionally leads to
+ * something getting messed up, the command read from the FIFO
+ * gets out of sync and isn't a TX command, so that we have an
+ * assert EDC.
+ *
+ * It's not clear where the bug is, but since we didn't use to
+ * support A-MSDU until moving the mac80211 iTXQs, just leave it
+ * for older devices. We also don't see this issue on any newer
+ * devices.
+ */
+ if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+ ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
if (iwl_mvm_has_tlc_offload(mvm)) {
@@ -2726,7 +2738,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_early_keys[i] = NULL;
- ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
if (ret)
goto out_quota_failed;
}
@@ -3315,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ /*
+ * Now that the station is authorized, i.e., keys were already
+ * installed, need to indicate to the FW that
+ * multicast data frames can be forwarded to the driver
+ */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
+ /* Multicast data frames are no longer allowed */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
/* disable beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
WARN_ON(ret &&
@@ -3494,11 +3516,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
return ret;
}
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
- enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3553,8 +3575,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
- mutex_lock(&mvm->mutex);
-
switch (cmd) {
case SET_KEY:
if ((vif->type == NL80211_IFTYPE_ADHOC ||
@@ -3700,7 +3720,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = -EINVAL;
}
+ return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
mutex_unlock(&mvm->mutex);
+
return ret;
}
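
The refactor above is the usual locked-wrapper/unlocked-helper split: iwl_mvm_mac_set_key() now only takes mvm->mutex and calls __iwl_mvm_mac_set_key(), so callers that already hold the mutex (such as the early-key replay in iwl_mvm_start_ap_ibss()) can use the helper without deadlocking. A generic pthread sketch of the pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

/* Internal helper: caller must already hold 'lock'. */
static int __set_key(int key)
{
        shared_state = key;     /* state protected by 'lock' */
        return 0;
}

/* Public entry point: takes the lock, then defers to the helper. */
static int set_key(int key)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = __set_key(key);
        pthread_mutex_unlock(&lock);
        return ret;
}

/* A path that already holds the lock for other reasons can reuse the helper
 * without re-acquiring (and deadlocking on) the same mutex. */
static int start_ap(void)
{
        int ret;

        pthread_mutex_lock(&lock);
        /* ... other work done under the lock ... */
        ret = __set_key(42);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        set_key(1);
        start_ap();
        printf("state %d\n", shared_state);
        return 0;
}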
@@ -5041,7 +5076,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
- lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_has_new_rx_api(mvm))
return;
@@ -5052,13 +5086,15 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
+ size, !notif->sync);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
goto out;
}
if (notif->sync) {
+ lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
atomic_read(&mvm->queue_sync_counter) == 0 ||
iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 48c77af54e99..a263cc629d75 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1664,9 +1664,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const u8 *data, u32 count);
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- int queue);
+ const u8 *data, u32 count, bool async);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1813,7 +1813,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 719f793b3487..a9bb43a2f27b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -620,7 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
enum iwl_mcc_source src;
char mcc[3];
struct ieee80211_regdomain *regd;
- u32 wgds_tbl_idx;
+ int wgds_tbl_idx;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d7d6f3398f86..4888054dc3d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1088,7 +1088,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
RX_QUEUES_NOTIFICATION)))
- iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
@@ -1812,7 +1812,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
RX_QUEUES_NOTIFICATION)))
- iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 8c9069f28a58..d3f04acfbacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1197,239 +1197,6 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
return tid;
}
-void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, struct ieee80211_tx_info *info, bool ndp)
-{
- int legacy_success;
- int retries;
- int i;
- struct iwl_lq_cmd *table;
- u32 lq_hwrate;
- struct rs_rate lq_rate, tx_resp_rate;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
- u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
- u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
- u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->pers.drv) {
- IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
- return;
- }
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
- &tx_resp_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- /* Disable last tx check if we are debugging with fixed rate but
- * update tx stats */
- if (lq_sta->pers.dbg_fixed_rate) {
- int index = tx_resp_rate.index;
- enum rs_column column;
- int attempts, success;
-
- column = rs_get_column_from_rate(&tx_resp_rate);
- if (WARN_ONCE(column == RS_COLUMN_INVALID,
- "Can't map rate 0x%x to column",
- tx_resp_hwrate))
- return;
-
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- attempts = info->status.ampdu_len;
- success = info->status.ampdu_ack_len;
- } else {
- attempts = info->status.rates[0].count;
- success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- }
-
- lq_sta->pers.tx_stats[column][index].total += attempts;
- lq_sta->pers.tx_stats[column][index].success += success;
-
- IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
- tx_resp_hwrate, success, attempts);
- return;
- }
-#endif
-
- if (time_after(jiffies,
- (unsigned long)(lq_sta->last_tx +
- (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
- IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
- iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
- return;
- }
- lq_sta->last_tx = jiffies;
-
- /* Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- lq_hwrate = le32_to_cpu(table->rs_table[0]);
- if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
- /* Here we actually compare this rate to the latest LQ command */
- if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
- IWL_DEBUG_RATE(mvm,
- "tx resp color 0x%x does not match 0x%x\n",
- lq_color, LQ_FLAG_COLOR_GET(table->flags));
-
- /*
- * Since rates mis-match, the last LQ command may have failed.
- * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
- * ... driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- IWL_DEBUG_RATE(mvm,
- "Too many rates mismatch. Send sync LQ. rs_state %d\n",
- lq_sta->rs_state);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- if (!lq_sta->search_better_tbl) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- }
-
- if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
- IWL_DEBUG_RATE(mvm,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
- rs_dump_rate(mvm, &lq_rate, "ACTUAL");
-
- /*
- * no matching table found, let's by-pass the data collection
- * and continue to perform rate scale to find the rate table
- */
- rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len,
- reduced_txp);
-
- /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
- * it as a single frame loss as we don't want the success ratio
- * to dip too quickly because a BA wasn't received.
- * For TPC, there's no need for this optimisation since we want
- * to recover very quickly from a bad power reduction and,
- * therefore we'd like the success ratio to get an immediate hit
- * when failing to get a BA, so we'd switch back to a lower or
- * zero power reduction. When FW transmits agg with a rate
- * different from the initial rate, it will not use reduced txp
- * and will send BA notification twice (one empty with reduced
- * txp equal to the value from LQ and one with reduced txp 0).
- * We need to update counters for each txp level accordingly.
- */
- if (info->status.ampdu_ack_len == 0)
- info->status.ampdu_len = 1;
-
- rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /* For legacy, update frame history with for each Tx retry. */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- lq_hwrate = le32_to_cpu(table->rs_table[i]);
- if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
- &lq_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
- tmp_tbl = curr_tbl;
- else if (rs_rate_column_match(&lq_rate,
- &other_tbl->rate))
- tmp_tbl = other_tbl;
- else
- continue;
-
- rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
- tx_resp_rate.index, 1,
- i < retries ? 0 : legacy_success,
- reduced_txp);
- rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
- tx_resp_rate.index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = lq_hwrate;
- IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[info->band])
- rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
-}
-
/*
* mac80211 sends us Tx status
*/
@@ -1442,8 +1209,9 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ if (!mvmsta->vif)
return;
if (!ieee80211_is_data(hdr->frame_control) ||
@@ -1584,6 +1352,18 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
+/* rs uses two tables, one is active and the second is for searching better
+ * configuration. This function, according to the index of the currently
+ * active table returns the search table, which is located at the
+ * index complementary to 1 according to the active table (active = 1,
+ * search = 0 or active = 0, search = 1).
+ * Since lq_info is an array of size 2, make sure index cannot be out of bounds.
+ */
+static inline u8 rs_search_tbl(u8 active_tbl)
+{
+ return (active_tbl ^ 1) & 1;
+}
+
static s32 rs_get_best_rate(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, /* "search" */
@@ -1794,7 +1574,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl)
{
rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
}
static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
@@ -1931,9 +1711,9 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum rs_column col_id)
{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
struct rs_rate *rate = &search_tbl->rate;
const struct rs_tx_column *column = &rs_tx_columns[col_id];
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
@@ -2341,7 +2121,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
- active_tbl = 1 - lq_sta->active_tbl;
+ active_tbl = rs_search_tbl(lq_sta->active_tbl);
tbl = &(lq_sta->lq_info[active_tbl]);
rate = &tbl->rate;
@@ -2565,7 +2345,7 @@ lq_update:
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
rs_rate_scale_clear_tbl_windows(mvm, tbl);
/* Use new "search" start rate */
@@ -2896,7 +2676,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum nl80211_band band, bool update)
+ enum nl80211_band band)
{
struct iwl_scale_tbl_info *tbl;
struct rs_rate *rate;
@@ -2908,7 +2688,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
- active_tbl = 1 - lq_sta->active_tbl;
+ active_tbl = rs_search_tbl(lq_sta->active_tbl);
tbl = &(lq_sta->lq_info[active_tbl]);
rate = &tbl->rate;
@@ -2926,7 +2706,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
}
static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3175,7 +2955,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band, bool update)
+ enum nl80211_band band)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -3186,6 +2966,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock);
+
/* clear all non-persistent lq data */
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -3255,7 +3037,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
- rs_initialize_lq(mvm, sta, lq_sta, band, update);
+ rs_initialize_lq(mvm, sta, lq_sta, band);
}
static void rs_drv_rate_update(void *mvm_r,
@@ -3278,6 +3060,258 @@ static void rs_drv_rate_update(void *mvm_r,
iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
}
+static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info,
+ bool ndp)
+{
+ int legacy_success;
+ int retries;
+ int i;
+ struct iwl_lq_cmd *table;
+ u32 lq_hwrate;
+ struct rs_rate lq_rate, tx_resp_rate;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+ u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+ u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
+ u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->pers.drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* Disable last tx check if we are debugging with fixed rate but
+ * update tx stats
+ */
+ if (lq_sta->pers.dbg_fixed_rate) {
+ int index = tx_resp_rate.index;
+ enum rs_column column;
+ int attempts, success;
+
+ column = rs_get_column_from_rate(&tx_resp_rate);
+ if (WARN_ONCE(column == RS_COLUMN_INVALID,
+ "Can't map rate 0x%x to column",
+ tx_resp_hwrate))
+ return;
+
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ attempts = info->status.ampdu_len;
+ success = info->status.ampdu_ack_len;
+ } else {
+ attempts = info->status.rates[0].count;
+ success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ }
+
+ lq_sta->pers.tx_stats[column][index].total += attempts;
+ lq_sta->pers.tx_stats[column][index].success += success;
+
+ IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+ tx_resp_hwrate, success, attempts);
+ return;
+ }
+#endif
+
+ if (time_after(jiffies,
+ (unsigned long)(lq_sta->last_tx +
+ (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+ IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+ /* reach here only in case of driver RS, call directly
+ * the unlocked version
+ */
+ rs_drv_rate_init(mvm, sta, info->band);
+ return;
+ }
+ lq_sta->last_tx = jiffies;
+
+ /* Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ lq_hwrate = le32_to_cpu(table->rs_table[0]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Here we actually compare this rate to the latest LQ command */
+ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
+ IWL_DEBUG_RATE(mvm,
+ "tx resp color 0x%x does not match 0x%x\n",
+ lq_color, LQ_FLAG_COLOR_GET(table->flags));
+
+ /* Since rates mis-match, the last LQ command may have failed.
+ * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+ * ... driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ }
+
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ if (!lq_sta->search_better_tbl) {
+ curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ } else {
+ curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ }
+
+ if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
+ IWL_DEBUG_RATE(mvm,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
+ tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &lq_rate, "ACTUAL");
+
+ /* no matching table found, let's by-pass the data collection
+ * and continue to perform rate scale to find the rate table
+ */
+ rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /* Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into the rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len,
+ reduced_txp);
+
+ /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+ * it as a single frame loss as we don't want the success ratio
+ * to dip too quickly because a BA wasn't received.
+ * For TPC, there's no need for this optimisation since we want
+ * to recover very quickly from a bad power reduction and,
+ * therefore we'd like the success ratio to get an immediate hit
+ * when failing to get a BA, so we'd switch back to a lower or
+ * zero power reduction. When FW transmits agg with a rate
+ * different from the initial rate, it will not use reduced txp
+ * and will send BA notification twice (one empty with reduced
+ * txp equal to the value from LQ and one with reduced txp 0).
+ * We need to update counters for each txp level accordingly.
+ */
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
+ rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
+ tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /* For legacy, update frame history for each Tx retry. */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ lq_hwrate = le32_to_cpu(table->rs_table[i]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
+ tmp_tbl = curr_tbl;
+ else if (rs_rate_column_match(&lq_rate,
+ &other_tbl->rate))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+
+ rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
+ rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = lq_hwrate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[info->band])
+ rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
+}
+
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info, bool ndp)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* If it's locked we are in the middle of the init flow;
+ * just wait for the next tx status to update the lq_sta data
+ */
+ if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock))
+ return;
+
+ __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
+ spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd,
@@ -3569,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
- iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
ss_params |= LQ_SS_BFER_ALLOWED;
IWL_DEBUG_RATE(mvm,
@@ -3735,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
if (lq_sta->pers.dbg_fixed_rate) {
rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
- iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
+ iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
}
}
@@ -4132,10 +4166,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update)
{
- if (iwl_mvm_has_tlc_offload(mvm))
+ if (iwl_mvm_has_tlc_offload(mvm)) {
rs_fw_rate_init(mvm, sta, band, update);
- else
- rs_drv_rate_init(mvm, sta, band, update);
+ } else {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock);
+ rs_drv_rate_init(mvm, sta, band);
+ spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+ }
}
int iwl_mvm_rate_control_register(void)
@@ -4165,7 +4204,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
}
- return iwl_mvm_send_lq_cmd(mvm, lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, lq);
}
/**
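
The rs.c hunks above serialize the driver rate-scaling state behind the new per-station pers.lock: iwl_mvm_rs_rate_init() takes the lock unconditionally, while iwl_mvm_rs_tx_status() only tries it and silently drops the update when the init flow already owns it. A minimal sketch of that guard idiom, using illustrative names rather than the driver's structures:

#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct rs_state {
	spinlock_t lock;
	unsigned long last_tx;
};

/* Init path: must win, so block until the state is ours. */
static void rs_init(struct rs_state *rs)
{
	spin_lock(&rs->lock);
	rs->last_tx = jiffies;
	spin_unlock(&rs->lock);
}

/* Tx-status path: best effort, skip the update if init is in flight. */
static void rs_tx_status(struct rs_state *rs)
{
	if (!spin_trylock(&rs->lock))
		return;
	rs->last_tx = jiffies;
	spin_unlock(&rs->lock);
}
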
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index f7eb60dbaf20..428642e66658 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -4,7 +4,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -390,6 +390,7 @@ struct iwl_lq_sta {
s8 last_rssi;
struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
struct iwl_mvm *drv;
+ spinlock_t lock; /* for races in reinit/update table */
} pers;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 64f950501287..854edd7d7103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -463,20 +463,22 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
}
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const u8 *data, u32 count)
+ const u8 *data, u32 count, bool async)
{
- struct iwl_rxq_sync_cmd *cmd;
+ u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
+ sizeof(struct iwl_mvm_rss_sync_notif)];
+ struct iwl_rxq_sync_cmd *cmd = (void *)buf;
u32 data_size = sizeof(*cmd) + count;
int ret;
- /* should be DWORD aligned */
- if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+ /*
+ * size must be a multiple of DWORD
+ * Ensure we don't overflow buf
+ */
+ if (WARN_ON(count & 3 ||
+ count > sizeof(struct iwl_mvm_rss_sync_notif)))
return -EINVAL;
- cmd = kzalloc(data_size, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
cmd->rxq_mask = cpu_to_le32(rxq_mask);
cmd->count = cpu_to_le32(count);
cmd->flags = 0;
@@ -485,9 +487,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(DATA_PATH_GROUP,
TRIGGER_RX_QUEUES_NOTIF_CMD),
- 0, data_size, cmd);
+ async ? CMD_ASYNC : 0, data_size, cmd);
- kfree(cmd);
return ret;
}
@@ -503,14 +504,31 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
!ieee80211_sn_less(sn1, sn2 - buffer_size);
}
+static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
+{
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+}
+
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+enum iwl_mvm_release_flags {
+ IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
+ IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
+};
+
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
- u16 nssn)
+ u16 nssn, u32 flags)
{
struct iwl_mvm_reorder_buf_entry *entries =
&baid_data->entries[reorder_buf->queue *
@@ -519,6 +537,18 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
+ /*
+ * We keep the NSSN not too far behind; if we are sync'ing it and it
+ * is more than 2048 ahead of us, it must be behind us. Discard it.
+ * This can happen if the queue that hit the 0 / 2048 seqno was lagging
+ * behind and this queue already processed packets. The next if
+ * would have caught cases where this queue would have processed less
+ * than 64 packets, but it may have processed more than 64 packets.
+ */
+ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
+ ieee80211_sn_less(nssn, ssn))
+ goto set_timer;
+
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
goto set_timer;
@@ -529,6 +559,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
+ if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
+ (ssn == 2048 || ssn == 0))
+ iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
/*
* Empty the list. Will have more than one frame for A-MSDU.
@@ -615,7 +648,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
sta_id, sn);
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
sta, baid_data->tid);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
+ buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
rcu_read_unlock();
} else {
/*
@@ -657,7 +691,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size));
+ reorder_buf->buf_size),
+ 0);
spin_unlock_bh(&reorder_buf->lock);
del_timer_sync(&reorder_buf->reorder_timer);
@@ -665,8 +700,54 @@ out:
rcu_read_unlock();
}
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- int queue)
+static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ u8 baid, u16 nssn, int queue,
+ u32 flags)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+ baid, nssn);
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map)))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data,
+ reorder_buf, nssn, flags);
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
+ struct napi_struct *napi, int queue,
+ const struct iwl_mvm_nssn_sync_data *data)
+{
+ iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
+ data->nssn, queue,
+ IWL_MVM_RELEASE_FROM_RSS_SYNC);
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rxq_sync_notification *notif;
@@ -687,6 +768,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
case IWL_MVM_RXQ_NOTIF_DEL_BA:
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
+ case IWL_MVM_RXQ_NSSN_SYNC:
+ iwl_mvm_nssn_sync(mvm, napi, queue,
+ (void *)internal_notif->data);
+ break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
@@ -785,7 +870,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ buffer, nssn, 0);
goto drop;
}
@@ -794,7 +880,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the
* rest of the function will take care of storing it and releasing up to
- * the nssn
+ * the nssn.
+ * This should not happen. This queue has been lagging and it should
+ * have been updated by an IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
+ * and update the other queues.
*/
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
buffer->buf_size) ||
@@ -802,7 +891,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
- min_sn);
+ min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
}
/* drop any outdated packets */
@@ -813,8 +902,23 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
buffer->buf_size) &&
- (!amsdu || last_subframe))
+ (!amsdu || last_subframe)) {
+ /*
+ * If we crossed the 2048 or 0 SN, notify all the
+ * queues. This is done in order to avoid having a
+ * head_sn that lags behind for too long. When that
+ * happens, we can get to a situation where the head_sn
+ * is within the interval [nssn - buf_size : nssn]
+ * which will make us think that the nssn is a packet
+ * that we already freed because of the reordering
+ * buffer and we will ignore it. So maintain the
+ * head_sn somewhat updated across all the queues:
+ * when it crosses 0 and 2048.
+ */
+ if (sn == 2048 || sn == 0)
+ iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = nssn;
+ }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -829,8 +933,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* while technically there is no hole and we can move forward.
*/
if (!buffer->num_stored && sn == buffer->head_sn) {
- if (!amsdu || last_subframe)
+ if (!amsdu || last_subframe) {
+ if (sn == 2048 || sn == 0)
+ iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -875,7 +982,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* release notification with up to date NSSN.
*/
if (!amsdu || last_subframe)
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ buffer, nssn,
+ IWL_MVM_RELEASE_SEND_RSS_SYNC);
spin_unlock_bh(&buffer->lock);
return true;
@@ -1840,40 +1949,14 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
out:
rcu_read_unlock();
}
+
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data;
- struct ieee80211_sta *sta;
- struct iwl_mvm_reorder_buffer *reorder_buf;
- struct iwl_mvm_baid_data *ba_data;
-
- int baid = release->baid;
-
- IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
- release->baid, le16_to_cpu(release->nssn));
- if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
- return;
-
- rcu_read_lock();
-
- ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN_ON_ONCE(!ba_data))
- goto out;
-
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
- goto out;
-
- reorder_buf = &ba_data->reorder_buf[queue];
-
- spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
- le16_to_cpu(release->nssn));
- spin_unlock_bh(&reorder_buf->lock);
-
-out:
- rcu_read_unlock();
+ iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
+ le16_to_cpu(release->nssn),
+ queue, 0);
}
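
Several comments above ("more than 2048 ahead of us, it must be behind us", the sync on SN 0/2048 crossings) rely on 802.11 sequence numbers being 12-bit values compared modulo 4096, as the mac80211 sequence-number helpers do. A self-contained restatement of that ordering rule, for illustration only:

#include <stdbool.h>
#include <stdint.h>

#define SN_MASK   0xfff		/* 802.11 sequence numbers are 12 bits */
#define SN_MODULO (SN_MASK + 1)	/* compared modulo 4096 */

/*
 * a is "less than" b when a trails b by less than half the sequence
 * space; anything further "ahead" is taken to have wrapped, i.e. to be
 * behind. E.g. sn_less(4000, 10) is true: 4000 is treated as trailing
 * a post-wrap 10.
 */
static bool sn_less(uint16_t a, uint16_t b)
{
	return ((a - b) & SN_MASK) > (SN_MODULO >> 1);
}
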
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f545a737a92d..10f18536dd0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1684,6 +1684,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
*/
if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta);
+ else
+ spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
@@ -2421,7 +2423,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
- struct iwl_mvm_delba_notif notif = {
+ struct iwl_mvm_rss_sync_notif notif = {
.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
.metadata.sync = 1,
.delba.baid = baid,
@@ -2972,7 +2974,7 @@ out:
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4487cc3e07c1..8d70093847cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -343,9 +343,17 @@ struct iwl_mvm_delba_data {
u32 baid;
} __packed;
-struct iwl_mvm_delba_notif {
+struct iwl_mvm_nssn_sync_data {
+ u32 baid;
+ u32 nssn;
+} __packed;
+
+struct iwl_mvm_rss_sync_notif {
struct iwl_mvm_internal_rxq_notif metadata;
- struct iwl_mvm_delba_data delba;
+ union {
+ struct iwl_mvm_delba_data delba;
+ struct iwl_mvm_nssn_sync_data nssn_sync;
+ };
} __packed;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a3e5d88f1c07..6ac114a393cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
unsigned int tcp_payload_len;
unsigned int mss = skb_shinfo(skb)->gso_size;
bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ bool qos = ieee80211_is_data_qos(hdr->frame_control);
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
skb_shinfo(skb)->gso_size = num_subframes * mss;
@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
} else {
- if (ieee80211_is_data_qos(hdr->frame_control)) {
+ if (qos) {
u8 *qc;
if (ipv4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 9ecd5f09615a..b8e20a01c192 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -653,12 +653,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
* this case to clear the state indicating that station creation is in
* progress.
*/
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_ASYNC,
.data = { lq, },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ea2a03d4bf55..3b12e7ad35e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -604,10 +604,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -1059,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
+
+ /* same thing for QuZ... */
+ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ }
+
#endif
pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..db62c8314603 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- ((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg &&
- trans->cfg != &iwl_ax201_cfg_quz_hr) ||
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
u16 len = byte_cnt;
__le16 bc_ent;
- if (trans_pcie->bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ /* Starting from 22560, the HW expects bytes */
+ WARN_ON(trans_pcie->bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
- else
+ } else {
+ /* Until 22560, the HW expects DW */
+ WARN_ON(!trans_pcie->bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fa4245d0d4a8..2f0ba7ef53b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
DMA_TO_DEVICE);
}
+ meta->tbs = 0;
+
if (trans->cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 519b4ee88c5c..772e54f0696f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3617,10 +3617,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &hwsim_genl_family,
NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
- if (!hdr)
+ if (hdr) {
+ genl_dump_check_consistent(cb, hdr);
+ genlmsg_end(skb, hdr);
+ } else {
res = -EMSGSIZE;
- genl_dump_check_consistent(cb, hdr);
- genlmsg_end(skb, hdr);
+ }
}
done:
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 653d347a9a19..580387f9f12a 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
}
vs_ie = (struct ieee_types_header *)vendor_ie;
+ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+ IEEE_MAX_IE_SIZE)
+ return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 3e442c7f7882..095837fba300 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define WPA_GTK_OUI_OFFSET 2
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0d6d41727037..21dda385f6c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+ WPA_GTK_OUI_OFFSET);
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18f7d9bf30b2..0939a8c8f3ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+ return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
- if (rate_ie)
+ if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+ return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+ }
return;
}
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = vendor_ie;
+ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+ return;
memcpy(&bss_cfg->wmm_info, wmm_ie +
sizeof(struct ieee_types_header), *(wmm_ie + 1));
priv->wmm_enabled = 1;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 40c0d536e20d..9d4426f6905f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
+ if (is_mt7630(dev)) {
+ dev->mt76.cap.has_5ghz = false;
+ dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+ }
+
if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 4585e1b756c2..6117e6ca08cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -62,6 +62,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
mt76x0e_stop_hw(dev);
}
+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (is_mt7630(dev))
+ return -EOPNOTSUPP;
+
+ return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
@@ -78,7 +91,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
- .set_key = mt76x02_set_key,
+ .set_key = mt76x0e_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x02_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
};
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
{
int err;
- mt76x0_chip_onoff(dev, true, true);
+ mt76x0_chip_onoff(dev, true, reset);
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto out_err;
- err = mt76x0u_init_hardware(dev);
+ err = mt76x0u_init_hardware(dev, true);
if (err < 0)
goto out_err;
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
if (ret < 0)
goto err;
- ret = mt76x0u_init_hardware(dev);
+ ret = mt76x0u_init_hardware(dev, false);
if (ret)
goto err;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..f1cdcd61c54a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
- rt2800_register_multiread(rt2x00dev, offset,
- &iveiv_entry, sizeof(iveiv_entry));
- if ((crypto->cipher == CIPHER_TKIP) ||
- (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
- (crypto->cipher == CIPHER_AES))
- iveiv_entry.iv[3] |= 0x20;
- iveiv_entry.iv[3] |= key->keyidx << 6;
+ if (crypto->cmd == SET_KEY) {
+ rt2800_register_multiread(rt2x00dev, offset,
+ &iveiv_entry, sizeof(iveiv_entry));
+ if ((crypto->cipher == CIPHER_TKIP) ||
+ (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+ (crypto->cipher == CIPHER_AES))
+ iveiv_entry.iv[3] |= 0x20;
+ iveiv_entry.iv[3] |= key->keyidx << 6;
+ } else {
+ memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+ }
+
rt2800_register_multiwrite(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
}
@@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
/* Turn on tertiary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
/* fall-through */
case 2:
/* Turn on secondary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
/* fall-through */
case 1:
/* Turn on primary LNAs */
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
- rf->channel > 14);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
- rf->channel <= 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
break;
}
@@ -6095,6 +6094,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
}
/*
+ * Clear encryption initialization vectors on start, but keep them
+ * for watchdog reset. Otherwise we will have wrong IVs and not be
+ * able to keep connections after reset.
+ */
+ if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+ for (i = 0; i < 256; i++)
+ rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
+ /*
* Clear all beacons
*/
for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
DEVICE_STATE_ENABLED_RADIO,
DEVICE_STATE_SCANNING,
DEVICE_STATE_FLUSHING,
+ DEVICE_STATE_RESET,
/*
* Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
{
- int retval;
+ int retval = 0;
if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
/*
* This is a special case for ieee80211_restart_hw(); otherwise
* mac80211 never calls start() twice in a row without stop().
*/
+ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
rt2x00lib_stop(rt2x00dev);
}
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
*/
retval = rt2x00lib_load_firmware(rt2x00dev);
if (retval)
- return retval;
+ goto out;
/*
* Initialize the device.
*/
retval = rt2x00lib_initialize(rt2x00dev);
if (retval)
- return retval;
+ goto out;
rt2x00dev->intf_ap_count = 0;
rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
/* Enable the radio */
retval = rt2x00lib_enable_radio(rt2x00dev);
if (retval)
- return retval;
+ goto out;
set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
- return 0;
+out:
+ clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+ return retval;
}
void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
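
The rt2x00 changes bracket the mac80211 restart path with a DEVICE_STATE_RESET flag so that rt2800_init_registers() can tell a watchdog restart (keep the IV/EIV entries, and with them the existing connections) from a cold start (clear them). A reduced sketch of that flag bracket, with simplified names and error handling:

#include <linux/bitops.h>

enum { STATE_STARTED, STATE_RESET };

struct dev_state { unsigned long flags; };

static void lib_stop(struct dev_state *dev) { /* radio down, queues flushed */ }
static int lib_initialize(struct dev_state *dev) { return 0; }

static int lib_start(struct dev_state *dev)
{
	int ret;

	if (test_bit(STATE_STARTED, &dev->flags)) {
		/* restart from mac80211: mark it so init keeps the IV table */
		set_bit(STATE_RESET, &dev->flags);
		lib_stop(dev);
	}

	ret = lib_initialize(dev);
	if (ret)
		goto out;

	set_bit(STATE_STARTED, &dev->flags);
out:
	/* the marker only covers this single (re)start attempt */
	clear_bit(STATE_RESET, &dev->flags);
	return ret;
}
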
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f5048d4b8cb6..760eaffeebd6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -645,7 +645,6 @@ fail_rx:
kfree(rsi_dev->tx_buffer);
fail_eps:
- kfree(rsi_dev);
return status;
}
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5cf0b32c413b..e1bd344c4ebc 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -163,6 +163,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_start,
+ .policy = wlcore_vendor_attr_policy,
},
{
.info = {
@@ -172,6 +173,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_stop,
+ .policy = wlcore_vendor_attr_policy,
},
{
.info = {
@@ -181,6 +183,7 @@ static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_set_group_key,
+ .policy = wlcore_vendor_attr_policy,
},
};
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1d9940d4e8c7..c9262ffeefe4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
kfree_skb(nskb);
break;
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index e65d027b91fa..529be35ac178 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
/* Reset possible fault of previous session */
clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
- if (priv->config.reset_n_io) {
+ if (gpio_is_valid(priv->config.reset_n_io)) {
nfc_info(priv->dev, "reset the chip\n");
gpio_set_value(priv->config.reset_n_io, 0);
usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
{
- if (priv->config.reset_n_io)
+ if (gpio_is_valid(priv->config.reset_n_io))
gpio_set_value(priv->config.reset_n_io, 0);
}
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 9a22056e8d9e..e5a622ce4b95 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -26,7 +26,7 @@
static unsigned int hci_muxed;
static unsigned int flow_control;
static unsigned int break_control;
-static unsigned int reset_n_io;
+static int reset_n_io = -EINVAL;
/*
** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
module_param(hci_muxed, uint, 0);
MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
-module_param(reset_n_io, uint, 0);
+module_param(reset_n_io, int, 0);
MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
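
Defaulting reset_n_io to -EINVAL and switching the main.c checks to gpio_is_valid() closes a corner case: GPIO 0 is a valid line, so the old "if (reset_n_io)" test could never drive a reset line wired to GPIO 0, while a signed parameter defaulting to an invalid value lets gpio_is_valid() distinguish "not wired" from "wired to GPIO 0". A minimal sketch of the pattern, with an illustrative parameter name:

#include <linux/gpio.h>
#include <linux/module.h>

/* Invalid by default, so "not wired" is distinct from "wired to GPIO 0". */
static int reset_gpio = -EINVAL;
module_param(reset_gpio, int, 0);
MODULE_PARM_DESC(reset_gpio, "GPIO wired to the chip reset line (illustrative)");

static void chip_reset(void)
{
	if (!gpio_is_valid(reset_gpio))
		return;			/* parameter left unset or bogus */
	gpio_set_value(reset_gpio, 0);
	gpio_set_value(reset_gpio, 1);
}
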
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 945cc903d8f1..888e298f610b 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
/* No configuration for USB */
memset(&config, 0, sizeof(config));
+ config.reset_n_io = -EINVAL;
nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index c3e10b6ab3a4..f25f1ec5f9e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 06fc542fd198..6586378cacb0 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index e42850095892..7eda62a9e0df 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context)
&echo_response);
if (result) {
dev_err(&st95context->spicontext.spidev->dev,
- "err: echo response receieve error = 0x%x\n", result);
+ "err: echo response receive error = 0x%x\n", result);
return result;
}
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 9dddf133658f..0a5e884a920c 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -6,11 +6,6 @@
#include <linux/msi.h>
#include <linux/pci.h>
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("0.1");
-MODULE_AUTHOR("Logan Gunthorpe <logang@deltatee.com>");
-MODULE_DESCRIPTION("NTB MSI Interrupt Library");
-
struct ntb_msi {
u64 base_addr;
u64 end_addr;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 62d00fffa4af..3508a79110c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = sprintf(buf, "%llu\n", nd_btt->size);
else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2dca3034fee0..798c5c4aea9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -26,7 +26,7 @@
int nvdimm_major;
static int nvdimm_bus_major;
-static struct class *nd_class;
+struct class *nd_class;
static DEFINE_IDA(nd_ida);
static int to_nd_device_type(struct device *dev)
@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
- wake_up(&nvdimm_bus->probe_wait);
+ wake_up(&nvdimm_bus->wait);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
@@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev)
dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
+ debug_nvdimm_lock(dev);
rc = nd_drv->probe(dev);
+ debug_nvdimm_unlock(dev);
+
if (rc == 0)
nd_region_probe_success(nvdimm_bus, dev);
else
@@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
int rc = 0;
- if (nd_drv->remove)
+ if (nd_drv->remove) {
+ debug_nvdimm_lock(dev);
rc = nd_drv->remove(dev);
+ debug_nvdimm_unlock(dev);
+ }
nd_region_disable(nvdimm_bus, dev);
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
@@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_device_driver *nd_drv;
@@ -148,7 +154,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event)
if (nd_drv->notify)
nd_drv->notify(dev, event);
}
- device_unlock(dev);
+ nd_device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);
@@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
-static bool is_nvdimm_bus(struct device *dev)
+bool is_nvdimm_bus(struct device *dev)
{
return dev->release == nvdimm_bus_release;
}
@@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- init_waitqueue_head(&nvdimm_bus->probe_wait);
+ init_waitqueue_head(&nvdimm_bus->wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
@@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev)
list_del_init(&nvdimm_bus->list);
mutex_unlock(&nvdimm_bus_list_mutex);
+ wait_event(nvdimm_bus->wait,
+ atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
+ bool killed;
+
switch (mode) {
case ND_ASYNC:
+ /*
+ * In the async case this is being triggered with the
+ * device lock held and the unregistration work needs to
+ * be moved out of line iff this thread has won the
+ * race to schedule the deletion.
+ */
+ if (!kill_device(dev))
+ return;
+
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
&nd_async_domain);
break;
case ND_SYNC:
+ /*
+ * In the sync case the device is being unregistered due
+ * to a state change of the parent. Claim the kill state
+ * to synchronize against other unregistration requests,
+ * or otherwise let the async path handle it if the
+ * unregistration was already queued.
+ */
+ nd_device_lock(dev);
+ killed = kill_device(dev);
+ nd_device_unlock(dev);
+
+ if (!killed)
+ return;
+
nd_synchronize();
device_unregister(dev);
break;
@@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
do {
if (nvdimm_bus->probe_active == 0)
break;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- wait_event(nvdimm_bus->probe_wait,
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+ wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
} while (true);
}
@@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- static char out_env[ND_CMD_MAX_ENVELOPE];
- static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
+ char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
+ void *buf = NULL;
u64 buf_len = 0;
- void *buf;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
- dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ dev_dbg(dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
@@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
+ in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!in_env)
+ return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
u32 in_size, copy;
@@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
- if (in_len < sizeof(in_env))
- copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+ if (in_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
- return -EFAULT;
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
in_len += in_size;
}
@@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
+ out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env, 0);
@@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
- if (out_len < sizeof(out_env))
- copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+ if (out_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
- p + in_len + out_len, copy))
- return -EFAULT;
+ p + in_len + out_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
out_len += out_size;
}
@@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
buf = vmalloc(buf_len);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, p, buf_len)) {
rc = -EFAULT;
goto out;
}
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
@@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
clear_err->cleared);
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
- vfree(buf);
- return rc;
-
- out_unlock:
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+out:
+ kfree(in_env);
+ kfree(out_env);
vfree(buf);
return rc;
}
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long id = (long) file->private_data;
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
-
- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
- mutex_lock(&nvdimm_bus_list_mutex);
- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- if (nvdimm_bus->id == id) {
- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
- break;
- }
- }
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- return rc;
-}
+enum nd_ioctl_mode {
+ BUS_IOCTL,
+ DIMM_IOCTL,
+};
static int match_dimm(struct device *dev, void *data)
{
@@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data)
return 0;
}
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ enum nd_ioctl_mode mode)
+
{
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus *nvdimm_bus, *found = NULL;
+ long id = (long) file->private_data;
+ struct nvdimm *nvdimm = NULL;
+ int rc, ro;
ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- struct device *dev = device_find_child(&nvdimm_bus->dev,
- file->private_data, match_dimm);
- struct nvdimm *nvdimm;
-
- if (!dev)
- continue;
+ if (mode == DIMM_IOCTL) {
+ struct device *dev;
+
+ dev = device_find_child(&nvdimm_bus->dev,
+ file->private_data, match_dimm);
+ if (!dev)
+ continue;
+ nvdimm = to_nvdimm(dev);
+ found = nvdimm_bus;
+ } else if (nvdimm_bus->id == id) {
+ found = nvdimm_bus;
+ }
- nvdimm = to_nvdimm(dev);
- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
- put_device(dev);
- break;
+ if (found) {
+ atomic_inc(&nvdimm_bus->ioctl_active);
+ break;
+ }
}
mutex_unlock(&nvdimm_bus_list_mutex);
+ if (!found)
+ return -ENXIO;
+
+ nvdimm_bus = found;
+ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+ if (nvdimm)
+ put_device(&nvdimm->dev);
+ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+ wake_up(&nvdimm_bus->wait);
+
return rc;
}
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
@@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file)
static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nd_ioctl,
- .compat_ioctl = nd_ioctl,
+ .unlocked_ioctl = bus_ioctl,
+ .compat_ioctl = bus_ioctl,
.llseek = noop_llseek,
};
static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nvdimm_ioctl,
- .compat_ioctl = nvdimm_ioctl,
+ .unlocked_ioctl = dimm_ioctl,
+ .compat_ioctl = dimm_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 5e1f060547bf..9204f1e9fd14 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
*
* Enforce that uuids can only be changed while the device is disabled
* (driver detached)
- * LOCKING: expects device_lock() is held on entry
+ * LOCKING: expects nd_device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
size_t len)
@@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
return 0;
}
static int flush_regions_dimms(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
device_for_each_child(dev, NULL, flush_namespaces);
return 0;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index dfecd6e17043..29a065e769ea 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev,
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2d8d7e554877..a16e52251a30 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
@@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
@@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
@@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev,
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
@@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
@@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev,
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev,
char *mode;
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
@@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev,
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
* Flush any in-progress probes / removals in the driver
* for the raw personality of this namespace.
*/
- device_lock(&ndns->dev);
- device_unlock(&ndns->dev);
+ nd_device_lock(&ndns->dev);
+ nd_device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 391e88de3a29..0ac52b6eb00e 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
+#include "nd.h"
extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
@@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq;
struct nvdimm_bus {
struct nvdimm_bus_descriptor *nd_desc;
- wait_queue_head_t probe_wait;
+ wait_queue_head_t wait;
struct list_head list;
struct device dev;
int id, probe_active;
+ atomic_t ioctl_active;
struct list_head mapping_list;
struct mutex reconfig_mutex;
struct badrange badrange;
@@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_namespace_common **_ndns, const char *buf,
size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
+bool is_nvdimm_bus(struct device *dev);
+
+#ifdef CONFIG_PROVE_LOCKING
+extern struct class *nd_class;
+
+enum {
+ LOCK_BUS,
+ LOCK_NDCTL,
+ LOCK_REGION,
+ LOCK_DIMM = LOCK_REGION,
+ LOCK_NAMESPACE,
+ LOCK_CLAIM,
+};
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+ if (is_nd_region(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
+ else if (is_nvdimm(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
+ else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
+ else if (dev->parent && (is_nd_region(dev->parent)))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
+ else if (is_nvdimm_bus(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
+ else if (dev->class && dev->class == nd_class)
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
+ else
+ dev_WARN(dev, "unknown lock level\n");
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->lockdep_mutex);
+}
+
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+ debug_nvdimm_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ debug_nvdimm_unlock(dev);
+ device_unlock(dev);
+}
+#else
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ device_unlock(dev);
+}
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+}
+#endif
#endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index df2bdbd22450..cb98b8fe786e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
@@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments());
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev,
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
resource_size_t start, size;
struct nd_region *nd_region;
unsigned long npfns, align;
+ u32 end_trunc;
struct nd_pfn_sb *pfn_sb;
phys_addr_t offset;
const char *sig;
@@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+ end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
@@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- npfns = PHYS_PFN(size - offset);
+ npfns = PHYS_PFN(size - offset - end_trunc);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
pfn_sb->version_minor = cpu_to_le16(3);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2bf3acd69613..4c121dd03dd9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev)
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
- * Note, this assumes device_lock() context to not race
- * nd_pmem_notify()
+ * Note, this assumes nd_device_lock() context to not
+ * race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index ef46cc3a71ae..37bf8719a2a4 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
if (rc)
return rc;
- rc = nd_region_register_namespaces(nd_region, &err);
- if (rc < 0)
- return rc;
-
- ndrd = dev_get_drvdata(dev);
- ndrd->ns_active = rc;
- ndrd->ns_count = rc + err;
-
- if (rc && err && rc == err)
- return -ENODEV;
-
if (is_nd_pmem(&nd_region->dev)) {
struct resource ndr_res;
@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
}
+ rc = nd_region_register_namespaces(nd_region, &err);
+ if (rc < 0)
+ return rc;
+
+ ndrd = dev_get_drvdata(dev);
+ ndrd->ns_active = rc;
+ ndrd->ns_count = rc + err;
+
+ if (rc && err && rc == err)
+ return -ENODEV;
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
@@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev)
nvdimm_bus_unlock(dev);
/*
- * Note, this assumes device_lock() context to not race
+ * Note, this assumes nd_device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 56f2227f192a..af30cbe7a8ea 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
@@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev,
}
}
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
if (rc)
return rc;
@@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index ec43ac9199e2..2b36f052bfb9 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -64,6 +64,7 @@ config NVME_TCP
depends on INET
depends on BLK_DEV_NVME
select NVME_FABRICS
+ select CRYPTO_CRC32C
help
This provides support for the NVMe over Fabrics protocol using
the TCP transport. This allows you to use remote block devices
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc09b81fc7f4..1ede1763a5ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -22,12 +22,12 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
#include "nvme.h"
#include "fabrics.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#define NVME_MINORS (1U << MINORBITS)
unsigned int admin_timeout = 60;
@@ -81,7 +81,6 @@ EXPORT_SYMBOL_GPL(nvme_reset_wq);
struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
-static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -197,9 +196,9 @@ static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
-static blk_status_t nvme_error_status(struct request *req)
+static blk_status_t nvme_error_status(u16 status)
{
- switch (nvme_req(req)->status & 0x7ff) {
+ switch (status & 0x7ff) {
case NVME_SC_SUCCESS:
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
@@ -226,6 +225,8 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_PROTECTION;
case NVME_SC_RESERVATION_CONFLICT:
return BLK_STS_NEXUS;
+ case NVME_SC_HOST_PATH_ERROR:
+ return BLK_STS_TRANSPORT;
default:
return BLK_STS_IOERR;
}
@@ -260,7 +261,7 @@ static void nvme_retry_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
- blk_status_t status = nvme_error_status(req);
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
trace_nvme_complete_rq(req);
@@ -279,6 +280,8 @@ void nvme_complete_rq(struct request *req)
return;
}
}
+
+ nvme_trace_bio_complete(req, status);
blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -288,8 +291,12 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
- nvme_req(req)->status = NVME_SC_ABORT_REQ;
- blk_mq_complete_request_sync(req);
+ /* don't abort one completed request */
+ if (blk_mq_request_completed(req))
+ return true;
+
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_complete_request(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -1088,10 +1095,9 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
NVME_IDENTIFY_DATA_SIZE);
}
-static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
- unsigned nsid)
+static int nvme_identify_ns(struct nvme_ctrl *ctrl,
+ unsigned nsid, struct nvme_id_ns **id)
{
- struct nvme_id_ns *id;
struct nvme_command c = { };
int error;
@@ -1100,18 +1106,17 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
c.identify.nsid = cpu_to_le32(nsid);
c.identify.cns = NVME_ID_CNS_NS;
- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (!id)
- return NULL;
+ *id = kmalloc(sizeof(**id), GFP_KERNEL);
+ if (!*id)
+ return -ENOMEM;
- error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
- kfree(id);
- return NULL;
+ kfree(*id);
}
- return id;
+ return error;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
@@ -1180,7 +1185,8 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
#define NVME_AEN_SUPPORTED \
- (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)
+ (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
+ NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
@@ -1195,6 +1201,8 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
if (status)
dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
supported_aens);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1286,6 +1294,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1316,6 +1327,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_update_formats(ctrl);
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1589,9 +1602,11 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
}
-static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
+static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
+ int ret = 0;
+
memset(ids, 0, sizeof(*ids));
if (ctrl->vs >= NVME_VS(1, 1, 0))
@@ -1602,10 +1617,12 @@ static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
/* Don't treat the error as fatal; we potentially
* already have an NGUID or EUI-64
*/
- if (nvme_identify_ns_descs(ctrl, nsid, ids))
+ ret = nvme_identify_ns_descs(ctrl, nsid, ids);
+ if (ret)
dev_warn(ctrl->device,
- "%s: Identify Descriptors failed\n", __func__);
+ "Identify Descriptors failed (%d)\n", ret);
}
+ return ret;
}
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
@@ -1715,6 +1732,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ revalidate_disk(ns->head->disk);
}
#endif
}
@@ -1732,25 +1750,37 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
- id = nvme_identify_ns(ctrl, ns->head->ns_id);
- if (!id)
- return -ENODEV;
+ ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
+ if (ret)
+ goto out;
if (id->ncap == 0) {
ret = -ENODEV;
- goto out;
+ goto free_id;
}
__nvme_revalidate_disk(disk, id);
- nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
+ ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
+ if (ret)
+ goto free_id;
+
if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
dev_err(ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
ret = -ENODEV;
}
-out:
+free_id:
kfree(id);
+out:
+ /*
+ * Only fail the function if we got a fatal error back from the
+ * device, otherwise ignore the error and just move on.
+ */
+ if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
+ ret = 0;
+ else if (ret > 0)
+ ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
@@ -1946,7 +1976,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
* bits', but doing so may cause the device to complete commands to the
* admin queue ... and we don't know what memory that might be pointing at!
*/
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
int ret;
@@ -1960,20 +1990,27 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
- return nvme_wait_ready(ctrl, cap, false);
+ return nvme_wait_ready(ctrl, ctrl->cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
-int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
/*
* Default to a 4K page size, with the intention to update this
* path in the future to accommodate architectures with differing
* kernel and IO page sizes.
*/
- unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
+ unsigned dev_page_min, page_shift = 12;
int ret;
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
+ return ret;
+ }
+ dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
if (page_shift < dev_page_min) {
dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
@@ -1992,7 +2029,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
- return nvme_wait_ready(ctrl, cap, true);
+ return nvme_wait_ready(ctrl, ctrl->cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
@@ -2251,6 +2288,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x1179,
.mn = "THNSF5256GPUK TOSHIBA",
.quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * This LiteON CL1-3D*-Q11 firmware version has a race
+ * condition associated with actions related to suspend to idle.
+ * LiteON has resolved the problem in future firmware.
+ */
+ .vid = 0x14a4,
+ .fr = "22301111",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2311,15 +2358,14 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
-static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
-{
- ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
- kfree(subsys);
-}
-
static void nvme_release_subsystem(struct device *dev)
{
- __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ if (subsys->instance >= 0)
+ ida_simple_remove(&nvme_instance_ida, subsys->instance);
+ kfree(subsys);
}
static void nvme_destroy_subsystem(struct kref *ref)
@@ -2347,6 +2393,17 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
lockdep_assert_held(&nvme_subsystems_lock);
+ /*
+ * Fail matches for discovery subsystems. This results
+ * in each discovery controller being bound to a unique subsystem.
+ * This avoids issues with validating controller values
+ * that can only be true when there is a single unique subsystem.
+ * There may be multiple and completely independent entities
+ * that provide discovery controllers.
+ */
+ if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
+ return NULL;
+
list_for_each_entry(subsys, &nvme_subsystems, entry) {
if (strcmp(subsys->subnqn, subsysnqn))
continue;
@@ -2447,12 +2504,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
return -ENOMEM;
- ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- kfree(subsys);
- return ret;
- }
- subsys->instance = ret;
+
+ subsys->instance = -1;
mutex_init(&subsys->lock);
kref_init(&subsys->ref);
INIT_LIST_HEAD(&subsys->ctrls);
@@ -2471,13 +2524,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
subsys->dev.class = nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
subsys->dev.groups = nvme_subsys_attrs_groups;
- dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
+ dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
device_initialize(&subsys->dev);
mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) {
- __nvme_release_subsystem(subsys);
+ put_device(&subsys->dev);
subsys = found;
if (!nvme_validate_cntlid(subsys, ctrl, id)) {
@@ -2489,6 +2542,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
if (ret) {
dev_err(ctrl->device,
"failed to register subsystem device.\n");
+ put_device(&subsys->dev);
goto out_unlock;
}
ida_init(&subsys->ns_ida);
@@ -2502,6 +2556,8 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
goto out_put_subsystem;
}
+ if (!found)
+ subsys->instance = ctrl->instance;
ctrl->subsys = subsys;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
mutex_unlock(&nvme_subsystems_lock);
@@ -2511,7 +2567,6 @@ out_put_subsystem:
nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
- put_device(&subsys->dev);
return ret;
}
@@ -2560,7 +2615,6 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
struct nvme_id_ctrl *id;
- u64 cap;
int ret, page_shift;
u32 max_hw_sectors;
bool prev_apst_enabled;
@@ -2570,16 +2624,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
return ret;
}
-
- ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
- if (ret) {
- dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
- return ret;
- }
- page_shift = NVME_CAP_MPSMIN(cap) + 12;
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+ ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
if (ctrl->vs >= NVME_VS(1, 1, 0))
- ctrl->subsystem = NVME_CAP_NSSRC(cap);
+ ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
ret = nvme_identify_ctrl(ctrl, &id);
if (ret) {
@@ -2593,6 +2642,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
+ if (!(ctrl->ops->flags & NVME_F_FABRICS))
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+
if (!ctrl->identified) {
int i;
@@ -2693,7 +2745,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
} else {
- ctrl->cntlid = le16_to_cpu(id->cntlid);
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
ctrl->hmminds = le32_to_cpu(id->hmminds);
@@ -3168,7 +3219,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ns_id = nsid;
kref_init(&head->ref);
- nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
+ ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
+ if (ret)
+ goto out_cleanup_srcu;
ret = __nvme_check_ids(ctrl->subsys, head);
if (ret) {
@@ -3193,6 +3246,8 @@ out_ida_remove:
out_free_head:
kfree(head);
out:
+ if (ret > 0)
+ ret = blk_status_to_errno(nvme_error_status(ret));
return ERR_PTR(ret);
}
@@ -3216,7 +3271,10 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
} else {
struct nvme_ns_ids ids;
- nvme_report_ns_ids(ctrl, nsid, id, &ids);
+ ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
+ if (ret)
+ goto out_unlock;
+
if (!nvme_ns_ids_equal(&head->ids, &ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
@@ -3231,6 +3289,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
out_unlock:
mutex_unlock(&ctrl->subsys->lock);
+ if (ret > 0)
+ ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
@@ -3322,11 +3382,9 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- id = nvme_identify_ns(ctrl, nsid);
- if (!id) {
- ret = -EIO;
+ ret = nvme_identify_ns(ctrl, nsid, &id);
+ if (ret)
goto out_free_queue;
- }
if (id->ncap == 0) {
ret = -EINVAL;
@@ -3388,6 +3446,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_cleanup_queue(ns->queue);
out_free_ns:
kfree(ns);
+ if (ret > 0)
+ ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
@@ -3573,6 +3633,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
struct nvme_ns *ns, *next;
LIST_HEAD(ns_list);
+ /*
+ * make sure to requeue I/O to all namespaces as these
+ * might result from the scan itself and must complete
+ * for the scan_work to make progress
+ */
+ nvme_mpath_clear_ctrl_paths(ctrl);
+
/* prevent racing with ns scanning */
flush_work(&ctrl->scan_work);
@@ -3594,6 +3661,33 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
+static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ret;
+
+ ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
+ if (ret)
+ return ret;
+
+ if (opts) {
+ ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_TRSVCID=%s",
+ opts->trsvcid ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
+ opts->host_traddr ?: "none");
+ }
+ return ret;
+}
+
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
char *envp[2] = { NULL, NULL };
@@ -3700,6 +3794,9 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
queue_work(nvme_wq, &ctrl->ana_work);
break;
#endif
+ case NVME_AER_NOTICE_DISC_CHANGED:
+ ctrl->aen_result = result;
+ break;
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
@@ -3746,10 +3843,10 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->kato)
nvme_start_keep_alive(ctrl);
+ nvme_enable_aen(ctrl);
+
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
- nvme_enable_aen(ctrl);
- queue_work(nvme_wq, &ctrl->async_event_work);
nvme_start_queues(ctrl);
}
}
@@ -3769,7 +3866,9 @@ static void nvme_free_ctrl(struct device *dev)
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ if (subsys && ctrl->instance != subsys->instance)
+ ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+
kfree(ctrl->effects);
nvme_mpath_uninit(ctrl);
__free_page(ctrl->discard_page);
@@ -3969,6 +4068,9 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
up_read(&ctrl->namespaces_rwsem);
+
+ if (ctrl->admin_q)
+ blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
@@ -4027,6 +4129,7 @@ static int __init nvme_core_init(void)
result = PTR_ERR(nvme_class);
goto unregister_chrdev;
}
+ nvme_class->dev_uevent = nvme_class_uevent;
nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
if (IS_ERR(nvme_subsys_class)) {
@@ -4051,7 +4154,6 @@ out:
static void __exit nvme_core_exit(void)
{
- ida_destroy(&nvme_subsystems_ida);
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1994d5b42f94..74b8818ac9a1 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -150,7 +150,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0, false);
if (ret >= 0)
@@ -197,7 +197,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
NVME_QID_ANY, 0, 0, false);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
NVME_QID_ANY, 0, 0, false);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -381,8 +381,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
* Set keep-alive timeout in seconds granularity (ms * 1000)
* and add a grace period for controller kato enforcement
*/
- cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
- cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+ cmd.connect.kato = ctrl->kato ?
+ cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
@@ -396,7 +396,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
- ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
if (ret) {
@@ -611,6 +611,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_DATA_DIGEST, "data_digest" },
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
+ { NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_ERR, NULL }
};
@@ -632,6 +633,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->duplicate_connect = false;
opts->hdr_digest = false;
opts->data_digest = false;
+ opts->tos = -1; /* < 0 == use transport default */
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -738,13 +740,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
}
opts->kato = token;
-
- if (opts->discovery_nqn && opts->kato) {
- pr_err("Discovery controllers cannot accept KATO != 0\n");
- ret = -EINVAL;
- goto out;
- }
-
break;
case NVMF_OPT_CTRL_LOSS_TMO:
if (match_int(args, &token)) {
@@ -856,6 +851,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->nr_poll_queues = token;
break;
+ case NVMF_OPT_TOS:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < 0) {
+ pr_err("Invalid type of service %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token > 255) {
+ pr_warn("Clamping type of service to 255\n");
+ token = 255;
+ }
+ opts->tos = token;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -865,7 +876,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
if (opts->discovery_nqn) {
- opts->kato = 0;
opts->nr_io_queues = 0;
opts->nr_write_queues = 0;
opts->nr_poll_queues = 0;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 3044d8b99a24..93f08d77c896 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -55,6 +55,7 @@ enum {
NVMF_OPT_DATA_DIGEST = 1 << 16,
NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
+ NVMF_OPT_TOS = 1 << 19,
};
/**
@@ -87,6 +88,7 @@ enum {
* @data_digest: generate/verify data digest (TCP)
* @nr_write_queues: number of queues for write I/O
* @nr_poll_queues: number of queues for polling I/O
+ * @tos: type of service
*/
struct nvmf_ctrl_options {
unsigned mask;
@@ -108,6 +110,7 @@ struct nvmf_ctrl_options {
bool data_digest;
unsigned int nr_write_queues;
unsigned int nr_poll_queues;
+ int tos;
};
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 232d8094091b..265f89e11d8b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1608,9 +1608,13 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
if (opstate == FCPOP_STATE_ABORTED)
- status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
- else if (freq->status)
- status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ else if (freq->status) {
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to lldd error %d\n",
+ ctrl->cnum, freq->status);
+ }
/*
* For the Linux implementation, if we have an unsuccessful
@@ -1637,8 +1641,13 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
* no payload in the CQE by the transport.
*/
if (freq->transferred_length !=
- be32_to_cpu(op->cmd_iu.data_len)) {
- status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+ be32_to_cpu(op->cmd_iu.data_len)) {
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to bad transfer "
+ "length: %d vs expected %d\n",
+ ctrl->cnum, freq->transferred_length,
+ be32_to_cpu(op->cmd_iu.data_len));
goto done;
}
result.u64 = 0;
@@ -1655,7 +1664,17 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
freq->transferred_length ||
op->rsp_iu.status_code ||
sqe->common.command_id != cqe->command_id)) {
- status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
+ "iu len %d, xfr len %d vs %d, status code "
+ "%d, cmdid %d vs %d\n",
+ ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
+ be32_to_cpu(op->rsp_iu.xfrd_len),
+ freq->transferred_length,
+ op->rsp_iu.status_code,
+ sqe->common.command_id,
+ cqe->command_id);
goto done;
}
result = cqe->result;
@@ -1663,7 +1682,11 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
break;
default:
- status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+ status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
+ "len %d\n",
+ ctrl->cnum, freq->rcv_rsplen);
goto done;
}
@@ -2006,6 +2029,7 @@ nvme_fc_ctrl_free(struct kref *ref)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
kfree(ctrl->queues);
@@ -2107,7 +2131,6 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
struct nvme_fc_fcp_op *op)
{
struct nvmefc_fcp_req *freq = &op->fcp_req;
- enum dma_data_direction dir;
int ret;
freq->sg_cnt = 0;
@@ -2124,9 +2147,8 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
- dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
- op->nents, dir);
+ op->nents, rq_dma_dir(rq));
if (unlikely(freq->sg_cnt <= 0)) {
sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
freq->sg_cnt = 0;
@@ -2149,8 +2171,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
return;
fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
- ((rq_data_dir(rq) == WRITE) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ rq_dma_dir(rq));
nvme_cleanup_cmd(rq);
@@ -2633,8 +2654,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queue;
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
-
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
if (ret)
goto out_disconnect_admin_queue;
@@ -2648,23 +2667,15 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
* prior connection values
*/
- ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
- if (ret) {
- dev_err(ctrl->ctrl.device,
- "prop_get NVME_REG_CAP failed\n");
- goto out_disconnect_admin_queue;
- }
-
- ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
- ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+ ret = nvme_enable_ctrl(&ctrl->ctrl);
if (ret)
goto out_disconnect_admin_queue;
ctrl->ctrl.max_hw_sectors =
(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
ret = nvme_init_identify(&ctrl->ctrl);
if (ret)
goto out_disconnect_admin_queue;
@@ -2774,6 +2785,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_stop_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
}
/*
@@ -2796,6 +2808,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
/* kill the aens as they are a separate path */
nvme_fc_abort_aen_ops(ctrl);
@@ -3109,10 +3122,16 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_free_queues;
ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
+ ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+ ret = PTR_ERR(ctrl->ctrl.fabrics_q);
+ goto out_free_admin_tag_set;
+ }
+
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
ret = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_free_admin_tag_set;
+ goto out_cleanup_fabrics_q;
}
/*
@@ -3184,6 +3203,8 @@ fail_ctrl:
out_cleanup_admin_q:
blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba009d4c9dfa..ec46693f6b64 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -667,11 +667,14 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
return rq;
}
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
+ void *buf)
{
+ struct nvm_geo *geo = &dev->geo;
struct request_queue *q = dev->q;
struct nvme_nvm_command *cmd;
struct request *rq;
+ int ret;
cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
if (!cmd)
@@ -679,8 +682,15 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
rq = nvme_nvm_alloc_request(q, rqd, cmd);
if (IS_ERR(rq)) {
- kfree(cmd);
- return PTR_ERR(rq);
+ ret = PTR_ERR(rq);
+ goto err_free_cmd;
+ }
+
+ if (buf) {
+ ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
+ GFP_KERNEL);
+ if (ret)
+ goto err_free_cmd;
}
rq->end_io_data = rqd;
@@ -688,33 +698,9 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
return 0;
-}
-
-static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct nvme_nvm_command cmd;
- int ret = 0;
-
- memset(&cmd, 0, sizeof(struct nvme_nvm_command));
-
- rq = nvme_nvm_alloc_request(q, rqd, &cmd);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- /* I/Os can fail and the error is signaled through rqd. Callers must
- * handle the error accordingly.
- */
- blk_execute_rq(q, NULL, rq, 0);
- if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
-
- rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
- rqd->error = nvme_req(rq)->status;
-
- blk_mq_free_request(rq);
+err_free_cmd:
+ kfree(cmd);
return ret;
}
@@ -754,7 +740,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.get_chk_meta = nvme_nvm_get_chk_meta,
.submit_io = nvme_nvm_submit_io,
- .submit_io_sync = nvme_nvm_submit_io_sync,
.create_dma_pool = nvme_nvm_create_dma_pool,
.destroy_dma_pool = nvme_nvm_destroy_dma_pool,
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a9a927677970..30de7efef003 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,9 +12,34 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
- return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+ list_for_each_entry(h, &subsys->nsheads, entry)
+ if (h->disk)
+ blk_freeze_queue_start(h->disk->queue);
}
/*
@@ -109,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
[NVME_ANA_CHANGE] = "change",
};
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ bool changed = false;
int node;
if (!head)
- return;
+ goto out;
for_each_node(node) {
- if (ns == rcu_access_pointer(head->current_path[node]))
+ if (ns == rcu_access_pointer(head->current_path[node])) {
rcu_assign_pointer(head->current_path[node], NULL);
+ changed = true;
+ }
}
+out:
+ return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->scan_lock);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (nvme_mpath_clear_current_path(ns))
+ kblockd_schedule_work(&ns->head->requeue_work);
+ mutex_unlock(&ctrl->scan_lock);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -231,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
return ns;
}
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ switch (ns->ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ /* fallthru */
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
struct bio *bio)
{
@@ -257,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
ret = direct_make_request(bio);
- } else if (!list_empty_careful(&head->list)) {
- dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+ } else if (nvme_available_path(head)) {
+ dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
spin_lock_irq(&head->requeue_lock);
bio_list_add(&head->requeue_list, bio);
spin_unlock_irq(&head->requeue_lock);
} else {
- dev_warn_ratelimited(dev, "no path - failing I/O\n");
+ dev_warn_ratelimited(dev, "no available path - failing I/O\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
@@ -369,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ synchronize_srcu(&ns->head->srcu);
kblockd_schedule_work(&ns->head->requeue_work);
}
@@ -449,14 +509,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
down_write(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
+ unsigned nsid = le32_to_cpu(desc->nsids[n]);
+
+ if (ns->head->ns_id < nsid)
continue;
- nvme_update_ns_ana_state(desc, ns);
+ if (ns->head->ns_id == nsid)
+ nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
}
up_write(&ctrl->namespaces_rwsem);
- WARN_ON_ONCE(n < nr_nsids);
return 0;
}
@@ -622,7 +684,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
int error;
- if (!nvme_ctrl_use_ana(ctrl))
+ /* check if multipath is enabled and we have the capability */
+ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
return 0;
ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 716a876119c8..b5013c101b35 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,8 @@
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
+#include <trace/events/block.h>
+
extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
@@ -92,6 +94,26 @@ enum nvme_quirks {
* Broken Write Zeroes.
*/
NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
+
+ /*
+ * Force simple suspend/resume path.
+ */
+ NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
+
+ /*
+ * Use only one interrupt vector for all queues
+ */
+ NVME_QUIRK_SINGLE_VECTOR = (1 << 11),
+
+ /*
+ * Use non-standard 128 bytes SQEs.
+ * Use non-standard 128-byte SQEs.
+ NVME_QUIRK_128_BYTES_SQES = (1 << 12),
+
+ /*
+ * Prevent tag overlap between queues
+ */
+ NVME_QUIRK_SHARED_TAGS = (1 << 13),
};
/*
@@ -164,6 +186,7 @@ struct nvme_ctrl {
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct request_queue *connect_q;
+ struct request_queue *fabrics_q;
struct device *dev;
int instance;
int numa_node;
@@ -426,8 +449,8 @@ void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
-int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
@@ -485,7 +508,14 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return ctrl->ana_log_buf != NULL;
+}
+
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
@@ -496,7 +526,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -507,6 +538,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
+static inline void nvme_trace_bio_complete(struct request *req,
+ blk_status_t status)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ trace_block_bio_complete(ns->head->disk->queue,
+ req->bio, status);
+}
+
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
@@ -544,12 +585,20 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
+static inline void nvme_trace_bio_complete(struct request *req,
+ blk_status_t status)
+{
+}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
@@ -564,6 +613,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb970ca82517..6b4d7b064b38 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
#include "trace.h"
#include "nvme.h"
-#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
+#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
@@ -100,6 +100,7 @@ struct nvme_dev {
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
int q_depth;
+ int io_sqes;
u32 db_stride;
void __iomem *bar;
unsigned long bar_mapped_size;
@@ -162,7 +163,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
struct nvme_queue {
struct nvme_dev *dev;
spinlock_t sq_lock;
- struct nvme_command *sq_cmds;
+ void *sq_cmds;
/* only used for poll queues: */
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
@@ -178,6 +179,7 @@ struct nvme_queue {
u16 last_cq_head;
u16 qid;
u8 cq_phase;
+ u8 sqes;
unsigned long flags;
#define NVMEQ_ENABLED 0
#define NVMEQ_SQ_CMB 1
@@ -488,7 +490,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
bool write_sq)
{
spin_lock(&nvmeq->sq_lock);
- memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+ memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
+ cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
nvme_write_sq_db(nvmeq, write_sq);
@@ -534,14 +537,13 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
int i;
if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len, dma_dir);
+ dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
+ rq_dma_dir(req));
return;
}
@@ -1344,16 +1346,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
- dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
if (!nvmeq->sq_cmds)
return;
if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
- nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+ nvmeq->sq_cmds, SQ_SIZE(nvmeq));
} else {
- dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}
}
@@ -1403,7 +1405,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
else
- nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
+ nvme_disable_ctrl(&dev->ctrl);
nvme_poll_irqdisable(nvmeq, -1);
}
@@ -1433,12 +1435,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
}
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- int qid, int depth)
+ int qid)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
if (nvmeq->sq_cmds) {
nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
nvmeq->sq_cmds);
@@ -1447,11 +1449,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
- pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+ pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
}
}
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
&nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
return -ENOMEM;
@@ -1465,12 +1467,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (dev->ctrl.queue_count > qid)
return 0;
- nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+ nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
+ nvmeq->q_depth = depth;
+ nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
&nvmeq->cq_dma_addr, GFP_KERNEL);
if (!nvmeq->cqes)
goto free_nvmeq;
- if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+ if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
goto free_cqdma;
nvmeq->dev = dev;
@@ -1479,15 +1483,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
- nvmeq->q_depth = depth;
nvmeq->qid = qid;
dev->ctrl.queue_count++;
return 0;
free_cqdma:
- dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
- nvmeq->cq_dma_addr);
+ dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
free_nvmeq:
return -ENOMEM;
}
@@ -1515,7 +1518,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
- memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
nvme_dbbuf_init(dev, nvmeq, qid);
dev->online_queues++;
wmb(); /* ensure the first interrupt sees the initialization */
@@ -1552,7 +1555,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
nvme_init_queue(nvmeq, qid);
if (!polled) {
- nvmeq->cq_vector = vector;
result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1679,7 +1681,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
- result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
+ result = nvme_disable_ctrl(&dev->ctrl);
if (result < 0)
return result;
@@ -1695,7 +1697,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
- result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
+ result = nvme_enable_ctrl(&dev->ctrl);
if (result)
return result;
@@ -2077,6 +2079,13 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
+ /*
+ * Some Apple controllers require all queues to use the
+ * first vector.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
+ irq_queues = 1;
+
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
@@ -2095,6 +2104,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
unsigned long size;
nr_io_queues = max_io_queues();
+
+ /*
+ * If tags are shared with admin queue (Apple bug), then
+ * make sure we only use one IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ nr_io_queues = 1;
+
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -2254,9 +2271,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 1; /* default */
- if (dev->io_queues[HCTX_TYPE_READ])
- dev->tagset.nr_maps++;
+ dev->tagset.nr_maps = 2; /* default + read */
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2267,6 +2282,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
+ /*
+ * Some Apple controllers require tags to be unique
+ * across admin and IO queues, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+
ret = blk_mq_alloc_tag_set(&dev->tagset);
if (ret) {
dev_warn(dev->ctrl.device,
@@ -2316,10 +2339,21 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
+ dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
dev->dbs = dev->bar + 4096;
/*
+ * Some Apple controllers require a non-standard SQE size.
+ * Interestingly they also seem to ignore the CC:IOSQES register
+ * so we don't bother updating it here.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
+ dev->io_sqes = 7;
+ else
+ dev->io_sqes = NVME_NVM_IOSQES;
+
+ /*
* Temporary fix for the Apple controller found in the MacBook8,1 and
* some MacBook7,1 to avoid controller resets and data loss.
*/
@@ -2336,6 +2370,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
+ /*
+ * Controllers with the shared tags quirk need the IO queue to be
+ * big enough so that we get 32 tags for the admin queue
+ */
+ if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
+ (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
+ dev->q_depth = NVME_AQ_DEPTH + 2;
+ dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
+ dev->q_depth);
+ }
+
+
nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
@@ -2403,6 +2449,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
+ blk_mq_tagset_wait_completed_request(&dev->tagset);
+ blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2697,7 +2745,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
{
struct nvme_dev *dev = data;
- nvme_reset_ctrl_sync(&dev->ctrl);
+ flush_work(&dev->ctrl.reset_work);
flush_work(&dev->ctrl.scan_work);
nvme_put_ctrl(&dev->ctrl);
}
@@ -2763,6 +2811,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+ nvme_reset_ctrl(&dev->ctrl);
nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
@@ -2848,7 +2897,7 @@ static int nvme_resume(struct device *dev)
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
struct nvme_ctrl *ctrl = &ndev->ctrl;
- if (pm_resume_via_firmware() || !ctrl->npss ||
+ if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
nvme_reset_ctrl(ctrl);
return 0;
@@ -2861,6 +2910,8 @@ static int nvme_suspend(struct device *dev)
struct nvme_ctrl *ctrl = &ndev->ctrl;
int ret = -EBUSY;
+ ndev->last_ps = U32_MAX;
+
/*
* The platform does not remove power for a kernel managed suspend so
* use host managed nvme power settings for lowest idle power if
@@ -2868,8 +2919,15 @@ static int nvme_suspend(struct device *dev)
* shutdown. But if the firmware is involved after the suspend or the
* device does not support any non-default power states, shut down the
* device fully.
+ *
+ * If ASPM is not enabled for the device, shut down the device and allow
+ * the PCI bus layer to put it into D3 in order to take the PCIe link
+ * down, so as to allow the platform to achieve its minimum low-power
+ * state (which may not be possible if the link is up).
*/
- if (pm_suspend_via_firmware() || !ctrl->npss) {
+ if (pm_suspend_via_firmware() || !ctrl->npss ||
+ !pcie_aspm_enabled(pdev) ||
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
nvme_dev_disable(ndev, true);
return 0;
}
@@ -2882,7 +2940,6 @@ static int nvme_suspend(struct device *dev)
ctrl->state != NVME_CTRL_ADMIN_ONLY)
goto unfreeze;
- ndev->last_ps = 0;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
@@ -3029,9 +3086,15 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
.driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR |
+ NVME_QUIRK_128_BYTES_SQES |
+ NVME_QUIRK_SHARED_TAGS },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
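Illustrative sketch (not part of the patch): how the reworked SQ_SIZE()/sqes fields address a flat submission ring when the SQE size is a power of two, as required by the 128-byte-SQE Apple quirk. All names and values below are invented for the example.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_queue {
	void    *sq_cmds;   /* flat buffer holding q_depth entries */
	uint16_t q_depth;   /* number of entries in the ring */
	uint8_t  sqes;      /* log2 of the entry size: 6 -> 64 B, 7 -> 128 B */
};

#define SQ_SIZE(q) ((size_t)(q)->q_depth << (q)->sqes)

/* Copy a 64-byte command into slot 'tail', regardless of the slot stride. */
static void submit(struct fake_queue *q, uint16_t tail, const void *cmd64)
{
	memcpy((char *)q->sq_cmds + ((size_t)tail << q->sqes), cmd64, 64);
}

int main(void)
{
	struct fake_queue q = { .q_depth = 32, .sqes = 7 }; /* 128-byte SQEs */
	unsigned char cmd[64] = { 0x01 };

	q.sq_cmds = calloc(1, SQ_SIZE(&q));
	submit(&q, 3, cmd);
	printf("ring size %zu bytes, slot 3 at offset %zu\n",
	       SQ_SIZE(&q), (size_t)3 << q.sqes);
	free(q.sq_cmds);
	return 0;
}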
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a249db528d54..dfa07bb9dfeb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
return ret;
}
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
return;
-
- rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->qp);
+ __nvme_rdma_stop_queue(queue);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
- if (!ret)
+ if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
- else
+ } else {
+ __nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
+ }
return ret;
}
@@ -751,6 +757,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
@@ -792,10 +799,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_async_qe;
}
+ ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+ error = PTR_ERR(ctrl->ctrl.fabrics_q);
+ goto out_free_tagset;
+ }
+
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_free_tagset;
+ goto out_cleanup_fabrics_q;
}
}
@@ -803,24 +816,15 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_cleanup_queue;
- error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
- &ctrl->ctrl.cap);
- if (error) {
- dev_err(ctrl->ctrl.device,
- "prop_get NVME_REG_CAP failed\n");
- goto out_stop_queue;
- }
-
- ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
- error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+ error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
goto out_stop_queue;
ctrl->ctrl.max_hw_sectors =
(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
error = nvme_init_identify(&ctrl->ctrl);
if (error)
goto out_stop_queue;
@@ -832,6 +836,9 @@ out_stop_queue:
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+ if (new)
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -901,10 +908,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- if (ctrl->ctrl.admin_tagset)
+ if (ctrl->ctrl.admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
nvme_cancel_request, &ctrl->ctrl);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
+ }
+ if (remove)
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
}
@@ -914,9 +924,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
- if (ctrl->ctrl.tagset)
+ if (ctrl->ctrl.tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
+ }
if (remove)
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1053,6 +1065,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we're in DELETING state */
@@ -1139,9 +1152,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
- req->nents, rq_data_dir(rq) ==
- WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
@@ -1267,7 +1278,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
- rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ rq_dma_dir(rq));
if (unlikely(count <= 0)) {
ret = -EIO;
goto out_free_table;
@@ -1296,9 +1307,7 @@ out:
return 0;
out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
- req->nents, rq_data_dir(rq) ==
- WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
out_free_table:
sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
return ret;
@@ -1541,16 +1550,18 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
+ struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
int ret;
ret = nvme_rdma_create_queue_ib(queue);
if (ret)
return ret;
+ if (ctrl->opts->tos >= 0)
+ rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
if (ret) {
- dev_err(queue->ctrl->ctrl.device,
- "rdma_resolve_route failed (%d).\n",
+ dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
goto out_destroy_queue;
}
@@ -1863,10 +1874,11 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&ctrl->reconnect_work);
nvme_rdma_teardown_io_queues(ctrl, shutdown);
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
- nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+ nvme_disable_ctrl(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}
@@ -2045,7 +2057,8 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
- NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES,
+ NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+ NVMF_OPT_TOS,
.create_ctrl = nvme_rdma_create_ctrl,
};
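Illustrative sketch (not part of the patch): the direction mapping that the rq_dma_dir() conversions above rely on, namely that writes map to DMA_TO_DEVICE and reads to DMA_FROM_DEVICE. The enums and helper below are stand-ins, not the kernel definitions.
#include <stdio.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };
enum req_dir { READ_REQ, WRITE_REQ };

/* Equivalent of the open-coded ternary the patch removes. */
static enum dma_data_direction req_dma_dir(enum req_dir d)
{
	return d == WRITE_REQ ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

int main(void)
{
	printf("write -> %s\n", req_dma_dir(WRITE_REQ) == DMA_TO_DEVICE ?
	       "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
	printf("read  -> %s\n", req_dma_dir(READ_REQ) == DMA_TO_DEVICE ?
	       "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
	return 0;
}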
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 606b13d35d16..4ffd5957637a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -13,6 +13,7 @@
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
+#include <net/busy_poll.h>
#include "nvme.h"
#include "fabrics.h"
@@ -72,6 +73,7 @@ struct nvme_tcp_queue {
int pdu_offset;
size_t data_remaining;
size_t ddgst_remaining;
+ unsigned int nr_cqe;
/* send state */
struct nvme_tcp_request *request;
@@ -438,6 +440,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
}
nvme_end_request(rq, cqe->status, cqe->result);
+ queue->nr_cqe++;
return 0;
}
@@ -608,23 +611,18 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
switch (hdr->type) {
case nvme_tcp_c2h_data:
- ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
- break;
+ return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
case nvme_tcp_rsp:
nvme_tcp_init_recv_ctx(queue);
- ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
- break;
+ return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
case nvme_tcp_r2t:
nvme_tcp_init_recv_ctx(queue);
- ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
- break;
+ return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
dev_err(queue->ctrl->ctrl.device,
"unsupported pdu type (%d)\n", hdr->type);
return -EINVAL;
}
-
- return ret;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -701,8 +699,10 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
- if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
+ if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ queue->nr_cqe++;
+ }
nvme_tcp_init_recv_ctx(queue);
}
}
@@ -742,6 +742,7 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
pdu->command_id);
nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ queue->nr_cqe++;
}
nvme_tcp_init_recv_ctx(queue);
@@ -841,7 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
@@ -1023,14 +1024,16 @@ done:
static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
- struct sock *sk = queue->sock->sk;
+ struct socket *sock = queue->sock;
+ struct sock *sk = sock->sk;
read_descriptor_t rd_desc;
int consumed;
rd_desc.arg.data = queue;
rd_desc.count = 1;
lock_sock(sk);
- consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
+ queue->nr_cqe = 0;
+ consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
release_sock(sk);
return consumed;
}
@@ -1255,7 +1258,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->queue_size = queue_size;
if (qid > 0)
- queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ queue->cmnd_capsule_len = nctrl->ioccsz * 16;
else
queue->cmnd_capsule_len = sizeof(struct nvme_command) +
NVME_TCP_ADMIN_CCSZ;
@@ -1263,7 +1266,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
IPPROTO_TCP, &queue->sock);
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
return ret;
}
@@ -1273,7 +1276,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
(char *)&opt, sizeof(opt));
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to set TCP_SYNCNT sock opt %d\n", ret);
goto err_sock;
}
@@ -1283,7 +1286,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
TCP_NODELAY, (char *)&opt, sizeof(opt));
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to set TCP_NODELAY sock opt %d\n", ret);
goto err_sock;
}
@@ -1296,11 +1299,23 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
(char *)&sol, sizeof(sol));
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to set SO_LINGER sock opt %d\n", ret);
goto err_sock;
}
+ /* Set socket type of service */
+ if (nctrl->opts->tos >= 0) {
+ opt = nctrl->opts->tos;
+ ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
+ (char *)&opt, sizeof(opt));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to set IP_TOS sock opt %d\n", ret);
+ goto err_sock;
+ }
+ }
+
queue->sock->sk->sk_allocation = GFP_ATOMIC;
if (!qid)
n = 0;
@@ -1314,11 +1329,11 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->pdu_offset = 0;
sk_set_memalloc(queue->sock->sk);
- if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
+ if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
sizeof(ctrl->src_addr));
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to bind queue %d socket %d\n",
qid, ret);
goto err_sock;
@@ -1330,7 +1345,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (queue->hdr_digest || queue->data_digest) {
ret = nvme_tcp_alloc_crypto(queue);
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to allocate queue %d crypto\n", qid);
goto err_sock;
}
@@ -1344,13 +1359,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
goto err_crypto;
}
- dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
+ dev_dbg(nctrl->device, "connecting queue %d\n",
nvme_tcp_queue_id(queue));
ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
sizeof(ctrl->addr), 0);
if (ret) {
- dev_err(ctrl->ctrl.device,
+ dev_err(nctrl->device,
"failed to connect socket: %d\n", ret);
goto err_rcv_pdu;
}
@@ -1371,6 +1386,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
queue->sock->sk->sk_state_change = nvme_tcp_state_change;
queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+ queue->sock->sk->sk_ll_usec = 1;
write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
@@ -1469,7 +1485,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = 2 /* default + read */;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
}
ret = blk_mq_alloc_tag_set(set);
@@ -1568,6 +1584,7 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
+ nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
return nr_io_queues;
}
@@ -1599,6 +1616,12 @@ static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
min(opts->nr_io_queues, nr_io_queues);
nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
}
+
+ if (opts->nr_poll_queues && nr_io_queues) {
+ /* map dedicated poll queues only if we have queues left */
+ ctrl->io_queues[HCTX_TYPE_POLL] =
+ min(opts->nr_poll_queues, nr_io_queues);
+ }
}
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
@@ -1680,6 +1703,7 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
nvme_tcp_stop_queue(ctrl, 0);
if (remove) {
blk_cleanup_queue(ctrl->admin_q);
+ blk_cleanup_queue(ctrl->fabrics_q);
blk_mq_free_tag_set(ctrl->admin_tagset);
}
nvme_tcp_free_admin_queue(ctrl);
@@ -1700,10 +1724,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
goto out_free_queue;
}
+ ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
+ if (IS_ERR(ctrl->fabrics_q)) {
+ error = PTR_ERR(ctrl->fabrics_q);
+ goto out_free_tagset;
+ }
+
ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
if (IS_ERR(ctrl->admin_q)) {
error = PTR_ERR(ctrl->admin_q);
- goto out_free_tagset;
+ goto out_cleanup_fabrics_q;
}
}
@@ -1711,19 +1741,12 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_cleanup_queue;
- error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
- if (error) {
- dev_err(ctrl->device,
- "prop_get NVME_REG_CAP failed\n");
- goto out_stop_queue;
- }
-
- ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
-
- error = nvme_enable_ctrl(ctrl, ctrl->cap);
+ error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+
error = nvme_init_identify(ctrl);
if (error)
goto out_stop_queue;
@@ -1735,6 +1758,9 @@ out_stop_queue:
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->admin_q);
+out_cleanup_fabrics_q:
+ if (new)
+ blk_cleanup_queue(ctrl->fabrics_q);
out_free_tagset:
if (new)
blk_mq_free_tag_set(ctrl->admin_tagset);
@@ -1748,10 +1774,13 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
{
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
- if (ctrl->admin_tagset)
+ if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
nvme_cancel_request, ctrl);
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+ if (remove)
+ blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1762,9 +1791,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
return;
nvme_stop_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
- if (ctrl->tagset)
+ if (ctrl->tagset) {
blk_mq_tagset_busy_iter(ctrl->tagset,
nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -1793,7 +1824,7 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
struct nvmf_ctrl_options *opts = ctrl->opts;
- int ret = -EINVAL;
+ int ret;
ret = nvme_tcp_configure_admin_queue(ctrl, new);
if (ret)
@@ -1876,6 +1907,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
+ blk_mq_unquiesce_queue(ctrl->admin_q);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we're in DELETING state */
@@ -1892,10 +1924,11 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
nvme_tcp_teardown_io_queues(ctrl, shutdown);
+ blk_mq_quiesce_queue(ctrl->admin_q);
if (shutdown)
nvme_shutdown_ctrl(ctrl);
else
- nvme_disable_ctrl(ctrl, ctrl->cap);
+ nvme_disable_ctrl(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}
@@ -2151,14 +2184,36 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+ if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
+ /* map dedicated poll queues only if we have queues left */
+ set->map[HCTX_TYPE_POLL].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_POLL];
+ set->map[HCTX_TYPE_POLL].queue_offset =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
+ blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+ }
+
dev_info(ctrl->ctrl.device,
- "mapped %d/%d default/read queues.\n",
+ "mapped %d/%d/%d default/read/poll queues.\n",
ctrl->io_queues[HCTX_TYPE_DEFAULT],
- ctrl->io_queues[HCTX_TYPE_READ]);
+ ctrl->io_queues[HCTX_TYPE_READ],
+ ctrl->io_queues[HCTX_TYPE_POLL]);
return 0;
}
+static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+ struct sock *sk = queue->sock->sk;
+
+ if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+ sk_busy_loop(sk, true);
+ nvme_tcp_try_recv(queue);
+ return queue->nr_cqe;
+}
+
static struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
@@ -2167,6 +2222,7 @@ static struct blk_mq_ops nvme_tcp_mq_ops = {
.init_hctx = nvme_tcp_init_hctx,
.timeout = nvme_tcp_timeout,
.map_queues = nvme_tcp_map_queues,
+ .poll = nvme_tcp_poll,
};
static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
@@ -2220,7 +2276,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
INIT_LIST_HEAD(&ctrl->list);
ctrl->ctrl.opts = opts;
- ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
+ ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
+ opts->nr_poll_queues + 1;
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
@@ -2314,7 +2371,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
- NVMF_OPT_NR_WRITE_QUEUES,
+ NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+ NVMF_OPT_TOS,
.create_ctrl = nvme_tcp_create_ctrl,
};
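Illustrative user-space sketch (not part of the patch): setting the IP type-of-service on a TCP socket, analogous to the kernel_setsockopt(IP_TOS) call that the new NVMF_OPT_TOS option drives. The TOS value is an arbitrary example.
#include <netinet/in.h>
#include <netinet/ip.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int tos = 0x10;			/* example TOS/DSCP value */
	int readback = 0;
	socklen_t len = sizeof(readback);

	/* Same knob the driver sets before connecting the queue socket. */
	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		perror("setsockopt(IP_TOS)");
	if (getsockopt(fd, IPPROTO_IP, IP_TOS, &readback, &len) == 0)
		printf("IP_TOS set to 0x%02x\n", readback);

	close(fd);
	return 0;
}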
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 9778eb0406b3..5c3cb6928f3c 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -86,6 +86,22 @@ static const char *nvme_trace_admin_get_features(struct trace_seq *p,
return ret;
}
+static const char *nvme_trace_get_lba_status(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 mndw = get_unaligned_le32(cdw10 + 8);
+ u16 rl = get_unaligned_le16(cdw10 + 12);
+ u8 atype = cdw10[15];
+
+ trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
+ slba, mndw, rl, atype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -141,6 +157,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
return nvme_trace_admin_identify(p, cdw10);
case nvme_admin_get_features:
return nvme_trace_admin_get_features(p, cdw10);
+ case nvme_admin_get_lba_status:
+ return nvme_trace_get_lba_status(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 4dc12ea52f23..831a062d27cb 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -37,7 +37,6 @@ static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
- u16 status = NVME_SC_SUCCESS;
unsigned long flags;
off_t offset = 0;
u64 slot;
@@ -47,9 +46,8 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
- status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
- sizeof(struct nvme_error_slot));
- if (status)
+ if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+ sizeof(struct nvme_error_slot)))
break;
if (slot == 0)
@@ -59,7 +57,7 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
offset += sizeof(struct nvme_error_slot);
}
spin_unlock_irqrestore(&ctrl->error_lock, flags);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, 0);
}
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
@@ -81,9 +79,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
goto out;
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+ sectors[READ]), 1000);
host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+ data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+ sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -111,11 +111,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
if (!ns->bdev)
continue;
host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read +=
- part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ data_units_read += DIV_ROUND_UP(
+ part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written +=
- part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+ data_units_written += DIV_ROUND_UP(
+ part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
}
rcu_read_unlock();
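Illustrative sketch (not part of the patch): the effect of the DIV_ROUND_UP() change above. SMART data units are reported in thousands of 512-byte units, so plain division under-reports small but non-zero I/O counts as zero; rounding up reports at least one unit. The sector count is an example value.
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long sectors = 900;	/* 512-byte sectors read, example */

	/* Before the change: plain division reports 0 for small counts. */
	printf("truncated : %llu data units\n", sectors / 1000);
	/* After the change: any non-zero I/O shows at least 1 unit. */
	printf("rounded up: %llu data units\n", DIV_ROUND_UP(sectors, 1000));
	return 0;
}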
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index cd52b9f15376..98613a45bd3b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
found:
list_del(&p->entry);
+ nvmet_port_del_ctrls(port, subsys);
nvmet_port_disc_changed(port, subsys);
if (list_empty(&port->subsystems))
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index dad0243c7c96..3a67e244e568 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
u16 status;
switch (errno) {
+ case 0:
+ status = NVME_SC_SUCCESS;
+ break;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->port == port)
+ ctrl->ops->delete_ctrl(ctrl);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
int nvmet_enable_port(struct nvmet_port *port)
{
const struct nvmet_fabrics_ops *ops;
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 8efca26b4776..3764a8900850 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -381,9 +381,7 @@ int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
- if (IS_ERR(nvmet_disc_subsys))
- return PTR_ERR(nvmet_disc_subsys);
- return 0;
+ return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
void nvmet_exit_discovery(void)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b16dc3981c69..748a39fca771 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -253,6 +253,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
}
@@ -357,10 +358,16 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
goto out_free_sq;
ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
+ ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+ error = PTR_ERR(ctrl->ctrl.fabrics_q);
+ goto out_free_tagset;
+ }
+
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_free_tagset;
+ goto out_cleanup_fabrics_q;
}
error = nvmf_connect_admin_queue(&ctrl->ctrl);
@@ -369,23 +376,15 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
- if (error) {
- dev_err(ctrl->ctrl.device,
- "prop_get NVME_REG_CAP failed\n");
- goto out_cleanup_queue;
- }
-
- ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
- error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+ error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
goto out_cleanup_queue;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
error = nvme_init_identify(&ctrl->ctrl);
if (error)
goto out_cleanup_queue;
@@ -394,6 +393,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+ blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
@@ -407,16 +408,17 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_stop_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
nvme_loop_destroy_io_queues(ctrl);
}
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
nvme_loop_destroy_admin_queue(ctrl);
}
@@ -654,6 +656,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
mutex_lock(&nvme_loop_ports_mutex);
list_del_init(&port->entry);
mutex_unlock(&nvme_loop_ports_mutex);
+
+ /*
+ * Ensure any ctrls that are in the process of being
+ * deleted are in fact deleted before we return
+ * and free the port. This is to prevent active
+ * ctrls from using a port after it's freed.
+ */
+ flush_workqueue(nvme_delete_wq);
}
static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ee66c610739..c51f8dd01dc4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+
int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 69b83fa0c76c..bf4f03474e89 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,7 +348,8 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- sgl_free(cmd->req.sg);
+ if (cmd->req.sg_cnt)
+ sgl_free(cmd->req.sg);
return NVME_SC_INTERNAL;
}
@@ -553,7 +554,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
if (queue->nvme_sq.sqhd_disabled) {
kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ if (cmd->req.sg_cnt)
+ sgl_free(cmd->req.sg);
}
return 1;
@@ -584,7 +586,8 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
return -EAGAIN;
kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ if (cmd->req.sg_cnt)
+ sgl_free(cmd->req.sg);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -1306,7 +1309,9 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
- sgl_free(cmd->req.sg);
+ kfree(cmd->iov);
+ if (cmd->req.sg_cnt)
+ sgl_free(cmd->req.sg);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1410,6 +1415,7 @@ done:
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
struct socket *sock = queue->sock;
+ struct inet_sock *inet = inet_sk(sock->sk);
struct linger sol = { .l_onoff = 1, .l_linger = 0 };
int ret;
@@ -1433,6 +1439,16 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (ret)
return ret;
+ /* Set socket type of service */
+ if (inet->rcv_tos > 0) {
+ int tos = inet->rcv_tos;
+
+ ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
+ (char *)&tos, sizeof(tos));
+ if (ret)
+ return ret;
+ }
+
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = queue;
queue->data_ready = sock->sk->sk_data_ready;
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 6af11d493271..1373a3c67962 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -33,6 +33,22 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 mndw = get_unaligned_le32(cdw10 + 8);
+ u16 rl = get_unaligned_le16(cdw10 + 12);
+ u8 atype = cdw10[15];
+
+ trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
+ slba, mndw, rl, atype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -80,6 +96,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
return nvmet_trace_admin_identify(p, cdw10);
case nvme_admin_get_features:
return nvmet_trace_admin_get_features(p, cdw10);
+ case nvme_admin_get_lba_status:
+ return nvmet_trace_get_lba_status(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
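Illustrative sketch (not part of the patch): the little-endian field extraction the two new Get LBA Status trace helpers perform on the CDW10..CDW13 bytes. The readers below stand in for get_unaligned_le*() and assume a little-endian host; the byte buffer is made-up example data.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal unaligned readers (little-endian host assumed for brevity). */
static uint64_t rd_le64(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; }
static uint32_t rd_le32(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
static uint16_t rd_le16(const uint8_t *p) { uint16_t v; memcpy(&v, p, 2); return v; }

int main(void)
{
	/* 16 bytes covering CDW10..CDW13 of a Get LBA Status command (example). */
	uint8_t cdw10[16] = {
		0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* slba  */
		0x40, 0x00, 0x00, 0x00,				/* mndw  */
		0x08, 0x00,					/* rl    */
		0x00, 0x11,					/* atype in byte 15 */
	};

	printf("slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u\n",
	       (unsigned long long)rd_le64(cdw10),
	       rd_le32(cdw10 + 8), rd_le16(cdw10 + 12), cdw10[15]);
	return 0;
}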
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index ac5d945be88a..057d1ff87d5d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -76,11 +76,6 @@ static struct bus_type nvmem_bus_type = {
.name = "nvmem",
};
-static int of_nvmem_match(struct device *dev, const void *nvmem_np)
-{
- return dev->of_node == nvmem_np;
-}
-
static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
struct device *d;
@@ -88,7 +83,7 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
if (!nvmem_np)
return NULL;
- d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
+ d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
if (!d)
return NULL;
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index d9dc482ecb2f..61a17f943f47 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -16,6 +16,7 @@
enum ocotp_devtype {
IMX8QXP,
+ IMX8QM,
};
struct ocotp_devtype_data {
@@ -39,6 +40,11 @@ static struct ocotp_devtype_data imx8qxp_data = {
.nregs = 800,
};
+static struct ocotp_devtype_data imx8qm_data = {
+ .devtype = IMX8QM,
+ .nregs = 800,
+};
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -118,6 +124,7 @@ static struct nvmem_config imx_scu_ocotp_nvmem_config = {
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
+ { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data },
{ },
};
MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 42d4451e7d67..dff2f3c357f5 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -479,6 +479,12 @@ static const struct ocotp_params imx8mm_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx8mn_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -490,6 +496,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
{ .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
+ { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index b9f9ce089de9..07c9f38c1c60 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -155,7 +155,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
if (err)
break;
- memcpy(buf + i, &tmp, efuse->config.word_size);
+ memcpy(buf + i, &tmp,
+ min_t(size_t, bytes - i, efuse->config.word_size));
}
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
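Illustrative sketch (not part of the patch): the buffer over-write that the min_t() clamp above avoids when copying word-sized register reads into a caller buffer whose length is not a multiple of the word size. Names and data below are invented for the example.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WORD_SIZE 4

/* Copy 'bytes' bytes from a word-addressed source without writing past
 * the end of 'buf' on the final, possibly partial, word. */
static void read_words(const uint32_t *src, uint8_t *buf, size_t bytes)
{
	for (size_t i = 0; i < bytes; i += WORD_SIZE) {
		uint32_t tmp = src[i / WORD_SIZE];
		size_t chunk = bytes - i < WORD_SIZE ? bytes - i : WORD_SIZE;

		memcpy(buf + i, &tmp, chunk);	/* the clamp the patch adds */
	}
}

int main(void)
{
	uint32_t fuse[2] = { 0x44332211, 0x88776655 };
	uint8_t out[6];			/* deliberately not a multiple of 4 */

	read_words(fuse, out, sizeof(out));
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}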
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index c34d9fecfb10..8e4898dec002 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -200,6 +200,6 @@ static struct platform_driver mxs_ocotp_driver = {
};
module_platform_driver(mxs_ocotp_driver);
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net");
MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
index 6f303b91f6e7..9e0c429cd08a 100644
--- a/drivers/nvmem/nvmem-sysfs.c
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -224,10 +224,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_root_nvmem;
+ if (nvmem->read_only) {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_ro_nvmem;
+ } else {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_nvmem;
+ }
nvmem->eeprom.attr.name = "eeprom";
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
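Illustrative sketch (not part of the patch): the 2x2 selection the sysfs change above makes explicit, so that non-root-only configs no longer get a root-only eeprom attribute. The helper and mode values are stand-ins chosen for the example.
#include <stdbool.h>
#include <stdio.h>

/* Pick a sysfs-style file mode from the two nvmem_config flags. */
static unsigned int eeprom_mode(bool read_only, bool root_only)
{
	if (read_only)
		return root_only ? 0400 : 0444;
	return root_only ? 0600 : 0644;
}

int main(void)
{
	printf("ro+root: %04o\n", eeprom_mode(true,  true));
	printf("ro     : %04o\n", eeprom_mode(true,  false));
	printf("rw+root: %04o\n", eeprom_mode(false, true));
	printf("rw     : %04o\n", eeprom_mode(false, false));
	return 0;
}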
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index a079a80ddf2c..e26ef1bbf198 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -186,6 +186,7 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
+ .need_register_readout = true,
};
static const struct sunxi_sid_cfg sun50i_h6_cfg = {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 9cdf14b9aaab..223d617ecfe1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
+#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
{
int l;
const char *p;
+ const void *rng_seed;
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
@@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
pr_debug("Command line is: %s\n", (char*)data);
+ rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
+ if (rng_seed && l > 0) {
+ add_bootloader_randomness(rng_seed, l);
+
+ /* try to clear seed so it won't be found. */
+ fdt_nop_property(initial_boot_params, node, "rng-seed");
+
+ /* update CRC check value */
+ of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+ fdt_totalsize(initial_boot_params));
+ }
+
/* break now */
return 1;
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
*
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 44f53496cab1..000b95787df1 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -280,12 +280,6 @@ unregister:
}
EXPORT_SYMBOL(of_mdiobus_register);
-/* Helper function for of_phy_find_device */
-static int of_phy_match(struct device *dev, const void *phy_np)
-{
- return dev->of_node == phy_np;
-}
-
/**
* of_phy_find_device - Give a PHY node, find the phy_device
* @phy_np: Pointer to the phy's device tree node
@@ -301,7 +295,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
if (!phy_np)
return NULL;
- d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
+ d = bus_find_device_by_of_node(&mdio_bus_type, phy_np);
if (d) {
mdiodev = to_mdio_device(d);
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7801e25e6895..b47a2292fe8e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -37,11 +37,6 @@ static const struct of_device_id of_skipped_node_table[] = {
{} /* Empty terminated list */
};
-static int of_dev_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/**
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
@@ -55,7 +50,7 @@ struct platform_device *of_find_device_by_node(struct device_node *np)
{
struct device *dev;
- dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match);
+ dev = bus_find_device_by_of_node(&platform_bus_type, np);
return dev ? to_platform_device(dev) : NULL;
}
EXPORT_SYMBOL(of_find_device_by_node);
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
for_each_child_of_node(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
- if (!node_name_cmp(child, overlay_child))
+ if (!node_name_cmp(child, overlay_child)) {
+ of_node_put(overlay_child);
break;
+ }
- if (!overlay_child)
+ if (!overlay_child) {
+ of_node_put(child);
return -EINVAL;
+ }
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index c094d5d20fd7..3b7ffd0234e9 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -401,6 +401,54 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
+/**
+ * dev_pm_opp_find_level_exact() - search for an exact level
+ * @dev: device for which we do this operation
+ * @level: level to search for
+ *
+ * Return: Searches for exact match in the opp table and returns pointer to the
+ * matching opp if found, else returns ERR_PTR in case of error and should
+ * be handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
+ return ERR_PTR(r);
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->level == level) {
+ opp = temp_opp;
+
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);
+
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -940,6 +988,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
+ kref_init(&opp_table->list_kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
@@ -1577,6 +1626,12 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
goto free_regulators;
}
+ ret = regulator_enable(reg);
+ if (ret < 0) {
+ regulator_put(reg);
+ goto free_regulators;
+ }
+
opp_table->regulators[i] = reg;
}
@@ -1590,8 +1645,10 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
return opp_table;
free_regulators:
- while (i != 0)
- regulator_put(opp_table->regulators[--i]);
+ while (i--) {
+ regulator_disable(opp_table->regulators[i]);
+ regulator_put(opp_table->regulators[i]);
+ }
kfree(opp_table->regulators);
opp_table->regulators = NULL;
@@ -1617,8 +1674,10 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ for (i = opp_table->regulator_count - 1; i >= 0; i--) {
+ regulator_disable(opp_table->regulators[i]);
regulator_put(opp_table->regulators[i]);
+ }
_free_set_opp_data(opp_table);
@@ -1771,6 +1830,7 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
* @dev: Consumer device for which the genpd is getting attached.
* @names: Null terminated array of pointers containing names of genpd to attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
*
* Multiple generic power domains for a device are supported with the help of
* virtual genpd devices, which are created for each consumer device - genpd
@@ -1784,12 +1844,16 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
*
* This helper needs to be called once with a list of all genpd to attach.
* Otherwise the original device structure will be used instead by the OPP core.
+ *
+ * The order of entries in the names array must match the order in which
+ * "required-opps" are added in DT.
*/
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names)
+struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
+ const char **names, struct device ***virt_devs)
{
struct opp_table *opp_table;
struct device *virt_dev;
- int index, ret = -EINVAL;
+ int index = 0, ret = -EINVAL;
const char **name = names;
opp_table = dev_pm_opp_get_opp_table(dev);
@@ -1815,14 +1879,6 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
goto unlock;
while (*name) {
- index = of_property_match_string(dev->of_node,
- "power-domain-names", *name);
- if (index < 0) {
- dev_err(dev, "Failed to find power domain: %s (%d)\n",
- *name, index);
- goto err;
- }
-
if (index >= opp_table->required_opp_count) {
dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
*name, opp_table->required_opp_count, index);
@@ -1843,9 +1899,12 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
}
opp_table->genpd_virt_devs[index] = virt_dev;
+ index++;
name++;
}
+ if (virt_devs)
+ *virt_devs = opp_table->genpd_virt_devs;
mutex_unlock(&opp_table->genpd_virt_dev_lock);
return opp_table;
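[Editorial illustration, not part of the patch: a hedged sketch of a caller using the updated dev_pm_opp_attach_genpd() signature. The power-domain names "mx" and "cx" are hypothetical; per the kernel-doc above, their order must match the order of "required-opps" in DT.]

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_attach_genpds(struct device *dev)
{
	const char *names[] = { "mx", "cx", NULL };	/* NULL terminated */
	struct device **virt_devs;
	struct opp_table *opp_table;

	/* virt_devs is optional; pass NULL if the virtual devices are not needed. */
	opp_table = dev_pm_opp_attach_genpd(dev, names, &virt_devs);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* virt_devs[0] and virt_devs[1] now map to "mx" and "cx" respectively. */

	return 0;
}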
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index b313aca9894f..1813f5ad5fa2 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -617,9 +617,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
if (opp_table->suspend_opp) {
- dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, opp_table->suspend_opp->rate,
- new_opp->rate);
+ /* Pick the OPP with higher rate as suspend OPP */
+ if (new_opp->rate > opp_table->suspend_opp->rate) {
+ opp_table->suspend_opp->suspend = false;
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
} else {
new_opp->suspend = true;
opp_table->suspend_opp = new_opp;
@@ -662,8 +665,6 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
return 0;
}
- kref_init(&opp_table->list_kref);
-
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
opp = _opp_add_static_v2(opp_table, dev, np);
@@ -672,17 +673,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
of_node_put(np);
- goto put_list_kref;
+ return ret;
} else if (opp) {
count++;
}
}
 /* There should be one or more OPPs defined */
- if (WARN_ON(!count)) {
- ret = -ENOENT;
- goto put_list_kref;
- }
+ if (WARN_ON(!count))
+ return -ENOENT;
list_for_each_entry(opp, &opp_table->opp_list, node)
pstate_count += !!opp->pstate;
@@ -691,8 +690,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
if (pstate_count && pstate_count != count) {
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
- ret = -ENOENT;
- goto put_list_kref;
+ return -ENOENT;
}
if (pstate_count)
@@ -701,11 +699,6 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
opp_table->parsed_static_opps = true;
return 0;
-
-put_list_kref:
- _put_opp_list_kref(opp_table);
-
- return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
@@ -731,8 +724,6 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
return -EINVAL;
}
- kref_init(&opp_table->list_kref);
-
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
@@ -742,7 +733,6 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
- _put_opp_list_kref(opp_table);
return ret;
}
nr -= 2;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 3c730103e637..2f1cac89ddf5 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -6,7 +6,7 @@
** (c) Copyright 1999 SuSE GmbH
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
-** (c) Copyright 2006 Helge Deller
+** (c) Copyright 2006-2019 Helge Deller
**
**
** This module provides access to Dino PCI bus (config/IOport spaces)
@@ -156,6 +156,15 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
return container_of(hba, struct dino_device, hba);
}
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+ struct dino_device *dino_dev;
+
+ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+ return is_card_dino(&dino_dev->hba.dev->id);
+}
+
/*
* Dino Configuration Space Accessor Functions
*/
@@ -437,6 +446,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
+#ifdef CONFIG_TULIP
+static void pci_fixup_tulip(struct pci_dev *dev)
+{
+ if (!pci_dev_is_behind_card_dino(dev))
+ return;
+ if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
+ return;
+ pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
+ pci_name(dev));
+ /* Disable this card by zeroing the PCI resources */
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+ memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
+#endif /* CONFIG_TULIP */
static void __init
dino_bios_init(void)
@@ -863,14 +887,14 @@ static int __init dino_common_init(struct parisc_device *dev,
#define CUJO_RAVEN_BADPAGE 0x01003000UL
#define CUJO_FIREHAWK_BADPAGE 0x01607000UL
-static const char *dino_vers[] = {
+static const char dino_vers[][4] = {
"2.0",
"2.1",
"3.0",
"3.1"
};
-static const char *cujo_vers[] = {
+static const char cujo_vers[][4] = {
"1.0",
"2.0"
};
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index 9c08222c0cc6..f54a6f450391 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -93,7 +93,7 @@ static int configure_memory(const unsigned char *buf,
res->start = mem_parent->start + get_24(buf+len+2);
res->end = res->start + get_16(buf+len+5)*1024;
res->flags = IORESOURCE_MEM;
- printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
+ pr_cont("memory %pR ", res);
result = request_resource(mem_parent, res);
if (result < 0) {
printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
@@ -123,7 +123,7 @@ static int configure_irq(const unsigned char *buf)
for (i=0;i<HPEE_IRQ_MAX_ENT;i++) {
c = get_8(buf+len);
- printk("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK);
+ pr_cont("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK);
if (c & HPEE_IRQ_TRIG_LEVEL) {
eisa_make_irq_level(c & HPEE_IRQ_CHANNEL_MASK);
} else {
@@ -153,7 +153,7 @@ static int configure_dma(const unsigned char *buf)
for (i=0;i<HPEE_DMA_MAX_ENT;i++) {
c = get_8(buf+len);
- printk("DMA %d ", c&HPEE_DMA_CHANNEL_MASK);
+ pr_cont("DMA %d ", c&HPEE_DMA_CHANNEL_MASK);
 /* fixme: maybe initialize the dma channel with the timing? */
len+=2;
if (!(c & HPEE_DMA_MORE)) {
@@ -183,7 +183,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
res->flags = IORESOURCE_IO;
- printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
+ pr_cont("ioports %pR ", res);
result = request_resource(io_parent, res);
if (result < 0) {
printk(KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
@@ -401,7 +401,7 @@ static int parse_slot_config(int slot,
}
pos = p0 + function_len;
}
- printk("\n");
+ pr_cont("\n");
if (!id_string_used) {
kfree(board);
}
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 44e12c83cfa8..e60e68664654 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -61,8 +61,6 @@ static int __init hppb_probe(struct parisc_device *dev)
}
card = card->next;
}
- printk(KERN_INFO "Found GeckoBoa at 0x%llx\n",
- (unsigned long long) dev->hpa.start);
card->hpa = dev->hpa.start;
card->mmio_region.name = "HP-PB Bus";
@@ -72,10 +70,11 @@ static int __init hppb_probe(struct parisc_device *dev)
card->mmio_region.end = gsc_readl(dev->hpa.start + IO_IO_HIGH) - 1;
status = ccio_request_resource(dev, &card->mmio_region);
- if(status < 0) {
- printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n",
- __FILE__, &card->mmio_region);
- }
+
+ pr_info("Found GeckoBoa at %pap, bus space %pR,%s claimed.\n",
+ &dev->hpa.start,
+ &card->mmio_region,
+ (status < 0) ? " not":"" );
return 0;
}
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 6fa41f8173b6..022c566c0f32 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
-obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
\ No newline at end of file
+obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 461fd8a24278..96b888bb49c6 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -61,7 +61,10 @@ enum parport_pc_pci_cards {
wch_ch382_0s1p,
wch_ch382_2s1p,
brainboxes_5s1p,
- sunix_2s1p,
+ sunix_4008a,
+ sunix_5069a,
+ sunix_5079a,
+ sunix_5099a,
};
/* each element directly indexed from enum list, above */
@@ -151,7 +154,10 @@ static struct parport_pc_pci cards[] = {
/* wch_ch382_0s1p*/ { 1, { { 2, -1}, } },
/* wch_ch382_2s1p*/ { 1, { { 2, -1}, } },
/* brainboxes_5s1p */ { 1, { { 3, -1 }, } },
- /* sunix_2s1p */ { 1, { { 3, -1 }, } },
+ /* sunix_4008a */ { 1, { { 1, 2 }, } },
+ /* sunix_5069a */ { 1, { { 1, 2 }, } },
+ /* sunix_5079a */ { 1, { { 1, 2 }, } },
+ /* sunix_5099a */ { 1, { { 1, 2 }, } },
};
static struct pci_device_id parport_serial_pci_tbl[] = {
@@ -261,13 +267,15 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_5s1p },
- /*
- * More SUNIX variations. At least one of these has part number
- * '5079A but subdevice 0x102. That board reports 0x0708 as
- * its PCI Class.
- */
+ /* Sunix boards */
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
- 0x0102, 0, 0, sunix_2s1p },
+ 0x0100, 0, 0, sunix_4008a },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0101, 0, 0, sunix_5069a },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0102, 0, 0, sunix_5079a },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
+ 0x0104, 0, 0, sunix_5099a },
{ 0, } /* terminate list */
};
@@ -516,11 +524,23 @@ static struct pciserial_board pci_parport_serial_boards[] = {
.base_baud = 921600,
.uart_offset = 8,
},
- [sunix_2s1p] = {
- .flags = FL_BASE0|FL_BASE_BARS,
+ [sunix_4008a] = {
+ .num_ports = 0,
+ },
+ [sunix_5069a] = {
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [sunix_5079a] = {
.num_ports = 2,
- .base_baud = 921600,
- .uart_offset = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [sunix_5099a] = {
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
},
};
@@ -660,8 +680,7 @@ static void parport_serial_pci_remove(struct pci_dev *dev)
static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct parport_serial_private *priv = pci_get_drvdata(pdev);
+ struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_suspend_ports(priv->serial);
@@ -672,8 +691,7 @@ static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
static int __maybe_unused parport_serial_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct parport_serial_private *priv = pci_get_drvdata(pdev);
+ struct parport_serial_private *priv = dev_get_drvdata(dev);
if (priv->serial)
pciserial_resume_ports(priv->serial);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 40b625458afa..97056f3dd317 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2521,6 +2521,7 @@ static int hv_pci_probe(struct hv_device *hdev,
const struct hv_vmbus_device_id *dev_id)
{
struct hv_pcibus_device *hbus;
+ char *name;
int ret;
/*
@@ -2589,7 +2590,14 @@ static int hv_pci_probe(struct hv_device *hdev,
goto free_config;
}
- hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus);
+ name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
+ if (!name) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
+ kfree(name);
if (!hbus->sysdata.fwnode) {
ret = -ENOMEM;
goto unmap;
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index e9f78eb390d2..e7b493c22bf3 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -147,15 +147,6 @@ config HOTPLUG_PCI_RPA_DLPAR
When in doubt, say N.
-config HOTPLUG_PCI_SGI
- tristate "SGI PCI Hotplug Support"
- depends on IA64_SGI_SN2 || IA64_GENERIC
- help
- Say Y here if you want to use the SGI Altix Hotplug
- Driver for PCI devices.
-
- When in doubt, say N.
-
config HOTPLUG_PCI_S390
bool "System z PCI Hotplug Support"
depends on S390 && 64BIT
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 7e3331603714..5196983220df 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
obj-$(CONFIG_HOTPLUG_PCI_POWERNV) += pnv-php.o
obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
-obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
deleted file mode 100644
index 231f5bdd3d2d..000000000000
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ /dev/null
@@ -1,700 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved.
- *
- * This work was based on the 2.4/2.6 kernel development by Dick Reigner.
- * Work to add BIOS PROM support was completed by Mike Habeck.
- */
-
-#include <linux/acpi.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/pci_hotplug.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-
-#include <asm/sn/addrs.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/l1.h>
-#include <asm/sn/module.h>
-#include <asm/sn/pcibr_provider.h>
-#include <asm/sn/pcibus_provider_defs.h>
-#include <asm/sn/pcidev.h>
-#include <asm/sn/sn_feature_sets.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/types.h>
-#include <asm/sn/acpi.h>
-
-#include "../pci.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
-MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
-
-
-/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
-#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
-#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
-#define PCI_L1_ERR 7 /* L1 console command error */
-#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
-
-
-#define PCIIO_ASIC_TYPE_TIOCA 4
-#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
-#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
-#define SN_SLOT_NAME_SIZE 33 /* size of name string */
-
-/* internal list head */
-static struct list_head sn_hp_list;
-
-/* hotplug_slot struct's private pointer */
-struct slot {
- int device_num;
- struct pci_bus *pci_bus;
- /* this struct for glue internal only */
- struct hotplug_slot hotplug_slot;
- struct list_head hp_list;
- char physical_path[SN_SLOT_NAME_SIZE];
-};
-
-struct pcibr_slot_enable_resp {
- int resp_sub_errno;
- char resp_l1_msg[PCI_L1_QSIZE + 1];
-};
-
-struct pcibr_slot_disable_resp {
- int resp_sub_errno;
- char resp_l1_msg[PCI_L1_QSIZE + 1];
-};
-
-enum sn_pci_req_e {
- PCI_REQ_SLOT_ELIGIBLE,
- PCI_REQ_SLOT_DISABLE
-};
-
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
-static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
-
-static const struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .enable_slot = enable_slot,
- .disable_slot = disable_slot,
- .get_power_status = get_power_status,
-};
-
-static DEFINE_MUTEX(sn_hotplug_mutex);
-
-static struct slot *to_slot(struct hotplug_slot *bss_hotplug_slot)
-{
- return container_of(bss_hotplug_slot, struct slot, hotplug_slot);
-}
-
-static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
-{
- int retval = -ENOENT;
- struct slot *slot = to_slot(pci_slot->hotplug);
-
- if (!slot)
- return retval;
-
- retval = sprintf(buf, "%s\n", slot->physical_path);
- return retval;
-}
-
-static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
-
-static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
-{
- struct pcibus_info *pcibus_info;
- u16 busnum, segment, ioboard_type;
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
-
- /* Check to see if this is a valid slot on 'pci_bus' */
- if (!(pcibus_info->pbi_valid_devices & (1 << device)))
- return -EPERM;
-
- ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
- busnum = pcibus_info->pbi_buscommon.bs_persist_busnum;
- segment = pci_domain_nr(pci_bus) & 0xf;
-
- /* Do not allow hotplug operations on base I/O cards */
- if ((ioboard_type == L1_BRICKTYPE_IX ||
- ioboard_type == L1_BRICKTYPE_IA) &&
- (segment == 1 && busnum == 0 && device != 1))
- return -EPERM;
-
- return 1;
-}
-
-static int sn_pci_bus_valid(struct pci_bus *pci_bus)
-{
- struct pcibus_info *pcibus_info;
- u32 asic_type;
- u16 ioboard_type;
-
- /* Don't register slots hanging off the TIOCA bus */
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
- asic_type = pcibus_info->pbi_buscommon.bs_asic_type;
- if (asic_type == PCIIO_ASIC_TYPE_TIOCA)
- return -EPERM;
-
- /* Only register slots in I/O Bricks that support hotplug */
- ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
- switch (ioboard_type) {
- case L1_BRICKTYPE_IX:
- case L1_BRICKTYPE_PX:
- case L1_BRICKTYPE_IA:
- case L1_BRICKTYPE_PA:
- case L1_BOARDTYPE_PCIX3SLOT:
- return 1;
- break;
- default:
- return -EPERM;
- break;
- }
-
- return -EIO;
-}
-
-static int sn_hp_slot_private_alloc(struct hotplug_slot **bss_hotplug_slot,
- struct pci_bus *pci_bus, int device,
- char *name)
-{
- struct pcibus_info *pcibus_info;
- struct slot *slot;
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- return -ENOMEM;
-
- slot->device_num = device;
- slot->pci_bus = pci_bus;
- sprintf(name, "%04x:%02x:%02x",
- pci_domain_nr(pci_bus),
- ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
- device + 1);
-
- sn_generate_path(pci_bus, slot->physical_path);
-
- list_add(&slot->hp_list, &sn_hp_list);
- *bss_hotplug_slot = &slot->hotplug_slot;
-
- return 0;
-}
-
-static struct hotplug_slot *sn_hp_destroy(void)
-{
- struct slot *slot;
- struct pci_slot *pci_slot;
- struct hotplug_slot *bss_hotplug_slot = NULL;
-
- list_for_each_entry(slot, &sn_hp_list, hp_list) {
- bss_hotplug_slot = &slot->hotplug_slot;
- pci_slot = bss_hotplug_slot->pci_slot;
- list_del(&slot->hp_list);
- sysfs_remove_file(&pci_slot->kobj,
- &sn_slot_path_attr.attr);
- break;
- }
- return bss_hotplug_slot;
-}
-
-static void sn_bus_free_data(struct pci_dev *dev)
-{
- struct pci_bus *subordinate_bus;
- struct pci_dev *child;
-
- /* Recursively clean up sn_irq_info structs */
- if (dev->subordinate) {
- subordinate_bus = dev->subordinate;
- list_for_each_entry(child, &subordinate_bus->devices, bus_list)
- sn_bus_free_data(child);
- }
- /*
- * Some drivers may use dma accesses during the
- * driver remove function. We release the sysdata
- * areas after the driver remove functions have
- * been called.
- */
- sn_bus_store_sysdata(dev);
- sn_pci_unfixup_slot(dev);
-}
-
-static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
- int device_num, char **ssdt)
-{
- struct slot *slot = to_slot(bss_hotplug_slot);
- struct pcibus_info *pcibus_info;
- struct pcibr_slot_enable_resp resp;
- int rc;
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
-
- /*
- * Power-on and initialize the slot in the SN
- * PCI infrastructure.
- */
- rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
-
-
- if (rc == PCI_SLOT_ALREADY_UP) {
- pci_dbg(slot->pci_bus->self, "is already active\n");
- return 1; /* return 1 to user */
- }
-
- if (rc == PCI_L1_ERR) {
- pci_dbg(slot->pci_bus->self, "L1 failure %d with message: %s",
- resp.resp_sub_errno, resp.resp_l1_msg);
- return -EPERM;
- }
-
- if (rc) {
- pci_dbg(slot->pci_bus->self, "insert failed with error %d sub-error %d\n",
- rc, resp.resp_sub_errno);
- return -EIO;
- }
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
- pcibus_info->pbi_enabled_devices |= (1 << device_num);
-
- return 0;
-}
-
-static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
- int device_num, int action)
-{
- struct slot *slot = to_slot(bss_hotplug_slot);
- struct pcibus_info *pcibus_info;
- struct pcibr_slot_disable_resp resp;
- int rc;
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
-
- rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp);
-
- if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
- (rc == PCI_SLOT_ALREADY_DOWN)) {
- pci_dbg(slot->pci_bus->self, "Slot %s already inactive\n", slot->physical_path);
- return 1; /* return 1 to user */
- }
-
- if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
- pci_dbg(slot->pci_bus->self, "Cannot remove last 33MHz card\n");
- return -EPERM;
- }
-
- if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
- pci_dbg(slot->pci_bus->self, "L1 failure %d with message \n%s\n",
- resp.resp_sub_errno, resp.resp_l1_msg);
- return -EPERM;
- }
-
- if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
- pci_dbg(slot->pci_bus->self, "remove failed with error %d sub-error %d\n",
- rc, resp.resp_sub_errno);
- return -EIO;
- }
-
- if ((action == PCI_REQ_SLOT_ELIGIBLE) && !rc)
- return 0;
-
- if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
- pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
- pci_dbg(slot->pci_bus->self, "remove successful\n");
- return 0;
- }
-
- if ((action == PCI_REQ_SLOT_DISABLE) && rc) {
- pci_dbg(slot->pci_bus->self, "remove failed rc = %d\n", rc);
- }
-
- return rc;
-}
-
-/*
- * Power up and configure the slot via a SAL call to PROM.
- * Scan slot (and any children), do any platform specific fixup,
- * and find device driver.
- */
-static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
-{
- struct slot *slot = to_slot(bss_hotplug_slot);
- struct pci_bus *new_bus = NULL;
- struct pci_dev *dev;
- int num_funcs;
- int new_ppb = 0;
- int rc;
- char *ssdt = NULL;
- void pcibios_fixup_device_resources(struct pci_dev *);
-
- /* Serialize the Linux PCI infrastructure */
- mutex_lock(&sn_hotplug_mutex);
-
- /*
- * Power-on and initialize the slot in the SN
- * PCI infrastructure. Also, retrieve the ACPI SSDT
- * table for the slot (if ACPI capable PROM).
- */
- rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
- if (rc) {
- mutex_unlock(&sn_hotplug_mutex);
- return rc;
- }
-
- if (ssdt)
- ssdt = __va(ssdt);
- /* Add the new SSDT for the slot to the ACPI namespace */
- if (SN_ACPI_BASE_SUPPORT() && ssdt) {
- acpi_status ret;
-
- ret = acpi_load_table((struct acpi_table_header *)ssdt);
- if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
- __func__, ret);
- /* try to continue on */
- }
- }
-
- num_funcs = pci_scan_slot(slot->pci_bus,
- PCI_DEVFN(slot->device_num + 1, 0));
- if (!num_funcs) {
- pci_dbg(slot->pci_bus->self, "no device in slot\n");
- mutex_unlock(&sn_hotplug_mutex);
- return -ENODEV;
- }
-
- /*
- * Map SN resources for all functions on the card
- * to the Linux PCI interface and tell the drivers
- * about them.
- */
- list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
- continue;
-
- /* Need to do slot fixup on PPB before fixup of children
- * (PPB's pcidev_info needs to be in pcidev_info list
- * before child's SN_PCIDEV_INFO() call to setup
- * pdi_host_pcidev_info).
- */
- pcibios_fixup_device_resources(dev);
- if (SN_ACPI_BASE_SUPPORT())
- sn_acpi_slot_fixup(dev);
- else
- sn_io_slot_fixup(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_hp_add_bridge(dev);
- if (dev->subordinate) {
- new_bus = dev->subordinate;
- new_ppb = 1;
- }
- }
- }
-
- /*
- * Add the slot's devices to the ACPI infrastructure */
- if (SN_ACPI_BASE_SUPPORT() && ssdt) {
- unsigned long long adr;
- struct acpi_device *pdevice;
- acpi_handle phandle;
- acpi_handle chandle = NULL;
- acpi_handle rethandle;
- acpi_status ret;
-
- phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
-
- if (acpi_bus_get_device(phandle, &pdevice)) {
- pci_dbg(slot->pci_bus->self, "no parent device, assuming NULL\n");
- pdevice = NULL;
- }
-
- acpi_scan_lock_acquire();
- /*
- * Walk the rootbus node's immediate children looking for
- * the slot's device node(s). There can be more than
- * one for multifunction devices.
- */
- for (;;) {
- rethandle = NULL;
- ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
- phandle, chandle,
- &rethandle);
-
- if (ret == AE_NOT_FOUND || rethandle == NULL)
- break;
-
- chandle = rethandle;
-
- ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
- NULL, &adr);
-
- if (ACPI_SUCCESS(ret) &&
- (adr>>16) == (slot->device_num + 1)) {
-
- ret = acpi_bus_scan(chandle);
- if (ACPI_FAILURE(ret)) {
- printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n",
- __func__, ret, (int)(adr>>16),
- (int)(adr&0xffff));
- /* try to continue on */
- }
- }
- }
- acpi_scan_lock_release();
- }
-
- pci_lock_rescan_remove();
-
- /* Call the driver for the new device */
- pci_bus_add_devices(slot->pci_bus);
- /* Call the drivers for the new devices subordinate to PPB */
- if (new_ppb)
- pci_bus_add_devices(new_bus);
-
- pci_unlock_rescan_remove();
- mutex_unlock(&sn_hotplug_mutex);
-
- if (rc == 0)
- pci_dbg(slot->pci_bus->self, "insert operation successful\n");
- else
- pci_dbg(slot->pci_bus->self, "insert operation failed rc = %d\n", rc);
-
- return rc;
-}
-
-static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
-{
- struct slot *slot = to_slot(bss_hotplug_slot);
- struct pci_dev *dev, *temp;
- int rc;
- acpi_handle ssdt_hdl = NULL;
-
- /* Acquire update access to the bus */
- mutex_lock(&sn_hotplug_mutex);
-
- /* is it okay to bring this slot down? */
- rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
- PCI_REQ_SLOT_ELIGIBLE);
- if (rc)
- goto leaving;
-
- /* free the ACPI resources for the slot */
- if (SN_ACPI_BASE_SUPPORT() &&
- PCI_CONTROLLER(slot->pci_bus)->companion) {
- unsigned long long adr;
- struct acpi_device *device;
- acpi_handle phandle;
- acpi_handle chandle = NULL;
- acpi_handle rethandle;
- acpi_status ret;
-
- /* Get the rootbus node pointer */
- phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
-
- acpi_scan_lock_acquire();
- /*
- * Walk the rootbus node's immediate children looking for
- * the slot's device node(s). There can be more than
- * one for multifunction devices.
- */
- for (;;) {
- rethandle = NULL;
- ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
- phandle, chandle,
- &rethandle);
-
- if (ret == AE_NOT_FOUND || rethandle == NULL)
- break;
-
- chandle = rethandle;
-
- ret = acpi_evaluate_integer(chandle,
- METHOD_NAME__ADR,
- NULL, &adr);
- if (ACPI_SUCCESS(ret) &&
- (adr>>16) == (slot->device_num + 1)) {
- /* retain the owner id */
- ssdt_hdl = chandle;
-
- ret = acpi_bus_get_device(chandle,
- &device);
- if (ACPI_SUCCESS(ret))
- acpi_bus_trim(device);
- }
- }
- acpi_scan_lock_release();
- }
-
- pci_lock_rescan_remove();
- /* Free the SN resources assigned to the Linux device.*/
- list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
- continue;
-
- pci_dev_get(dev);
- sn_bus_free_data(dev);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
- pci_unlock_rescan_remove();
-
- /* Remove the SSDT for the slot from the ACPI namespace */
- if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
- acpi_status ret;
- ret = acpi_unload_parent_table(ssdt_hdl);
- if (ACPI_FAILURE(ret)) {
- acpi_handle_err(ssdt_hdl,
- "%s: acpi_unload_parent_table failed (0x%x)\n",
- __func__, ret);
- /* try to continue on */
- }
- }
-
- /* free the collected sysdata pointers */
- sn_bus_free_sysdata();
-
- /* Deactivate slot */
- rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
- PCI_REQ_SLOT_DISABLE);
- leaving:
- /* Release the bus lock */
- mutex_unlock(&sn_hotplug_mutex);
-
- return rc;
-}
-
-static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
- u8 *value)
-{
- struct slot *slot = to_slot(bss_hotplug_slot);
- struct pcibus_info *pcibus_info;
- u32 power;
-
- pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
- mutex_lock(&sn_hotplug_mutex);
- power = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
- *value = power ? 1 : 0;
- mutex_unlock(&sn_hotplug_mutex);
- return 0;
-}
-
-static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
-{
- kfree(to_slot(bss_hotplug_slot));
-}
-
-static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
-{
- int device;
- struct pci_slot *pci_slot;
- struct hotplug_slot *bss_hotplug_slot;
- char name[SN_SLOT_NAME_SIZE];
- int rc = 0;
-
- /*
- * Currently only four devices are supported,
- * in the future there maybe more -- up to 32.
- */
-
- for (device = 0; device < SN_MAX_HP_SLOTS ; device++) {
- if (sn_pci_slot_valid(pci_bus, device) != 1)
- continue;
-
- if (sn_hp_slot_private_alloc(&bss_hotplug_slot,
- pci_bus, device, name)) {
- rc = -ENOMEM;
- goto alloc_err;
- }
- bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
-
- rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
- if (rc)
- goto register_err;
-
- pci_slot = bss_hotplug_slot->pci_slot;
- rc = sysfs_create_file(&pci_slot->kobj,
- &sn_slot_path_attr.attr);
- if (rc)
- goto alloc_err;
- }
- pci_dbg(pci_bus->self, "Registered bus with hotplug\n");
- return rc;
-
-register_err:
- pci_dbg(pci_bus->self, "bus failed to register with err = %d\n",
- rc);
-
- /* destroy THIS element */
- sn_hp_destroy();
- sn_release_slot(bss_hotplug_slot);
-
-alloc_err:
- /* destroy anything else on the list */
- while ((bss_hotplug_slot = sn_hp_destroy())) {
- pci_hp_deregister(bss_hotplug_slot);
- sn_release_slot(bss_hotplug_slot);
- }
-
- return rc;
-}
-
-static int __init sn_pci_hotplug_init(void)
-{
- struct pci_bus *pci_bus = NULL;
- int rc;
- int registered = 0;
-
- if (!sn_prom_feature_available(PRF_HOTPLUG_SUPPORT)) {
- printk(KERN_ERR "%s: PROM version does not support hotplug.\n",
- __func__);
- return -EPERM;
- }
-
- INIT_LIST_HEAD(&sn_hp_list);
-
- while ((pci_bus = pci_find_next_bus(pci_bus))) {
- if (!pci_bus->sysdata)
- continue;
-
- rc = sn_pci_bus_valid(pci_bus);
- if (rc != 1) {
- pci_dbg(pci_bus->self, "not a valid hotplug bus\n");
- continue;
- }
- pci_dbg(pci_bus->self, "valid hotplug bus\n");
-
- rc = sn_hotplug_slot_register(pci_bus);
- if (!rc) {
- registered = 1;
- } else {
- registered = 0;
- break;
- }
- }
-
- return registered == 1 ? 0 : -ENODEV;
-}
-
-static void __exit sn_pci_hotplug_exit(void)
-{
- struct hotplug_slot *bss_hotplug_slot;
-
- while ((bss_hotplug_slot = sn_hp_destroy())) {
- pci_hp_deregister(bss_hotplug_slot);
- sn_release_slot(bss_hotplug_slot);
- }
-
- if (!list_empty(&sn_hp_list))
- printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
-}
-
-module_init(sn_pci_hotplug_init);
-module_exit(sn_pci_hotplug_exit);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ed5ec1ac27..1b27b5af3d55 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1025,10 +1025,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
/*
- * Mandatory power management transition delays are
- * handled in the PCIe portdrv resume hooks.
+ * Mandatory power management transition delays, see
+ * PCI Express Base Specification Revision 2.0 Section
+ * 6.6.1: Conventional Reset. Do not delay for
+ * devices powered on/off by the corresponding bridge,
+ * because the delay has already been applied for the bridge.
*/
if (dev->runtime_d3cold) {
+ if (dev->d3cold_delay && !dev->imm_ready)
+ msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -4602,16 +4607,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
-
/**
- * pcie_wait_for_link_delay - Wait until link is active or inactive
+ * pcie_wait_for_link - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
{
int timeout = 1000;
bool ret;
@@ -4648,25 +4651,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
timeout -= 10;
}
if (active && ret)
- msleep(delay);
+ msleep(100);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
-/**
- * pcie_wait_for_link - Wait until link is active or inactive
- * @pdev: Bridge device
- * @active: waiting for active or inactive?
- *
- * Use this to wait till link becomes active or inactive.
- */
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
-{
- return pcie_wait_for_link_delay(pdev, active, 100);
-}
-
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
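[Editorial illustration, not part of the patch: with the delay variant removed, callers go back to the plain helper. A minimal, hypothetical sketch; pcie_wait_for_link() is declared in drivers/pci/pci.h, so this only applies inside the PCI core.]

#include "pci.h"	/* drivers/pci internal header */

/* Hypothetical: wait for the downstream link behind @bridge to train. */
static int example_wait_link_up(struct pci_dev *bridge)
{
	/* The fixed 100 ms post-training delay is applied inside the helper. */
	if (!pcie_wait_for_link(bridge, true))
		return -ETIMEDOUT;

	return 0;
}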
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1be03a97cb92..d22d1b807701 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -497,7 +497,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
u32 service);
-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
NULL, 0644);
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+ bool ret;
+
+ if (!bridge)
+ return false;
+
+ mutex_lock(&aspm_lock);
+ ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+ mutex_unlock(&aspm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
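[Editorial illustration, not part of the patch: a hedged sketch of how a driver could consult the new pcie_aspm_enabled() export, e.g. to pick between power-saving strategies. The policy shown is purely hypothetical.]

#include <linux/pci.h>

/* Hypothetical policy: prefer a deeper device-internal power state only
 * when the PCIe link itself has no ASPM states enabled.
 */
static bool example_prefer_deep_device_state(struct pci_dev *pdev)
{
	return !pcie_aspm_enabled(pdev);
}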
#ifdef CONFIG_PCIEASPM_DEBUG
static ssize_t link_state_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 308c3e0c4a34..1b330129089f 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data)
return 0;
}
-static int get_downstream_delay(struct pci_bus *bus)
-{
- struct pci_dev *pdev;
- int min_delay = 100;
- int max_delay = 0;
-
- list_for_each_entry(pdev, &bus->devices, bus_list) {
- if (!pdev->imm_ready)
- min_delay = 0;
- else if (pdev->d3cold_delay < min_delay)
- min_delay = pdev->d3cold_delay;
- if (pdev->d3cold_delay > max_delay)
- max_delay = pdev->d3cold_delay;
- }
-
- return max(min_delay, max_delay);
-}
-
-/*
- * wait_for_downstream_link - Wait for downstream link to establish
- * @pdev: PCIe port whose downstream link is waited
- *
- * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
- * access to the downstream component is permitted.
- *
- * This blocks PCI core resume of the hierarchy below this port until the
- * link is trained. Should be called before resuming port services to
- * prevent pciehp from starting to tear-down the hierarchy too soon.
- */
-static void wait_for_downstream_link(struct pci_dev *pdev)
-{
- int delay;
-
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
- return;
-
- if (pci_dev_is_disconnected(pdev))
- return;
-
- if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
- !pdev->bridge_d3)
- return;
-
- delay = get_downstream_delay(pdev->subordinate);
- if (!delay)
- return;
-
- dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
-
- /*
- * If downstream port does not support speeds greater than 5 GT/s
- * need to wait 100ms. For higher speeds (gen3) we need to wait
- * first for the data link layer to become active.
- */
- if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
- msleep(delay);
- else
- pcie_wait_for_link_delay(pdev, true, delay);
-}
-
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev)
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
-
- wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
@@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
-
- wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a3c7338fad86..dbeeb385fb9f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -64,11 +64,6 @@ static struct resource *get_pci_domain_busn_res(int domain_nr)
return &r->res;
}
-static int find_anything(struct device *dev, const void *data)
-{
- return 1;
-}
-
/*
* Some device drivers need know if PCI is initiated.
* Basically, we think PCI is not initiated when there
@@ -79,7 +74,7 @@ int no_pci_devices(void)
struct device *dev;
int no_devices;
- dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
+ dev = bus_find_next_device(&pci_bus_type, NULL);
no_devices = (dev == NULL);
put_device(dev);
return no_devices;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
*/
if (ioread32(map + 0x2240c) & 0x2) {
pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
- ret = pci_reset_function(pdev);
+ ret = pci_reset_bus(pdev);
if (ret < 0)
pci_err(pdev, "Failed to reset GPU: %d\n", ret);
}
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index eb6168e6ac43..590e594092f2 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -255,8 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
switch (state->Vcc) {
case 50:
++v;
+ /* fall through */
case 33:
++v;
+ /* fall through */
case 0:
break;
default:
@@ -267,9 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
switch (state->Vpp) {
case 12:
++p;
+ /* fall through */
case 33:
case 50:
++p;
+ /* fall through */
case 0:
break;
default:
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index ec54a2aa5cb8..245d60189375 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -117,9 +117,9 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
if (card_present(i)) {
sockets[i].card_state = 3;
- dprintk(KERN_DEBUG "i82092aa: slot %i is occupied\n",i);
+ dev_dbg(&dev->dev, "i82092aa: slot %i is occupied\n", i);
} else {
- dprintk(KERN_DEBUG "i82092aa: slot %i is vacant\n",i);
+ dev_dbg(&dev->dev, "i82092aa: slot %i is vacant\n", i);
}
}
@@ -128,7 +128,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */
/* Register the interrupt handler */
- dprintk(KERN_DEBUG "Requesting interrupt %i \n",dev->irq);
+ dev_dbg(&dev->dev, "Requesting interrupt %i\n", dev->irq);
if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) {
printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq);
goto err_out_free_res;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d06b8095a19..df352b334ea7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index da71c741cb46..abcf54f7d19c 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -113,8 +113,6 @@ struct smmu_pmu {
u64 counter_mask;
u32 options;
bool global_filter;
- u32 global_filter_span;
- u32 global_filter_sid;
};
#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
smmu_pmu_set_smr(smmu_pmu, idx, sid);
}
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+ struct perf_event *new)
+{
+ if (get_filter_enable(new) != get_filter_enable(curr))
+ return false;
+
+ if (!get_filter_enable(new))
+ return true;
+
+ return get_filter_span(new) == get_filter_span(curr) &&
+ get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
struct perf_event *event, int idx)
{
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
}
 /* Requested settings same as current global settings */
- if (span == smmu_pmu->global_filter_span &&
- sid == smmu_pmu->global_filter_sid)
+ idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+ if (idx == num_ctrs ||
+ smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+ smmu_pmu_set_event_filter(event, 0, span, sid);
return 0;
+ }
- if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
- return -EAGAIN;
-
- smmu_pmu_set_event_filter(event, 0, span, sid);
- smmu_pmu->global_filter_span = span;
- smmu_pmu->global_filter_sid = sid;
- return 0;
+ return -EAGAIN;
}
static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
return idx;
}
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+ struct perf_event *new)
+{
+ if (new->pmu != curr->pmu)
+ return false;
+
+ if (to_smmu_pmu(new->pmu)->global_filter &&
+ !smmu_pmu_check_global_filter(curr, new))
+ return false;
+
+ return true;
+}
+
/*
* Implementation of abstract pmu functionality required by
* the core perf events code.
@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct device *dev = smmu_pmu->dev;
struct perf_event *sibling;
+ int group_num_events = 1;
u16 event_id;
if (event->attr.type != event->pmu->type)
@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event)
}
/* Don't allow groups with mixed PMUs, except for s/w events */
- if (event->group_leader->pmu != event->pmu &&
- !is_software_event(event->group_leader)) {
- dev_dbg(dev, "Can't create mixed PMU group\n");
- return -EINVAL;
+ if (!is_software_event(event->group_leader)) {
+ if (!smmu_pmu_events_compatible(event->group_leader, event))
+ return -EINVAL;
+
+ if (++group_num_events > smmu_pmu->num_counters)
+ return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
- if (sibling->pmu != event->pmu &&
- !is_software_event(sibling)) {
- dev_dbg(dev, "Can't create mixed PMU group\n");
+ if (is_software_event(sibling))
+ continue;
+
+ if (!smmu_pmu_events_compatible(sibling, event))
+ return -EINVAL;
+
+ if (++group_num_events > smmu_pmu->num_counters)
return -EINVAL;
- }
}
hwc->idx = -1;
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 63fe21600072..ce7345745b42 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -35,6 +35,8 @@
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
+#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define DDR_PERF_DEV_NAME "imx8_ddr"
@@ -42,11 +44,25 @@
static DEFINE_IDA(ddr_ida);
+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+ unsigned int quirks; /* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
- { .compatible = "fsl,imx8-ddr-pmu",},
- { .compatible = "fsl,imx8m-ddr-pmu",},
+ { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+ { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
struct ddr_pmu {
struct pmu pmu;
@@ -57,6 +73,7 @@ struct ddr_pmu {
struct perf_event *events[NUM_COUNTERS];
int active_events;
enum cpuhp_state cpuhp_state;
+ const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
};
@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
NULL,
};
@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-15");
+PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_axi_id.attr,
+ &format_attr_axi_mask.attr,
NULL,
};
@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+ return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+ return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+ struct perf_event *b)
+{
+ if (!ddr_perf_is_filtered(a))
+ return true;
+ if (!ddr_perf_is_filtered(b))
+ return true;
+ return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
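[Editorial illustration, not part of the patch: a userspace sketch of programming the new AXI ID filter through the axi_id/axi_mask format fields (config1 bits 0-15 and 16-31) for the axid-read event (0x41). The PMU type, AXI ID and mask values are hypothetical.]

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_axid_read_counter(int pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;				/* dynamic type from sysfs */
	attr.config = 0x41;				/* axid-read */
	attr.config1 = 0x10 | (0xffffULL << 16);	/* axi_id = 0x10, axi_mask = 0xffff */

	/* Uncore PMU: system-wide on one CPU, no task context. */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}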
static int ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event)
!is_software_event(event->group_leader))
return -EINVAL;
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ if (!ddr_perf_filters_compatible(event, event->group_leader))
+ return -EINVAL;
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (!ddr_perf_filters_compatible(event, sibling))
+ return -EINVAL;
+ }
+ }
+
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int counter;
int cfg = event->attr.config;
+ int cfg1 = event->attr.config1;
+
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ int i;
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] &&
+ !ddr_perf_filters_compatible(event, pmu->events[i]))
+ return -EINVAL;
+ }
+
+ if (ddr_perf_is_filtered(event)) {
+ /* revert axi id masking(axi_mask) value */
+ cfg1 ^= AXI_MASKING_REVERT;
+ writel(cfg1, pmu->base + COUNTER_DPCR1);
+ }
+ }
counter = ddr_perf_alloc_counter(pmu, cfg);
if (counter < 0) {
@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
+ pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DDR_CPUHP_CB_NAME,
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 6ad0823bcf23..e42d4464c2cf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 4f2917f3e25e..f28063873e11 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 9153e093f9df..078b8dc57250 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index d06182fe14b8..21d6991dbe0b 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
cluster->cluster_id = fw_cluster_id;
irq = platform_get_irq(sdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Failed to get valid irq for cluster %ld\n",
- fw_cluster_id);
+ if (irq < 0)
return irq;
- }
irq_set_status_flags(irq, IRQ_NOAUTOEN);
cluster->irq = irq;
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 3259e2ebeb39..7e328d6385c3 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (irq < 0)
return -EINVAL;
- }
rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 0d9fddc498a6..c96a1afc95bd 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_ARCH_SUNXI) += allwinner/
obj-$(CONFIG_ARCH_MESON) += amlogic/
-obj-$(CONFIG_LANTIQ) += lantiq/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
@@ -19,6 +18,7 @@ obj-y += broadcom/ \
cadence/ \
freescale/ \
hisilicon/ \
+ lantiq/ \
marvell/ \
motorola/ \
mscc/ \
diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig
index eb66c857ce25..c4df9709d53f 100644
--- a/drivers/phy/lantiq/Kconfig
+++ b/drivers/phy/lantiq/Kconfig
@@ -2,6 +2,17 @@
#
# Phy drivers for Lantiq / Intel platforms
#
+config PHY_LANTIQ_VRX200_PCIE
+ tristate "Lantiq VRX200/ARX300 PCIe PHY"
+ depends on SOC_TYPE_XWAY || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Support for the PCIe PHY(s) on the Lantiq / Intel VRX200 and ARX300
+ family SoCs.
+ If unsure, say N.
+
config PHY_LANTIQ_RCU_USB2
tristate "Lantiq XWAY SoC RCU based USB PHY"
depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST)
diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile
index 540049039092..7c14eb24ab73 100644
--- a/drivers/phy/lantiq/Makefile
+++ b/drivers/phy/lantiq/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o
+obj-$(CONFIG_PHY_LANTIQ_VRX200_PCIE) += phy-lantiq-vrx200-pcie.o
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
new file mode 100644
index 000000000000..544d64a84cc0
--- /dev/null
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe PHY driver for Lantiq VRX200 and ARX300 SoCs.
+ *
+ * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on the BSP (called "UGW") driver:
+ * Copyright (C) 2009-2015 Lei Chuanhua <chuanhua.lei@lantiq.com>
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * TODO: PHY modes other than 36MHz (without "SSC")
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
+
+#define PCIE_PHY_PLL_CTRL1 0x44
+
+#define PCIE_PHY_PLL_CTRL2 0x46
+#define PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK GENMASK(7, 0)
+#define PCIE_PHY_PLL_CTRL2_CONST_SDM_EN BIT(8)
+#define PCIE_PHY_PLL_CTRL2_PLL_SDM_EN BIT(9)
+
+#define PCIE_PHY_PLL_CTRL3 0x48
+#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN BIT(1)
+#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK GENMASK(6, 4)
+
+#define PCIE_PHY_PLL_CTRL4 0x4a
+#define PCIE_PHY_PLL_CTRL5 0x4c
+#define PCIE_PHY_PLL_CTRL6 0x4e
+#define PCIE_PHY_PLL_CTRL7 0x50
+#define PCIE_PHY_PLL_A_CTRL1 0x52
+
+#define PCIE_PHY_PLL_A_CTRL2 0x54
+#define PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN BIT(14)
+
+#define PCIE_PHY_PLL_A_CTRL3 0x56
+#define PCIE_PHY_PLL_A_CTRL3_MMD_MASK GENMASK(15, 13)
+
+#define PCIE_PHY_PLL_STATUS 0x58
+
+#define PCIE_PHY_TX1_CTRL1 0x60
+#define PCIE_PHY_TX1_CTRL1_FORCE_EN BIT(3)
+#define PCIE_PHY_TX1_CTRL1_LOAD_EN BIT(4)
+
+#define PCIE_PHY_TX1_CTRL2 0x62
+#define PCIE_PHY_TX1_CTRL3 0x64
+#define PCIE_PHY_TX1_A_CTRL1 0x66
+#define PCIE_PHY_TX1_A_CTRL2 0x68
+#define PCIE_PHY_TX1_MOD1 0x6a
+#define PCIE_PHY_TX1_MOD2 0x6c
+#define PCIE_PHY_TX1_MOD3 0x6e
+
+#define PCIE_PHY_TX2_CTRL1 0x70
+#define PCIE_PHY_TX2_CTRL1_LOAD_EN BIT(4)
+
+#define PCIE_PHY_TX2_CTRL2 0x72
+#define PCIE_PHY_TX2_A_CTRL1 0x76
+#define PCIE_PHY_TX2_A_CTRL2 0x78
+#define PCIE_PHY_TX2_MOD1 0x7a
+#define PCIE_PHY_TX2_MOD2 0x7c
+#define PCIE_PHY_TX2_MOD3 0x7e
+
+#define PCIE_PHY_RX1_CTRL1 0xa0
+#define PCIE_PHY_RX1_CTRL1_LOAD_EN BIT(1)
+
+#define PCIE_PHY_RX1_CTRL2 0xa2
+#define PCIE_PHY_RX1_CDR 0xa4
+#define PCIE_PHY_RX1_EI 0xa6
+#define PCIE_PHY_RX1_A_CTRL 0xaa
+
+struct ltq_vrx200_pcie_phy_priv {
+ struct phy *phy;
+ unsigned int mode;
+ struct device *dev;
+ struct regmap *phy_regmap;
+ struct regmap *rcu_regmap;
+ struct clk *pdi_clk;
+ struct clk *phy_clk;
+ struct reset_control *phy_reset;
+ struct reset_control *pcie_reset;
+ u32 rcu_ahb_endian_offset;
+ u32 rcu_ahb_endian_big_endian_mask;
+};
+
+static void ltq_vrx200_pcie_phy_common_setup(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ /* PLL Setting */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL1, 0x120e);
+
+ /* increase the bias reference voltage */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2, 0x39d7);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3, 0x0900);
+
+ /* Endcnt */
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_EI, 0x0004);
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_A_CTRL, 0x6803);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX1_CTRL1,
+ PCIE_PHY_TX1_CTRL1_FORCE_EN,
+ PCIE_PHY_TX1_CTRL1_FORCE_EN);
+
+ /* predrv_ser_en */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL2, 0x0706);
+
+ /* ctrl_lim */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL3, 0x1fff);
+
+ /* ctrl */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL1, 0x0810);
+
+ /* predrv_ser_en */
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x7f00,
+ 0x4700);
+
+ /* RTERM */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL2, 0x2e00);
+
+ /* Improved 100MHz clock output */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_CTRL2, 0x3096);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x4707);
+
+ /* Reduced CDR BW to avoid glitches */
+ regmap_write(priv->phy_regmap, PCIE_PHY_RX1_CDR, 0x0235);
+}
+
+static void pcie_phy_36mhz_mode_setup(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
+ PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN, 0x0000);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
+ PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK, 0x0000);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_PLL_SDM_EN,
+ PCIE_PHY_PLL_CTRL2_PLL_SDM_EN);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_EN,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_EN);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3,
+ PCIE_PHY_PLL_A_CTRL3_MMD_MASK,
+ FIELD_PREP(PCIE_PHY_PLL_A_CTRL3_MMD_MASK, 0x1));
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2,
+ PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN, 0x0000);
+
+ /* const_sdm */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL1, 0x38e4);
+
+ regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
+ PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
+ FIELD_PREP(PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
+ 0xee));
+
+ /* pllmod */
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL7, 0x0002);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL6, 0x3a04);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL5, 0xfae3);
+ regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL4, 0x1b72);
+}
+
+static int ltq_vrx200_pcie_phy_wait_for_pll(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ unsigned int tmp;
+ int ret;
+
+ ret = regmap_read_poll_timeout(priv->phy_regmap, PCIE_PHY_PLL_STATUS,
+ tmp, ((tmp & 0x0070) == 0x0070), 10,
+ 10000);
+ if (ret) {
+ dev_err(priv->dev, "PLL Link timeout, PLL status = 0x%04x\n",
+ tmp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltq_vrx200_pcie_phy_apply_workarounds(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ static const struct reg_default slices[] = {
+ {
+ .reg = PCIE_PHY_TX1_CTRL1,
+ .def = PCIE_PHY_TX1_CTRL1_LOAD_EN,
+ },
+ {
+ .reg = PCIE_PHY_TX2_CTRL1,
+ .def = PCIE_PHY_TX2_CTRL1_LOAD_EN,
+ },
+ {
+ .reg = PCIE_PHY_RX1_CTRL1,
+ .def = PCIE_PHY_RX1_CTRL1_LOAD_EN,
+ }
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(slices); i++) {
+ /* enable load_en */
+ regmap_update_bits(priv->phy_regmap, slices[i].reg,
+ slices[i].def, slices[i].def);
+
+ udelay(1);
+
+ /* disable load_en */
+ regmap_update_bits(priv->phy_regmap, slices[i].reg,
+ slices[i].def, 0x0);
+ }
+
+ for (i = 0; i < 5; i++) {
+ /* TX2 modulation */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD1, 0x1ffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD2, 0xfffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0601);
+ usleep_range(1000, 2000);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0001);
+
+ /* TX1 modulation */
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD1, 0x1ffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD2, 0xfffe);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0601);
+ usleep_range(1000, 2000);
+ regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0001);
+ }
+}
+
+static int ltq_vrx200_pcie_phy_init(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ if (of_device_is_big_endian(priv->dev->of_node))
+ regmap_update_bits(priv->rcu_regmap,
+ priv->rcu_ahb_endian_offset,
+ priv->rcu_ahb_endian_big_endian_mask,
+ priv->rcu_ahb_endian_big_endian_mask);
+ else
+ regmap_update_bits(priv->rcu_regmap,
+ priv->rcu_ahb_endian_offset,
+ priv->rcu_ahb_endian_big_endian_mask, 0x0);
+
+ ret = reset_control_assert(priv->phy_reset);
+ if (ret)
+ goto err;
+
+ udelay(1);
+
+ ret = reset_control_deassert(priv->phy_reset);
+ if (ret)
+ goto err;
+
+ udelay(1);
+
+ ret = reset_control_deassert(priv->pcie_reset);
+ if (ret)
+ goto err_assert_phy_reset;
+
+ /* Make sure PHY PLL is stable */
+ usleep_range(20, 40);
+
+ return 0;
+
+err_assert_phy_reset:
+ reset_control_assert(priv->phy_reset);
+err:
+ return ret;
+}
+
+static int ltq_vrx200_pcie_phy_exit(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_assert(priv->pcie_reset);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(priv->phy_reset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Enable PDI to access PCIe PHY register */
+ ret = clk_prepare_enable(priv->pdi_clk);
+ if (ret)
+ goto err;
+
+ /* Configure PLL and PHY clock */
+ ltq_vrx200_pcie_phy_common_setup(phy);
+
+ pcie_phy_36mhz_mode_setup(phy);
+
+ /* Enable the PCIe PHY and make PLL setting take effect */
+ ret = clk_prepare_enable(priv->phy_clk);
+ if (ret)
+ goto err_disable_pdi_clk;
+
+ /* Check if we are in "startup ready" status */
+ if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ goto err_disable_phy_clk;
+
+ ltq_vrx200_pcie_phy_apply_workarounds(phy);
+
+ return 0;
+
+err_disable_phy_clk:
+ clk_disable_unprepare(priv->phy_clk);
+err_disable_pdi_clk:
+ clk_disable_unprepare(priv->pdi_clk);
+err:
+ return ret;
+}
+
+static int ltq_vrx200_pcie_phy_power_off(struct phy *phy)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->phy_clk);
+ clk_disable_unprepare(priv->pdi_clk);
+
+ return 0;
+}
+
+static struct phy_ops ltq_vrx200_pcie_phy_ops = {
+ .init = ltq_vrx200_pcie_phy_init,
+ .exit = ltq_vrx200_pcie_phy_exit,
+ .power_on = ltq_vrx200_pcie_phy_power_on,
+ .power_off = ltq_vrx200_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct ltq_vrx200_pcie_phy_priv *priv = dev_get_drvdata(dev);
+ unsigned int mode;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mode = args->args[0];
+
+ switch (mode) {
+ case LANTIQ_PCIE_PHY_MODE_36MHZ:
+ priv->mode = mode;
+ break;
+
+ case LANTIQ_PCIE_PHY_MODE_25MHZ:
+ case LANTIQ_PCIE_PHY_MODE_25MHZ_SSC:
+ case LANTIQ_PCIE_PHY_MODE_36MHZ_SSC:
+ case LANTIQ_PCIE_PHY_MODE_100MHZ:
+ case LANTIQ_PCIE_PHY_MODE_100MHZ_SSC:
+ dev_err(dev, "PHY mode not implemented yet: %u\n", mode);
+ return ERR_PTR(-EINVAL);
+
+ default:
+ dev_err(dev, "invalid PHY mode %u\n", mode);
+ return ERR_PTR(-EINVAL);
+	}
+
+ return priv->phy;
+}
+
+static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
+{
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = PCIE_PHY_RX1_A_CTRL,
+ };
+ struct ltq_vrx200_pcie_phy_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->phy_regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(priv->phy_regmap))
+ return PTR_ERR(priv->phy_regmap);
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ ret = device_property_read_u32(dev, "lantiq,rcu-endian-offset",
+ &priv->rcu_ahb_endian_offset);
+ if (ret) {
+ dev_err(dev,
+ "failed to parse the 'lantiq,rcu-endian-offset' property\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "lantiq,rcu-big-endian-mask",
+ &priv->rcu_ahb_endian_big_endian_mask);
+ if (ret) {
+ dev_err(dev,
+ "failed to parse the 'lantiq,rcu-big-endian-mask' property\n");
+ return ret;
+ }
+
+ priv->pdi_clk = devm_clk_get(dev, "pdi");
+ if (IS_ERR(priv->pdi_clk))
+ return PTR_ERR(priv->pdi_clk);
+
+ priv->phy_clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->phy_clk))
+ return PTR_ERR(priv->phy_clk);
+
+ priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(priv->phy_reset))
+ return PTR_ERR(priv->phy_reset);
+
+ priv->pcie_reset = devm_reset_control_get_shared(dev, "pcie");
+ if (IS_ERR(priv->pcie_reset))
+ return PTR_ERR(priv->pcie_reset);
+
+ priv->dev = dev;
+
+ priv->phy = devm_phy_create(dev, dev->of_node,
+ &ltq_vrx200_pcie_phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ phy_set_drvdata(priv->phy, priv);
+ dev_set_drvdata(dev, priv);
+
+ provider = devm_of_phy_provider_register(dev,
+ ltq_vrx200_pcie_phy_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id ltq_vrx200_pcie_phy_of_match[] = {
+ { .compatible = "lantiq,vrx200-pcie-phy", },
+ { .compatible = "lantiq,arx300-pcie-phy", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ltq_vrx200_pcie_phy_of_match);
+
+static struct platform_driver ltq_vrx200_pcie_phy_driver = {
+ .probe = ltq_vrx200_pcie_phy_probe,
+ .driver = {
+ .name = "ltq-vrx200-pcie-phy",
+ .of_match_table = ltq_vrx200_pcie_phy_of_match,
+ }
+};
+module_platform_driver(ltq_vrx200_pcie_phy_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Lantiq VRX200 and ARX300 PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
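
The new driver above only provides the PHY; a PCIe host controller driver is expected to consume it through the generic PHY framework. The following is a minimal consumer-side sketch, not part of this patch; the "pcie-phy" phy-names entry and the function name are assumptions.

#include <linux/device.h>
#include <linux/phy/phy.h>

/* Hypothetical consumer sketch: acquire and bring up the VRX200 PCIe PHY. */
static int example_pcie_phy_bringup(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "pcie-phy");	/* assumed phy-names entry */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* runs ltq_vrx200_pcie_phy_init() */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* clocks, PLL setup, workarounds */
	if (ret)
		phy_exit(phy);

	return ret;
}
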
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 0e1642419c0b..4053ba6cd0fb 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -57,6 +57,7 @@ config PHY_MVEBU_CP110_COMPHY
tristate "Marvell CP110 comphy driver"
depends on ARCH_MVEBU || COMPILE_TEST
depends on OF
+ depends on HAVE_ARM_SMCCC
select GENERIC_PHY
help
This driver allows controlling the comphy, a hardware block providing
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 3e00bc679d4e..6960dfd8ad8c 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -200,8 +200,10 @@ static int a38x_comphy_probe(struct platform_device *pdev)
}
phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ of_node_put(child);
return PTR_ERR(phy);
+ }
priv->lane[val].base = base + 0x28 * val;
priv->lane[val].priv = priv;
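
The fix above addresses a node reference leak: for_each_available_child_of_node() drops the child reference only when it advances to the next node, so any early return from the loop body must call of_node_put() itself. A generic sketch of that pattern (the per-child helper is hypothetical):

#include <linux/of.h>

static int example_setup_child(struct device_node *child);	/* hypothetical */

static int example_iterate_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = example_setup_child(child);
		if (ret) {
			/* early exit bypasses the iterator's implicit put */
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
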
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 8812a104c233..1a138be8bd6a 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -26,6 +26,7 @@
#define COMPHY_SIP_POWER_ON 0x82000001
#define COMPHY_SIP_POWER_OFF 0x82000002
#define COMPHY_SIP_PLL_LOCK 0x82000003
+#define COMPHY_FW_NOT_SUPPORTED (-1)
#define COMPHY_FW_MODE_SATA 0x1
#define COMPHY_FW_MODE_SGMII 0x2
@@ -169,6 +170,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
u32 fw_param;
int fw_mode;
+ int ret;
fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port,
lane->mode, lane->submode);
@@ -217,7 +219,12 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
return -ENOTSUPP;
}
- return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+ ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+ if (ret == COMPHY_FW_NOT_SUPPORTED)
+ dev_err(lane->dev,
+ "unsupported SMC call, try updating your firmware\n");
+
+ return ret;
}
static int mvebu_a3700_comphy_power_off(struct phy *phy)
@@ -277,13 +284,17 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
}
lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
- if (!lane)
+ if (!lane) {
+ of_node_put(child);
return -ENOMEM;
+ }
phy = devm_phy_create(&pdev->dev, child,
&mvebu_a3700_comphy_ops);
- if (IS_ERR(phy))
+ if (IS_ERR(phy)) {
+ of_node_put(child);
return PTR_ERR(phy);
+ }
lane->dev = &pdev->dev;
lane->mode = PHY_MODE_INVALID;
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index d98e0451f6a1..e3b87c94aaf6 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -5,6 +5,8 @@
* Antoine Tenart <antoine.tenart@free-electrons.com>
*/
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
@@ -22,6 +24,7 @@
#define MVEBU_COMPHY_SERDES_CFG0_PU_RX BIT(11)
#define MVEBU_COMPHY_SERDES_CFG0_PU_TX BIT(12)
#define MVEBU_COMPHY_SERDES_CFG0_HALF_BUS BIT(14)
+#define MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE BIT(15)
#define MVEBU_COMPHY_SERDES_CFG1(n) (0x4 + (n) * 0x1000)
#define MVEBU_COMPHY_SERDES_CFG1_RESET BIT(3)
#define MVEBU_COMPHY_SERDES_CFG1_RX_INIT BIT(4)
@@ -77,8 +80,8 @@
#define MVEBU_COMPHY_TX_SLEW_RATE(n) (0x974 + (n) * 0x1000)
#define MVEBU_COMPHY_TX_SLEW_RATE_EMPH(n) ((n) << 5)
#define MVEBU_COMPHY_TX_SLEW_RATE_SLC(n) ((n) << 10)
-#define MVEBU_COMPHY_DLT_CTRL(n) (0x984 + (n) * 0x1000)
-#define MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN BIT(2)
+#define MVEBU_COMPHY_DTL_CTRL(n) (0x984 + (n) * 0x1000)
+#define MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN BIT(2)
#define MVEBU_COMPHY_FRAME_DETECT0(n) (0xa14 + (n) * 0x1000)
#define MVEBU_COMPHY_FRAME_DETECT0_PATN(n) ((n) << 7)
#define MVEBU_COMPHY_FRAME_DETECT3(n) (0xa20 + (n) * 0x1000)
@@ -111,55 +114,151 @@
#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
+#define MVEBU_COMPHY_SD1_CTRL1 0x1148
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN BIT(26)
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN BIT(27)
#define MVEBU_COMPHY_LANES 6
#define MVEBU_COMPHY_PORTS 3
+#define COMPHY_SIP_POWER_ON 0x82000001
+#define COMPHY_SIP_POWER_OFF 0x82000002
+#define COMPHY_FW_NOT_SUPPORTED (-1)
+
+/*
+ * A lane is described by the following bitfields:
+ * [ 1- 0]: COMPHY polarity inversion
+ * [ 2- 7]: COMPHY speed
+ * [ 8-11]: COMPHY port index
+ * [12-16]: COMPHY mode
+ * [17]: Clock source
+ * [18-20]: PCIe width (x1, x2, x4)
+ */
+#define COMPHY_FW_POL_OFFSET 0
+#define COMPHY_FW_POL_MASK GENMASK(1, 0)
+#define COMPHY_FW_SPEED_OFFSET 2
+#define COMPHY_FW_SPEED_MASK GENMASK(7, 2)
+#define COMPHY_FW_SPEED_MAX COMPHY_FW_SPEED_MASK
+#define COMPHY_FW_SPEED_1250 0
+#define COMPHY_FW_SPEED_3125 2
+#define COMPHY_FW_SPEED_5000 3
+#define COMPHY_FW_SPEED_103125 6
+#define COMPHY_FW_PORT_OFFSET 8
+#define COMPHY_FW_PORT_MASK GENMASK(11, 8)
+#define COMPHY_FW_MODE_OFFSET 12
+#define COMPHY_FW_MODE_MASK GENMASK(16, 12)
+#define COMPHY_FW_WIDTH_OFFSET 18
+#define COMPHY_FW_WIDTH_MASK GENMASK(20, 18)
+
+#define COMPHY_FW_PARAM_FULL(mode, port, speed, pol, width) \
+ ((((pol) << COMPHY_FW_POL_OFFSET) & COMPHY_FW_POL_MASK) | \
+ (((mode) << COMPHY_FW_MODE_OFFSET) & COMPHY_FW_MODE_MASK) | \
+ (((port) << COMPHY_FW_PORT_OFFSET) & COMPHY_FW_PORT_MASK) | \
+ (((speed) << COMPHY_FW_SPEED_OFFSET) & COMPHY_FW_SPEED_MASK) | \
+ (((width) << COMPHY_FW_WIDTH_OFFSET) & COMPHY_FW_WIDTH_MASK))
+
+#define COMPHY_FW_PARAM(mode, port) \
+ COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_MAX, 0, 0)
+
+#define COMPHY_FW_PARAM_ETH(mode, port, speed) \
+ COMPHY_FW_PARAM_FULL(mode, port, speed, 0, 0)
+
+#define COMPHY_FW_PARAM_PCIE(mode, port, width) \
+ COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_5000, 0, width)
+
+#define COMPHY_FW_MODE_SATA 0x1
+#define COMPHY_FW_MODE_SGMII 0x2 /* SGMII 1G */
+#define COMPHY_FW_MODE_HS_SGMII 0x3 /* SGMII 2.5G */
+#define COMPHY_FW_MODE_USB3H 0x4
+#define COMPHY_FW_MODE_USB3D 0x5
+#define COMPHY_FW_MODE_PCIE 0x6
+#define COMPHY_FW_MODE_RXAUI 0x7
+#define COMPHY_FW_MODE_XFI 0x8 /* SFI: 0x9 (is treated like XFI) */
+
struct mvebu_comphy_conf {
enum phy_mode mode;
int submode;
unsigned lane;
unsigned port;
u32 mux;
+ u32 fw_mode;
};
-#define MVEBU_COMPHY_CONF(_lane, _port, _submode, _mux) \
+#define ETH_CONF(_lane, _port, _submode, _mux, _fw) \
{ \
.lane = _lane, \
.port = _port, \
.mode = PHY_MODE_ETHERNET, \
.submode = _submode, \
.mux = _mux, \
+ .fw_mode = _fw, \
+ }
+
+#define GEN_CONF(_lane, _port, _mode, _fw) \
+ { \
+ .lane = _lane, \
+ .port = _port, \
+ .mode = _mode, \
+ .submode = PHY_INTERFACE_MODE_NA, \
+ .mux = -1, \
+ .fw_mode = _fw, \
}
static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 0 */
- MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ GEN_CONF(0, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ GEN_CONF(0, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 1 */
- MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ GEN_CONF(1, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(1, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+ GEN_CONF(1, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ GEN_CONF(1, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
/* lane 2 */
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1),
- MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+ GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
/* lane 3 */
- MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2),
- MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2),
+ GEN_CONF(3, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(3, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(3, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 4 */
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2),
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2),
- MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2),
- MVEBU_COMPHY_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+ GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+ GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
- MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
- MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+ ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+ GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+ ETH_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+ ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ GEN_CONF(5, 2, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
};
struct mvebu_comphy_priv {
void __iomem *base;
struct regmap *regmap;
struct device *dev;
+ struct clk *mg_domain_clk;
+ struct clk *mg_core_clk;
+ struct clk *axi_clk;
+ unsigned long cp_phys;
};
struct mvebu_comphy_lane {
@@ -170,30 +269,59 @@ struct mvebu_comphy_lane {
int port;
};
-static int mvebu_comphy_get_mux(int lane, int port,
- enum phy_mode mode, int submode)
+static int mvebu_comphy_smc(unsigned long function, unsigned long phys,
+ unsigned long lane, unsigned long mode)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port,
+ enum phy_mode mode, int submode)
{
int i, n = ARRAY_SIZE(mvebu_comphy_cp110_modes);
+ /* Ignore PCIe submode: it represents the width */
+ bool ignore_submode = (mode == PHY_MODE_PCIE);
+ const struct mvebu_comphy_conf *conf;
/* Unused PHY mux value is 0x0 */
if (mode == PHY_MODE_INVALID)
return 0;
for (i = 0; i < n; i++) {
- if (mvebu_comphy_cp110_modes[i].lane == lane &&
- mvebu_comphy_cp110_modes[i].port == port &&
- mvebu_comphy_cp110_modes[i].mode == mode &&
- mvebu_comphy_cp110_modes[i].submode == submode)
+ conf = &mvebu_comphy_cp110_modes[i];
+ if (conf->lane == lane &&
+ conf->port == port &&
+ conf->mode == mode &&
+ (conf->submode == submode || ignore_submode))
break;
}
if (i == n)
return -EINVAL;
- return mvebu_comphy_cp110_modes[i].mux;
+ if (fw_mode)
+ return conf->fw_mode;
+ else
+ return conf->mux;
}
-static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
+static inline int mvebu_comphy_get_mux(int lane, int port,
+ enum phy_mode mode, int submode)
+{
+ return mvebu_comphy_get_mode(false, lane, port, mode, submode);
+}
+
+static inline int mvebu_comphy_get_fw_mode(int lane, int port,
+ enum phy_mode mode, int submode)
+{
+ return mvebu_comphy_get_mode(true, lane, port, mode, submode);
+}
+
+static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
{
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
@@ -210,20 +338,61 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_PU_TX |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS |
MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xf) |
- MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf));
- if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+ MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf) |
+ MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
+
+ switch (lane->submode) {
+ case PHY_INTERFACE_MODE_10GKR:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
- else if (lane->submode == PHY_INTERFACE_MODE_2500BASEX)
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xb) |
+ MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xb) |
+ MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
- else if (lane->submode == PHY_INTERFACE_MODE_SGMII)
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
+ break;
+ default:
+ dev_err(priv->dev,
+ "unsupported comphy submode (%d) on lane %d\n",
+ lane->submode,
+ lane->id);
+ return -ENOTSUPP;
+ }
+
writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id));
+ if (lane->submode == PHY_INTERFACE_MODE_RXAUI) {
+ regmap_read(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, &val);
+
+ switch (lane->id) {
+ case 2:
+ case 3:
+ val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN;
+ break;
+ case 4:
+ case 5:
+ val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN;
+ break;
+ default:
+ dev_err(priv->dev,
+ "RXAUI is not supported on comphy lane %d\n",
+ lane->id);
+ return -EINVAL;
+ }
+
+ regmap_write(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, val);
+ }
+
/* reset */
val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id));
val &= ~(MVEBU_COMPHY_SERDES_CFG1_RESET |
@@ -264,6 +433,8 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
val &= ~MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x7);
val |= MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x1);
writel(val, priv->base + MVEBU_COMPHY_LOOPBACK(lane->id));
+
+ return 0;
}
static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane)
@@ -312,17 +483,20 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
+ int err;
- mvebu_comphy_ethernet_init_reset(lane);
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL;
writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
- val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
- val &= ~MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
- writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val &= ~MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
regmap_read(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), &val);
val &= ~MVEBU_COMPHY_CONF1_USB_PCIE;
@@ -337,22 +511,78 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
+static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ u32 val;
+ int err;
+
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
+
+ val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+ val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
+ MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
+ writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+ val |= MVEBU_COMPHY_SERDES_CFG2_DFE_EN;
+ writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+ val |= MVEBU_COMPHY_DFE_RES_FORCE_GEN_TBL;
+ writel(val, priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+ val &= ~MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xf);
+ val |= MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xd);
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+ val &= ~(MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x7) |
+ MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x7));
+ val |= MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x1) |
+ MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x1) |
+ MVEBU_COMPHY_GEN1_S1_RX_DFE_EN;
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_COEF(lane->id));
+ val &= ~(MVEBU_COMPHY_COEF_DFE_EN | MVEBU_COMPHY_COEF_DFE_CTRL);
+ writel(val, priv->base + MVEBU_COMPHY_COEF(lane->id));
+
+ val = readl(priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+ val &= ~MVEBU_COMPHY_GEN1_S4_DFE_RES(0x3);
+ val |= MVEBU_COMPHY_GEN1_S4_DFE_RES(0x1);
+ writel(val, priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+
+ return mvebu_comphy_init_plls(lane);
+}
+
static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
u32 val;
+ int err;
- mvebu_comphy_ethernet_init_reset(lane);
+ err = mvebu_comphy_ethernet_init_reset(lane);
+ if (err)
+ return err;
val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
- val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
- val |= MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
- writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+ val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+ val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+ writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
/* Speed divider */
val = readl(priv->base + MVEBU_COMPHY_SPEED_DIV(lane->id));
@@ -476,7 +706,7 @@ static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
return mvebu_comphy_init_plls(lane);
}
-static int mvebu_comphy_power_on(struct phy *phy)
+static int mvebu_comphy_power_on_legacy(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -502,6 +732,9 @@ static int mvebu_comphy_power_on(struct phy *phy)
case PHY_INTERFACE_MODE_2500BASEX:
ret = mvebu_comphy_set_mode_sgmii(phy);
break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ ret = mvebu_comphy_set_mode_rxaui(phy);
+ break;
case PHY_INTERFACE_MODE_10GKR:
ret = mvebu_comphy_set_mode_10gkr(phy);
break;
@@ -517,26 +750,110 @@ static int mvebu_comphy_power_on(struct phy *phy)
return ret;
}
+static int mvebu_comphy_power_on(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ int fw_mode, fw_speed;
+ u32 fw_param = 0;
+ int ret;
+
+ fw_mode = mvebu_comphy_get_fw_mode(lane->id, lane->port,
+ lane->mode, lane->submode);
+ if (fw_mode < 0)
+ goto try_legacy;
+
+ /* Try SMC flow first */
+ switch (lane->mode) {
+ case PHY_MODE_ETHERNET:
+ switch (lane->submode) {
+ case PHY_INTERFACE_MODE_RXAUI:
+ dev_dbg(priv->dev, "set lane %d to RXAUI mode\n",
+ lane->id);
+ fw_speed = 0;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ dev_dbg(priv->dev, "set lane %d to 1000BASE-X mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_1250;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ dev_dbg(priv->dev, "set lane %d to 2500BASE-X mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_3125;
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_103125;
+ break;
+ default:
+ dev_err(priv->dev, "unsupported Ethernet mode (%d)\n",
+ lane->submode);
+ return -ENOTSUPP;
+ }
+ fw_param = COMPHY_FW_PARAM_ETH(fw_mode, lane->port, fw_speed);
+ break;
+ case PHY_MODE_USB_HOST_SS:
+ case PHY_MODE_USB_DEVICE_SS:
+ dev_dbg(priv->dev, "set lane %d to USB3 mode\n", lane->id);
+ fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+ break;
+ case PHY_MODE_SATA:
+ dev_dbg(priv->dev, "set lane %d to SATA mode\n", lane->id);
+ fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+ break;
+ case PHY_MODE_PCIE:
+ dev_dbg(priv->dev, "set lane %d to PCIe mode (x%d)\n", lane->id,
+ lane->submode);
+ fw_param = COMPHY_FW_PARAM_PCIE(fw_mode, lane->port,
+ lane->submode);
+ break;
+ default:
+ dev_err(priv->dev, "unsupported PHY mode (%d)\n", lane->mode);
+ return -ENOTSUPP;
+ }
+
+ ret = mvebu_comphy_smc(COMPHY_SIP_POWER_ON, priv->cp_phys, lane->id,
+ fw_param);
+ if (!ret)
+ return ret;
+
+ if (ret == COMPHY_FW_NOT_SUPPORTED)
+ dev_err(priv->dev,
+ "unsupported SMC call, try updating your firmware\n");
+
+ dev_warn(priv->dev,
+ "Firmware could not configure PHY %d with mode %d (ret: %d), trying legacy method\n",
+ lane->id, lane->mode, ret);
+
+try_legacy:
+ /* Fallback to Linux's implementation */
+ return mvebu_comphy_power_on_legacy(phy);
+}
+
static int mvebu_comphy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
- if (mode != PHY_MODE_ETHERNET)
- return -EINVAL;
-
if (submode == PHY_INTERFACE_MODE_1000BASEX)
submode = PHY_INTERFACE_MODE_SGMII;
- if (mvebu_comphy_get_mux(lane->id, lane->port, mode, submode) < 0)
+ if (mvebu_comphy_get_fw_mode(lane->id, lane->port, mode, submode) < 0)
return -EINVAL;
lane->mode = mode;
lane->submode = submode;
+
+ /* PCIe submode represents the width */
+ if (mode == PHY_MODE_PCIE && !lane->submode)
+ lane->submode = 1;
+
return 0;
}
-static int mvebu_comphy_power_off(struct phy *phy)
+static int mvebu_comphy_power_off_legacy(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
@@ -559,6 +876,21 @@ static int mvebu_comphy_power_off(struct phy *phy)
return 0;
}
+static int mvebu_comphy_power_off(struct phy *phy)
+{
+ struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+ struct mvebu_comphy_priv *priv = lane->priv;
+ int ret;
+
+ ret = mvebu_comphy_smc(COMPHY_SIP_POWER_OFF, priv->cp_phys,
+ lane->id, 0);
+ if (!ret)
+ return ret;
+
+ /* Fallback to Linux's implementation */
+ return mvebu_comphy_power_off_legacy(phy);
+}
+
static const struct phy_ops mvebu_comphy_ops = {
.power_on = mvebu_comphy_power_on,
.power_off = mvebu_comphy_power_off,
@@ -585,12 +917,72 @@ static struct phy *mvebu_comphy_xlate(struct device *dev,
return phy;
}
+static int mvebu_comphy_init_clks(struct mvebu_comphy_priv *priv)
+{
+ int ret;
+
+ priv->mg_domain_clk = devm_clk_get(priv->dev, "mg_clk");
+ if (IS_ERR(priv->mg_domain_clk))
+ return PTR_ERR(priv->mg_domain_clk);
+
+ ret = clk_prepare_enable(priv->mg_domain_clk);
+ if (ret < 0)
+ return ret;
+
+ priv->mg_core_clk = devm_clk_get(priv->dev, "mg_core_clk");
+ if (IS_ERR(priv->mg_core_clk)) {
+ ret = PTR_ERR(priv->mg_core_clk);
+ goto dis_mg_domain_clk;
+ }
+
+ ret = clk_prepare_enable(priv->mg_core_clk);
+ if (ret < 0)
+ goto dis_mg_domain_clk;
+
+ priv->axi_clk = devm_clk_get(priv->dev, "axi_clk");
+ if (IS_ERR(priv->axi_clk)) {
+ ret = PTR_ERR(priv->axi_clk);
+ goto dis_mg_core_clk;
+ }
+
+ ret = clk_prepare_enable(priv->axi_clk);
+ if (ret < 0)
+ goto dis_mg_core_clk;
+
+ return 0;
+
+dis_mg_core_clk:
+ clk_disable_unprepare(priv->mg_core_clk);
+
+dis_mg_domain_clk:
+ clk_disable_unprepare(priv->mg_domain_clk);
+
+ priv->mg_domain_clk = NULL;
+ priv->mg_core_clk = NULL;
+ priv->axi_clk = NULL;
+
+ return ret;
+}
+
+static void mvebu_comphy_disable_unprepare_clks(struct mvebu_comphy_priv *priv)
+{
+ if (priv->axi_clk)
+ clk_disable_unprepare(priv->axi_clk);
+
+ if (priv->mg_core_clk)
+ clk_disable_unprepare(priv->mg_core_clk);
+
+ if (priv->mg_domain_clk)
+ clk_disable_unprepare(priv->mg_domain_clk);
+}
+
static int mvebu_comphy_probe(struct platform_device *pdev)
{
struct mvebu_comphy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
struct resource *res;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -607,10 +999,26 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ /*
+ * Ignore error if clocks have not been initialized properly for DT
+ * compatibility reasons.
+ */
+ ret = mvebu_comphy_init_clks(priv);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_warn(&pdev->dev, "cannot initialize clocks\n");
+ }
+
+ /*
+ * Hack to retrieve a physical offset relative to this CP that will be
+ * given to the firmware
+ */
+ priv->cp_phys = res->start;
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_comphy_lane *lane;
struct phy *phy;
- int ret;
u32 val;
ret = of_property_read_u32(child, "reg", &val);
@@ -626,30 +1034,45 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
}
lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
- if (!lane)
- return -ENOMEM;
+ if (!lane) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto disable_clks;
+ }
phy = devm_phy_create(&pdev->dev, child, &mvebu_comphy_ops);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
+ if (IS_ERR(phy)) {
+ of_node_put(child);
+ ret = PTR_ERR(phy);
+ goto disable_clks;
+ }
lane->priv = priv;
lane->mode = PHY_MODE_INVALID;
+ lane->submode = PHY_INTERFACE_MODE_NA;
lane->id = val;
lane->port = -1;
phy_set_drvdata(phy, lane);
/*
- * Once all modes are supported in this driver we should call
+ * All modes are supported in this driver so we could call
* mvebu_comphy_power_off(phy) here to avoid relying on the
- * bootloader/firmware configuration.
+ * bootloader/firmware configuration, but for compatibility
+ * reasons we cannot de-configure the COMPHY without being sure
+ * that the firmware is up-to-date and fully-featured.
*/
}
dev_set_drvdata(&pdev->dev, priv);
provider = devm_of_phy_provider_register(&pdev->dev,
mvebu_comphy_xlate);
+
return PTR_ERR_OR_ZERO(provider);
+
+disable_clks:
+ mvebu_comphy_disable_unprepare_clks(priv);
+
+ return ret;
}
static const struct of_device_id mvebu_comphy_of_match_table[] = {
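
The firmware path added above packs the lane configuration into a single parameter and hands it to the secure firmware with an ARM SMCCC SiP call, falling back to the legacy register sequence when the call is not handled. A hedged sketch of that call shape, reusing the SiP function ID and "not supported" value shown in the patch:

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/printk.h>

#define EXAMPLE_SIP_COMPHY_POWER_ON	0x82000001
#define EXAMPLE_FW_NOT_SUPPORTED	(-1)

/* Sketch only: the caller would fall back to the legacy path on failure. */
static int example_comphy_power_on(unsigned long cp_phys, unsigned long lane,
				   unsigned long fw_param)
{
	struct arm_smccc_res res;

	arm_smccc_smc(EXAMPLE_SIP_COMPHY_POWER_ON, cp_phys, lane, fw_param,
		      0, 0, 0, 0, &res);
	if (!res.a0)
		return 0;

	if ((long)res.a0 == EXAMPLE_FW_NOT_SUPPORTED)
		pr_warn("COMPHY SMC not supported, firmware may be too old\n");

	return -EOPNOTSUPP;
}
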
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index e3880c4a15f2..b04f4fe85ac2 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -394,6 +394,16 @@ int phy_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_reset);
+/**
+ * phy_calibrate() - Tunes the phy hw parameters for current configuration
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to calibrate phy hardware, typically by adjusting some parameters at
+ * runtime, which are otherwise lost after host controller reset and cannot
+ * be applied in phy_init() or phy_power_on().
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
int phy_calibrate(struct phy *phy)
{
int ret;
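
The kernel-doc added above spells out the intended use of phy_calibrate(): re-applying tuning that a host controller reset wipes out. A hypothetical caller-side sketch (the reset helper is made up for illustration):

#include <linux/device.h>
#include <linux/phy/phy.h>

static void example_host_controller_reset(struct device *dev);	/* hypothetical */

static int example_reset_and_recalibrate(struct device *dev, struct phy *phy)
{
	int ret;

	example_host_controller_reset(dev);

	/* re-apply runtime tuning lost by the reset, per the doc above */
	ret = phy_calibrate(phy);
	if (ret)
		dev_err(dev, "PHY calibration failed: %d\n", ret);

	return ret;
}
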
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 34ff6434da8f..39e8deb8001e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -35,7 +35,7 @@
#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-/* QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
@@ -115,6 +115,7 @@ enum qphy_reg_layout {
QPHY_SW_RESET,
QPHY_START_CTRL,
QPHY_PCS_READY_STATUS,
+ QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
@@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = {
[QPHY_FLL_MAN_CODE] = 0xd4,
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_STATUS] = 0x174,
};
static const unsigned int usb3phy_regs_layout[] = {
@@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = {
[QPHY_FLL_MAN_CODE] = 0xd0,
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x17c,
+ [QPHY_PCS_STATUS] = 0x17c,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
@@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = {
static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_READY_STATUS] = 0x174,
+ [QPHY_PCS_STATUS] = 0x174,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
@@ -911,7 +912,6 @@ struct qmp_phy_cfg {
unsigned int start_ctrl;
unsigned int pwrdn_ctrl;
- unsigned int mask_pcs_ready;
unsigned int mask_com_pcs_ready;
/* true, if PHY has a separate PHY_COM control block */
@@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
.mask_com_pcs_ready = PCS_READY,
.has_phy_com_ctrl = true,
@@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
};
/* list of resets */
@@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
.has_phy_com_ctrl = false,
.has_lane_rst = false,
@@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
@@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PCS_READY,
.is_dual_lane_phy = true,
.no_pcs_sw_reset = true,
@@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
- .mask_pcs_ready = PHYSTATUS,
};
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
@@ -1279,7 +1272,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
- .mask_pcs_ready = PHYSTATUS,
.is_dual_lane_phy = true,
};
@@ -1457,7 +1449,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
void __iomem *status;
- unsigned int mask, val;
+ unsigned int mask, val, ready;
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
@@ -1545,10 +1537,17 @@ static int qcom_qmp_phy_enable(struct phy *phy)
/* start SerDes and Phy-Coding-Sublayer */
qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
- mask = cfg->mask_pcs_ready;
+ if (cfg->type == PHY_TYPE_UFS) {
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = PCS_READY;
+ ready = PCS_READY;
+ } else {
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = PHYSTATUS;
+ ready = 0;
+ }
- ret = readl_poll_timeout(status, val, val & mask, 10,
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
PHY_INIT_COMPLETE_TIMEOUT);
if (ret) {
dev_err(qmp->dev, "phy initialization timed-out\n");
@@ -2093,8 +2092,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
- pm_runtime_disable(dev);
- return ret;
+ goto err_node_put;
}
/*
@@ -2105,8 +2103,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(qmp->dev,
"failed to register pipe clock source\n");
- pm_runtime_disable(dev);
- return ret;
+ goto err_node_put;
}
id++;
}
@@ -2118,6 +2115,11 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ pm_runtime_disable(dev);
+ of_node_put(child);
+ return ret;
}
static struct platform_driver qcom_qmp_phy_driver = {
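
The polling change above encodes two different "ready" conventions: UFS PHYs wait for PCS_READY to be set, everything else waits for PHYSTATUS to clear. Both fit one readl_poll_timeout() condition. A standalone sketch with the bit values from the patch; the 10 ms timeout here is an assumption, the driver uses PHY_INIT_COMPLETE_TIMEOUT:

#include <linux/bits.h>
#include <linux/iopoll.h>

static int example_wait_phy_ready(void __iomem *status, bool is_ufs)
{
	unsigned int mask = is_ufs ? BIT(0) : BIT(6);	/* PCS_READY : PHYSTATUS */
	unsigned int ready = is_ufs ? BIT(0) : 0;	/* wait set : wait cleared */
	unsigned int val;

	/* poll every 10us until (val & mask) == ready, or time out */
	return readl_poll_timeout(status, val, (val & mask) == ready, 10, 10000);
}
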
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 8ffba67568ec..b7f6b1324395 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -61,6 +61,7 @@
USB2_OBINT_IDDIGCHG)
/* VBCTRL */
+#define USB2_VBCTRL_OCCLREN BIT(16)
#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
/* LINECTRL1 */
@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
writel(val, usb2_base + USB2_LINECTRL1);
val = readl(usb2_base + USB2_VBCTRL);
+ val &= ~USB2_VBCTRL_OCCLREN;
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index b10a84cab4a7..2b97fb1185a0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -198,7 +198,7 @@
#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
/* REG:0xc6 */
-#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 9)
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
/* REG:0xc7 */
#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index aebd216dcf2f..6c607df1dc9a 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -109,6 +109,7 @@ static struct platform_driver exynos_dp_video_phy_driver = {
.driver = {
.name = "exynos-dp-video-phy",
.of_match_table = exynos_dp_video_phy_of_match,
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_dp_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index 3784bf100b95..bb51195f189f 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -359,6 +359,7 @@ static struct platform_driver exynos_mipi_video_phy_driver = {
.driver = {
.of_match_table = exynos_mipi_video_phy_of_match,
.name = "exynos-mipi-video-phy",
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_mipi_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 1b4ba8bdb43c..659e7ae0a6cf 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -272,6 +272,7 @@ static struct platform_driver exynos_pcie_phy_driver = {
.driver = {
.of_match_table = exynos_pcie_phy_match,
.name = "exynos_pcie_phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 646259bee909..e510732afb8b 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -953,6 +953,7 @@ static struct platform_driver exynos5_usb3drd_phy = {
.driver = {
.of_match_table = exynos5_usbdrd_phy_of_match,
.name = "exynos5_usb3drd_phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 9e5fc126032c..4dd7324d91b2 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -237,6 +237,7 @@ static struct platform_driver exynos_sata_phy_driver = {
.driver = {
.of_match_table = exynos_sata_phy_of_match,
.name = "samsung,sata-phy",
+ .suppress_bind_attrs = true,
}
};
module_platform_driver(exynos_sata_phy_driver);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 6c82f4fbe8a2..090aa02e02de 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -250,6 +250,7 @@ static struct platform_driver samsung_usb2_phy_driver = {
.driver = {
.of_match_table = samsung_usb2_phy_of_match,
.name = "samsung-usb2-phy",
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index f8edd0840fa2..f14f1f053a75 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -405,6 +405,7 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
const __be32 *addr;
unsigned int reg;
struct clk *clk;
+ int ret = 0;
mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -413,34 +414,40 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
init = &mux->clk_data;
regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
- of_node_put(regmap_node);
if (!regmap_node) {
dev_err(dev, "Fail to get serdes-clk node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
regmap = syscon_node_to_regmap(regmap_node->parent);
if (IS_ERR(regmap)) {
dev_err(dev, "Fail to get Syscon regmap\n");
- return PTR_ERR(regmap);
+ ret = PTR_ERR(regmap);
+ goto out_put_node;
}
num_parents = of_clk_get_parent_count(node);
if (num_parents < 2) {
dev_err(dev, "SERDES clock must have parents\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
GFP_KERNEL);
- if (!parent_names)
- return -ENOMEM;
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
of_clk_parent_fill(node, parent_names, num_parents);
addr = of_get_address(regmap_node, 0, NULL, NULL);
- if (!addr)
- return -EINVAL;
+ if (!addr) {
+ ret = -EINVAL;
+ goto out_put_node;
+ }
reg = be32_to_cpu(*addr);
@@ -456,12 +463,16 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
mux->hw.init = init;
clk = devm_clk_register(dev, &mux->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_put_node;
+ }
am654_phy->clks[clock_num] = clk;
- return 0;
+out_put_node:
+ of_node_put(regmap_node);
+ return ret;
}
static const struct of_device_id serdes_am654_id_table[] = {
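
The serdes clock fix above routes every error path through one out_put_node label so the reference taken by of_parse_phandle() is always dropped. A generic sketch of that single-exit shape (property and helper names are hypothetical):

#include <linux/errno.h>
#include <linux/of.h>

static int example_consume_node(struct device_node *np);	/* hypothetical */

static int example_use_phandle(struct device_node *np)
{
	struct device_node *target;
	int ret;

	target = of_parse_phandle(np, "example,phandle", 0);	/* assumed property */
	if (!target)
		return -ENODEV;

	ret = example_consume_node(target);
	if (ret)
		goto out_put_node;

	/* further failing steps would also jump to out_put_node */

out_put_node:
	of_node_put(target);
	return ret;
}
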
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 384396cbb22d..22256576b69a 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2412,7 +2412,7 @@ static const struct aspeed_pin_config aspeed_g4_configs[] = {
{ PIN_CONFIG_INPUT_DEBOUNCE, { C14, B14 }, SCUA8, 27 },
};
-static int aspeed_g4_sig_expr_set(const struct aspeed_pinmux_data *ctx,
+static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr,
bool enable)
{
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 053101f795a2..ff84d1afd229 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2507,6 +2507,88 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
{ PIN_CONFIG_INPUT_DEBOUNCE, { A20, B19 }, SCUA8, 27 },
};
+static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
+ int ip)
+{
+ if (ip == ASPEED_IP_SCU) {
+ WARN(!ctx->maps[ip], "Missing SCU syscon!");
+ return ctx->maps[ip];
+ }
+
+ if (ip >= ASPEED_NR_PINMUX_IPS)
+ return ERR_PTR(-EINVAL);
+
+ if (likely(ctx->maps[ip]))
+ return ctx->maps[ip];
+
+ if (ip == ASPEED_IP_GFX) {
+ struct device_node *node;
+ struct regmap *map;
+
+ node = of_parse_phandle(ctx->dev->of_node,
+ "aspeed,external-nodes", 0);
+ if (node) {
+ map = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(map))
+ return map;
+ } else
+ return ERR_PTR(-ENODEV);
+
+ ctx->maps[ASPEED_IP_GFX] = map;
+ dev_dbg(ctx->dev, "Acquired GFX regmap");
+ return map;
+ }
+
+ if (ip == ASPEED_IP_LPC) {
+ struct device_node *node;
+ struct regmap *map;
+
+ node = of_parse_phandle(ctx->dev->of_node,
+ "aspeed,external-nodes", 1);
+ if (node) {
+ map = syscon_node_to_regmap(node->parent);
+ of_node_put(node);
+ if (IS_ERR(map))
+ return map;
+ } else
+ return ERR_PTR(-ENODEV);
+
+ ctx->maps[ASPEED_IP_LPC] = map;
+ dev_dbg(ctx->dev, "Acquired LPC regmap");
+ return map;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr,
+ bool enabled)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < expr->ndescs; i++) {
+ const struct aspeed_sig_desc *desc = &expr->descs[i];
+ struct regmap *map;
+
+ map = aspeed_g5_acquire_regmap(ctx, desc->ip);
+ if (IS_ERR(map)) {
+ dev_err(ctx->dev,
+ "Failed to acquire regmap for IP block %d\n",
+ desc->ip);
+ return PTR_ERR(map);
+ }
+
+ ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
/**
* Configure a pin's signal by applying an expression's descriptor state for
* all descriptors in the expression.
@@ -2520,7 +2602,7 @@ static struct aspeed_pin_config aspeed_g5_configs[] = {
* Return: 0 if the expression is configured as requested and a negative error
* code otherwise
*/
-static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx,
+static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr,
bool enable)
{
@@ -2531,9 +2613,15 @@ static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
u32 val = (pattern << __ffs(desc->mask));
+ struct regmap *map;
- if (!ctx->maps[desc->ip])
- return -ENODEV;
+ map = aspeed_g5_acquire_regmap(ctx, desc->ip);
+ if (IS_ERR(map)) {
+ dev_err(ctx->dev,
+ "Failed to acquire regmap for IP block %d\n",
+ desc->ip);
+ return PTR_ERR(map);
+ }
/*
* Strap registers are configured in hardware or by early-boot
@@ -2586,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(const struct aspeed_pinmux_data *ctx,
}
static const struct aspeed_pinmux_ops aspeed_g5_ops = {
+ .eval = aspeed_g5_sig_expr_eval,
.set = aspeed_g5_sig_expr_set,
};
@@ -2641,34 +2730,11 @@ static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
static int aspeed_g5_pinctrl_probe(struct platform_device *pdev)
{
int i;
- struct regmap *map;
- struct device_node *node;
for (i = 0; i < ARRAY_SIZE(aspeed_g5_pins); i++)
aspeed_g5_pins[i].number = i;
- node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 0);
- map = syscon_node_to_regmap(node);
- of_node_put(node);
- if (IS_ERR(map)) {
- dev_warn(&pdev->dev, "No GFX phandle found, some mux configurations may fail\n");
- map = NULL;
- }
- aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_GFX] = map;
-
- node = of_parse_phandle(pdev->dev.of_node, "aspeed,external-nodes", 1);
- if (node) {
- map = syscon_node_to_regmap(node->parent);
- if (IS_ERR(map)) {
- dev_warn(&pdev->dev, "LHC parent is not a syscon, some mux configurations may fail\n");
- map = NULL;
- }
- } else {
- dev_warn(&pdev->dev, "No LHC phandle found, some mux configurations may fail\n");
- map = NULL;
- }
- of_node_put(node);
- aspeed_g5_pinctrl_data.pinmux.maps[ASPEED_IP_LPC] = map;
+ aspeed_g5_pinctrl_data.pinmux.dev = &pdev->dev;
return aspeed_pinctrl_probe(pdev, &aspeed_g5_pinctrl_desc,
&aspeed_g5_pinctrl_data);
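
aspeed_g5_acquire_regmap() above looks up the GFX and LPC syscons only when a mux expression first touches them and caches the result for later calls. A simplified sketch of that lazy lookup-and-cache pattern (the phandle property name is taken from the patch, the function itself is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *example_get_cached_syscon(struct device *dev,
						struct regmap **cache,
						int index)
{
	struct device_node *node;
	struct regmap *map;

	if (*cache)
		return *cache;	/* resolved on an earlier call */

	node = of_parse_phandle(dev->of_node, "aspeed,external-nodes", index);
	if (!node)
		return ERR_PTR(-ENODEV);

	map = syscon_node_to_regmap(node);
	of_node_put(node);
	if (!IS_ERR(map))
		*cache = map;	/* remember it for the next expression */

	return map;
}
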
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 535db3de490b..54933665b5f8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -71,7 +71,7 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx,
+static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
int ret;
@@ -86,7 +86,7 @@ static int aspeed_sig_expr_enable(const struct aspeed_pinmux_data *ctx,
return 0;
}
-static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx,
+static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
int ret;
@@ -109,7 +109,7 @@ static int aspeed_sig_expr_disable(const struct aspeed_pinmux_data *ctx,
*
* Return: 0 if all expressions are disabled, otherwise a negative error code
*/
-static int aspeed_disable_sig(const struct aspeed_pinmux_data *ctx,
+static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr **exprs)
{
int ret = 0;
@@ -217,8 +217,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
{
int i;
int ret;
- const struct aspeed_pinctrl_data *pdata =
- pinctrl_dev_get_drvdata(pctldev);
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_group *pgroup = &pdata->pinmux.groups[group];
const struct aspeed_pin_function *pfunc =
&pdata->pinmux.functions[function];
@@ -306,8 +305,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
unsigned int offset)
{
int ret;
- const struct aspeed_pinctrl_data *pdata =
- pinctrl_dev_get_drvdata(pctldev);
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
const struct aspeed_sig_expr ***prios, **funcs, *expr;
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 5b0fe178ccf2..57305ca838a7 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -5,7 +5,7 @@
#include "pinmux-aspeed.h"
-const char *const aspeed_pinmux_ips[] = {
+static const char *const aspeed_pinmux_ips[] = {
[ASPEED_IP_SCU] = "SCU",
[ASPEED_IP_GFX] = "GFX",
[ASPEED_IP_LPC] = "LPC",
@@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
* neither the enabled nor disabled state. Thus we must explicitly test for
* either condition as required.
*/
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr, bool enabled)
{
- int i;
int ret;
+ int i;
+
+ if (ctx->ops->eval)
+ return ctx->ops->eval(ctx, expr, enabled);
for (i = 0; i < expr->ndescs; i++) {
const struct aspeed_sig_desc *desc = &expr->descs[i];
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 329d54d48667..db3457c86f48 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -702,11 +702,14 @@ struct aspeed_pin_function {
struct aspeed_pinmux_data;
struct aspeed_pinmux_ops {
- int (*set)(const struct aspeed_pinmux_data *ctx,
+ int (*eval)(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr, bool enabled);
+ int (*set)(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr, bool enabled);
};
struct aspeed_pinmux_data {
+ struct device *dev;
struct regmap *maps[ASPEED_NR_PINMUX_IPS];
const struct aspeed_pinmux_ops *ops;
@@ -721,11 +724,10 @@ struct aspeed_pinmux_data {
int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
struct regmap *map);
-int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx,
- const struct aspeed_sig_expr *expr,
- bool enabled);
+int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
+ const struct aspeed_sig_expr *expr, bool enabled);
-static inline int aspeed_sig_expr_set(const struct aspeed_pinmux_data *ctx,
+static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr,
bool enabled)
{
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index e5a112a8e067..297b7b5fcb28 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1455,6 +1455,20 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
chip->irq_eoi(data);
}
+static void byt_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ /*
+ * FIXME: currently the valid_mask is filled in as part of
+ * initializing the irq_chip below in byt_gpio_irq_init_hw().
+	 * When converting this driver to the new way of passing the
+ * gpio_irq_chip along when adding the gpio_chip, move the
+ * mask initialization into this callback instead. Right now
+ * this callback is here to make sure the mask gets allocated.
+ */
+}
+
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
{
struct gpio_chip *gc = &vg->chip;
@@ -1525,7 +1539,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
- gc->irq.need_valid_mask = true;
+ gc->irq.init_valid_mask = byt_init_irq_valid_mask;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 03ec7a5d9d0b..ab681d1a3a74 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1543,6 +1543,30 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
{}
};
+static void chv_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct chv_community *community = pctrl->community;
+ int i;
+
+ /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
+ for (i = 0; i < community->npins; i++) {
+ const struct pinctrl_pin_desc *desc;
+ u32 intsel;
+
+ desc = &community->pins[i];
+
+ intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
+ intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+ if (intsel >= community->nirqs)
+ clear_bit(i, valid_mask);
+ }
+}
+
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
@@ -1557,7 +1581,8 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq.need_valid_mask = need_valid_mask;
+ if (need_valid_mask)
+ chip->irq.init_valid_mask = chv_init_irq_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1576,21 +1601,6 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
}
- /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
- for (i = 0; i < community->npins; i++) {
- const struct pinctrl_pin_desc *desc;
- u32 intsel;
-
- desc = &community->pins[i];
-
- intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
- intsel &= CHV_PADCTRL0_INTSEL_MASK;
- intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
-
- if (need_valid_mask && intsel >= community->nirqs)
- clear_bit(i, chip->irq.valid_mask);
- }
-
/*
* The same set of machines in chv_no_valid_mask[] have incorrectly
* configured GPIOs that generate spurious interrupts so we use
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index d3332da35637..dd5aa9a2dfe5 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -585,12 +585,24 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
return stmfx_function_enable(pctl->stmfx, func);
}
+static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
+ u32 n;
+
+ for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios)
+ clear_bit(n, valid_mask);
+
+ return 0;
+}
+
static int stmfx_pinctrl_probe(struct platform_device *pdev)
{
struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct stmfx_pinctrl *pctl;
- u32 n;
int irq, ret;
pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL);
@@ -650,7 +662,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
pctl->gpio_chip.can_sleep = true;
pctl->gpio_chip.of_node = np;
- pctl->gpio_chip.need_valid_mask = true;
+ pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask;
ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
if (ret) {
@@ -668,8 +680,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
- for_each_clear_bit(n, &pctl->gpio_valid_mask, pctl->gpio_chip.ngpio)
- clear_bit(n, pctl->gpio_chip.valid_mask);
ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
0, handle_bad_irq, IRQ_TYPE_NONE);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 8e14a5f2e970..fa2c87821401 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -138,6 +138,7 @@ config PINCTRL_QCOM_SPMI_PMIC
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 7f35c196bb3e..b8a1c43222f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -593,24 +593,25 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define msm_gpio_dbg_show NULL
#endif
-static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
+static int msm_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
{
- struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
int ret;
unsigned int len, i;
- unsigned int max_gpios = pctrl->soc->ngpios;
const int *reserved = pctrl->soc->reserved_gpios;
u16 *tmp;
/* Driver provided reserved list overrides DT and ACPI */
if (reserved) {
- bitmap_fill(chip->valid_mask, max_gpios);
+ bitmap_fill(valid_mask, ngpios);
for (i = 0; reserved[i] >= 0; i++) {
- if (i >= max_gpios || reserved[i] >= max_gpios) {
+ if (i >= ngpios || reserved[i] >= ngpios) {
dev_err(pctrl->dev, "invalid list of reserved GPIOs\n");
return -EINVAL;
}
- clear_bit(reserved[i], chip->valid_mask);
+ clear_bit(reserved[i], valid_mask);
}
return 0;
@@ -622,7 +623,7 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
if (ret < 0)
return 0;
- if (ret > max_gpios)
+ if (ret > ngpios)
return -EINVAL;
tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL);
@@ -635,9 +636,9 @@ static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
goto out;
}
- bitmap_zero(chip->valid_mask, max_gpios);
+ bitmap_zero(valid_mask, ngpios);
for (i = 0; i < len; i++)
- set_bit(tmp[i], chip->valid_mask);
+ set_bit(tmp[i], valid_mask);
out:
kfree(tmp);
@@ -653,7 +654,6 @@ static const struct gpio_chip msm_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
- .init_valid_mask = msm_gpio_init_valid_mask,
};
/* For dual-edge interrupts in software, since some hardware has no
@@ -1015,7 +1015,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
chip->of_node = pctrl->dev->of_node;
- chip->need_valid_mask = msm_gpio_needs_valid_mask(pctrl);
+ if (msm_gpio_needs_valid_mask(pctrl))
+ chip->init_valid_mask = msm_gpio_init_valid_mask;
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f39da87ea185..442db15e0729 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -170,8 +170,6 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
- struct fwnode_handle *fwnode;
- struct irq_domain *domain;
};
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -751,23 +749,6 @@ static int pmic_gpio_of_xlate(struct gpio_chip *chip,
return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
}
-static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
-{
- struct pmic_gpio_state *state = gpiochip_get_data(chip);
- struct irq_fwspec fwspec;
-
- fwspec.fwnode = state->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = pin + PMIC_GPIO_PHYSICAL_OFFSET;
- /*
- * Set the type to a safe value temporarily. This will be overwritten
- * later with the proper value by irq_set_type.
- */
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
- return irq_create_fwspec_mapping(&fwspec);
-}
-
static void pmic_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct pmic_gpio_state *state = gpiochip_get_data(chip);
@@ -787,7 +768,6 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_gpio_of_xlate,
- .to_irq = pmic_gpio_to_irq,
.dbg_show = pmic_gpio_dbg_show,
};
@@ -964,46 +944,24 @@ static int pmic_gpio_domain_translate(struct irq_domain *domain,
return 0;
}
-static int pmic_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
+static unsigned int pmic_gpio_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
- struct pmic_gpio_state *state = container_of(domain->host_data,
- struct pmic_gpio_state,
- chip);
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq;
- unsigned int type;
- int ret, i;
-
- ret = pmic_gpio_domain_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
- &pmic_gpio_irq_chip, state,
- handle_level_irq, NULL, NULL);
+ return offset + PMIC_GPIO_PHYSICAL_OFFSET;
+}
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 4;
- parent_fwspec.param[0] = 0;
- parent_fwspec.param[1] = hwirq + 0xc0;
- parent_fwspec.param[2] = 0;
- parent_fwspec.param[3] = fwspec->param[1];
+static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = child_hwirq + 0xc0;
+ *parent_type = child_type;
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
+ return 0;
}
-static const struct irq_domain_ops pmic_gpio_domain_ops = {
- .activate = gpiochip_irq_domain_activate,
- .alloc = pmic_gpio_domain_alloc,
- .deactivate = gpiochip_irq_domain_deactivate,
- .free = irq_domain_free_irqs_common,
- .translate = pmic_gpio_domain_translate,
-};
-
static int pmic_gpio_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain;
@@ -1013,6 +971,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
struct pinctrl_desc *pctrldesc;
struct pmic_gpio_pad *pad, *pads;
struct pmic_gpio_state *state;
+ struct gpio_irq_chip *girq;
int ret, npins, i;
u32 reg;
@@ -1092,19 +1051,21 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- state->fwnode = of_node_to_fwnode(state->dev->of_node);
- state->domain = irq_domain_create_hierarchy(parent_domain, 0,
- state->chip.ngpio,
- state->fwnode,
- &pmic_gpio_domain_ops,
- &state->chip);
- if (!state->domain)
- return -ENODEV;
+ girq = &state->chip.irq;
+ girq->chip = &pmic_gpio_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(state->dev->of_node);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
+ girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_fourcell;
+ girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
+ girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
ret = gpiochip_add_data(&state->chip, state);
if (ret) {
dev_err(state->dev, "can't add gpio chip\n");
- goto err_chip_add_data;
+ return ret;
}
/*
@@ -1130,8 +1091,6 @@ static int pmic_gpio_probe(struct platform_device *pdev)
err_range:
gpiochip_remove(&state->chip);
-err_chip_add_data:
- irq_domain_remove(state->domain);
return ret;
}
@@ -1140,7 +1099,6 @@ static int pmic_gpio_remove(struct platform_device *pdev)
struct pmic_gpio_state *state = platform_get_drvdata(pdev);
gpiochip_remove(&state->chip);
- irq_domain_remove(state->domain);
return 0;
}
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
*/
static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
*/
static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 006a8ff64057..714306bc3f79 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -706,7 +706,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
struct cros_ec_spi *ec_spi)
{
struct sched_param sched_priority = {
- .sched_priority = MAX_RT_PRIO - 1,
+ .sched_priority = MAX_RT_PRIO / 2,
};
int err;
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index f85a1b9d129b..706207d192ae 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -642,11 +642,8 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
priv->irq = pdata->irq;
} else {
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
- priv->irq);
+ if (priv->irq < 0)
return priv->irq;
- }
}
priv->regmap = pdata->regmap;
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 48d6f0d87583..83ed1fbf73cf 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
};
MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+ { "xo1.75-ec", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
static struct spi_driver olpc_xo175_ec_spi_driver = {
.driver = {
.name = "olpc-xo175-ec",
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 62b54e137231..60c18f21588d 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1881,52 +1881,17 @@ static int __init acer_wmi_enable_rf_button(void)
return status;
}
-#define ACER_WMID_ACCEL_HID "BST0001"
-
-static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
- void *ctx, void **retval)
-{
- struct acpi_device *dev;
-
- if (!strcmp(ctx, "SENR")) {
- if (acpi_bus_get_device(ah, &dev))
- return AE_OK;
- if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
- return AE_OK;
- } else
- return AE_OK;
-
- *(acpi_handle *)retval = ah;
-
- return AE_CTRL_TERMINATE;
-}
-
-static int __init acer_wmi_get_handle(const char *name, const char *prop,
- acpi_handle *ah)
-{
- acpi_status status;
- acpi_handle handle;
-
- BUG_ON(!name || !ah);
-
- handle = NULL;
- status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
- (void *)name, &handle);
- if (ACPI_SUCCESS(status) && handle) {
- *ah = handle;
- return 0;
- } else {
- return -ENODEV;
- }
-}
-
static int __init acer_wmi_accel_setup(void)
{
+ struct acpi_device *adev;
int err;
- err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
- if (err)
- return err;
+ adev = acpi_dev_get_first_match_dev("BST0001", NULL, -1);
+ if (!adev)
+ return -ENODEV;
+
+ gsensor_handle = acpi_device_handle(adev);
+ acpi_dev_put(adev);
interface->capability |= ACER_CAP_ACCEL;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 2ebde0174937..b361c73636a4 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -402,6 +402,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_forceals,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX430UNR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX430UNR"),
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
{},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ca28d27dae63..821b08e01635 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -26,6 +26,7 @@
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/debugfs.h>
@@ -35,6 +36,8 @@
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+
+#include <acpi/battery.h>
#include <acpi/video.h>
#include "asus-wmi.h"
@@ -65,6 +68,9 @@ MODULE_LICENSE("GPL");
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
#define ASUS_FAN_SFUN_WRITE 0x07
+
+/* Based on standard hwmon pwmX_enable values */
+#define ASUS_FAN_CTRL_FULLSPEED 0
#define ASUS_FAN_CTRL_MANUAL 1
#define ASUS_FAN_CTRL_AUTO 2
@@ -120,7 +126,7 @@ struct agfn_args {
} __packed;
/* struct used for calling fan read and write methods */
-struct fan_args {
+struct agfn_fan_args {
struct agfn_args agfn; /* common fields */
u8 fan; /* fan number: 0: set auto mode 1: 1st fan */
u32 speed; /* read: RPM/100 - write: 0-255 */
@@ -148,6 +154,12 @@ struct asus_rfkill {
u32 dev_id;
};
+enum fan_type {
+ FAN_TYPE_NONE = 0,
+ FAN_TYPE_AGFN, /* deprecated on newer platforms */
+ FAN_TYPE_SPEC83, /* starting in Spec 8.3, use CPU_FAN_CTRL */
+};
+
struct asus_wmi {
int dsts_id;
int spec;
@@ -178,14 +190,17 @@ struct asus_wmi {
struct asus_rfkill gps;
struct asus_rfkill uwb;
- bool asus_hwmon_fan_manual_mode;
- int asus_hwmon_num_fans;
- int asus_hwmon_pwm;
+ enum fan_type fan_type;
+ int fan_pwm_mode;
+ int agfn_pwm;
bool fan_boost_mode_available;
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ // The RSOC controls the maximum charging percentage.
+ bool battery_rsoc_available;
+
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
@@ -292,12 +307,11 @@ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
* Copy to dma capable address otherwise memory corruption occurs as
* bios has to be able to access it.
*/
- input.pointer = kzalloc(args.length, GFP_DMA | GFP_KERNEL);
+ input.pointer = kmemdup(args.pointer, args.length, GFP_DMA | GFP_KERNEL);
input.length = args.length;
if (!input.pointer)
return -ENOMEM;
phys_addr = virt_to_phys(input.pointer);
- memcpy(input.pointer, args.pointer, args.length);
status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_AGFN,
phys_addr, 0, &retval);
@@ -331,7 +345,6 @@ static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
int err;
err = asus_wmi_get_devstate(asus, dev_id, &retval);
-
if (err < 0)
return err;
@@ -352,6 +365,105 @@ static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id)
ASUS_WMI_DSTS_STATUS_BIT);
}
+static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
+{
+ u32 retval;
+ int status = asus_wmi_get_devstate(asus, dev_id, &retval);
+
+ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
+}
+
+/* Battery ********************************************************************/
+
+/* The battery maximum charging percentage */
+static int charge_end_threshold;
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value, ret, rv;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value < 0 || value > 100)
+ return -EINVAL;
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, value, &rv);
+ if (ret)
+ return ret;
+
+ if (rv != 1)
+ return -EIO;
+
+	/* There is no method in the DSDT to read the threshold back, so
+	 * cache the value we just set.
+	 */
+ charge_end_threshold = value;
+ return count;
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", charge_end_threshold);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static int asus_wmi_battery_add(struct power_supply *battery)
+{
+	/* The WMI method does not provide a way to specify a battery, so we
+ * just assume it is the first battery.
+ */
+ if (strcmp(battery->desc->name, "BAT0") != 0)
+ return -ENODEV;
+
+ if (device_create_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold))
+ return -ENODEV;
+
+ /* The charge threshold is only reset when the system is power cycled,
+	 * and we can't read the current threshold, so set it to 100% when
+ * a battery is added.
+ */
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, 100, NULL);
+ charge_end_threshold = 100;
+
+ return 0;
+}
+
+static int asus_wmi_battery_remove(struct power_supply *battery)
+{
+ device_remove_file(&battery->dev,
+ &dev_attr_charge_control_end_threshold);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = asus_wmi_battery_add,
+ .remove_battery = asus_wmi_battery_remove,
+ .name = "ASUS Battery Extension",
+};
+
+static void asus_wmi_battery_init(struct asus_wmi *asus)
+{
+ asus->battery_rsoc_available = false;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_RSOC)) {
+ asus->battery_rsoc_available = true;
+ battery_hook_register(&battery_hook);
+ }
+}
+
+static void asus_wmi_battery_exit(struct asus_wmi *asus)
+{
+ if (asus->battery_rsoc_available)
+ battery_hook_unregister(&battery_hook);
+}
+
/* LEDs ***********************************************************************/
/*
@@ -427,15 +539,14 @@ static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
if (retval == 0x8000)
retval = 0;
- if (retval >= 0) {
- if (level)
- *level = retval & 0x7F;
- if (env)
- *env = (retval >> 8) & 0x7F;
- retval = 0;
- }
+ if (retval < 0)
+ return retval;
- return retval;
+ if (level)
+ *level = retval & 0x7F;
+ if (env)
+ *env = (retval >> 8) & 0x7F;
+ return 0;
}
static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
@@ -446,12 +557,7 @@ static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
asus = container_of(led_cdev, struct asus_wmi, kbd_led);
max_level = asus->kbd_led.max_brightness;
- if (value > max_level)
- value = max_level;
- else if (value < 0)
- value = 0;
-
- asus->kbd_led_wk = value;
+ asus->kbd_led_wk = clamp_val(value, 0, max_level);
kbd_led_update(asus);
}
@@ -481,7 +587,6 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
asus = container_of(led_cdev, struct asus_wmi, kbd_led);
retval = kbd_led_read(asus, &value, NULL);
-
if (retval < 0)
return retval;
@@ -497,15 +602,6 @@ static int wlan_led_unknown_state(struct asus_wmi *asus)
return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
}
-static int wlan_led_presence(struct asus_wmi *asus)
-{
- u32 result;
-
- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
-
- return result & ASUS_WMI_DSTS_PRESENCE_BIT;
-}
-
static void wlan_led_update(struct work_struct *work)
{
int ctrl_param;
@@ -572,15 +668,6 @@ static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
}
-static int lightbar_led_presence(struct asus_wmi *asus)
-{
- u32 result;
-
- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
-
- return result & ASUS_WMI_DSTS_PRESENCE_BIT;
-}
-
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
@@ -631,7 +718,8 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
- if (wlan_led_presence(asus) && (asus->driver->quirks->wapf > 0)) {
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_WIRELESS_LED)
+ && (asus->driver->quirks->wapf > 0)) {
INIT_WORK(&asus->wlan_led_work, wlan_led_update);
asus->wlan_led.name = "asus::wlan";
@@ -648,7 +736,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
- if (lightbar_led_presence(asus)) {
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_LIGHTBAR)) {
INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
asus->lightbar_led.name = "asus::lightbar";
@@ -771,16 +859,14 @@ static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
-
- if (ACPI_SUCCESS(status)) {
- status = acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- asus_rfkill_notify, asus);
- if (ACPI_FAILURE(status))
- pr_warn("Failed to register notify on %s\n", node);
- } else
+ if (ACPI_FAILURE(status))
return -ENODEV;
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ asus_rfkill_notify, asus);
+ if (ACPI_FAILURE(status))
+ pr_warn("Failed to register notify on %s\n", node);
+
return 0;
}
@@ -790,15 +876,13 @@ static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
+ if (ACPI_FAILURE(status))
+ return;
- if (ACPI_SUCCESS(status)) {
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- asus_rfkill_notify);
- if (ACPI_FAILURE(status))
- pr_err("Error removing rfkill notify handler %s\n",
- node);
- }
+ status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ asus_rfkill_notify);
+ if (ACPI_FAILURE(status))
+ pr_err("Error removing rfkill notify handler %s\n", node);
}
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
@@ -1126,10 +1210,10 @@ static void asus_wmi_set_als(void)
/* Hwmon device ***************************************************************/
-static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
+static int asus_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
int *speed)
{
- struct fan_args args = {
+ struct agfn_fan_args args = {
.agfn.len = sizeof(args),
.agfn.mfun = ASUS_FAN_MFUN,
.agfn.sfun = ASUS_FAN_SFUN_READ,
@@ -1153,10 +1237,10 @@ static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
return 0;
}
-static int asus_hwmon_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
+static int asus_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
int *speed)
{
- struct fan_args args = {
+ struct agfn_fan_args args = {
.agfn.len = sizeof(args),
.agfn.mfun = ASUS_FAN_MFUN,
.agfn.sfun = ASUS_FAN_SFUN_WRITE,
@@ -1176,7 +1260,7 @@ static int asus_hwmon_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
return -ENXIO;
if (speed && fan == 1)
- asus->asus_hwmon_pwm = *speed;
+ asus->agfn_pwm = *speed;
return 0;
}
@@ -1185,77 +1269,60 @@ static int asus_hwmon_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
* Check if we can read the speed of one fan. If true we assume we can also
* control it.
*/
-static int asus_hwmon_get_fan_number(struct asus_wmi *asus, int *num_fans)
+static bool asus_wmi_has_agfn_fan(struct asus_wmi *asus)
{
int status;
- int speed = 0;
+ int speed;
+ u32 value;
- *num_fans = 0;
+ status = asus_agfn_fan_speed_read(asus, 1, &speed);
+ if (status != 0)
+ return false;
- status = asus_hwmon_agfn_fan_speed_read(asus, 1, &speed);
- if (!status)
- *num_fans = 1;
+ status = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
+ if (status != 0)
+ return false;
- return 0;
+ /*
+ * We need to find a better way, probably using sfun,
+ * bits or spec ...
+ * Currently we disable it if:
+ * - ASUS_WMI_UNSUPPORTED_METHOD is returned
+	 * - reserved bits are non-zero
+ * - sfun and presence bit are not set
+ */
+ return !(value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
+ || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)));
}
-static int asus_hwmon_fan_set_auto(struct asus_wmi *asus)
+static int asus_fan_set_auto(struct asus_wmi *asus)
{
int status;
+ u32 retval;
- status = asus_hwmon_agfn_fan_speed_write(asus, 0, NULL);
- if (status)
- return -ENXIO;
-
- asus->asus_hwmon_fan_manual_mode = false;
-
- return 0;
-}
+ switch (asus->fan_type) {
+ case FAN_TYPE_SPEC83:
+ status = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ 0, &retval);
+ if (status)
+ return status;
-static int asus_hwmon_fan_rpm_show(struct device *dev, int fan)
-{
- struct asus_wmi *asus = dev_get_drvdata(dev);
- int value;
- int ret;
+ if (retval != 1)
+ return -EIO;
+ break;
- /* no speed readable on manual mode */
- if (asus->asus_hwmon_fan_manual_mode)
- return -ENXIO;
+ case FAN_TYPE_AGFN:
+ status = asus_agfn_fan_speed_write(asus, 0, NULL);
+ if (status)
+ return -ENXIO;
+ break;
- ret = asus_hwmon_agfn_fan_speed_read(asus, fan+1, &value);
- if (ret) {
- pr_warn("reading fan speed failed: %d\n", ret);
+ default:
return -ENXIO;
}
- return value;
-}
-static void asus_hwmon_pwm_show(struct asus_wmi *asus, int fan, int *value)
-{
- int err;
-
- if (asus->asus_hwmon_pwm >= 0) {
- *value = asus->asus_hwmon_pwm;
- return;
- }
-
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, value);
- if (err < 0)
- return;
-
- *value &= 0xFF;
-
- if (*value == 1) /* Low Speed */
- *value = 85;
- else if (*value == 2)
- *value = 170;
- else if (*value == 3)
- *value = 255;
- else if (*value) {
- pr_err("Unknown fan speed %#x\n", *value);
- *value = -1;
- }
+ return 0;
}
static ssize_t pwm1_show(struct device *dev,
@@ -1263,9 +1330,33 @@ static ssize_t pwm1_show(struct device *dev,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ int err;
int value;
- asus_hwmon_pwm_show(asus, 0, &value);
+	/* If we've already set a value, just return it */
+ if (asus->agfn_pwm >= 0)
+ return sprintf(buf, "%d\n", asus->agfn_pwm);
+
+ /*
+	 * If we haven't already set a value through the AGFN interface,
+	 * read the current value through the (now-deprecated) FAN_CTRL device.
+ */
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
+ if (err < 0)
+ return err;
+
+ value &= 0xFF;
+
+ if (value == 1) /* Low Speed */
+ value = 85;
+ else if (value == 2)
+ value = 170;
+ else if (value == 3)
+ value = 255;
+ else if (value) {
+ pr_err("Unknown fan speed %#x\n", value);
+ value = -1;
+ }
return sprintf(buf, "%d\n", value);
}
@@ -1279,17 +1370,16 @@ static ssize_t pwm1_store(struct device *dev,
int ret;
ret = kstrtouint(buf, 10, &value);
-
if (ret)
return ret;
value = clamp(value, 0, 255);
- state = asus_hwmon_agfn_fan_speed_write(asus, 1, &value);
+ state = asus_agfn_fan_speed_write(asus, 1, &value);
if (state)
pr_warn("Setting fan speed failed: %d\n", state);
else
- asus->asus_hwmon_fan_manual_mode = true;
+ asus->fan_pwm_mode = ASUS_FAN_CTRL_MANUAL;
return count;
}
@@ -1298,10 +1388,37 @@ static ssize_t fan1_input_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int value = asus_hwmon_fan_rpm_show(dev, 0);
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
- return sprintf(buf, "%d\n", value < 0 ? -1 : value*100);
+ switch (asus->fan_type) {
+ case FAN_TYPE_SPEC83:
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+ break;
+
+ case FAN_TYPE_AGFN:
+ /* no speed readable on manual mode */
+ if (asus->fan_pwm_mode == ASUS_FAN_CTRL_MANUAL)
+ return -ENXIO;
+
+ ret = asus_agfn_fan_speed_read(asus, 1, &value);
+ if (ret) {
+ pr_warn("reading fan speed failed: %d\n", ret);
+ return -ENXIO;
+ }
+ break;
+
+ default:
+ return -ENXIO;
+ }
+ return sprintf(buf, "%d\n", value < 0 ? -1 : value*100);
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -1310,10 +1427,16 @@ static ssize_t pwm1_enable_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
- if (asus->asus_hwmon_fan_manual_mode)
- return sprintf(buf, "%d\n", ASUS_FAN_CTRL_MANUAL);
-
- return sprintf(buf, "%d\n", ASUS_FAN_CTRL_AUTO);
+ /*
+ * Just read back the cached pwm mode.
+ *
+ * For the CPU_FAN device, the spec indicates that we should be
+ * able to read the device status and consult bit 19 to see if we
+ * are in Full On or Automatic mode. However, this does not work
+ * in practice on X532FL at least (the bit is always 0) and there's
+ * also nothing in the DSDT to indicate that this behaviour exists.
+ */
+ return sprintf(buf, "%d\n", asus->fan_pwm_mode);
}
static ssize_t pwm1_enable_store(struct device *dev,
@@ -1323,21 +1446,50 @@ static ssize_t pwm1_enable_store(struct device *dev,
struct asus_wmi *asus = dev_get_drvdata(dev);
int status = 0;
int state;
+ int value;
int ret;
+ u32 retval;
ret = kstrtouint(buf, 10, &state);
-
if (ret)
return ret;
- if (state == ASUS_FAN_CTRL_MANUAL)
- asus->asus_hwmon_fan_manual_mode = true;
- else
- status = asus_hwmon_fan_set_auto(asus);
+ if (asus->fan_type == FAN_TYPE_SPEC83) {
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (status)
- return status;
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+ } else if (asus->fan_type == FAN_TYPE_AGFN) {
+ switch (state) {
+ case ASUS_FAN_CTRL_MANUAL:
+ break;
+
+ case ASUS_FAN_CTRL_AUTO:
+ status = asus_fan_set_auto(asus);
+ if (status)
+ return status;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ asus->fan_pwm_mode = state;
return count;
}
@@ -1357,7 +1509,6 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
-
if (err < 0)
return err;
@@ -1390,59 +1541,33 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct asus_wmi *asus = dev_get_drvdata(dev->parent);
- int dev_id = -1;
- int fan_attr = -1;
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
- bool ok = true;
- if (attr == &dev_attr_pwm1.attr)
- dev_id = ASUS_WMI_DEVID_FAN_CTRL;
- else if (attr == &dev_attr_temp1_input.attr)
- dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
-
-
- if (attr == &dev_attr_fan1_input.attr
+ if (attr == &dev_attr_pwm1.attr) {
+ if (asus->fan_type != FAN_TYPE_AGFN)
+ return 0;
+ } else if (attr == &dev_attr_fan1_input.attr
|| attr == &dev_attr_fan1_label.attr
- || attr == &dev_attr_pwm1.attr
|| attr == &dev_attr_pwm1_enable.attr) {
- fan_attr = 1;
- }
-
- if (dev_id != -1) {
- int err = asus_wmi_get_devstate(asus, dev_id, &value);
+ if (asus->fan_type == FAN_TYPE_NONE)
+ return 0;
+ } else if (attr == &dev_attr_temp1_input.attr) {
+ int err = asus_wmi_get_devstate(asus,
+ ASUS_WMI_DEVID_THERMAL_CTRL,
+ &value);
- if (err < 0 && fan_attr == -1)
+ if (err < 0)
return 0; /* can't return negative here */
- }
- if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) {
- /*
- * We need to find a better way, probably using sfun,
- * bits or spec ...
- * Currently we disable it if:
- * - ASUS_WMI_UNSUPPORTED_METHOD is returned
- * - reverved bits are non-zero
- * - sfun and presence bit are not set
- */
- if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
- || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
- ok = false;
- else
- ok = fan_attr <= asus->asus_hwmon_num_fans;
- } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
/*
* If the temperature value in deci-Kelvin is near the absolute
* zero temperature, something is clearly wrong
*/
if (value == 0 || value == 1)
- ok = false;
- } else if (fan_attr <= asus->asus_hwmon_num_fans && fan_attr != -1) {
- ok = true;
- } else {
- ok = false;
+ return 0;
}
- return ok ? attr->mode : 0;
+ return attr->mode;
}
static const struct attribute_group hwmon_attribute_group = {
@@ -1468,20 +1593,19 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
static int asus_wmi_fan_init(struct asus_wmi *asus)
{
- int status;
+ asus->fan_type = FAN_TYPE_NONE;
+ asus->agfn_pwm = -1;
- asus->asus_hwmon_pwm = -1;
- asus->asus_hwmon_num_fans = -1;
- asus->asus_hwmon_fan_manual_mode = false;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+ asus->fan_type = FAN_TYPE_SPEC83;
+ else if (asus_wmi_has_agfn_fan(asus))
+ asus->fan_type = FAN_TYPE_AGFN;
- status = asus_hwmon_get_fan_number(asus, &asus->asus_hwmon_num_fans);
- if (status) {
- asus->asus_hwmon_num_fans = 0;
- pr_warn("Could not determine number of fans: %d\n", status);
- return -ENXIO;
- }
+ if (asus->fan_type == FAN_TYPE_NONE)
+ return -ENODEV;
- pr_info("Number of fans: %d\n", asus->asus_hwmon_num_fans);
+ asus_fan_set_auto(asus);
+ asus->fan_pwm_mode = ASUS_FAN_CTRL_AUTO;
return 0;
}
@@ -1523,7 +1647,6 @@ static int fan_boost_mode_write(struct asus_wmi *asus)
pr_info("Set fan boost mode: %u\n", value);
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_FAN_BOOST_MODE, value,
&retval);
-
if (err) {
pr_warn("Failed to set fan boost mode: %d\n", err);
return err;
@@ -1606,6 +1729,7 @@ static DEVICE_ATTR_RW(fan_boost_mode);
static int read_backlight_power(struct asus_wmi *asus)
{
int ret;
+
if (asus->driver->quirks->store_backlight_power)
ret = !asus->driver->panel_power;
else
@@ -1624,7 +1748,6 @@ static int read_brightness_max(struct asus_wmi *asus)
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
-
if (err < 0)
return err;
@@ -1644,7 +1767,6 @@ static int read_brightness(struct backlight_device *bd)
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
-
if (err < 0)
return err;
@@ -1734,7 +1856,6 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
return max;
power = read_backlight_power(asus);
-
if (power == -ENODEV)
power = FB_BLANK_UNBLANK;
else if (power < 0)
@@ -1900,7 +2021,6 @@ static void asus_wmi_notify(u32 value, void *context)
for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
code = asus_wmi_get_event_code(value);
-
if (code < 0) {
pr_warn("Failed to get notify code: %d\n", code);
return;
@@ -1929,7 +2049,6 @@ static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
code = asus_wmi_get_event_code(WMI_EVENT_VALUE_ATK);
-
if (code < 0) {
pr_warn("Failed to get event during flush: %d\n", code);
return code;
@@ -1945,32 +2064,25 @@ static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
/* Sysfs **********************************************************************/
-static int parse_arg(const char *buf, unsigned long count, int *val)
-{
- if (!count)
- return 0;
- if (sscanf(buf, "%i", val) != 1)
- return -EINVAL;
- return count;
-}
-
static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid,
const char *buf, size_t count)
{
u32 retval;
- int rv, err, value;
+ int err, value;
value = asus_wmi_get_devstate_simple(asus, devid);
if (value < 0)
return value;
- rv = parse_arg(buf, count, &value);
- err = asus_wmi_set_devstate(devid, value, &retval);
+ err = kstrtoint(buf, 0, &value);
+ if (err)
+ return err;
+ err = asus_wmi_set_devstate(devid, value, &retval);
if (err < 0)
return err;
- return rv;
+ return count;
}
static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
@@ -2019,8 +2131,10 @@ static ssize_t cpufv_store(struct device *dev, struct device_attribute *attr,
{
int value, rv;
- if (!count || sscanf(buf, "%i", &value) != 1)
- return -EINVAL;
+ rv = kstrtoint(buf, 0, &value);
+ if (rv)
+ return rv;
+
if (value < 0 || value > 2)
return -EINVAL;
@@ -2181,7 +2295,6 @@ static int show_dsts(struct seq_file *m, void *data)
u32 retval = -1;
err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
-
if (err < 0)
return err;
@@ -2198,7 +2311,6 @@ static int show_devs(struct seq_file *m, void *data)
err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
&retval);
-
if (err < 0)
return err;
@@ -2334,7 +2446,6 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_input;
err = asus_wmi_fan_init(asus); /* probably no problems on error */
- asus_hwmon_fan_set_auto(asus);
err = asus_wmi_hwmon_init(asus);
if (err)
@@ -2392,6 +2503,8 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_wmi_handler;
}
+ asus_wmi_battery_init(asus);
+
asus_wmi_debugfs_init(asus);
return 0;
@@ -2426,7 +2539,8 @@ static int asus_wmi_remove(struct platform_device *device)
asus_wmi_rfkill_exit(asus);
asus_wmi_debugfs_exit(asus);
asus_wmi_sysfs_exit(asus->platform_device);
- asus_hwmon_fan_set_auto(asus);
+ asus_fan_set_auto(asus);
+ asus_wmi_battery_exit(asus);
kfree(asus);
return 0;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 09dfa6f48a1a..ab610376fdad 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -226,7 +226,7 @@ static const unsigned char pwm_lookup_table[256] = {
/* General access */
static u8 ec_read_u8(u8 addr)
{
- u8 value;
+ u8 value = 0;
ec_read(addr, &value);
return value;
}
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index acc653f9c16f..6669db2555fb 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -311,11 +311,13 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
{ KE_IGNORE, 0xfff1, { KEY_RESERVED } },
/* Keyboard backlight level changed */
- { KE_IGNORE, 0x01e1, { KEY_RESERVED } },
- { KE_IGNORE, 0x02ea, { KEY_RESERVED } },
- { KE_IGNORE, 0x02eb, { KEY_RESERVED } },
- { KE_IGNORE, 0x02ec, { KEY_RESERVED } },
- { KE_IGNORE, 0x02f6, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_OFF_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_ON_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_25_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_50_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_75_TOKEN, { KEY_RESERVED } },
+ { KE_IGNORE, KBD_LED_AUTO_100_TOKEN, { KEY_RESERVED } },
};
static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code)
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 2521e45280b8..6bcbbb375401 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -502,6 +502,17 @@ static DEVICE_ATTR_RO(dock);
static DEVICE_ATTR_RO(tablet);
static DEVICE_ATTR_RW(postcode);
+static struct attribute *hp_wmi_attrs[] = {
+ &dev_attr_display.attr,
+ &dev_attr_hddtemp.attr,
+ &dev_attr_als.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_tablet.attr,
+ &dev_attr_postcode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hp_wmi);
+
static void hp_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -678,16 +689,6 @@ static void hp_wmi_input_destroy(void)
input_unregister_device(hp_wmi_input_dev);
}
-static void cleanup_sysfs(struct platform_device *device)
-{
- device_remove_file(&device->dev, &dev_attr_display);
- device_remove_file(&device->dev, &dev_attr_hddtemp);
- device_remove_file(&device->dev, &dev_attr_als);
- device_remove_file(&device->dev, &dev_attr_dock);
- device_remove_file(&device->dev, &dev_attr_tablet);
- device_remove_file(&device->dev, &dev_attr_postcode);
-}
-
static int __init hp_wmi_rfkill_setup(struct platform_device *device)
{
int err, wireless;
@@ -858,8 +859,6 @@ fail:
static int __init hp_wmi_bios_setup(struct platform_device *device)
{
- int err;
-
/* clear detected rfkill devices */
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
@@ -869,35 +868,12 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
- err = device_create_file(&device->dev, &dev_attr_display);
- if (err)
- goto add_sysfs_error;
- err = device_create_file(&device->dev, &dev_attr_hddtemp);
- if (err)
- goto add_sysfs_error;
- err = device_create_file(&device->dev, &dev_attr_als);
- if (err)
- goto add_sysfs_error;
- err = device_create_file(&device->dev, &dev_attr_dock);
- if (err)
- goto add_sysfs_error;
- err = device_create_file(&device->dev, &dev_attr_tablet);
- if (err)
- goto add_sysfs_error;
- err = device_create_file(&device->dev, &dev_attr_postcode);
- if (err)
- goto add_sysfs_error;
return 0;
-
-add_sysfs_error:
- cleanup_sysfs(device);
- return err;
}
static int __exit hp_wmi_bios_remove(struct platform_device *device)
{
int i;
- cleanup_sysfs(device);
for (i = 0; i < rfkill2_count; i++) {
rfkill_unregister(rfkill2[i].rfkill);
@@ -966,6 +942,7 @@ static struct platform_driver hp_wmi_driver = {
.driver = {
.name = "hp-wmi",
.pm = &hp_wmi_pm_ops,
+ .dev_groups = hp_wmi_groups,
},
.remove = __exit_p(hp_wmi_bios_remove),
};
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 7a2747455237..799cbe2ffcf3 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
+ AXIS_DMI_MATCH("HPZBook17G5", "HP ZBook 17 G5", x_inverted),
AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted),
{ NULL, }
/* Laptop models without axis info (yet):
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 197d8a192721..61fe341a85aa 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -81,9 +81,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- multi = devm_kmalloc(dev,
- offsetof(struct i2c_multi_inst_data, clients[ret]),
- GFP_KERNEL);
+ multi = devm_kmalloc(dev, struct_size(multi, clients, ret), GFP_KERNEL);
if (!multi)
return -ENOMEM;
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index bc0d55a59015..ef6d4bd77b1a 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -253,35 +253,45 @@ static void intel_button_array_enable(struct device *device, bool enable)
static int intel_hid_pm_prepare(struct device *device)
{
- struct intel_hid_priv *priv = dev_get_drvdata(device);
+ if (device_may_wakeup(device)) {
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
- priv->wakeup_mode = true;
+ priv->wakeup_mode = true;
+ }
return 0;
}
+static void intel_hid_pm_complete(struct device *device)
+{
+ struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+ priv->wakeup_mode = false;
+}
+
static int intel_hid_pl_suspend_handler(struct device *device)
{
- if (pm_suspend_via_firmware()) {
+ intel_button_array_enable(device, false);
+
+ if (!pm_suspend_no_platform())
intel_hid_set_enable(device, false);
- intel_button_array_enable(device, false);
- }
+
return 0;
}
static int intel_hid_pl_resume_handler(struct device *device)
{
- struct intel_hid_priv *priv = dev_get_drvdata(device);
+ intel_hid_pm_complete(device);
- priv->wakeup_mode = false;
- if (pm_resume_via_firmware()) {
+ if (!pm_suspend_no_platform())
intel_hid_set_enable(device, true);
- intel_button_array_enable(device, true);
- }
+
+ intel_button_array_enable(device, true);
return 0;
}
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
.prepare = intel_hid_pm_prepare,
+ .complete = intel_hid_pm_complete,
.freeze = intel_hid_pl_suspend_handler,
.thaw = intel_hid_pl_resume_handler,
.restore = intel_hid_pl_resume_handler,
@@ -491,6 +501,12 @@ static int intel_hid_probe(struct platform_device *device)
}
device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
return 0;
err_remove_notify:
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index a0d0cecff55f..b74932307d69 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -176,6 +176,12 @@ static int intel_vbtn_probe(struct platform_device *device)
return -EBUSY;
device_init_wakeup(&device->dev, true);
+ /*
+ * In order for system wakeup to work, the EC GPE has to be marked as
+ * a wakeup one, so do that here (this setting will persist, but it has
+ * no effect until the wakeup mask is set for the EC GPE).
+ */
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
@@ -195,22 +201,30 @@ static int intel_vbtn_remove(struct platform_device *device)
static int intel_vbtn_pm_prepare(struct device *dev)
{
- struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+ if (device_may_wakeup(dev)) {
+ struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
- priv->wakeup_mode = true;
+ priv->wakeup_mode = true;
+ }
return 0;
}
-static int intel_vbtn_pm_resume(struct device *dev)
+static void intel_vbtn_pm_complete(struct device *dev)
{
struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
priv->wakeup_mode = false;
+}
+
+static int intel_vbtn_pm_resume(struct device *dev)
+{
+ intel_vbtn_pm_complete(dev);
return 0;
}
static const struct dev_pm_ops intel_vbtn_pm_ops = {
.prepare = intel_vbtn_pm_prepare,
+ .complete = intel_vbtn_pm_complete,
.resume = intel_vbtn_pm_resume,
.restore = intel_vbtn_pm_resume,
.thaw = intel_vbtn_pm_resume,
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
index 951c105bafc1..7ccf583649e6 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -60,11 +60,8 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
wctmu->regmap = pmic->regmap;
irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- dev_err(&pdev->dev, "invalid irq %d\n", irq);
+ if (irq < 0)
return irq;
- }
regmap_irq_chip = pmic->irq_chip_data_tmu;
virq = regmap_irq_get_virq(regmap_irq_chip, irq);
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 4fbdff48a4b5..1d5d877b9582 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -34,7 +34,6 @@ enum {
INT33FE_NODE_MAX17047,
INT33FE_NODE_PI3USB30532,
INT33FE_NODE_DISPLAYPORT,
- INT33FE_NODE_ROLE_SWITCH,
INT33FE_NODE_USB_CONNECTOR,
INT33FE_NODE_MAX,
};
@@ -45,7 +44,6 @@ struct cht_int33fe_data {
struct i2c_client *pi3usb30532;
struct fwnode_handle *dp;
- struct fwnode_handle *mux;
};
static const struct software_node nodes[];
@@ -139,46 +137,10 @@ static const struct software_node nodes[] = {
{ "max17047", NULL, max17047_props },
{ "pi3usb30532" },
{ "displayport" },
- { "usb-role-switch" },
{ "connector", &nodes[0], usb_connector_props, usb_connector_refs },
{ }
};
-static int cht_int33fe_setup_mux(struct cht_int33fe_data *data)
-{
- struct fwnode_handle *fwnode;
- struct device *dev;
- struct device *p;
-
- fwnode = software_node_fwnode(&nodes[INT33FE_NODE_ROLE_SWITCH]);
- if (!fwnode)
- return -ENODEV;
-
- /* First finding the platform device */
- p = bus_find_device_by_name(&platform_bus_type, NULL,
- "intel_xhci_usb_sw");
- if (!p)
- return -EPROBE_DEFER;
-
- /* Then the mux child device */
- dev = device_find_child_by_name(p, "intel_xhci_usb_sw-role-switch");
- put_device(p);
- if (!dev)
- return -EPROBE_DEFER;
-
- /* If there already is a node for the mux, using that one. */
- if (dev->fwnode)
- fwnode_remove_software_node(fwnode);
- else
- dev->fwnode = fwnode;
-
- data->mux = fwnode_handle_get(dev->fwnode);
- put_device(dev);
- mux_ref.node = to_software_node(data->mux);
-
- return 0;
-}
-
static int cht_int33fe_setup_dp(struct cht_int33fe_data *data)
{
struct fwnode_handle *fwnode;
@@ -211,10 +173,9 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data)
{
software_node_unregister_nodes(nodes);
- if (data->mux) {
- fwnode_handle_put(data->mux);
+ if (mux_ref.node) {
+ fwnode_handle_put(software_node_fwnode(mux_ref.node));
mux_ref.node = NULL;
- data->mux = NULL;
}
if (data->dp) {
@@ -235,14 +196,16 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
/* The devices that are not created in this driver need extra steps. */
/*
- * There is no ACPI device node for the USB role mux, so we need to find
- * the mux device and assign our node directly to it. That means we
- * depend on the mux driver. This function will return -PROBE_DEFER
- * until the mux device is registered.
+ * There is no ACPI device node for the USB role mux, so we need to wait
+	 * until the mux driver has created a software node for the mux device.
+	 * This means we depend on the mux driver. This function will return
+ * -EPROBE_DEFER until the mux device is registered.
*/
- ret = cht_int33fe_setup_mux(data);
- if (ret)
+ mux_ref.node = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!mux_ref.node) {
+ ret = -EPROBE_DEFER;
goto err_remove_nodes;
+ }
/*
* The DP connector does have ACPI device node. In this case we can just
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index d9542c661ddc..af233b7b77f2 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -122,7 +122,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
generic_handle_irq(irq_find_mapping(chip->irq.domain,
GPE0A_PME_B0_VIRT_GPIO_PIN));
- pm_system_wakeup();
+ pm_wakeup_hard_event(chip->parent);
return IRQ_HANDLED;
}
@@ -144,6 +144,7 @@ static struct irq_chip int0002_cht_irqchip = {
* No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
* and we don't want to mess with the ACPI SCI irq settings.
*/
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static const struct x86_cpu_id int0002_cpu_ids[] = {
@@ -152,6 +153,13 @@ static const struct x86_cpu_id int0002_cpu_ids[] = {
{}
};
+static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ bitmap_clear(valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+}
+
static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -166,10 +174,8 @@ static int int0002_probe(struct platform_device *pdev)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Error getting IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -184,7 +190,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
- chip->irq.need_valid_mask = true;
+ chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
if (ret) {
@@ -192,8 +198,6 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- bitmap_clear(chip->irq.valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
-
/*
* We manually request the irq here instead of passing a flow-handler
* to gpiochip_set_chained_irqchip, because the irq is shared.
@@ -216,6 +220,13 @@ static int int0002_probe(struct platform_device *pdev)
gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
+ device_init_wakeup(dev, true);
+ return 0;
+}
+
+static int int0002_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, false);
return 0;
}
@@ -231,6 +242,7 @@ static struct platform_driver int0002_driver = {
.acpi_match_table = int0002_acpi_ids,
},
.probe = int0002_probe,
+ .remove = int0002_remove,
};
module_platform_driver(int0002_driver);
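pm_wakeup_hard_event() in the hunk above attributes the wakeup to a specific device and marks it as a "hard" event, so an in-progress suspend is aborted, instead of the anonymous pm_system_wakeup(). A hypothetical handler using the same call (names invented; the device is assumed to have been marked wakeup-capable with device_init_wakeup(), as in the probe change above):

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t example_wake_irq(int irq, void *data)
{
	struct device *dev = data;	/* device that owns the wake line */

	pm_wakeup_hard_event(dev);
	return IRQ_HANDLED;
}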
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 235c0b89f824..94a008efb09b 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -806,12 +806,13 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- INTEL_CPU_FAM6(SKYLAKE_MOBILE, spt_reg_map),
- INTEL_CPU_FAM6(SKYLAKE_DESKTOP, spt_reg_map),
- INTEL_CPU_FAM6(KABYLAKE_MOBILE, spt_reg_map),
- INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
- INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
- INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
+ INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
+ INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
+ INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
+ INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
+ INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
{}
};
@@ -877,10 +878,14 @@ static int pmc_core_probe(struct platform_device *pdev)
if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
pmcdev->map = &cnp_reg_map;
- if (lpit_read_residency_count_address(&slp_s0_addr))
+ if (lpit_read_residency_count_address(&slp_s0_addr)) {
pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
- else
+
+ if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
+ return -ENODEV;
+ } else {
pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
+ }
pmcdev->regbase = ioremap(pmcdev->base_addr,
pmcdev->map->regmap_length);
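The added page_is_ram() test above rejects the hard-coded fallback base address when it points at ordinary RAM, which would mean there is no PMC register block to map there. The guard in isolation, with an invented address:

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/pfn.h>

#define EXAMPLE_MMIO_BASE	0xfe000000UL	/* illustrative only */

static void __iomem *example_map_default_base(size_t len)
{
	/* A RAM-backed page cannot be the register block; give up. */
	if (page_is_ram(PHYS_PFN(EXAMPLE_MMIO_BASE)))
		return NULL;

	return ioremap(EXAMPLE_MMIO_BASE, len);
}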
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index a8754a6db1b8..6fe829f30997 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -18,8 +18,16 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+static void intel_pmc_core_release(struct device *dev)
+{
+ /* Nothing to do. */
+}
+
static struct platform_device pmc_core_device = {
.name = "intel_pmc_core",
+ .dev = {
+ .release = intel_pmc_core_release,
+ },
};
/*
@@ -30,12 +38,12 @@ static struct platform_device pmc_core_device = {
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
- INTEL_CPU_FAM6(SKYLAKE_MOBILE, pmc_core_device),
- INTEL_CPU_FAM6(SKYLAKE_DESKTOP, pmc_core_device),
- INTEL_CPU_FAM6(KABYLAKE_MOBILE, pmc_core_device),
- INTEL_CPU_FAM6(KABYLAKE_DESKTOP, pmc_core_device),
- INTEL_CPU_FAM6(CANNONLAKE_MOBILE, pmc_core_device),
- INTEL_CPU_FAM6(ICELAKE_MOBILE, pmc_core_device),
+ INTEL_CPU_FAM6(SKYLAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(SKYLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(KABYLAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
+ INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
+ INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);
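The empty release() above exists because the driver core complains when a registered device is torn down without a release callback; for a platform_device that lives in static storage there is nothing to free. The same pattern in generic form (names invented):

#include <linux/platform_device.h>

static void example_release(struct device *dev)
{
	/* statically allocated, nothing to free */
}

static struct platform_device example_device = {
	.name = "example",
	.dev = {
		.release = example_release,
	},
};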
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 55037ff258f8..5c1da2bb1435 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -936,10 +936,8 @@ static int ipc_plat_probe(struct platform_device *pdev)
spin_lock_init(&ipcdev.gcr_lock);
ipcdev.irq = platform_get_irq(pdev, 0);
- if (ipcdev.irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq\n");
+ if (ipcdev.irq < 0)
return -EINVAL;
- }
ret = ipc_plat_get_res(pdev);
if (ret) {
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
index 68d75391db57..3de5a3c66529 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c
@@ -29,6 +29,8 @@ static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
static int punit_msr_white_list[] = {
MSR_TURBO_RATIO_LIMIT,
MSR_CONFIG_TDP_CONTROL,
+ MSR_TURBO_RATIO_LIMIT1,
+ MSR_TURBO_RATIO_LIMIT2,
};
struct isst_valid_cmd_ranges {
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
index f7266a115a08..ad8c7c0df4d9 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
@@ -132,11 +132,9 @@ static void isst_if_remove(struct pci_dev *pdev)
static int __maybe_unused isst_if_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct isst_if_device *punit_dev;
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
int i;
- punit_dev = pci_get_drvdata(pdev);
for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
punit_dev->range_0[i] = readl(punit_dev->punit_mmio +
mmio_range[0].beg + 4 * i);
@@ -149,11 +147,9 @@ static int __maybe_unused isst_if_suspend(struct device *device)
static int __maybe_unused isst_if_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct isst_if_device *punit_dev;
+ struct isst_if_device *punit_dev = dev_get_drvdata(device);
int i;
- punit_dev = pci_get_drvdata(pdev);
for (i = 0; i < ARRAY_SIZE(punit_dev->range_0); ++i)
writel(punit_dev->range_0[i], punit_dev->punit_mmio +
mmio_range[0].beg + 4 * i);
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index b0d3110ae378..48b112b4f0b0 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -32,6 +32,8 @@
#define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1
#define APU2_GPIO_REG_MODESW AMD_FCH_GPIO_REG_GPIO32_GE1
#define APU2_GPIO_REG_SIMSWAP AMD_FCH_GPIO_REG_GPIO33_GE2
+#define APU2_GPIO_REG_MPCIE2 AMD_FCH_GPIO_REG_GPIO59_DEVSLP0
+#define APU2_GPIO_REG_MPCIE3 AMD_FCH_GPIO_REG_GPIO51
/* order in which the gpio lines are defined in the register list */
#define APU2_GPIO_LINE_LED1 0
@@ -39,6 +41,8 @@
#define APU2_GPIO_LINE_LED3 2
#define APU2_GPIO_LINE_MODESW 3
#define APU2_GPIO_LINE_SIMSWAP 4
+#define APU2_GPIO_LINE_MPCIE2 5
+#define APU2_GPIO_LINE_MPCIE3 6
/* gpio device */
@@ -48,6 +52,8 @@ static int apu2_gpio_regs[] = {
[APU2_GPIO_LINE_LED3] = APU2_GPIO_REG_LED3,
[APU2_GPIO_LINE_MODESW] = APU2_GPIO_REG_MODESW,
[APU2_GPIO_LINE_SIMSWAP] = APU2_GPIO_REG_SIMSWAP,
+ [APU2_GPIO_LINE_MPCIE2] = APU2_GPIO_REG_MPCIE2,
+ [APU2_GPIO_LINE_MPCIE3] = APU2_GPIO_REG_MPCIE3,
};
static const char * const apu2_gpio_names[] = {
@@ -56,6 +62,8 @@ static const char * const apu2_gpio_names[] = {
[APU2_GPIO_LINE_LED3] = "front-led3",
[APU2_GPIO_LINE_MODESW] = "front-button",
[APU2_GPIO_LINE_SIMSWAP] = "simswap",
+ [APU2_GPIO_LINE_MPCIE2] = "mpcie2_reset",
+ [APU2_GPIO_LINE_MPCIE3] = "mpcie3_reset",
};
static const struct amd_fch_gpio_pdata board_apu2 = {
@@ -69,7 +77,8 @@ static const struct amd_fch_gpio_pdata board_apu2 = {
static const struct gpio_led apu2_leds[] = {
{ .name = "apu:green:1" },
{ .name = "apu:green:2" },
- { .name = "apu:green:3" }
+ { .name = "apu:green:3" },
+ { .name = "apu:simswap" },
};
static const struct gpio_led_platform_data apu2_leds_pdata = {
@@ -86,6 +95,8 @@ static struct gpiod_lookup_table gpios_led_table = {
NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP,
+ NULL, 3, GPIO_ACTIVE_LOW),
}
};
@@ -93,7 +104,7 @@ static struct gpiod_lookup_table gpios_led_table = {
static struct gpio_keys_button apu2_keys_buttons[] = {
{
- .code = KEY_SETUP,
+ .code = KEY_RESTART,
.active_low = 1,
.desc = "front button",
.type = EV_KEY,
@@ -255,6 +266,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
MODULE_ALIAS("platform:pcengines-apuv2");
-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index aa53648a2214..9aca5e7ce6d0 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -415,6 +415,13 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
},
},
+ {
+ .ident = "SIMATIC IPC227E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
+ },
+ },
{ /*sentinel*/ }
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7bde4640ef34..da794dcfdd92 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3647,22 +3647,19 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
/* Set up key map */
- hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
- GFP_KERNEL);
- if (!hotkey_keycode_map) {
- pr_err("failed to allocate memory for key map\n");
- res = -ENOMEM;
- goto err_exit;
- }
-
keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
ARRAY_SIZE(tpacpi_keymap_qtable));
BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"using keymap number %lu\n", keymap_id);
- memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id],
- TPACPI_HOTKEY_MAP_SIZE);
+ hotkey_keycode_map = kmemdup(&tpacpi_keymaps[keymap_id],
+ TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL);
+ if (!hotkey_keycode_map) {
+ pr_err("failed to allocate memory for key map\n");
+ res = -ENOMEM;
+ goto err_exit;
+ }
input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
@@ -9714,6 +9711,107 @@ static struct ibm_struct battery_driver_data = {
.exit = tpacpi_battery_exit,
};
+/*************************************************************************
+ * LCD Shadow subdriver, for the Lenovo PrivacyGuard feature
+ */
+
+static int lcdshadow_state;
+
+static int lcdshadow_on_off(bool state)
+{
+ acpi_handle set_shadow_handle;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "SSSS", &set_shadow_handle))) {
+ pr_warn("Thinkpad ACPI has no %s interface.\n", "SSSS");
+ return -EIO;
+ }
+
+ if (!acpi_evalf(set_shadow_handle, &output, NULL, "dd", (int)state))
+ return -EIO;
+
+ lcdshadow_state = state;
+ return 0;
+}
+
+static int lcdshadow_set(bool on)
+{
+ if (lcdshadow_state < 0)
+ return lcdshadow_state;
+ if (lcdshadow_state == on)
+ return 0;
+ return lcdshadow_on_off(on);
+}
+
+static int tpacpi_lcdshadow_init(struct ibm_init_struct *iibm)
+{
+ acpi_handle get_shadow_handle;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GSSS", &get_shadow_handle))) {
+ lcdshadow_state = -ENODEV;
+ return 0;
+ }
+
+ if (!acpi_evalf(get_shadow_handle, &output, NULL, "dd", 0)) {
+ lcdshadow_state = -EIO;
+ return -EIO;
+ }
+ if (!(output & 0x10000)) {
+ lcdshadow_state = -ENODEV;
+ return 0;
+ }
+ lcdshadow_state = output & 0x1;
+
+ return 0;
+}
+
+static void lcdshadow_resume(void)
+{
+ if (lcdshadow_state >= 0)
+ lcdshadow_on_off(lcdshadow_state);
+}
+
+static int lcdshadow_read(struct seq_file *m)
+{
+ if (lcdshadow_state < 0) {
+ seq_puts(m, "status:\t\tnot supported\n");
+ } else {
+ seq_printf(m, "status:\t\t%d\n", lcdshadow_state);
+ seq_puts(m, "commands:\t0, 1\n");
+ }
+
+ return 0;
+}
+
+static int lcdshadow_write(char *buf)
+{
+ char *cmd;
+ int state = -1;
+
+ if (lcdshadow_state < 0)
+ return -ENODEV;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (strlencmp(cmd, "0") == 0)
+ state = 0;
+ else if (strlencmp(cmd, "1") == 0)
+ state = 1;
+ }
+
+ if (state == -1)
+ return -EINVAL;
+
+ return lcdshadow_set(state);
+}
+
+static struct ibm_struct lcdshadow_driver_data = {
+ .name = "lcdshadow",
+ .resume = lcdshadow_resume,
+ .read = lcdshadow_read,
+ .write = lcdshadow_write,
+};
+
/****************************************************************************
****************************************************************************
*
@@ -10195,6 +10293,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.init = tpacpi_battery_init,
.data = &battery_driver_data,
},
+ {
+ .init = tpacpi_lcdshadow_init,
+ .data = &lcdshadow_driver_data,
+ },
};
static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 4370e4add83a..1c7d8324ff5c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -136,6 +136,22 @@ static const struct ts_dmi_data chuwi_vi10_data = {
.properties = chuwi_vi10_props,
};
+static const struct property_entry chuwi_surbook_mini_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 88),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_surbook_mini_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_surbook_mini_props,
+};
+
static const struct property_entry connect_tablet9_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 9),
PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
@@ -230,6 +246,24 @@ static const struct ts_dmi_data gp_electronic_t701_data = {
.properties = gp_electronic_t701_props,
};
+static const struct property_entry irbis_tw90_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data irbis_tw90_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = irbis_tw90_props,
+};
+
static const struct property_entry itworks_tw891_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
@@ -647,6 +681,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Surbook Mini (CWI540) */
+ .driver_data = (void *)&chuwi_surbook_mini_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C3W6_AP108_4G"),
+ },
+ },
+ {
/* Connect Tablet 9 */
.driver_data = (void *)&connect_tablet9_data,
.matches = {
@@ -709,6 +751,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Irbis TW90 */
+ .driver_data = (void *)&irbis_tw90_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW90"),
+ },
+ },
+ {
/* I.T.Works TW891 */
.driver_data = (void *)&itworks_tw891_data,
.matches = {
@@ -884,6 +934,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Trekstor Primebook C11B (same touchscreen as the C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMEBOOK C11B"),
+ },
+ },
+ {
/* Trekstor Primebook C13 */
.driver_data = (void *)&trekstor_primebook_c13_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 784cea8572c2..59e9aa0f9643 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -340,9 +340,7 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
* expensive, but have no corresponding WCxx method. So we
* should not fail if this happens.
*/
- if (acpi_has_method(handle, wc_method))
- wc_status = acpi_execute_simple_method(handle,
- wc_method, 1);
+ wc_status = acpi_execute_simple_method(handle, wc_method, 1);
}
strcpy(method, "WQ");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
USB_CH_IP_CUR_LVL_1P5;
break;
}
+ /* Else, fall through */
case USB_STAT_HM_IDGND:
dev_err(di->dev, "USB Type - Charging not allowed\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 24ff2a068978..cd1270614cc6 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -59,14 +59,14 @@ struct idle_inject_thread {
/**
* struct idle_inject_device - idle injection data
* @timer: idle injection period timer
- * @idle_duration_ms: duration of CPU idle time to inject
- * @run_duration_ms: duration of CPU run time to allow
+ * @idle_duration_us: duration of CPU idle time to inject
+ * @run_duration_us: duration of CPU run time to allow
* @cpumask: mask of CPUs affected by idle injection
*/
struct idle_inject_device {
struct hrtimer timer;
- unsigned int idle_duration_ms;
- unsigned int run_duration_ms;
+ unsigned int idle_duration_us;
+ unsigned int run_duration_us;
unsigned long int cpumask[0];
};
@@ -104,16 +104,16 @@ static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
*/
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
- unsigned int duration_ms;
+ unsigned int duration_us;
struct idle_inject_device *ii_dev =
container_of(timer, struct idle_inject_device, timer);
- duration_ms = READ_ONCE(ii_dev->run_duration_ms);
- duration_ms += READ_ONCE(ii_dev->idle_duration_ms);
+ duration_us = READ_ONCE(ii_dev->run_duration_us);
+ duration_us += READ_ONCE(ii_dev->idle_duration_us);
idle_inject_wakeup(ii_dev);
- hrtimer_forward_now(timer, ms_to_ktime(duration_ms));
+ hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
@@ -138,35 +138,35 @@ static void idle_inject_fn(unsigned int cpu)
*/
iit->should_run = 0;
- play_idle(READ_ONCE(ii_dev->idle_duration_ms));
+ play_idle(READ_ONCE(ii_dev->idle_duration_us));
}
/**
* idle_inject_set_duration - idle and run duration update helper
- * @run_duration_ms: CPU run time to allow in milliseconds
- * @idle_duration_ms: CPU idle time to inject in milliseconds
+ * @run_duration_us: CPU run time to allow in microseconds
+ * @idle_duration_us: CPU idle time to inject in microseconds
*/
void idle_inject_set_duration(struct idle_inject_device *ii_dev,
- unsigned int run_duration_ms,
- unsigned int idle_duration_ms)
+ unsigned int run_duration_us,
+ unsigned int idle_duration_us)
{
- if (run_duration_ms && idle_duration_ms) {
- WRITE_ONCE(ii_dev->run_duration_ms, run_duration_ms);
- WRITE_ONCE(ii_dev->idle_duration_ms, idle_duration_ms);
+ if (run_duration_us && idle_duration_us) {
+ WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
+ WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
}
}
/**
* idle_inject_get_duration - idle and run duration retrieval helper
- * @run_duration_ms: memory location to store the current CPU run time
- * @idle_duration_ms: memory location to store the current CPU idle time
+ * @run_duration_us: memory location to store the current CPU run time
+ * @idle_duration_us: memory location to store the current CPU idle time
*/
void idle_inject_get_duration(struct idle_inject_device *ii_dev,
- unsigned int *run_duration_ms,
- unsigned int *idle_duration_ms)
+ unsigned int *run_duration_us,
+ unsigned int *idle_duration_us)
{
- *run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
- *idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
+ *run_duration_us = READ_ONCE(ii_dev->run_duration_us);
+ *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
}
/**
@@ -181,10 +181,10 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev,
*/
int idle_inject_start(struct idle_inject_device *ii_dev)
{
- unsigned int idle_duration_ms = READ_ONCE(ii_dev->idle_duration_ms);
- unsigned int run_duration_ms = READ_ONCE(ii_dev->run_duration_ms);
+ unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
+ unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
- if (!idle_duration_ms || !run_duration_ms)
+ if (!idle_duration_us || !run_duration_us)
return -EINVAL;
pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
@@ -193,7 +193,8 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
idle_inject_wakeup(ii_dev);
hrtimer_start(&ii_dev->timer,
- ms_to_ktime(idle_duration_ms + run_duration_ms),
+ ns_to_ktime((idle_duration_us + run_duration_us) *
+ NSEC_PER_USEC),
HRTIMER_MODE_REL);
return 0;
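The conversion above moves the idle-injection budget from milliseconds to microseconds, so the hrtimer period is now built with ns_to_ktime(). A sketch of that arithmetic (the u64 cast is an extra precaution against 32-bit overflow, not something the hunk itself does):

#include <linux/ktime.h>

static ktime_t example_period_from_us(unsigned int run_us,
				      unsigned int idle_us)
{
	/* microseconds -> nanoseconds -> ktime_t */
	return ns_to_ktime((u64)(run_us + idle_us) * NSEC_PER_USEC);
}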
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 9fd6dd342169..94ddd7d659c8 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -957,27 +957,27 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core),
INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_CORE, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_ULT, rapl_defaults_core),
- INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL_G, rapl_defaults_core),
INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(BROADWELL_CORE, rapl_defaults_core),
- INTEL_CPU_FAM6(BROADWELL_GT3E, rapl_defaults_core),
- INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL_D, rapl_defaults_core),
INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(SKYLAKE_DESKTOP, rapl_defaults_core),
- INTEL_CPU_FAM6(SKYLAKE_MOBILE, rapl_defaults_core),
+ INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(SKYLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core),
- INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core),
- INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core),
- INTEL_CPU_FAM6(ICELAKE_DESKTOP, rapl_defaults_core),
+ INTEL_CPU_FAM6(KABYLAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core),
+ INTEL_CPU_FAM6(CANNONLAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(ICELAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
- INTEL_CPU_FAM6(ICELAKE_XEON_D, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -985,8 +985,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann),
INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core),
- INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
@@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void)
unregister_pm_notifier(&rapl_pm_notifier);
}
-module_init(rapl_init);
+fs_initcall(rapl_init);
module_exit(rapl_exit);
MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 540e8aafc990..f808c5fa9838 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -671,7 +671,7 @@ static int __init powercap_init(void)
return class_register(&powercap_class);
}
-device_initcall(powercap_init);
+fs_initcall(powercap_init);
MODULE_DESCRIPTION("PowerCap sysfs Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index c3ab07ab31a9..8edfac17364e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -882,8 +882,11 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
return of_pwm_get(dev, dev->of_node, con_id);
/* then lookup via ACPI */
- if (dev && is_acpi_node(dev->fwnode))
- return acpi_pwm_get(dev->fwnode);
+ if (dev && is_acpi_node(dev->fwnode)) {
+ pwm = acpi_pwm_get(dev->fwnode);
+ if (!IS_ERR(pwm) || PTR_ERR(pwm) != -ENOENT)
+ return pwm;
+ }
/*
* We look up the provider in the static table typically provided by
diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
index ef6777e14d3d..6f0404f50107 100644
--- a/drivers/ras/Makefile
+++ b/drivers/ras/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RAS) += ras.o debugfs.o
+obj-$(CONFIG_RAS) += ras.o
+obj-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_RAS_CEC) += cec.o
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 5d545806d930..c09cf55e2d20 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -4,6 +4,7 @@
*/
#include <linux/mm.h>
#include <linux/gfp.h>
+#include <linux/ras.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c
index 9c1b717efad8..0d4f985afbf3 100644
--- a/drivers/ras/debugfs.c
+++ b/drivers/ras/debugfs.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/debugfs.h>
+#include <linux/ras.h>
+#include "debugfs.h"
struct dentry *ras_debugfs_dir;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b57093d7c01f..3ee63531f6d5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -83,6 +83,7 @@ config REGULATOR_88PM8607
config REGULATOR_ACT8865
tristate "Active-semi act8865 voltage regulator"
depends on I2C
+ depends on POWER_SUPPLY
select REGMAP_I2C
help
This driver controls an active-semi act8865 voltage output
@@ -618,6 +619,15 @@ config REGULATOR_MT6323
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6358
+ tristate "MediaTek MT6358 PMIC"
+ depends on MFD_MT6397 && BROKEN
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6358 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
config REGULATOR_MT6380
tristate "MediaTek MT6380 PMIC"
depends on MTK_PMIC_WRAP
@@ -906,6 +916,13 @@ config REGULATOR_SY8106A
help
This driver supports SY8106A single output regulator.
+config REGULATOR_SY8824X
+ tristate "Silergy SY8824C/SY8824E regulator"
+ depends on I2C && (OF || COMPILE_TEST)
+ select REGMAP_I2C
+ help
+ This driver supports the SY8824C/SY8824E single output regulator.
+
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index eef73b5a35a4..2210ba56f9bd 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
+obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
@@ -111,6 +112,7 @@ obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
+obj-$(CONFIG_REGULATOR_SY8824X) += sy8824x.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index cf72d7c6b8c9..0fa97f934df4 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -16,8 +16,10 @@
#include <linux/regulator/act8865.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/power_supply.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
/*
* ACT8600 Global Register Map.
@@ -89,35 +91,50 @@
*/
#define ACT8865_SYS_MODE 0x00
#define ACT8865_SYS_CTRL 0x01
+#define ACT8865_SYS_UNLK_REGS 0x0b
#define ACT8865_DCDC1_VSET1 0x20
#define ACT8865_DCDC1_VSET2 0x21
#define ACT8865_DCDC1_CTRL 0x22
+#define ACT8865_DCDC1_SUS 0x24
#define ACT8865_DCDC2_VSET1 0x30
#define ACT8865_DCDC2_VSET2 0x31
#define ACT8865_DCDC2_CTRL 0x32
+#define ACT8865_DCDC2_SUS 0x34
#define ACT8865_DCDC3_VSET1 0x40
#define ACT8865_DCDC3_VSET2 0x41
#define ACT8865_DCDC3_CTRL 0x42
+#define ACT8865_DCDC3_SUS 0x44
#define ACT8865_LDO1_VSET 0x50
#define ACT8865_LDO1_CTRL 0x51
+#define ACT8865_LDO1_SUS 0x52
#define ACT8865_LDO2_VSET 0x54
#define ACT8865_LDO2_CTRL 0x55
+#define ACT8865_LDO2_SUS 0x56
#define ACT8865_LDO3_VSET 0x60
#define ACT8865_LDO3_CTRL 0x61
+#define ACT8865_LDO3_SUS 0x62
#define ACT8865_LDO4_VSET 0x64
#define ACT8865_LDO4_CTRL 0x65
+#define ACT8865_LDO4_SUS 0x66
#define ACT8865_MSTROFF 0x20
/*
* Field Definitions.
*/
#define ACT8865_ENA 0x80 /* ON - [7] */
+#define ACT8865_DIS 0x40 /* DIS - [6] */
+
#define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */
#define ACT8600_LDO10_ENA 0x40 /* ON - [6] */
#define ACT8600_SUDCDC_VSEL_MASK 0xFF /* SUDCDC VSET - [7:0] */
+#define ACT8600_APCH_CHG_ACIN BIT(7)
+#define ACT8600_APCH_CHG_USB BIT(6)
+#define ACT8600_APCH_CSTATE0 BIT(5)
+#define ACT8600_APCH_CSTATE1 BIT(4)
+
/*
* ACT8865 voltage number
*/
@@ -217,6 +234,171 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
+static int act8865_set_suspend_state(struct regulator_dev *rdev, bool enable)
+{
+ struct regmap *regmap = rdev->regmap;
+ int id = rdev->desc->id, reg, val;
+
+ switch (id) {
+ case ACT8865_ID_DCDC1:
+ reg = ACT8865_DCDC1_SUS;
+ val = 0xa8;
+ break;
+ case ACT8865_ID_DCDC2:
+ reg = ACT8865_DCDC2_SUS;
+ val = 0xa8;
+ break;
+ case ACT8865_ID_DCDC3:
+ reg = ACT8865_DCDC3_SUS;
+ val = 0xa8;
+ break;
+ case ACT8865_ID_LDO1:
+ reg = ACT8865_LDO1_SUS;
+ val = 0xe8;
+ break;
+ case ACT8865_ID_LDO2:
+ reg = ACT8865_LDO2_SUS;
+ val = 0xe8;
+ break;
+ case ACT8865_ID_LDO3:
+ reg = ACT8865_LDO3_SUS;
+ val = 0xe8;
+ break;
+ case ACT8865_ID_LDO4:
+ reg = ACT8865_LDO4_SUS;
+ val = 0xe8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (enable)
+ val |= BIT(4);
+
+ /*
+ * Ask the PMIC to enable/disable this output when entering hibernate
+ * mode.
+ */
+ return regmap_write(regmap, reg, val);
+}
+
+static int act8865_set_suspend_enable(struct regulator_dev *rdev)
+{
+ return act8865_set_suspend_state(rdev, true);
+}
+
+static int act8865_set_suspend_disable(struct regulator_dev *rdev)
+{
+ return act8865_set_suspend_state(rdev, false);
+}
+
+static unsigned int act8865_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case ACT8865_REGULATOR_MODE_FIXED:
+ return REGULATOR_MODE_FAST;
+ case ACT8865_REGULATOR_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case ACT8865_REGULATOR_MODE_LOWPOWER:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int act8865_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct regmap *regmap = rdev->regmap;
+ int id = rdev_get_id(rdev);
+ int reg, val = 0;
+
+ switch (id) {
+ case ACT8865_ID_DCDC1:
+ reg = ACT8865_DCDC1_CTRL;
+ break;
+ case ACT8865_ID_DCDC2:
+ reg = ACT8865_DCDC2_CTRL;
+ break;
+ case ACT8865_ID_DCDC3:
+ reg = ACT8865_DCDC3_CTRL;
+ break;
+ case ACT8865_ID_LDO1:
+ reg = ACT8865_LDO1_CTRL;
+ break;
+ case ACT8865_ID_LDO2:
+ reg = ACT8865_LDO2_CTRL;
+ break;
+ case ACT8865_ID_LDO3:
+ reg = ACT8865_LDO3_CTRL;
+ break;
+ case ACT8865_ID_LDO4:
+ reg = ACT8865_LDO4_CTRL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ case REGULATOR_MODE_NORMAL:
+ if (id <= ACT8865_ID_DCDC3)
+ val = BIT(5);
+ break;
+ case REGULATOR_MODE_STANDBY:
+ if (id > ACT8865_ID_DCDC3)
+ val = BIT(5);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(regmap, reg, BIT(5), val);
+}
+
+static unsigned int act8865_get_mode(struct regulator_dev *rdev)
+{
+ struct regmap *regmap = rdev->regmap;
+ int id = rdev_get_id(rdev);
+ int reg, ret, val = 0;
+
+ switch (id) {
+ case ACT8865_ID_DCDC1:
+ reg = ACT8865_DCDC1_CTRL;
+ break;
+ case ACT8865_ID_DCDC2:
+ reg = ACT8865_DCDC2_CTRL;
+ break;
+ case ACT8865_ID_DCDC3:
+ reg = ACT8865_DCDC3_CTRL;
+ break;
+ case ACT8865_ID_LDO1:
+ reg = ACT8865_LDO1_CTRL;
+ break;
+ case ACT8865_ID_LDO2:
+ reg = ACT8865_LDO2_CTRL;
+ break;
+ case ACT8865_ID_LDO3:
+ reg = ACT8865_LDO3_CTRL;
+ break;
+ case ACT8865_ID_LDO4:
+ reg = ACT8865_LDO4_CTRL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ if (id <= ACT8865_ID_DCDC3 && (val & BIT(5)))
+ return REGULATOR_MODE_FAST;
+ else if (id > ACT8865_ID_DCDC3 && !(val & BIT(5)))
+ return REGULATOR_MODE_NORMAL;
+ else
+ return REGULATOR_MODE_STANDBY;
+}
+
static const struct regulator_ops act8865_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
@@ -224,24 +406,44 @@ static const struct regulator_ops act8865_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
+ .set_mode = act8865_set_mode,
+ .get_mode = act8865_get_mode,
.is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = act8865_set_suspend_enable,
+ .set_suspend_disable = act8865_set_suspend_disable,
};
static const struct regulator_ops act8865_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = act8865_set_mode,
+ .get_mode = act8865_get_mode,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = act8865_set_suspend_enable,
+ .set_suspend_disable = act8865_set_suspend_disable,
+ .set_pull_down = regulator_set_pull_down_regmap,
+};
+
+static const struct regulator_ops act8865_fixed_ldo_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
-#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
+#define ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, _ops) \
[_family##_ID_##_id] = { \
.name = _name, \
.of_match = of_match_ptr(_name), \
+ .of_map_mode = act8865_of_map_mode, \
.regulators_node = of_match_ptr("regulators"), \
.supply_name = _supply, \
.id = _family##_ID_##_id, \
.type = REGULATOR_VOLTAGE, \
- .ops = &act8865_ops, \
+ .ops = _ops, \
.n_voltages = ACT8865_VOLTAGE_NUM, \
.linear_ranges = act8865_voltage_ranges, \
.n_linear_ranges = ARRAY_SIZE(act8865_voltage_ranges), \
@@ -249,9 +451,17 @@ static const struct regulator_ops act8865_ldo_ops = {
.vsel_mask = ACT8865_VSEL_MASK, \
.enable_reg = _family##_##_id##_CTRL, \
.enable_mask = ACT8865_ENA, \
+ .pull_down_reg = _family##_##_id##_CTRL, \
+ .pull_down_mask = ACT8865_DIS, \
.owner = THIS_MODULE, \
}
+#define ACT88xx_REG(_name, _family, _id, _vsel_reg, _supply) \
+ ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ops)
+
+#define ACT88xx_LDO(_name, _family, _id, _vsel_reg, _supply) \
+ ACT88xx_REG_(_name, _family, _id, _vsel_reg, _supply, &act8865_ldo_ops)
+
static const struct regulator_desc act8600_regulators[] = {
ACT88xx_REG("DCDC1", ACT8600, DCDC1, VSET, "vp1"),
ACT88xx_REG("DCDC2", ACT8600, DCDC2, VSET, "vp2"),
@@ -281,7 +491,7 @@ static const struct regulator_desc act8600_regulators[] = {
.of_match = of_match_ptr("LDO_REG9"),
.regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO9,
- .ops = &act8865_ldo_ops,
+ .ops = &act8865_fixed_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = 1,
.fixed_uV = 3300000,
@@ -294,7 +504,7 @@ static const struct regulator_desc act8600_regulators[] = {
.of_match = of_match_ptr("LDO_REG10"),
.regulators_node = of_match_ptr("regulators"),
.id = ACT8600_ID_LDO10,
- .ops = &act8865_ldo_ops,
+ .ops = &act8865_fixed_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = 1,
.fixed_uV = 1200000,
@@ -323,20 +533,20 @@ static const struct regulator_desc act8865_regulators[] = {
ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET1, "vp1"),
ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET1, "vp2"),
ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET1, "vp3"),
- ACT88xx_REG("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
- ACT88xx_REG("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
- ACT88xx_REG("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
- ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
+ ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
+ ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
+ ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
+ ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
};
static const struct regulator_desc act8865_alt_regulators[] = {
ACT88xx_REG("DCDC_REG1", ACT8865, DCDC1, VSET2, "vp1"),
ACT88xx_REG("DCDC_REG2", ACT8865, DCDC2, VSET2, "vp2"),
ACT88xx_REG("DCDC_REG3", ACT8865, DCDC3, VSET2, "vp3"),
- ACT88xx_REG("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
- ACT88xx_REG("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
- ACT88xx_REG("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
- ACT88xx_REG("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
+ ACT88xx_LDO("LDO_REG1", ACT8865, LDO1, VSET, "inl45"),
+ ACT88xx_LDO("LDO_REG2", ACT8865, LDO2, VSET, "inl45"),
+ ACT88xx_LDO("LDO_REG3", ACT8865, LDO3, VSET, "inl67"),
+ ACT88xx_LDO("LDO_REG4", ACT8865, LDO4, VSET, "inl67"),
};
#ifdef CONFIG_OF
@@ -372,6 +582,75 @@ static void act8865_power_off(void)
while (1);
}
+static int act8600_charger_get_status(struct regmap *map)
+{
+ unsigned int val;
+ int ret;
+ u8 state0, state1;
+
+ ret = regmap_read(map, ACT8600_APCH_STAT, &val);
+ if (ret < 0)
+ return ret;
+
+ state0 = val & ACT8600_APCH_CSTATE0;
+ state1 = val & ACT8600_APCH_CSTATE1;
+
+ if (state0 && !state1)
+ return POWER_SUPPLY_STATUS_CHARGING;
+ if (!state0 && state1)
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ if (!state0 && !state1)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+}
+
+static int act8600_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct regmap *map = power_supply_get_drvdata(psy);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = act8600_charger_get_status(map);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property act8600_charger_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+};
+
+static const struct power_supply_desc act8600_charger_desc = {
+ .name = "act8600-charger",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = act8600_charger_properties,
+ .num_properties = ARRAY_SIZE(act8600_charger_properties),
+ .get_property = act8600_charger_get_property,
+};
+
+static int act8600_charger_probe(struct device *dev, struct regmap *regmap)
+{
+ struct power_supply *charger;
+ struct power_supply_config cfg = {
+ .drv_data = regmap,
+ .of_node = dev->of_node,
+ };
+
+ charger = devm_power_supply_register(dev, &act8600_charger_desc, &cfg);
+
+ return PTR_ERR_OR_ZERO(charger);
+}
+
static int act8865_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
@@ -483,9 +762,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
}
}
+ if (type == ACT8600) {
+ ret = act8600_charger_probe(dev, act8865->regmap);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to probe charger");
+ return ret;
+ }
+ }
+
i2c_set_clientdata(client, act8865);
- return 0;
+ /* Unlock expert registers for ACT8865. */
+ return type != ACT8865 ? 0 : regmap_write(act8865->regmap,
+ ACT8865_SYS_UNLK_REGS, 0xef);
}
static const struct i2c_device_id act8865_ids[] = {
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 584284938ac9..d2f804dbc785 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
reg = ACT8945A_DCDC3_CTRL;
break;
case ACT8945A_ID_LDO1:
- reg = ACT8945A_LDO1_SUS;
+ reg = ACT8945A_LDO1_CTRL;
break;
case ACT8945A_ID_LDO2:
- reg = ACT8945A_LDO2_SUS;
+ reg = ACT8945A_LDO2_CTRL;
break;
case ACT8945A_ID_LDO3:
- reg = ACT8945A_LDO3_SUS;
+ reg = ACT8945A_LDO3_CTRL;
break;
case ACT8945A_ID_LDO4:
- reg = ACT8945A_LDO4_SUS;
+ reg = ACT8945A_LDO4_CTRL;
break;
default:
return -EINVAL;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 152053361862..989506bd90b1 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -174,14 +174,14 @@
#define AXP803_DCDC5_1140mV_STEPS 35
#define AXP803_DCDC5_1140mV_END \
(AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
-#define AXP803_DCDC5_NUM_VOLTAGES 68
+#define AXP803_DCDC5_NUM_VOLTAGES 69
#define AXP803_DCDC6_600mV_START 0x00
#define AXP803_DCDC6_600mV_STEPS 50
#define AXP803_DCDC6_600mV_END \
(AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
#define AXP803_DCDC6_1120mV_START 0x33
-#define AXP803_DCDC6_1120mV_STEPS 14
+#define AXP803_DCDC6_1120mV_STEPS 20
#define AXP803_DCDC6_1120mV_END \
(AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
#define AXP803_DCDC6_NUM_VOLTAGES 72
@@ -240,7 +240,7 @@
#define AXP806_DCDCA_600mV_END \
(AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
#define AXP806_DCDCA_1120mV_START 0x33
-#define AXP806_DCDCA_1120mV_STEPS 14
+#define AXP806_DCDCA_1120mV_STEPS 20
#define AXP806_DCDCA_1120mV_END \
(AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
#define AXP806_DCDCA_NUM_VOLTAGES 72
@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
AXP806_DCDCD_600mV_END,
20000),
REGULATOR_LINEAR_RANGE(1600000,
- AXP806_DCDCD_600mV_START,
- AXP806_DCDCD_600mV_END,
+ AXP806_DCDCD_1600mV_START,
+ AXP806_DCDCD_1600mV_END,
100000),
};
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e0c0cf462004..afe94470b67f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -381,12 +381,16 @@ static struct device_node *of_get_child_regulator(struct device_node *parent,
if (!regnode) {
regnode = of_get_child_regulator(child, prop_name);
if (regnode)
- return regnode;
+ goto err_node_put;
} else {
- return regnode;
+ goto err_node_put;
}
}
return NULL;
+
+err_node_put:
+ of_node_put(child);
+ return regnode;
}
/**
@@ -564,13 +568,15 @@ static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
- ssize_t ret;
+ int uV;
regulator_lock(rdev);
- ret = sprintf(buf, "%d\n", regulator_get_voltage_rdev(rdev));
+ uV = regulator_get_voltage_rdev(rdev);
regulator_unlock(rdev);
- return ret;
+ if (uV < 0)
+ return uV;
+ return sprintf(buf, "%d\n", uV);
}
static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
@@ -5640,7 +5646,7 @@ static int __init regulator_init(void)
/* init early to allow our consumers to complete system booting */
core_initcall(regulator_init);
-static int __init regulator_late_cleanup(struct device *dev, void *data)
+static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
const struct regulator_ops *ops = rdev->desc->ops;
@@ -5689,18 +5695,9 @@ unlock:
return 0;
}
-static int __init regulator_init_complete(void)
+static void regulator_init_complete_work_function(struct work_struct *work)
{
/*
- * Since DT doesn't provide an idiomatic mechanism for
- * enabling full constraints and since it's much more natural
- * with DT to provide them just assume that a DT enabled
- * system has full constraints.
- */
- if (of_have_populated_dt())
- has_full_constraints = true;
-
- /*
* Regulators may have failed to resolve their input supplies
* when they were registered, either because the input supply was
* not registered yet or because its parent device was not
@@ -5717,6 +5714,35 @@ static int __init regulator_init_complete(void)
*/
class_for_each_device(&regulator_class, NULL, NULL,
regulator_late_cleanup);
+}
+
+static DECLARE_DELAYED_WORK(regulator_init_complete_work,
+ regulator_init_complete_work_function);
+
+static int __init regulator_init_complete(void)
+{
+ /*
+ * Since DT doesn't provide an idiomatic mechanism for
+ * enabling full constraints and since it's much more natural
+ * with DT to provide them just assume that a DT enabled
+ * system has full constraints.
+ */
+ if (of_have_populated_dt())
+ has_full_constraints = true;
+
+ /*
+ * We punt completion for an arbitrary amount of time since
+ * systems like distros will load many drivers from userspace
+ * so consumers might not always be ready yet; this is
+ * particularly an issue with laptops where this might bounce
+ * the display off then on. Ideally we'd get a notification
+ * from userspace when this happens but we don't so just wait
+ * a bit and hope we waited long enough. It'd be better if
+ * we'd only do this on systems that need it, and a kernel
+ * command line option might be useful.
+ */
+ schedule_delayed_work(&regulator_init_complete_work,
+ msecs_to_jiffies(30000));
return 0;
}
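regulator_init_complete_work above postpones the late-cleanup pass by 30 seconds rather than running it straight from the initcall. The underlying pattern, sketched with invented names and an invented initcall level:

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_late_cleanup(struct work_struct *work)
{
	/* one-shot pass that only makes sense well after boot */
}

static DECLARE_DELAYED_WORK(example_cleanup_work, example_late_cleanup);

static int __init example_init(void)
{
	schedule_delayed_work(&example_cleanup_work, msecs_to_jiffies(30000));
	return 0;
}
late_initcall(example_init);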
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 2ffc64622451..56f3f72d7707 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -1032,10 +1032,8 @@ static int da9062_regulator_probe(struct platform_device *pdev)
/* LDOs overcurrent event support */
irq = platform_get_irq_byname(pdev, "LDO_LIM");
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ.\n");
+ if (irq < 0)
return irq;
- }
regulators->irq_ldo_lim = irq;
ret = devm_request_threaded_irq(&pdev->dev, irq,
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 02f816318fba..28b1b20f45bd 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -863,10 +863,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
/* LDOs overcurrent event support */
irq = platform_get_irq_byname(pdev, "LDO_LIM");
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ.\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(&pdev->dev, irq,
NULL, da9063_ldo_lim_event,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 0309823d2c72..bf80748f1ccc 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -285,7 +285,7 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
pdata->reg_node[n] = da9211_matches[i].of_node;
pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev,
da9211_matches[i].of_node,
- "enable",
+ "enable-gpios",
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"da9211-enable");
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 999547dde99d..d90a6fd8cbc7 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -23,14 +23,63 @@
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
+#include <linux/clk.h>
+
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
+
+ struct clk *enable_clock;
+ unsigned int clk_enable_counter;
};
+struct fixed_dev_type {
+ bool has_enable_clock;
+};
+
+static const struct fixed_dev_type fixed_voltage_data = {
+ .has_enable_clock = false,
+};
+
+static const struct fixed_dev_type fixed_clkenable_data = {
+ .has_enable_clock = true,
+};
+
+static int reg_clock_enable(struct regulator_dev *rdev)
+{
+ struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(priv->enable_clock);
+ if (ret)
+ return ret;
+
+ priv->clk_enable_counter++;
+
+ return ret;
+}
+
+static int reg_clock_disable(struct regulator_dev *rdev)
+{
+ struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+
+ clk_disable_unprepare(priv->enable_clock);
+ priv->clk_enable_counter--;
+
+ return 0;
+}
+
+static int reg_clock_is_enabled(struct regulator_dev *rdev)
+{
+ struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
+
+ return priv->clk_enable_counter > 0;
+}
+
/**
* of_get_fixed_voltage_config - extract fixed_voltage_config structure info
@@ -84,10 +133,19 @@ of_get_fixed_voltage_config(struct device *dev,
static struct regulator_ops fixed_voltage_ops = {
};
+static struct regulator_ops fixed_voltage_clkenabled_ops = {
+ .enable = reg_clock_enable,
+ .disable = reg_clock_disable,
+ .is_enabled = reg_clock_is_enabled,
+};
+
static int reg_fixed_voltage_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
+ const struct fixed_dev_type *drvtype =
+ of_match_device(dev->driver->of_match_table, dev)->data;
struct regulator_config cfg = { };
enum gpiod_flags gflags;
int ret;
@@ -118,7 +176,18 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
- drvdata->desc.ops = &fixed_voltage_ops;
+
+ if (drvtype->has_enable_clock) {
+ drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
+
+ drvdata->enable_clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(drvdata->enable_clock)) {
+ dev_err(dev, "Cant get enable-clock from devicetree\n");
+ return -ENOENT;
+ }
+ } else {
+ drvdata->desc.ops = &fixed_voltage_ops;
+ }
drvdata->desc.enable_time = config->startup_delay;
@@ -191,8 +260,16 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id fixed_of_match[] = {
- { .compatible = "regulator-fixed", },
- {},
+ {
+ .compatible = "regulator-fixed",
+ .data = &fixed_voltage_data,
+ },
+ {
+ .compatible = "regulator-fixed-clock",
+ .data = &fixed_clkenable_data,
+ },
+ {
+ },
};
MODULE_DEVICE_TABLE(of, fixed_of_match);
#endif
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 4986cc5064a1..ca3dc3f3bb29 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -860,3 +860,24 @@ int regulator_get_current_limit_regmap(struct regulator_dev *rdev)
return -EINVAL;
}
EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap);
+
+/**
+ * regulator_bulk_set_supply_names - initialize the 'supply' fields in an array
+ * of regulator_bulk_data structs
+ *
+ * @consumers: array of regulator_bulk_data entries to initialize
+ * @supply_names: array of supply name strings
+ * @num_supplies: number of supply names to initialize
+ *
+ * Note: the 'consumers' array must be the size of 'num_supplies'.
+ */
+void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+ const char *const *supply_names,
+ unsigned int num_supplies)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_supplies; i++)
+ consumers[i].supply = supply_names[i];
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names);
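A possible consumer of the new helper, with the supply names and the wrapper invented for illustration (the consumers array must have at least as many entries as there are names):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const example_supply_names[] = { "vdd", "vddio" };

static int example_get_supplies(struct device *dev,
				struct regulator_bulk_data *consumers)
{
	regulator_bulk_set_supply_names(consumers, example_supply_names,
					ARRAY_SIZE(example_supply_names));

	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supply_names),
				       consumers);
}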
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index 5647e2f97ff8..4b9f618b07e9 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -30,13 +30,13 @@
/* LM3632 */
#define LM3632_BOOST_VSEL_MAX 0x26
-#define LM3632_LDO_VSEL_MAX 0x29
+#define LM3632_LDO_VSEL_MAX 0x28
#define LM3632_VBOOST_MIN 4500000
#define LM3632_VLDO_MIN 4000000
/* LM36274 */
#define LM36274_BOOST_VSEL_MAX 0x3f
-#define LM36274_LDO_VSEL_MAX 0x34
+#define LM36274_LDO_VSEL_MAX 0x32
#define LM36274_VOLTAGE_MIN 4000000
/* Common */
@@ -226,7 +226,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
.of_match = "vboost",
.id = LM36274_BOOST,
.ops = &lm363x_boost_voltage_table_ops,
- .n_voltages = LM36274_BOOST_VSEL_MAX,
+ .n_voltages = LM36274_BOOST_VSEL_MAX + 1,
.min_uV = LM36274_VOLTAGE_MIN,
.uV_step = LM363X_STEP_50mV,
.type = REGULATOR_VOLTAGE,
@@ -239,7 +239,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
.of_match = "vpos",
.id = LM36274_LDO_POS,
.ops = &lm363x_regulator_voltage_table_ops,
- .n_voltages = LM36274_LDO_VSEL_MAX,
+ .n_voltages = LM36274_LDO_VSEL_MAX + 1,
.min_uV = LM36274_VOLTAGE_MIN,
.uV_step = LM363X_STEP_50mV,
.type = REGULATOR_VOLTAGE,
@@ -254,7 +254,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = {
.of_match = "vneg",
.id = LM36274_LDO_NEG,
.ops = &lm363x_regulator_voltage_table_ops,
- .n_voltages = LM36274_LDO_VSEL_MAX,
+ .n_voltages = LM36274_LDO_VSEL_MAX + 1,
.min_uV = LM36274_VOLTAGE_MIN,
.uV_step = LM363X_STEP_50mV,
.type = REGULATOR_VOLTAGE,
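The off-by-one corrections above follow from selectors being zero-based: if the highest valid selector is VSEL_MAX, the regulator encodes VSEL_MAX + 1 distinct voltages. In sketch form, with an invented constant:

/* selectors run 0 .. VSEL_MAX inclusive, so the count is VSEL_MAX + 1 */
#define EXAMPLE_LDO_VSEL_MAX	0x28
#define EXAMPLE_LDO_N_VOLTAGES	(EXAMPLE_LDO_VSEL_MAX + 1)	/* 41 levels */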
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 5d067f7c2116..4ae12ac1f4c6 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -65,7 +65,6 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
int id = rdev_get_id(rdev);
- struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
unsigned int reg;
int ret;
@@ -86,11 +85,11 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
else
reg = 0;
- ret = regmap_update_bits(lp87565->regmap, regulators[id].ctrl2_reg,
+ ret = regmap_update_bits(rdev->regmap, regulators[id].ctrl2_reg,
LP87565_BUCK_CTRL_2_SLEW_RATE,
reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
if (ret) {
- dev_err(lp87565->dev, "SLEW RATE write failed: %d\n", ret);
+ dev_err(&rdev->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
}
@@ -163,7 +162,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3;
+ int i, min_idx, max_idx;
platform_set_drvdata(pdev, lp87565);
@@ -182,9 +181,9 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
max_idx = LP87565_BUCK_3210;
break;
default:
- dev_err(lp87565->dev, "Invalid lp config %d\n",
- lp87565->dev_type);
- return -EINVAL;
+ min_idx = LP87565_BUCK_0;
+ max_idx = LP87565_BUCK_3;
+ break;
}
for (i = min_idx; i <= max_idx; i++) {
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 1b00f3638996..00e9bb92c326 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -464,7 +464,7 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
{
struct lp8788 *lp = ldo->lp;
enum lp8788_ext_ldo_en_id enable_id;
- u8 en_mask[] = {
+ static const u8 en_mask[] = {
[EN_ALDO1] = LP8788_EN_SEL_ALDO1_M,
[EN_ALDO234] = LP8788_EN_SEL_ALDO234_M,
[EN_ALDO5] = LP8788_EN_SEL_ALDO5_M,
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 8020eb57374a..c8e579e99316 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -257,7 +257,7 @@ static int max77686_of_parse_cb(struct device_node *np,
case MAX77686_BUCK9:
case MAX77686_LDO20 ... MAX77686_LDO22:
config->ena_gpiod = gpiod_get_from_of_node(np,
- "maxim,ena",
+ "maxim,ena-gpios",
0,
GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
"max77686-regulator");
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4bca54446287..347043a5a9a7 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -485,7 +485,6 @@ static int max8660_probe(struct i2c_client *client,
rdev = devm_regulator_register(&client->dev,
&max8660_reg[id], &config);
if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
dev_err(&client->dev, "failed to register %s\n",
max8660_reg[id].name);
return PTR_ERR(rdev);
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
new file mode 100644
index 000000000000..ba42682e06f3
--- /dev/null
+++ b/drivers/regulator/mt6358-regulator.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6358-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6358_BUCK_MODE_AUTO 0
+#define MT6358_BUCK_MODE_FORCE_PWM 1
+
+/*
+ * MT6358 regulator information
+ *
+ * @desc: standard fields of regulator description.
+ * @status_reg: register reporting the regulator's enable status.
+ * @qi: mask used to query the enable status in @status_reg.
+ * @index_table: map from framework selector to the value of the VSEL field.
+ * @n_table: number of entries in @index_table.
+ * @vsel_shift: shift of the VSEL field within the voltage select register.
+ * @da_vsel_reg, @da_vsel_mask, @da_vsel_shift: buck selector readback.
+ * @modeset_reg, @modeset_mask, @modeset_shift: buck auto/forced-PWM mode control.
+ */
+struct mt6358_regulator_info {
+ struct regulator_desc desc;
+ u32 status_reg;
+ u32 qi;
+ const u32 *index_table;
+ unsigned int n_table;
+ u32 vsel_shift;
+ u32 da_vsel_reg;
+ u32 da_vsel_mask;
+ u32 da_vsel_shift;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 modeset_shift;
+};
+
+#define MT6358_BUCK(match, vreg, min, max, step, \
+ volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask, \
+ _da_vsel_shift, _modeset_reg, _modeset_shift) \
+[MT6358_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6358_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6358_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = MT6358_BUCK_##vreg##_ELR0, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = MT6358_BUCK_##vreg##_CON0, \
+ .enable_mask = BIT(0), \
+ .of_map_mode = mt6358_map_mode, \
+ }, \
+ .status_reg = MT6358_BUCK_##vreg##_DBG1, \
+ .qi = BIT(0), \
+ .da_vsel_reg = _da_vsel_reg, \
+ .da_vsel_mask = _da_vsel_mask, \
+ .da_vsel_shift = _da_vsel_shift, \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = BIT(_modeset_shift), \
+ .modeset_shift = _modeset_shift \
+}
+
+#define MT6358_LDO(match, vreg, ldo_volt_table, \
+ ldo_index_table, enreg, enbit, vosel, \
+ vosel_mask, vosel_shift) \
+[MT6358_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6358_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6358_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .status_reg = MT6358_LDO_##vreg##_CON1, \
+ .qi = BIT(15), \
+ .index_table = ldo_index_table, \
+ .n_table = ARRAY_SIZE(ldo_index_table), \
+ .vsel_shift = vosel_shift, \
+}
+
+#define MT6358_LDO1(match, vreg, min, max, step, \
+ volt_ranges, _da_vsel_reg, _da_vsel_mask, \
+ _da_vsel_shift, vosel, vosel_mask) \
+[MT6358_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6358_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6358_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = MT6358_LDO_##vreg##_CON0, \
+ .enable_mask = BIT(0), \
+ }, \
+ .da_vsel_reg = _da_vsel_reg, \
+ .da_vsel_mask = _da_vsel_mask, \
+ .da_vsel_shift = _da_vsel_shift, \
+ .status_reg = MT6358_LDO_##vreg##_DBG1, \
+ .qi = BIT(0), \
+}
+
+#define MT6358_REG_FIXED(match, vreg, \
+ enreg, enbit, volt) \
+[MT6358_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6358_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6358_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .status_reg = MT6358_LDO_##vreg##_CON1, \
+ .qi = BIT(15), \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range buck_volt_range4[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
+};
+
+static const u32 vdram2_voltages[] = {
+ 600000, 1800000,
+};
+
+static const u32 vsim_voltages[] = {
+ 1700000, 1800000, 2700000, 3000000, 3100000,
+};
+
+static const u32 vibr_voltages[] = {
+ 1200000, 1300000, 1500000, 1800000,
+ 2000000, 2800000, 3000000, 3300000,
+};
+
+static const u32 vusb_voltages[] = {
+ 3000000, 3100000,
+};
+
+static const u32 vcamd_voltages[] = {
+ 900000, 1000000, 1100000, 1200000,
+ 1300000, 1500000, 1800000,
+};
+
+static const u32 vefuse_voltages[] = {
+ 1700000, 1800000, 1900000,
+};
+
+static const u32 vmch_vemc_voltages[] = {
+ 2900000, 3000000, 3300000,
+};
+
+static const u32 vcama_voltages[] = {
+ 1800000, 2500000, 2700000,
+ 2800000, 2900000, 3000000,
+};
+
+static const u32 vcn33_bt_wifi_voltages[] = {
+ 3300000, 3400000, 3500000,
+};
+
+static const u32 vmc_voltages[] = {
+ 1800000, 2900000, 3000000, 3300000,
+};
+
+static const u32 vldo28_voltages[] = {
+ 2800000, 3000000,
+};
+
+static const u32 vdram2_idx[] = {
+ 0, 12,
+};
+
+static const u32 vsim_idx[] = {
+ 3, 4, 8, 11, 12,
+};
+
+static const u32 vibr_idx[] = {
+ 0, 1, 2, 4, 5, 9, 11, 13,
+};
+
+static const u32 vusb_idx[] = {
+ 3, 4,
+};
+
+static const u32 vcamd_idx[] = {
+ 3, 4, 5, 6, 7, 9, 12,
+};
+
+static const u32 vefuse_idx[] = {
+ 11, 12, 13,
+};
+
+static const u32 vmch_vemc_idx[] = {
+ 2, 3, 5,
+};
+
+static const u32 vcama_idx[] = {
+ 0, 7, 9, 10, 11, 12,
+};
+
+static const u32 vcn33_bt_wifi_idx[] = {
+ 1, 2, 3,
+};
+
+static const u32 vmc_idx[] = {
+ 4, 10, 11, 13,
+};
+
+static const u32 vldo28_idx[] = {
+ 1, 3,
+};
+
+static unsigned int mt6358_map_mode(unsigned int mode)
+{
+ return mode == MT6358_BUCK_MODE_AUTO ?
+ REGULATOR_MODE_NORMAL : REGULATOR_MODE_FAST;
+}
+
+static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ int idx, ret;
+ const u32 *pvol;
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+ pvol = info->index_table;
+
+ idx = pvol[selector];
+ ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
+ info->desc.vsel_mask,
+ idx << info->vsel_shift);
+
+ return ret;
+}
+
+static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
+{
+ int idx, ret;
+ u32 selector;
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+ const u32 *pvol;
+
+ ret = regmap_read(rdev->regmap, info->desc.vsel_reg, &selector);
+ if (ret != 0) {
+ dev_info(&rdev->dev,
+ "Failed to get mt6358 %s vsel reg: %d\n",
+ info->desc.name, ret);
+ return ret;
+ }
+
+ selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+ pvol = info->index_table;
+ for (idx = 0; idx < info->desc.n_voltages; idx++) {
+ if (pvol[idx] == selector)
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
+static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
+{
+ int ret, regval;
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->da_vsel_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6358 Buck %s vsel reg: %d\n",
+ info->desc.name, ret);
+ return ret;
+ }
+
+ ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+
+ return ret;
+}
+
+static int mt6358_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 regval;
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->status_reg, &regval);
+ if (ret != 0) {
+ dev_info(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MT6358_BUCK_MODE_FORCE_PWM;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6358_BUCK_MODE_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
+ info->modeset_reg, info->modeset_mask,
+ info->modeset_shift, val);
+
+ val <<= info->modeset_shift;
+
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+}
+
+static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6358_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret, regval;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6358 buck mode: %d\n", ret);
+ return ret;
+ }
+
+ switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+ case MT6358_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case MT6358_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct regulator_ops mt6358_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = mt6358_get_buck_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6358_get_status,
+ .set_mode = mt6358_regulator_set_mode,
+ .get_mode = mt6358_regulator_get_mode,
+};
+
+static const struct regulator_ops mt6358_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = mt6358_set_voltage_sel,
+ .get_voltage_sel = mt6358_get_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6358_get_status,
+};
+
+static const struct regulator_ops mt6358_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6358_get_status,
+};
+
+/* The array is indexed by id(MT6358_ID_XXX) */
+static struct mt6358_regulator_info mt6358_regulators[] = {
+ MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
+ buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
+ 0, MT6358_VDRAM1_ANA_CON0, 8),
+ MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
+ buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
+ 0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+ MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+ MT6358_VPA_ANA_CON0, 3),
+ MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
+ buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
+ 0, MT6358_VPROC_ANA_CON0, 1),
+ MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
+ buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
+ 0, MT6358_VPROC_ANA_CON0, 2),
+ MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
+ buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, 0,
+ MT6358_VCORE_VGPU_ANA_CON0, 2),
+ MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
+ buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+ MT6358_VS2_ANA_CON0, 8),
+ MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
+ buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
+ 0, MT6358_VMODEM_ANA_CON0, 8),
+ MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
+ buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+ MT6358_VS1_ANA_CON0, 8),
+ MT6358_REG_FIXED("ldo_vrf12", VRF12,
+ MT6358_LDO_VRF12_CON0, 0, 1200000),
+ MT6358_REG_FIXED("ldo_vio18", VIO18,
+ MT6358_LDO_VIO18_CON0, 0, 1800000),
+ MT6358_REG_FIXED("ldo_vcamio", VCAMIO,
+ MT6358_LDO_VCAMIO_CON0, 0, 1800000),
+ MT6358_REG_FIXED("ldo_vcn18", VCN18, MT6358_LDO_VCN18_CON0, 0, 1800000),
+ MT6358_REG_FIXED("ldo_vfe28", VFE28, MT6358_LDO_VFE28_CON0, 0, 2800000),
+ MT6358_REG_FIXED("ldo_vcn28", VCN28, MT6358_LDO_VCN28_CON0, 0, 2800000),
+ MT6358_REG_FIXED("ldo_vxo22", VXO22, MT6358_LDO_VXO22_CON0, 0, 2200000),
+ MT6358_REG_FIXED("ldo_vaux18", VAUX18,
+ MT6358_LDO_VAUX18_CON0, 0, 1800000),
+ MT6358_REG_FIXED("ldo_vbif28", VBIF28,
+ MT6358_LDO_VBIF28_CON0, 0, 2800000),
+ MT6358_REG_FIXED("ldo_vio28", VIO28, MT6358_LDO_VIO28_CON0, 0, 2800000),
+ MT6358_REG_FIXED("ldo_va12", VA12, MT6358_LDO_VA12_CON0, 0, 1200000),
+ MT6358_REG_FIXED("ldo_vrf18", VRF18, MT6358_LDO_VRF18_CON0, 0, 1800000),
+ MT6358_REG_FIXED("ldo_vaud28", VAUD28,
+ MT6358_LDO_VAUD28_CON0, 0, 2800000),
+ MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
+ MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0x10, 0),
+ MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
+ MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
+ MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
+ MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+ MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
+ MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
+ MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vmch", VMCH, vmch_vemc_voltages, vmch_vemc_idx,
+ MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+ MT6358_LDO("ldo_vcama1", VCAMA1, vcama_voltages, vcama_idx,
+ MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vemc", VEMC, vmch_vemc_voltages, vmch_vemc_idx,
+ MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+ MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
+ vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
+ 0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+ MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
+ vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
+ 0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+ MT6358_LDO("ldo_vcama2", VCAMA2, vcama_voltages, vcama_idx,
+ MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
+ MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+ MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
+ MT6358_LDO_VLDO28_CON0_0, 0,
+ MT6358_VLDO28_ANA_CON0, 0x300, 8),
+ MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
+ MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+ MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
+ buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+ MT6358_LDO_VSRAM_CON0, 0x7f),
+ MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
+ buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+ MT6358_LDO_VSRAM_CON2, 0x7f),
+ MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
+ buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+ MT6358_LDO_VSRAM_CON3, 0x7f),
+ MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
+ buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+ MT6358_LDO_VSRAM_CON1, 0x7f),
+};
+
+static int mt6358_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+
+ for (i = 0; i < MT6358_MAX_REGULATOR; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6358_regulators[i];
+ config.regmap = mt6397->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6358_regulators[i].desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6358_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id mt6358_platform_ids[] = {
+ {"mt6358-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6358_platform_ids);
+
+static struct platform_driver mt6358_regulator_driver = {
+ .driver = {
+ .name = "mt6358-regulator",
+ },
+ .probe = mt6358_regulator_probe,
+ .id_table = mt6358_platform_ids,
+};
+
+module_platform_driver(mt6358_regulator_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6358 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 397918ebba55..afefb29ce1b0 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -416,8 +416,10 @@ device_node *regulator_of_get_init_node(struct device *dev,
if (!name)
name = child->name;
- if (!strcmp(desc->of_match, name))
+ if (!strcmp(desc->of_match, name)) {
+ of_node_put(search);
return of_node_get(child);
+ }
}
of_node_put(search);
@@ -460,16 +462,11 @@ error:
return NULL;
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
{
struct device *dev;
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+ dev = class_find_device_by_of_node(&regulator_class, np);
return dev ? dev_to_rdev(dev) : NULL;
}
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index b2c2d01d1637..db6c085da65e 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -50,6 +50,20 @@ enum rpmh_regulator_type {
#define PMIC4_BOB_MODE_AUTO 2
#define PMIC4_BOB_MODE_PWM 3
+#define PMIC5_LDO_MODE_RETENTION 3
+#define PMIC5_LDO_MODE_LPM 4
+#define PMIC5_LDO_MODE_HPM 7
+
+#define PMIC5_SMPS_MODE_RETENTION 3
+#define PMIC5_SMPS_MODE_PFM 4
+#define PMIC5_SMPS_MODE_AUTO 6
+#define PMIC5_SMPS_MODE_PWM 7
+
+#define PMIC5_BOB_MODE_PASS 2
+#define PMIC5_BOB_MODE_PFM 4
+#define PMIC5_BOB_MODE_AUTO 6
+#define PMIC5_BOB_MODE_PWM 7
+
/**
* struct rpmh_vreg_hw_data - RPMh regulator hardware configurations
* @regulator_type: RPMh accelerator type used to manage this
@@ -488,6 +502,14 @@ static const int pmic_mode_map_pmic4_ldo[REGULATOR_MODE_STANDBY + 1] = {
[REGULATOR_MODE_FAST] = -EINVAL,
};
+static const int pmic_mode_map_pmic5_ldo[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC5_LDO_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC5_LDO_MODE_LPM,
+ [REGULATOR_MODE_NORMAL] = PMIC5_LDO_MODE_HPM,
+ [REGULATOR_MODE_FAST] = -EINVAL,
+};
+
static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
{
unsigned int mode;
@@ -518,6 +540,14 @@ static const int pmic_mode_map_pmic4_smps[REGULATOR_MODE_STANDBY + 1] = {
[REGULATOR_MODE_FAST] = PMIC4_SMPS_MODE_PWM,
};
+static const int pmic_mode_map_pmic5_smps[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = PMIC5_SMPS_MODE_RETENTION,
+ [REGULATOR_MODE_IDLE] = PMIC5_SMPS_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC5_SMPS_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC5_SMPS_MODE_PWM,
+};
+
static unsigned int
rpmh_regulator_pmic4_smps_of_map_mode(unsigned int rpmh_mode)
{
@@ -552,6 +582,14 @@ static const int pmic_mode_map_pmic4_bob[REGULATOR_MODE_STANDBY + 1] = {
[REGULATOR_MODE_FAST] = PMIC4_BOB_MODE_PWM,
};
+static const int pmic_mode_map_pmic5_bob[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = -EINVAL,
+ [REGULATOR_MODE_IDLE] = PMIC5_BOB_MODE_PFM,
+ [REGULATOR_MODE_NORMAL] = PMIC5_BOB_MODE_AUTO,
+ [REGULATOR_MODE_FAST] = PMIC5_BOB_MODE_PWM,
+};
+
static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
{
unsigned int mode;
@@ -637,6 +675,72 @@ static const struct rpmh_vreg_hw_data pmic4_lvs = {
/* LVS hardware does not support voltage or mode configuration. */
};
+static const struct rpmh_vreg_hw_data pmic5_pldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ .n_voltages = 256,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ .n_voltages = 63,
+ .hpm_min_load_uA = 10000,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_nldo = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ .n_voltages = 124,
+ .hpm_min_load_uA = 30000,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .n_voltages = 216,
+ .pmic_mode_map = pmic_mode_map_pmic5_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ .n_voltages = 264,
+ .pmic_mode_map = pmic_mode_map_pmic5_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .n_voltages = 5,
+ .pmic_mode_map = pmic_mode_map_pmic5_smps,
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_bob = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_bypass_ops,
+ .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
+ .n_voltages = 136,
+ .pmic_mode_map = pmic_mode_map_pmic5_bob,
+ .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
+};
+
#define RPMH_VREG(_name, _resource_name, _hw_data, _supply_name) \
{ \
.name = _name, \
@@ -705,6 +809,75 @@ static const struct rpmh_vreg_init_data pm8005_vreg_data[] = {
{},
};
+static const struct rpmh_vreg_init_data pm8150_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_hfsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_hfsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps510, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps510, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps510, "vdd-s10"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo, "vdd-l6-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps510, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps510, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps510, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps510, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_hfsmps510, "vdd-s8"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo_lv, "vdd-l1-l8"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"),
+ RPMH_VREG("bob", "bob%s1", &pmic5_bob, "vdd-bob"),
+ {},
+};
+
+static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps515, "vdd-s2"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ {},
+};
+
static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -744,6 +917,22 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
static const struct of_device_id rpmh_regulator_match_table[] = {
{
+ .compatible = "qcom,pm8005-rpmh-regulators",
+ .data = pm8005_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm8009-rpmh-regulators",
+ .data = pm8009_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm8150-rpmh-regulators",
+ .data = pm8150_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm8150l-rpmh-regulators",
+ .data = pm8150l_vreg_data,
+ },
+ {
.compatible = "qcom,pm8998-rpmh-regulators",
.data = pm8998_vreg_data,
},
@@ -751,10 +940,6 @@ static const struct of_device_id rpmh_regulator_match_table[] = {
.compatible = "qcom,pmi8998-rpmh-regulators",
.data = pmi8998_vreg_data,
},
- {
- .compatible = "qcom,pm8005-rpmh-regulators",
- .data = pm8005_vreg_data,
- },
{}
};
MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
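
The pmic_mode_map_pmic5_* tables added above follow the same convention as the PMIC4 ones: they are indexed by the framework's REGULATOR_MODE_* value and yield the mode value sent over RPMh, with -EINVAL marking unsupported modes, which is why the PMIC4 of_map_mode helpers can be reused. A minimal sketch of the lookup, with a hypothetical helper name:

/* Hypothetical helper mirroring how the mode maps are consumed:
 * index by REGULATOR_MODE_*, propagate -EINVAL for unsupported modes.
 */
static int example_mode_to_rpmh(const int *pmic_mode_map, unsigned int mode)
{
	if (mode > REGULATOR_MODE_STANDBY)
		return -EINVAL;
	return pmic_mode_map[mode];	/* may itself be -EINVAL */
}
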
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e7af0c53d449..61bd5ef0806c 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -606,7 +606,7 @@ static unsigned int rk8xx_regulator_of_map_mode(unsigned int mode)
case 2:
return REGULATOR_MODE_NORMAL;
default:
- return -EINVAL;
+ return REGULATOR_MODE_INVALID;
}
}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 054baaadfdfd..5bc00884cf51 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -1226,7 +1226,7 @@ common_reg:
goto out;
}
- if (s2mps11->ext_control_gpiod[i]) {
+ if (config.ena_gpiod) {
ret = s2mps14_pmic_enable_ext_control(s2mps11,
regulator);
if (ret < 0) {
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 04b732991d69..a0565daecace 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np,
ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
"enable-gpios", 0,
gflags, "gpio-en-ldo");
- if (ena_gpiod) {
+ if (!IS_ERR(ena_gpiod)) {
config->ena_gpiod = ena_gpiod;
devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
}
@@ -447,18 +447,19 @@ static int slg51000_i2c_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct slg51000 *chip;
- struct gpio_desc *cs_gpiod = NULL;
+ struct gpio_desc *cs_gpiod;
int error, ret;
chip = devm_kzalloc(dev, sizeof(struct slg51000), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- cs_gpiod = devm_gpiod_get_from_of_node(dev, dev->of_node,
- "dlg,cs-gpios", 0,
- GPIOD_OUT_HIGH
- | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
- "slg51000-cs");
+ cs_gpiod = devm_gpiod_get_optional(dev, "dlg,cs",
+ GPIOD_OUT_HIGH |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(cs_gpiod))
+ return PTR_ERR(cs_gpiod);
+
if (cs_gpiod) {
dev_info(dev, "Found chip selector property\n");
chip->cs_gpiod = cs_gpiod;
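
The switch to devm_gpiod_get_optional() above changes the error-handling contract: the optional variant returns NULL when the property is simply absent and an ERR_PTR() for real failures such as -EPROBE_DEFER, which is why the probe now checks IS_ERR() first and then treats a non-NULL descriptor as "chip select present". A short sketch of that canonical pattern (the "example" con_id, mapping to an "example-gpios" DT property, is a placeholder, not this driver's binding):

/* Canonical optional-GPIO pattern; names are placeholders. */
static int example_get_optional_gpio(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "example", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* real error, e.g. -EPROBE_DEFER */
	if (!gpiod)
		dev_dbg(dev, "optional GPIO not described, skipping\n");
	return 0;
}
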
diff --git a/drivers/regulator/stm32-booster.c b/drivers/regulator/stm32-booster.c
index 2a897666c650..03f162ffd144 100644
--- a/drivers/regulator/stm32-booster.c
+++ b/drivers/regulator/stm32-booster.c
@@ -20,7 +20,6 @@
#define STM32MP1_SYSCFG_EN_BOOSTER_MASK BIT(8)
static const struct regulator_ops stm32h7_booster_ops = {
- .list_voltage = regulator_list_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -31,7 +30,6 @@ static const struct regulator_desc stm32h7_booster_desc = {
.supply_name = "vdda",
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
- .min_uV = 3300000,
.fixed_uV = 3300000,
.ramp_delay = 66000, /* up to 50us to stabilize */
.ops = &stm32h7_booster_ops,
@@ -53,7 +51,6 @@ static int stm32mp1_booster_disable(struct regulator_dev *rdev)
}
static const struct regulator_ops stm32mp1_booster_ops = {
- .list_voltage = regulator_list_voltage_linear,
.enable = stm32mp1_booster_enable,
.disable = stm32mp1_booster_disable,
.is_enabled = regulator_is_enabled_regmap,
@@ -64,7 +61,6 @@ static const struct regulator_desc stm32mp1_booster_desc = {
.supply_name = "vdda",
.n_voltages = 1,
.type = REGULATOR_VOLTAGE,
- .min_uV = 3300000,
.fixed_uV = 3300000,
.ramp_delay = 66000,
.ops = &stm32mp1_booster_ops,
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
new file mode 100644
index 000000000000..92adb4f3ee19
--- /dev/null
+++ b/drivers/regulator/sy8824x.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SY8824C/SY8824E regulator driver
+//
+// Copyright (C) 2019 Synaptics Incorporated
+//
+// Author: Jisheng Zhang <jszhang@kernel.org>
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define SY8824C_BUCK_EN (1 << 7)
+#define SY8824C_MODE (1 << 6)
+
+struct sy8824_config {
+ /* registers */
+ unsigned int vol_reg;
+ unsigned int mode_reg;
+ unsigned int enable_reg;
+ /* Voltage range and step (linear) */
+ unsigned int vsel_min;
+ unsigned int vsel_step;
+ unsigned int vsel_count;
+};
+
+struct sy8824_device_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_init_data *regulator;
+ const struct sy8824_config *cfg;
+};
+
+static int sy8824_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct sy8824_device_info *di = rdev_get_drvdata(rdev);
+ const struct sy8824_config *cfg = di->cfg;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ regmap_update_bits(rdev->regmap, cfg->mode_reg,
+ SY8824C_MODE, SY8824C_MODE);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ regmap_update_bits(rdev->regmap, cfg->mode_reg,
+ SY8824C_MODE, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int sy8824_get_mode(struct regulator_dev *rdev)
+{
+ struct sy8824_device_info *di = rdev_get_drvdata(rdev);
+ const struct sy8824_config *cfg = di->cfg;
+ u32 val;
+ int ret = 0;
+
+ ret = regmap_read(rdev->regmap, cfg->mode_reg, &val);
+ if (ret < 0)
+ return ret;
+ if (val & SY8824C_MODE)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops sy8824_regulator_ops = {
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .map_voltage = regulator_map_voltage_linear,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = sy8824_set_mode,
+ .get_mode = sy8824_get_mode,
+};
+
+static int sy8824_regulator_register(struct sy8824_device_info *di,
+ struct regulator_config *config)
+{
+ struct regulator_desc *rdesc = &di->desc;
+ const struct sy8824_config *cfg = di->cfg;
+ struct regulator_dev *rdev;
+
+ rdesc->name = "sy8824-reg";
+ rdesc->supply_name = "vin";
+ rdesc->ops = &sy8824_regulator_ops;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->n_voltages = cfg->vsel_count;
+ rdesc->enable_reg = cfg->enable_reg;
+ rdesc->enable_mask = SY8824C_BUCK_EN;
+ rdesc->min_uV = cfg->vsel_min;
+ rdesc->uV_step = cfg->vsel_step;
+ rdesc->vsel_reg = cfg->vol_reg;
+ rdesc->vsel_mask = cfg->vsel_count - 1;
+ rdesc->owner = THIS_MODULE;
+
+ rdev = devm_regulator_register(di->dev, &di->desc, config);
+ return PTR_ERR_OR_ZERO(rdev);
+}
+
+static const struct regmap_config sy8824_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int sy8824_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct sy8824_device_info *di;
+ struct regulator_config config = { };
+ struct regmap *regmap;
+ int ret;
+
+ di = devm_kzalloc(dev, sizeof(struct sy8824_device_info), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ di->regulator = of_get_regulator_init_data(dev, np, &di->desc);
+ if (!di->regulator) {
+ dev_err(dev, "Platform data not found!\n");
+ return -EINVAL;
+ }
+
+ di->dev = dev;
+ di->cfg = of_device_get_match_data(dev);
+
+ regmap = devm_regmap_init_i2c(client, &sy8824_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+ i2c_set_clientdata(client, di);
+
+ config.dev = di->dev;
+ config.init_data = di->regulator;
+ config.regmap = regmap;
+ config.driver_data = di;
+ config.of_node = np;
+
+ ret = sy8824_regulator_register(di, &config);
+ if (ret < 0)
+ dev_err(dev, "Failed to register regulator!\n");
+ return ret;
+}
+
+static const struct sy8824_config sy8824c_cfg = {
+ .vol_reg = 0x00,
+ .mode_reg = 0x00,
+ .enable_reg = 0x00,
+ .vsel_min = 762500,
+ .vsel_step = 12500,
+ .vsel_count = 64,
+};
+
+static const struct sy8824_config sy8824e_cfg = {
+ .vol_reg = 0x00,
+ .mode_reg = 0x00,
+ .enable_reg = 0x00,
+ .vsel_min = 700000,
+ .vsel_step = 12500,
+ .vsel_count = 64,
+};
+
+static const struct sy8824_config sy20276_cfg = {
+ .vol_reg = 0x00,
+ .mode_reg = 0x01,
+ .enable_reg = 0x01,
+ .vsel_min = 600000,
+ .vsel_step = 10000,
+ .vsel_count = 128,
+};
+
+static const struct sy8824_config sy20278_cfg = {
+ .vol_reg = 0x00,
+ .mode_reg = 0x01,
+ .enable_reg = 0x01,
+ .vsel_min = 762500,
+ .vsel_step = 12500,
+ .vsel_count = 64,
+};
+
+static const struct of_device_id sy8824_dt_ids[] = {
+ {
+ .compatible = "silergy,sy8824c",
+ .data = &sy8824c_cfg
+ },
+ {
+ .compatible = "silergy,sy8824e",
+ .data = &sy8824e_cfg
+ },
+ {
+ .compatible = "silergy,sy20276",
+ .data = &sy20276_cfg
+ },
+ {
+ .compatible = "silergy,sy20278",
+ .data = &sy20278_cfg
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sy8824_dt_ids);
+
+static const struct i2c_device_id sy8824_id[] = {
+ { "sy8824", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sy8824_id);
+
+static struct i2c_driver sy8824_regulator_driver = {
+ .driver = {
+ .name = "sy8824-regulator",
+ .of_match_table = of_match_ptr(sy8824_dt_ids),
+ },
+ .probe = sy8824_i2c_probe,
+ .id_table = sy8824_id,
+};
+module_i2c_driver(sy8824_regulator_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_DESCRIPTION("SY8824C/SY8824E regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index 6e22f5ebba2e..e302bd01a084 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -138,7 +138,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
rpdata->en_gpiod = devm_fwnode_get_index_gpiod_from_child(tps->dev,
"enable", 0, &np->fwnode, 0, "enable");
- if (IS_ERR(rpdata->en_gpiod)) {
+ if (IS_ERR_OR_NULL(rpdata->en_gpiod)) {
ret = PTR_ERR(rpdata->en_gpiod);
/* Ignore the error other than probe defer */
@@ -150,7 +150,7 @@ static int tps65132_of_parse_cb(struct device_node *np,
rpdata->act_dis_gpiod = devm_fwnode_get_index_gpiod_from_child(
tps->dev, "active-discharge", 0,
&np->fwnode, 0, "active-discharge");
- if (IS_ERR(rpdata->act_dis_gpiod)) {
+ if (IS_ERR_OR_NULL(rpdata->act_dis_gpiod)) {
ret = PTR_ERR(rpdata->act_dis_gpiod);
/* Ignore the error other than probe defer */
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6fa15b2d6fb3..866b4dd01da9 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
2500, 2750,
};
+/* 600mV to 1450mV in 12.5 mV steps */
+static const struct regulator_linear_range VDD1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
+};
+
+/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
+static const struct regulator_linear_range VDD2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
+ REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
+};
+
static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
}
static const struct regulator_ops twl4030smps_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+
.set_voltage = twl4030smps_set_voltage,
.get_voltage = twl4030smps_get_voltage,
};
@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
}, \
}
-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
+ n_volt) \
static const struct twlreg_info TWL4030_INFO_##label = { \
.base = offset, \
.id = num, \
@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
.of_map_mode = twl4030reg_map_mode, \
+ .n_voltages = n_volt, \
+ .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
+ .linear_ranges = label ## _ranges, \
}, \
}
@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
+TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
+TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
/* VUSBCP is managed *only* by the USB subchip */
TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
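
The VDD1/VDD2 range tables above let the core list voltages via regulator_list_voltage_linear_range(): VDD1 spans selectors 0..68 at 600000 + sel * 12500 uV (so 1450000 uV at the top), and VDD2 adds a one-entry range pinning selector 69 to 1500000 uV. A sketch of the equivalent walk for VDD2 (illustrative only, not part of the driver):

/* Illustration only: how the VDD2 ranges above resolve a selector. */
static int example_vdd2_list_voltage(unsigned int sel)
{
	if (sel <= 68)
		return 600000 + sel * 12500;	/* 600 mV .. 1450 mV */
	if (sel == 69)
		return 1500000;			/* everything above */
	return -EINVAL;
}
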
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 5fe208b381eb..b8100c3cedad 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -57,6 +57,9 @@ struct twlreg_info {
#define VREG_BC_PROC 3
#define VREG_BC_CLK_RST 4
+/* TWL6030 LDO register values for VREG_VOLTAGE */
+#define TWL6030_VREG_VOLTAGE_WR_S BIT(7)
+
/* TWL6030 LDO register values for CFG_STATE */
#define TWL6030_CFG_STATE_OFF 0x00
#define TWL6030_CFG_STATE_ON 0x01
@@ -68,9 +71,10 @@ struct twlreg_info {
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
TWL6030_CFG_STATE_APP_SHIFT)
-/* Flags for SMPS Voltage reading */
+/* Flags for SMPS Voltage reading and LDO reading */
#define SMPS_OFFSET_EN BIT(0)
#define SMPS_EXTENDED_EN BIT(1)
+#define TWL_6030_WARM_RESET BIT(3)
/* twl6032 SMPS EPROM values */
#define TWL6030_SMPS_OFFSET 0xB0
@@ -250,6 +254,9 @@ twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
+ if (info->flags & TWL_6030_WARM_RESET)
+ selector |= TWL6030_VREG_VOLTAGE_WR_S;
+
return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
selector);
}
@@ -259,6 +266,9 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
+ if (info->flags & TWL_6030_WARM_RESET)
+ vsel &= ~TWL6030_VREG_VOLTAGE_WR_S;
+
return vsel;
}
@@ -665,14 +675,14 @@ static int twlreg_probe(struct platform_device *pdev)
struct regulation_constraints *c;
struct regulator_dev *rdev;
struct regulator_config config = { };
+ struct device_node *np = pdev->dev.of_node;
template = of_device_get_match_data(&pdev->dev);
if (!template)
return -ENODEV;
id = template->desc.id;
- initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
- &template->desc);
+ initdata = of_get_regulator_init_data(&pdev->dev, np, &template->desc);
if (!initdata)
return -EINVAL;
@@ -710,10 +720,13 @@ static int twlreg_probe(struct platform_device *pdev)
break;
}
+ if (of_get_property(np, "ti,retain-on-reset", NULL))
+ info->flags |= TWL_6030_WARM_RESET;
+
config.dev = &pdev->dev;
config.init_data = initdata;
config.driver_data = info;
- config.of_node = pdev->dev.of_node;
+ config.of_node = np;
rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 9026d5a3e964..2311924c3103 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -186,6 +186,10 @@ static const struct of_device_id uniphier_regulator_match[] = {
.data = &uniphier_pro4_usb3_data,
},
{
+ .compatible = "socionext,uniphier-pro5-usb3-regulator",
+ .data = &uniphier_pro4_usb3_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-usb3-regulator",
.data = &uniphier_pxs2_usb3_data,
},
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 21efb7d39d62..7b07281aa0ae 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -116,9 +116,20 @@ config RESET_QCOM_PDC
to control reset signals provided by PDC for Modem, Compute,
Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS.
+config RESET_SCMI
+ tristate "Reset driver controlled via ARM SCMI interface"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ default ARM_SCMI_PROTOCOL
+ help
+ This driver provides support for reset signal/domains that are
+ controlled by firmware that implements the SCMI interface.
+
+ This driver uses SCMI Message Protocol to interact with the
+ firmware controlling all the reset signals.
+
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST
- default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN
+ default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN || ARC
help
This enables a simple reset controller driver for reset lines that
 	  can be asserted and deasserted by toggling bits in a contiguous,
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 61456b8f659c..cf60ce526064 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
+obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 3ecd770f910b..1443a55a0c29 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -169,9 +169,9 @@ static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
[IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
- [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
- [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
- [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
+ [IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
+ [IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
[IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
BIT(2) | BIT(1) },
[IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
@@ -220,9 +220,9 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
- case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
value = assert ? 0 : bit;
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 5242e0679df7..7d05d766e1ea 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -1,58 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Amlogic Meson Reset Controller driver
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
new file mode 100644
index 000000000000..c6d3c8427f14
--- /dev/null
+++ b/drivers/reset/reset-scmi.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM System Control and Management Interface (ARM SCMI) reset driver
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/reset-controller.h>
+#include <linux/scmi_protocol.h>
+
+/**
+ * struct scmi_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @handle: ARM SCMI handle used for communication with system controller
+ */
+struct scmi_reset_data {
+ struct reset_controller_dev rcdev;
+ const struct scmi_handle *handle;
+};
+
+#define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev)
+#define to_scmi_handle(p) (to_scmi_reset_data(p)->handle)
+
+/**
+ * scmi_reset_assert() - assert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be asserted
+ *
+ * This function implements the reset driver op to assert a device's reset
+ * using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_handle *handle = to_scmi_handle(rcdev);
+
+ return handle->reset_ops->assert(handle, id);
+}
+
+/**
+ * scmi_reset_deassert() - deassert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be deasserted
+ *
+ * This function implements the reset driver op to deassert a device's reset
+ * using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_handle *handle = to_scmi_handle(rcdev);
+
+ return handle->reset_ops->deassert(handle, id);
+}
+
+/**
+ * scmi_reset_reset() - reset the device
+ * @rcdev: reset controller entity
+ * @id: ID of the reset signal to be reset(assert + deassert)
+ *
+ * This function implements the reset driver op to trigger a device's
+ * reset signal using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_handle *handle = to_scmi_handle(rcdev);
+
+ return handle->reset_ops->reset(handle, id);
+}
+
+static const struct reset_control_ops scmi_reset_ops = {
+ .assert = scmi_reset_assert,
+ .deassert = scmi_reset_deassert,
+ .reset = scmi_reset_reset,
+};
+
+static int scmi_reset_probe(struct scmi_device *sdev)
+{
+ struct scmi_reset_data *data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle || !handle->reset_ops)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->rcdev.ops = &scmi_reset_ops;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.of_node = np;
+ data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle);
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_RESET },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_reset_driver = {
+ .name = "scmi-reset",
+ .probe = scmi_reset_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_reset_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI reset controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 1154f7b1f4dd..067e7e7b34f1 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -127,6 +127,9 @@ static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "aspeed,ast2500-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
+ { .compatible = "snps,dw-high-reset" },
+ { .compatible = "snps,dw-low-reset",
+ .data = &reset_simple_active_low },
{ /* sentinel */ },
};
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 72b7ddc43116..c93ef33b01d3 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -663,21 +663,12 @@ void rtc_update_irq(struct rtc_device *rtc,
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
-static int __rtc_match(struct device *dev, const void *data)
-{
- const char *name = data;
-
- if (strcmp(dev_name(dev), name) == 0)
- return 1;
- return 0;
-}
-
struct rtc_device *rtc_class_open(const char *name)
{
struct device *dev;
struct rtc_device *rtc = NULL;
- dev = class_find_device(rtc_class, NULL, name, __rtc_match);
+ dev = class_find_device_by_name(rtc_class, name);
if (dev)
rtc = to_rtc_device(dev);
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 225a8df1d4e9..367497914c10 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -14,7 +14,7 @@
*/
/*
* It would be more efficient to use i2c msgs/i2c_transfer directly but, as
- * recommened in .../Documentation/i2c/writing-clients section
+ * recommended in .../Documentation/i2c/writing-clients.rst section
* "Sending and receiving", using SMBus level communication is preferred.
*/
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index a863b0462b43..cde73b6a9afb 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -4,6 +4,3 @@
#
obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
-
-drivers-y += drivers/s390/built-in.a
-
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b9ce93e9df89..99f86612f775 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format;
char msg_no;
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+ * will be obtained from sleep_on to indicate that no
+ * IO operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
- do {
- rc = dasd_sleep_on(cqr);
- if (rc && suborder_not_supported(cqr))
- return -EOPNOTSUPP;
- } while (rc && (cqr->retries > 0));
- if (rc) {
+ rc = dasd_sleep_on(cqr);
+ if (rc && !suborder_not_supported(cqr)) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
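The three added early returns check distinct intrc values one by one; an equivalent, more compact form (a sketch for illustration only, not what the patch applies) would be:

switch (cqr->intrc) {
case -ENODEV:
case -ENOLINK:
case -EPERM:
        /* sleep_on could not start the I/O at all */
        return 1;
}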
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index b8a8816d94e7..845e12ac5954 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -49,6 +49,3 @@ obj-$(CONFIG_CRASH_DUMP) += sclp_sdias.o zcore.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o
-
-chkbss := sclp_early_core.o
-include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 8c9d412b6d33..e7cf0a1d4f71 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
}
if (dstat == 0x08)
break;
+ /* else, fall through */
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index e71992a3c55f..cc5e84b80c69 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,7 +40,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
- sclp.has_sipl = !!(sccb->cbl & 0x02);
+ sclp.has_sipl = !!(sccb->cbl & 0x4000);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
if (sccb->fac91 & 0x40)
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8d3370da2dfc..3e0b2f63a9d2 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
+ /* fallthrough */
case TS_NOT_OPER:
/*
* Nothing to do.
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
break;
if (device->tape_state == TS_UNUSED)
break;
+ /* fallthrough */
default:
if (device->tape_state == TS_BLKUSE)
break;
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
+ /* fallthrough */
case -EIO:
__tape_end_request(device, request, -EIO);
break;
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 0fa1b6b1491a..9e066281e2d0 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -43,6 +43,8 @@ static struct cma *vmcp_cma;
static int __init early_parse_vmcp_cma(char *p)
{
+ if (!p)
+ return 1;
vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE);
return 0;
}
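The added NULL check guards against "vmcp_cma" appearing on the command line without a value. The same defensive shape applies to any early_param() callback; a self-contained sketch with invented names:

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long example_size __initdata;

static int __init early_parse_example(char *p)
{
        if (!p)         /* "example" given bare, without "=<size>" */
                return 1;
        example_size = memparse(p, NULL);
        return 0;
}
early_param("example", early_parse_example);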
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index c522e9313c50..0005ec9285aa 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -581,11 +581,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_register);
-static int __ccwgroup_match_all(struct device *dev, const void *data)
-{
- return 1;
-}
-
/**
* ccwgroup_driver_unregister() - deregister a ccw group driver
* @cdriver: driver to be deregistered
@@ -597,8 +592,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
struct device *dev;
/* We don't want ccwgroup devices to live longer than their driver. */
- while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
- __ccwgroup_match_all))) {
+ while ((dev = driver_find_next_device(&cdriver->driver, NULL))) {
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
ccwgroup_ungroup(gdev);
@@ -608,13 +602,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
-static int __ccwgroupdev_check_busid(struct device *dev, const void *id)
-{
- const char *bus_id = id;
-
- return (strcmp(bus_id, dev_name(dev)) == 0);
-}
-
/**
* get_ccwgroupdev_by_busid() - obtain device from a bus id
* @gdrv: driver the device is owned by
@@ -631,8 +618,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
{
struct device *dev;
- dev = driver_find_device(&gdrv->driver, NULL, bus_id,
- __ccwgroupdev_check_busid);
+ dev = driver_find_device_by_name(&gdrv->driver, bus_id);
return dev ? to_ccwgroupdev(dev) : NULL;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index c421899be20f..131430bd48d9 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1695,18 +1695,6 @@ int ccw_device_force_console(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif
-/*
- * get ccw_device matching the busid, but only if owned by cdrv
- */
-static int
-__ccwdev_check_busid(struct device *dev, const void *id)
-{
- const char *bus_id = id;
-
- return (strcmp(bus_id, dev_name(dev)) == 0);
-}
-
-
/**
* get_ccwdev_by_busid() - obtain device from a bus id
* @cdrv: driver the device is owned by
@@ -1723,8 +1711,7 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
{
struct device *dev;
- dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
- __ccwdev_check_busid);
+ dev = driver_find_device_by_name(&cdrv->driver, bus_id);
return dev ? to_ccwdev(dev) : NULL;
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 730c4e68094b..4142c85e77d8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
int retries = 0, cc;
unsigned long laob = 0;
- WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
- !q->u.out.use_cq));
- if (q->u.out.use_cq && aob != 0) {
+ if (aob) {
fc = QDIO_SIGA_WRITEQ;
laob = aob;
}
@@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
{
unsigned long phys_aob = 0;
- if (!q->use_cq)
- return 0;
-
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
@@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
for_each_output_queue(irq_ptr, q, i) {
if (use_cq) {
+ if (multicast_outbound(q))
+ continue;
if (qdio_enable_async_operation(&q->u.out) < 0) {
use_cq = 0;
continue;
@@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* One SIGA-W per buffer required for unicast HSI */
WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
- phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+ if (q->u.out.use_cq)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
} else {
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q, 0);
- else
- qperf_inc(q, fast_requeue);
+ rc = qdio_kick_outbound_q(q, 0);
}
/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 8c1d2357ef5b..7a838e3d7c0f 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
}
-const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
.read = vfio_ccw_async_region_read,
.write = vfio_ccw_async_region_write,
.release = vfio_ccw_async_region_release,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 1d4c893ead23..3645d1720c4b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
sizeof(*pa->pa_iova_pfn) +
sizeof(*pa->pa_pfn),
GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn))
+ if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_nr = 0;
return -ENOMEM;
+ }
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
@@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain,
static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len;
+ int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
@@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
- return ccwchain_loop_tic(chain, cp);
+ ret = ccwchain_loop_tic(chain, cp);
+
+ if (ret)
+ ccwchain_free(chain);
+
+ return ret;
}
/* Loop for TICs. */
@@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
- if (ret)
- cp_free(cp);
-
- /* It is safe to force: if not set but idals used
- * ccwchain_calc_length returns an error.
- */
- cp->orb.cmd.c64 = 1;
- if (!ret)
+ if (!ret) {
cp->initialized = true;
+ /* It is safe to force: if it was not set but IDALs were used,
+ * ccwchain_calc_length() would have returned an error.
+ */
+ cp->orb.cmd.c64 = 1;
+ }
+
return ret;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 2b90a5ecaeb9..e401a3d0aa57 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -27,6 +27,9 @@ struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
+debug_info_t *vfio_ccw_debug_msg_id;
+debug_info_t *vfio_ccw_debug_trace_id;
+
/*
* Helpers
*/
@@ -88,7 +91,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- if (is_final)
+ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
cp_free(&private->cp);
}
mutex_lock(&private->io_mutex);
@@ -164,6 +167,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_disable;
+ VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
+ sch->schid.cssid, sch->schid.ssid,
+ sch->schid.sch_no);
return 0;
out_disable:
@@ -194,6 +200,9 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
kfree(private->cp.guest_cp);
kfree(private);
+ VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
+ sch->schid.cssid, sch->schid.ssid,
+ sch->schid.sch_no);
return 0;
}
@@ -263,27 +272,64 @@ static struct css_driver vfio_ccw_sch_driver = {
.sch_event = vfio_ccw_sch_event,
};
+static int __init vfio_ccw_debug_init(void)
+{
+ vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
+ 11 * sizeof(long));
+ if (!vfio_ccw_debug_msg_id)
+ goto out_unregister;
+ debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(vfio_ccw_debug_msg_id, 2);
+ vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
+ if (!vfio_ccw_debug_trace_id)
+ goto out_unregister;
+ debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
+ debug_set_level(vfio_ccw_debug_trace_id, 2);
+ return 0;
+
+out_unregister:
+ debug_unregister(vfio_ccw_debug_msg_id);
+ debug_unregister(vfio_ccw_debug_trace_id);
+ return -1;
+}
+
+static void vfio_ccw_debug_exit(void)
+{
+ debug_unregister(vfio_ccw_debug_msg_id);
+ debug_unregister(vfio_ccw_debug_trace_id);
+}
+
static int __init vfio_ccw_sch_init(void)
{
- int ret = -ENOMEM;
+ int ret;
+
+ ret = vfio_ccw_debug_init();
+ if (ret)
+ return ret;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
- if (!vfio_ccw_work_q)
- return -ENOMEM;
+ if (!vfio_ccw_work_q) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
- if (!vfio_ccw_io_region)
+ if (!vfio_ccw_io_region) {
+ ret = -ENOMEM;
goto out_err;
+ }
vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
sizeof(struct ccw_cmd_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_cmd_region), NULL);
- if (!vfio_ccw_cmd_region)
+ if (!vfio_ccw_cmd_region) {
+ ret = -ENOMEM;
goto out_err;
+ }
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
@@ -298,6 +344,7 @@ out_err:
kmem_cache_destroy(vfio_ccw_cmd_region);
kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
+ vfio_ccw_debug_exit();
return ret;
}
@@ -308,6 +355,7 @@ static void __exit vfio_ccw_sch_exit(void)
kmem_cache_destroy(vfio_ccw_io_region);
kmem_cache_destroy(vfio_ccw_cmd_region);
destroy_workqueue(vfio_ccw_work_q);
+ vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 49d9d3da0282..4a1e727c62d9 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -37,9 +37,14 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
goto out;
}
+ VFIO_CCW_TRACE_EVENT(5, "stIO");
+ VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
+
/* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb);
+ VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
+
switch (ccode) {
case 0:
/*
@@ -86,9 +91,14 @@ static int fsm_do_halt(struct vfio_ccw_private *private)
spin_lock_irqsave(sch->lock, flags);
+ VFIO_CCW_TRACE_EVENT(2, "haltIO");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
/* Issue "Halt Subchannel" */
ccode = hsch(sch->schid);
+ VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
switch (ccode) {
case 0:
/*
@@ -122,9 +132,14 @@ static int fsm_do_clear(struct vfio_ccw_private *private)
spin_lock_irqsave(sch->lock, flags);
+ VFIO_CCW_TRACE_EVENT(2, "clearIO");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
/* Issue "Clear Subchannel" */
ccode = csch(sch->schid);
+ VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
switch (ccode) {
case 0:
/*
@@ -149,6 +164,9 @@ static void fsm_notoper(struct vfio_ccw_private *private,
{
struct subchannel *sch = private->sch;
+ VFIO_CCW_TRACE_EVENT(2, "notoper");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
/*
* TODO:
* Probably we should send the machine check to the guest.
@@ -229,6 +247,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct ccw_io_region *io_region = private->io_region;
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
+ struct subchannel_id schid = get_schid(private);
private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
@@ -239,18 +258,32 @@ static void fsm_io_request(struct vfio_ccw_private *private,
/* Don't try to build a cp if transport mode is specified. */
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): transport mode\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
}
io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
orb);
if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): cp_init=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
errstr = "cp init";
goto err_out;
}
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
errstr = "cp prefetch";
cp_free(&private->cp);
goto err_out;
@@ -259,23 +292,36 @@ static void fsm_io_request(struct vfio_ccw_private *private,
/* Start channel program and wait for I/O interrupt. */
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
errstr = "cp fsm_io_helper";
cp_free(&private->cp);
goto err_out;
}
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): halt on io_region\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): clear on io_region\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
}
err_out:
- trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+ trace_vfio_ccw_io_fctl(scsw->cmd.fctl, schid,
io_region->ret_code, errstr);
}
@@ -308,6 +354,9 @@ static void fsm_irq(struct vfio_ccw_private *private,
{
struct irb *irb = this_cpu_ptr(&cio_irb);
+ VFIO_CCW_TRACE_EVENT(6, "IRQ");
+ VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));
+
memcpy(&private->irb, irb, sizeof(*irb));
queue_work(vfio_ccw_work_q, &private->io_work);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 5eb61116ca6f..f0d71ab77c50 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -124,6 +124,11 @@ static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
private->mdev = mdev;
private->state = VFIO_CCW_STATE_IDLE;
+ VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
+ mdev_uuid(mdev), private->sch->schid.cssid,
+ private->sch->schid.ssid,
+ private->sch->schid.sch_no);
+
return 0;
}
@@ -132,6 +137,11 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
+ VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
+ mdev_uuid(mdev), private->sch->schid.cssid,
+ private->sch->schid.ssid,
+ private->sch->schid.sch_no);
+
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
if (!vfio_ccw_sch_quiesce(private->sch))
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index f1092c3dc1b1..bbe9babf767b 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -17,6 +17,7 @@
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
+#include <asm/debug.h>
#include "css.h"
#include "vfio_ccw_cp.h"
@@ -139,4 +140,20 @@ static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
extern struct workqueue_struct *vfio_ccw_work_q;
+
+/* s390 debug feature, similar to base cio */
+extern debug_info_t *vfio_ccw_debug_msg_id;
+extern debug_info_t *vfio_ccw_debug_trace_id;
+
+#define VFIO_CCW_TRACE_EVENT(imp, txt) \
+ debug_text_event(vfio_ccw_debug_trace_id, imp, txt)
+
+#define VFIO_CCW_MSG_EVENT(imp, args...) \
+ debug_sprintf_event(vfio_ccw_debug_msg_id, imp, ##args)
+
+static inline void VFIO_CCW_HEX_EVENT(int level, void *data, int length)
+{
+ debug_event(vfio_ccw_debug_trace_id, level, data, length);
+}
+
#endif
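These wrappers follow the s390 debug feature conventions already used by base cio: records above the level set at registration time (2 here) are dropped unless the level is raised via debugfs. A usage sketch (function name and event text invented; the macros are the ones defined above):

static void example_trace(struct vfio_ccw_private *private, int ccode)
{
        struct subchannel_id schid = private->sch->schid;

        /* printf-style record in the message area */
        VFIO_CCW_MSG_EVENT(4, "sch %x.%x.%04x: example event, cc=%d\n",
                           schid.cssid, schid.ssid, schid.sch_no, ccode);
        /* short text tag plus raw hex payload in the trace area */
        VFIO_CCW_TRACE_EVENT(6, "xmpl");
        VFIO_CCW_HEX_EVENT(6, &ccode, sizeof(ccode));
}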
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 6ccd93d0b1cb..52aa95c8af4b 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,7 @@ ap-objs := ap_bus.o ap_card.o ap_queue.o
obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 5ea83dc4f1d7..dad2be333d82 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
+ /* fall through */
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 7f418d2d8cdf..f76a1d0f54c4 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2,7 +2,7 @@
/*
* pkey device driver
*
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017,2019
* Author(s): Harald Freudenberger
*/
@@ -24,16 +24,14 @@
#include <crypto/aes.h>
#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key interface");
-/* Size of parameter block used for all cca requests/replies */
-#define PARMBSIZE 512
-
-/* Size of vardata block used for some of the cca requests/replies */
-#define VARDATASIZE 4096
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
/* mask of available pckmo subfunctions, fetched once at module init */
static cpacf_mask_t pckmo_functions;
@@ -62,40 +60,6 @@ static void __exit pkey_debug_exit(void)
debug_unregister(debug_info);
}
-/* Key token types */
-#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
-
-/* For TOKTYPE_NON_CCA: */
-#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
-
-/* For TOKTYPE_CCA_INTERNAL: */
-#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
-
-/* header part of a key token */
-struct keytoken_header {
- u8 type; /* one of the TOKTYPE values */
- u8 res0[3];
- u8 version; /* one of the TOKVER values */
- u8 res1[3];
-} __packed;
-
-/* inside view of a secure key token (only type 0x01 version 0x04) */
-struct secaeskeytoken {
- u8 type; /* 0x01 for internal key token */
- u8 res0[3];
- u8 version; /* should be 0x04 */
- u8 res1[1];
- u8 flag; /* key flags */
- u8 res2[1];
- u64 mkvp; /* master key verification pattern */
- u8 key[32]; /* key value (encrypted) */
- u8 cv[8]; /* control vector */
- u16 bitsize; /* key bit size */
- u16 keysize; /* key byte size */
- u8 tvv[4]; /* token validation value */
-} __packed;
-
/* inside view of a protected key token (only type 0x00 version 0x01) */
struct protaeskeytoken {
u8 type; /* 0x00 for PAES specific key tokens */
@@ -108,557 +72,11 @@ struct protaeskeytoken {
} __packed;
/*
- * Simple check if the token is a valid CCA secure AES key
- * token. If keybitsize is given, the bitsize of the key is
- * also checked. Returns 0 on success or errno value on failure.
- */
-static int check_secaeskeytoken(const u8 *token, int keybitsize)
-{
- struct secaeskeytoken *t = (struct secaeskeytoken *) token;
-
- if (t->type != TOKTYPE_CCA_INTERNAL) {
- DEBUG_ERR(
- "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
- return -EINVAL;
- }
- if (t->version != TOKVER_CCA_AES) {
- DEBUG_ERR(
- "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->version, TOKVER_CCA_AES);
- return -EINVAL;
- }
- if (keybitsize > 0 && t->bitsize != keybitsize) {
- DEBUG_ERR(
- "%s secure token check failed, bitsize mismatch %d != %d\n",
- __func__, (int) t->bitsize, keybitsize);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block and fill in values
- * for the common fields. Returns 0 on success or errno value
- * on failure.
- */
-static int alloc_and_prep_cprbmem(size_t paramblen,
- u8 **pcprbmem,
- struct CPRBX **preqCPRB,
- struct CPRBX **prepCPRB)
-{
- u8 *cprbmem;
- size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
- struct CPRBX *preqcblk, *prepcblk;
-
- /*
- * allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block
- */
- cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
- if (!cprbmem)
- return -ENOMEM;
-
- preqcblk = (struct CPRBX *) cprbmem;
- prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
-
- /* fill request cprb struct */
- preqcblk->cprb_len = sizeof(struct CPRBX);
- preqcblk->cprb_ver_id = 0x02;
- memcpy(preqcblk->func_id, "T2", 2);
- preqcblk->rpl_msgbl = cprbplusparamblen;
- if (paramblen) {
- preqcblk->req_parmb =
- ((u8 *) preqcblk) + sizeof(struct CPRBX);
- preqcblk->rpl_parmb =
- ((u8 *) prepcblk) + sizeof(struct CPRBX);
- }
-
- *pcprbmem = cprbmem;
- *preqCPRB = preqcblk;
- *prepCPRB = prepcblk;
-
- return 0;
-}
-
-/*
- * Free the cprb memory allocated with the function above.
- * If the scrub value is not zero, the memory is filled
- * with zeros before freeing (useful if there was some
- * clear key material in there).
- */
-static void free_cprbmem(void *mem, size_t paramblen, int scrub)
-{
- if (scrub)
- memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
- kfree(mem);
-}
-
-/*
- * Helper function to prepare the xcrb struct
- */
-static inline void prep_xcrb(struct ica_xcRB *pxcrb,
- u16 cardnr,
- struct CPRBX *preqcblk,
- struct CPRBX *prepcblk)
-{
- memset(pxcrb, 0, sizeof(*pxcrb));
- pxcrb->agent_ID = 0x4341; /* 'CA' */
- pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
- pxcrb->request_control_blk_length =
- preqcblk->cprb_len + preqcblk->req_parml;
- pxcrb->request_control_blk_addr = (void __user *) preqcblk;
- pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
- pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
-}
-
-/*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
-{
- int rc;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- rc = zcrypt_send_cprb(xcrb);
- set_fs(old_fs);
-
- return rc;
-}
-
-/*
- * Generate (random) AES secure key.
- */
-int pkey_genseckey(u16 cardnr, u16 domain,
- u32 keytype, struct pkey_seckey *seckey)
-{
- int i, rc, keysize;
- int seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct kgreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- char key_form[8];
- char key_length[8];
- char key_type1[8];
- char key_type2[8];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid[6];
- } lv2;
- } *preqparm;
- struct kgrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with KG request */
- preqparm = (struct kgreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "KG", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- memcpy(preqparm->lv1.key_form, "OP ", 8);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
- preqparm->lv2.len = sizeof(struct lv2);
- for (i = 0; i < 6; i++) {
- preqparm->lv2.keyid[i].len = sizeof(struct keyid);
- preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
- }
- preqcblk->req_parml = sizeof(struct kgreqparm);
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s secure key generate failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
-
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
- if (rc) {
- rc = -EIO;
- goto out;
- }
-
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
- return rc;
-}
-EXPORT_SYMBOL(pkey_genseckey);
-
-/*
- * Generate an AES secure key with given key value.
- */
-int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_seckey *seckey)
-{
- int rc, keysize, seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct cmreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 clrkey[0];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid;
- } lv2;
- } *preqparm;
- struct lv2 *plv2;
- struct cmrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with CM request */
- preqparm = (struct cmreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "CM", 2);
- memcpy(preqparm->rule_array, "AES ", 8);
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- preqparm->lv1.len = sizeof(struct lv1) + keysize;
- memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
- plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
- plv2->len = sizeof(struct lv2);
- plv2->keyid.len = sizeof(struct keyid);
- plv2->keyid.attr = 0x30;
- preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s clear key import failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
-
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
- if (rc) {
- rc = -EIO;
- goto out;
- }
-
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 1);
- return rc;
-}
-EXPORT_SYMBOL(pkey_clr2seckey);
-
-/*
- * Derive a proteced key from the secure key blob.
- */
-int pkey_sec2protkey(u16 cardnr, u16 domain,
- const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
-{
- int rc;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct uskreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- } lv1;
- struct lv2 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- u8 token[0]; /* cca secure key token */
- } lv2 __packed;
- } *preqparm;
- struct uskrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- struct cpacfkeyblock {
- u8 version; /* version of this struct */
- u8 flags[2];
- u8 algo;
- u8 form;
- u8 pad1[3];
- u16 keylen;
- u8 key[64]; /* the key (keylen bytes) */
- u16 keyattrlen;
- u8 keyattr[32];
- u8 pad2[1];
- u8 vptype;
- u8 vp[32]; /* verification pattern */
- } keyblock;
- } lv3 __packed;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with USK request */
- preqparm = (struct uskreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "US", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
- preqparm->lv1.attr_flags = 0x0001;
- preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_len = sizeof(struct lv2)
- - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_flags = 0x0000;
- memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
- preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
- if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
-
- /* check the returned keyblock */
- if (prepparm->lv3.keyblock.version != 0x01) {
- DEBUG_ERR(
- "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->lv3.keyblock.version);
- rc = -EIO;
- goto out;
- }
-
- /* copy the tanslated protected key */
- switch (prepparm->lv3.keyblock.keylen) {
- case 16+32:
- protkey->type = PKEY_KEYTYPE_AES_128;
- break;
- case 24+32:
- protkey->type = PKEY_KEYTYPE_AES_192;
- break;
- case 32+32:
- protkey->type = PKEY_KEYTYPE_AES_256;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
- __func__, prepparm->lv3.keyblock.keylen);
- rc = -EIO;
- goto out;
- }
- protkey->len = prepparm->lv3.keyblock.keylen;
- memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
- return rc;
-}
-EXPORT_SYMBOL(pkey_sec2protkey);
-
-/*
* Create a protected key from a clear key value.
*/
-int pkey_clr2protkey(u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_protkey *protkey)
+static int pkey_clr2protkey(u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_protkey *protkey)
{
long fc;
int keysize;
@@ -707,338 +125,43 @@ int pkey_clr2protkey(u32 keytype,
return 0;
}
-EXPORT_SYMBOL(pkey_clr2protkey);
-
-/*
- * query cryptographic facility from adapter
- */
-static int query_crypto_facility(u16 cardnr, u16 domain,
- const char *keyword,
- u8 *rarray, size_t *rarraylen,
- u8 *varray, size_t *varraylen)
-{
- int rc;
- u16 len;
- u8 *mem, *ptr;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct fqreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 data[VARDATASIZE];
- } lv1;
- u16 dummylen;
- } *preqparm;
- size_t parmbsize = sizeof(struct fqreqparm);
- struct fqrepparm {
- u8 subfunc_code[2];
- u8 lvdata[0];
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with FQ request */
- preqparm = (struct fqreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "FQ", 2);
- memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- preqparm->lv1.len = sizeof(preqparm->lv1);
- preqparm->dummylen = sizeof(preqparm->dummylen);
- preqcblk->req_parml = parmbsize;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
-
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
- ptr = prepparm->lvdata;
-
- /* check and possibly copy reply rule array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (rarray && rarraylen && *rarraylen > 0) {
- *rarraylen = (len > *rarraylen ? *rarraylen : len);
- memcpy(rarray, ptr, *rarraylen);
- }
- ptr += len;
- }
- /* check and possible copy reply var array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (varray && varraylen && *varraylen > 0) {
- *varraylen = (len > *varraylen ? *varraylen : len);
- memcpy(varray, ptr, *varraylen);
- }
- ptr += len;
- }
-
-out:
- free_cprbmem(mem, parmbsize, 0);
- return rc;
-}
-
-/*
- * Fetch the current and old mkvp values via
- * query_crypto_facility from adapter.
- */
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int rc, found = 0;
- size_t rlen, vlen;
- u8 *rarray, *varray, *pg;
-
- pg = (u8 *) __get_free_page(GFP_KERNEL);
- if (!pg)
- return -ENOMEM;
- rarray = pg;
- varray = pg + PAGE_SIZE/2;
- rlen = vlen = PAGE_SIZE/2;
-
- rc = query_crypto_facility(cardnr, domain, "STATICSA",
- rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
- if (rarray[8*8] == '2') {
- /* current master key state is valid */
- mkvp[0] = *((u64 *)(varray + 184));
- mkvp[1] = *((u64 *)(varray + 172));
- found = 1;
- }
- }
-
- free_page((unsigned long) pg);
-
- return found ? 0 : -ENOENT;
-}
-
-/* struct to hold cached mkvp info for each card/domain */
-struct mkvp_info {
- struct list_head list;
- u16 cardnr;
- u16 domain;
- u64 mkvp[2];
-};
-
-/* a list with mkvp_info entries */
-static LIST_HEAD(mkvp_list);
-static DEFINE_SPINLOCK(mkvp_list_lock);
-
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int rc = -ENOENT;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
- rc = 0;
- break;
- }
- }
- spin_unlock_bh(&mkvp_list_lock);
-
- return rc;
-}
-
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int found = 0;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- found = 1;
- break;
- }
- }
- if (!found) {
- ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
- if (!ptr) {
- spin_unlock_bh(&mkvp_list_lock);
- return;
- }
- ptr->cardnr = cardnr;
- ptr->domain = domain;
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- list_add(&ptr->list, &mkvp_list);
- }
- spin_unlock_bh(&mkvp_list_lock);
-}
-
-static void mkvp_cache_scrub(u16 cardnr, u16 domain)
-{
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- list_del(&ptr->list);
- kfree(ptr);
- break;
- }
- }
- spin_unlock_bh(&mkvp_list_lock);
-}
-
-static void __exit mkvp_cache_free(void)
-{
- struct mkvp_info *ptr, *pnext;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
- list_del(&ptr->list);
- kfree(ptr);
- }
- spin_unlock_bh(&mkvp_list_lock);
-}
-
-/*
- * Search for a matching crypto card based on the Master Key
- * Verification Pattern provided inside a secure key.
- */
-int pkey_findcard(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain, int verify)
-{
- struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_status_ext *device_status;
- u16 card, dom;
- u64 mkvp[2];
- int i, rc, oi = -1;
-
- /* mkvp must not be zero */
- if (t->mkvp == 0)
- return -EINVAL;
-
- /* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
- return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
-
- /* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- if (device_status[i].online &&
- device_status[i].functions & 0x04) {
- /* an enabled CCA Coprocessor card */
- /* try cached mkvp */
- if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
- t->mkvp == mkvp[0]) {
- if (!verify)
- break;
- /* verify: fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- }
- }
- } else {
- /* Card is offline and/or not a CCA card. */
- /* del mkvp entry from cache if it exists */
- mkvp_cache_scrub(card, dom);
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT) {
- /* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- if (!(device_status[i].online &&
- device_status[i].functions & 0x04))
- continue;
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- /* fresh fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- if (t->mkvp == mkvp[1] && oi < 0)
- oi = i;
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
- /* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_status[oi].qid);
- dom = AP_QID_QUEUE(device_status[oi].qid);
- }
- }
- if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
- if (pcardnr)
- *pcardnr = card;
- if (pdomain)
- *pdomain = dom;
- rc = 0;
- } else
- rc = -ENODEV;
-
- kfree(device_status);
- return rc;
-}
-EXPORT_SYMBOL(pkey_findcard);
/*
* Find card and transform secure key into protected key.
*/
-int pkey_skey2pkey(const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
+static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
{
- u16 cardnr, domain;
int rc, verify;
+ u16 cardnr, domain;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
/*
- * The pkey_sec2protkey call may fail when a card has been
+ * The cca_xxx2protkey call may fail when a card has been
* addressed where the master key was changed after last fetch
- * of the mkvp into the cache. So first try without verify then
- * with verify enabled (thus refreshing the mkvp for each card).
+ * of the mkvp into the cache. Try 3 times: first without verify,
+ * then with verify, and in the last round also accept a match on
+ * the old master key verification pattern.
*/
- for (verify = 0; verify < 2; verify++) {
- rc = pkey_findcard(seckey, &cardnr, &domain, verify);
- if (rc)
+ for (verify = 0; verify < 3; verify++) {
+ rc = cca_findcard(key, &cardnr, &domain, verify);
+ if (rc < 0)
+ continue;
+ if (rc > 0 && verify < 2)
continue;
- rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ rc = cca_sec2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ break;
+ case TOKVER_CCA_VLSC:
+ rc = cca_cipher2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ break;
+ default:
+ return -EINVAL;
+ }
if (rc == 0)
break;
}
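With the CCA plumbing moved out to zcrypt_ccamisc, pkey_keyblob2pkey() (further down in this patch) remains the one exported entry point of this file; an in-kernel caller sketch (the wrapper name is invented) that hands it an arbitrary key blob:

/* sketch only: convert a key blob of any supported token type into a
 * CPACF protected key usable for the paes cipher setup */
static int example_blob_to_protkey(const u8 *blob, u32 bloblen,
                                   struct pkey_protkey *pkey)
{
        if (bloblen < sizeof(struct keytoken_header))
                return -EINVAL;
        return pkey_keyblob2pkey(blob, bloblen, pkey);
}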
@@ -1048,22 +171,20 @@ int pkey_skey2pkey(const struct pkey_seckey *seckey,
return rc;
}
-EXPORT_SYMBOL(pkey_skey2pkey);
/*
* Verify key and give back some info about the key.
*/
-int pkey_verifykey(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain,
- u16 *pkeysize, u32 *pattributes)
+static int pkey_verifykey(const struct pkey_seckey *seckey,
+ u16 *pcardnr, u16 *pdomain,
+ u16 *pkeysize, u32 *pattributes)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
u16 cardnr, domain;
- u64 mkvp[2];
int rc;
/* check the secure key for valid AES secure key */
- rc = check_secaeskeytoken((u8 *) seckey, 0);
+ rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -1072,18 +193,16 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
*pkeysize = t->bitsize;
/* try to find a card which can handle this key */
- rc = pkey_findcard(seckey, &cardnr, &domain, 1);
- if (rc)
+ rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
+ if (rc < 0)
goto out;
- /* check mkvp for old mkvp match */
- rc = mkvp_cache_fetch(cardnr, domain, mkvp);
- if (rc)
- goto out;
- if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) {
+ if (rc > 0) {
+ /* key mkvp matches the old master key mkvp */
DEBUG_DBG("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ rc = 0;
}
if (pcardnr)
@@ -1095,12 +214,11 @@ out:
DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_verifykey);
/*
* Generate a random protected key
*/
-int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
+static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey)
{
struct pkey_clrkey clrkey;
int keysize;
@@ -1135,12 +253,11 @@ int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
return 0;
}
-EXPORT_SYMBOL(pkey_genprotkey);
/*
* Verify if a protected key is still valid
*/
-int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
{
unsigned long fc;
struct {
@@ -1181,12 +298,11 @@ int pkey_verifyprotkey(const struct pkey_protkey *protkey)
return 0;
}
-EXPORT_SYMBOL(pkey_verifyprotkey);
/*
* Transform a non-CCA key token into a protected key
*/
-static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
+static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
@@ -1214,7 +330,7 @@ static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
/*
* Transform a CCA internal key token into a protected key
*/
-static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
+static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
@@ -1223,44 +339,414 @@ static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
case TOKVER_CCA_AES:
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
-
- return pkey_skey2pkey((struct pkey_seckey *)key,
- protkey);
+ break;
+ case TOKVER_CCA_VLSC:
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ break;
default:
DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
+
+ return pkey_skey2pkey(key, protkey);
}
/*
* Transform a key blob (of any type) into a protected key
*/
-int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+int pkey_keyblob2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
+ int rc;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header))
+ if (keylen < sizeof(struct keytoken_header)) {
+ DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
+ }
switch (hdr->type) {
case TOKTYPE_NON_CCA:
- return pkey_nonccatok2pkey(key, keylen, protkey);
+ rc = pkey_nonccatok2pkey(key, keylen, protkey);
+ break;
case TOKTYPE_CCA_INTERNAL:
- return pkey_ccainttok2pkey(key, keylen, protkey);
+ rc = pkey_ccainttok2pkey(key, keylen, protkey);
+ break;
default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
- hdr->type);
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
+
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ return rc;
+
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
+static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_genseckey(card, dom, ksize, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_gencipherkey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, const u8 *clrkey,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_clr2seckey(card, dom, ksize,
+ clrkey, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_clr2cipherkey(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_verifykey2(const u8 *key, size_t keylen,
+ u16 *cardnr, u16 *domain,
+ enum pkey_key_type *ktype,
+ enum pkey_key_size *ksize, u32 *flags)
+{
+ int rc;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header) ||
+ hdr->type != TOKTYPE_CCA_INTERNAL)
+ return -EINVAL;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_DATA;
+ if (ksize)
+ *ksize = (enum pkey_key_size) t->bitsize;
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX3C, t->mkvp, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX3C, 0, t->mkvp, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_CIPHER;
+ if (ksize) {
+ *ksize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *ksize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *ksize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *ksize = PKEY_SIZE_AES_256;
+ }
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX6, t->mkvp0, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX6, 0, t->mkvp0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else
+ rc = -EINVAL;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ struct pkey_protkey *pkey)
+{
+ int i, card, dom, rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ return pkey_nonccatok2pkey(key, keylen, pkey);
+ case TOKTYPE_CCA_INTERNAL:
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ return -EINVAL;
+ break;
+ case TOKVER_CCA_VLSC:
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ break;
+ default:
+ DEBUG_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (hdr->version == TOKVER_CCA_AES)
+ rc = cca_sec2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ else /* TOKVER_CCA_VLSC */
+ rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc = -EINVAL;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header) ||
+ hdr->type != TOKTYPE_CCA_INTERNAL ||
+ flags == 0)
+ return -EINVAL;
+
+ if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+ int minhwtype = ZCRYPT_CEX3C;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+ }
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_apqns4keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc = -EINVAL;
+ u32 _nr_apqns, *_apqns = NULL;
+
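+ /* apqns == NULL and -ENOSPC handling as in pkey_apqns4key() */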
+ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+ u64 _cur_mkvp = 0, _old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ _cur_mkvp = *((u64 *) cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ _old_mkvp = *((u64 *) alt_mkvp);
+ if (ktype == PKEY_TYPE_CCA_CIPHER)
+ minhwtype = ZCRYPT_CEX6;
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, _cur_mkvp, _old_mkvp, 1);
+ if (rc)
+ goto out;
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+ }
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
/*
* File io functions
*/
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
+{
+ void *kkey;
+
+ if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
+ kkey = kmalloc(keylen, GFP_KERNEL);
+ if (!kkey)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(kkey, ukey, keylen)) {
+ kfree(kkey);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return kkey;
+}
+
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
+{
+ void *kapqns = NULL;
+ size_t nbytes;
+
+ if (uapqns && nr_apqns > 0) {
+ nbytes = nr_apqns * sizeof(struct pkey_apqn);
+ kapqns = kmalloc(nbytes, GFP_KERNEL);
+ if (!kapqns)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(kapqns, uapqns, nbytes)) {
+ kfree(kapqns);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return kapqns;
+}
+
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -1273,9 +759,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kgs, ugs, sizeof(kgs)))
return -EFAULT;
- rc = pkey_genseckey(kgs.cardnr, kgs.domain,
- kgs.keytype, &kgs.seckey);
- DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc);
+ rc = cca_genseckey(kgs.cardnr, kgs.domain,
+ kgs.keytype, kgs.seckey.seckey);
+ DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1288,9 +774,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
- rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
- &kcs.clrkey, &kcs.seckey);
- DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc);
+ rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ kcs.clrkey.clrkey, kcs.seckey.seckey);
+ DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1304,9 +790,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
- rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
- &ksp.seckey, &ksp.protkey);
- DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc);
+ rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
+ ksp.seckey.seckey, ksp.protkey.protkey,
+ NULL, &ksp.protkey.type);
+ DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1335,10 +822,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&kfc, ufc, sizeof(kfc)))
return -EFAULT;
- rc = pkey_findcard(&kfc.seckey,
- &kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc);
- if (rc)
+ rc = cca_findcard(kfc.seckey.seckey,
+ &kfc.cardnr, &kfc.domain, 1);
+ DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
return -EFAULT;
@@ -1350,7 +837,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&ksp, usp, sizeof(ksp)))
return -EFAULT;
- rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
+ rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey);
DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
@@ -1400,24 +887,148 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
case PKEY_KBLOB2PROTK: {
struct pkey_kblob2pkey __user *utp = (void __user *) arg;
struct pkey_kblob2pkey ktp;
- __u8 __user *ukey;
- __u8 *kkey;
+ u8 *kkey;
if (copy_from_user(&ktp, utp, sizeof(ktp)))
return -EFAULT;
- if (ktp.keylen < MINKEYBLOBSIZE ||
- ktp.keylen > MAXKEYBLOBSIZE)
- return -EINVAL;
- ukey = ktp.key;
- kkey = kmalloc(ktp.keylen, GFP_KERNEL);
- if (kkey == NULL)
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_GENSECK2: {
+ struct pkey_genseck2 __user *ugs = (void __user *) arg;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
return -ENOMEM;
- if (copy_from_user(kkey, ukey, ktp.keylen)) {
+ }
+ rc = pkey_genseckey2(apqns, kgs.apqn_entries,
+ kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen);
+ DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
kfree(kkey);
+ break;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree(kkey);
+ break;
+ }
+ case PKEY_CLR2SECK2: {
+ struct pkey_clr2seck2 __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
return -EFAULT;
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
}
- rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
+ kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kkey, &klen);
+ DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
+ kfree(kkey);
+ break;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree(kkey);
+ break;
+ }
+ case PKEY_VERIFYKEY2: {
+ struct pkey_verifykey2 __user *uvk = (void __user *) arg;
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
+
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_verifykey2(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags);
+ DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_KBLOB2PROTK2: {
+ struct pkey_kblob2pkey2 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey2 ktp;
+ struct pkey_apqn *apqns = NULL;
+ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
+ kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ kfree(apqns);
kfree(kkey);
if (rc)
break;
@@ -1425,6 +1036,97 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
break;
}
+ case PKEY_APQNS4K: {
+ struct pkey_apqns4key __user *uak = (void __user *) arg;
+ struct pkey_apqns4key kak;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+ u8 *kkey;
+
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
+ case PKEY_APQNS4KT: {
+ struct pkey_apqns4keytype __user *uat = (void __user *) arg;
+ struct pkey_apqns4keytype kat;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1567,6 +1269,7 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
loff_t off, size_t count)
{
int rc;
+ struct pkey_seckey *seckey = (struct pkey_seckey *) buf;
if (off != 0 || count < sizeof(struct secaeskeytoken))
return -EINVAL;
@@ -1574,13 +1277,13 @@ static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
if (count < 2 * sizeof(struct secaeskeytoken))
return -EINVAL;
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
if (is_xts) {
- buf += sizeof(struct pkey_seckey);
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ seckey++;
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
if (rc)
return rc;
@@ -1716,7 +1419,6 @@ static int __init pkey_init(void)
static void __exit pkey_exit(void)
{
misc_deregister(&pkey_dev);
- mkvp_cache_free();
pkey_debug_exit();
}
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 0604b49a4d32..5c0f53c6dde7 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1143,7 +1143,7 @@ int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
msleep(20);
status = ap_tapq(apqn, NULL);
}
- WARN_ON_ONCE(retry <= 0);
+ WARN_ON_ONCE(retry2 <= 0);
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1058b4b5cc1e..45bdb47f84c1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -35,6 +35,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
+#include "zcrypt_ccamisc.h"
/*
* Module description.
@@ -133,18 +134,6 @@ struct zcdn_device {
static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
-/* helper function, matches the name for find_zcdndev_by_name() */
-static int __match_zcdn_name(struct device *dev, const void *data)
-{
- return strcmp(dev_name(dev), (const char *)data) == 0;
-}
-
-/* helper function, matches the devt value for find_zcdndev_by_devt() */
-static int __match_zcdn_devt(struct device *dev, const void *data)
-{
- return dev->devt == *((dev_t *) data);
-}
-
/*
* Find zcdn device by name.
* Returns reference to the zcdn device which needs to be released
@@ -152,10 +141,7 @@ static int __match_zcdn_devt(struct device *dev, const void *data)
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) name,
- __match_zcdn_name);
+ struct device *dev = class_find_device_by_name(zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -167,10 +153,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) &devt,
- __match_zcdn_devt);
+ struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -1160,6 +1143,34 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstat)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+
+ memset(devstat, 0, sizeof(*devstat));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (card == AP_QID_CARD(zq->queue->qid) &&
+ queue == AP_QID_QUEUE(zq->queue->qid)) {
+ devstat->hwtype = zc->card->ap_dev.device_type;
+ devstat->functions = zc->card->functions >> 26;
+ devstat->qid = zq->queue->qid;
+ devstat->online = zq->online ? 0x01 : 0x00;
+ spin_unlock(&zcrypt_list_lock);
+ return 0;
+ }
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(zcrypt_device_status_ext);
+
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
struct zcrypt_card *zc;
@@ -1874,6 +1885,7 @@ void __exit zcrypt_api_exit(void)
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
+ zcrypt_ccamisc_exit();
zcrypt_debug_exit();
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index af67a768a3fc..2d3f2732344f 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -121,9 +121,6 @@ void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);
-struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
- unsigned int, unsigned int);
-void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
@@ -132,8 +129,6 @@ int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
void zcrypt_queue_force_online(struct zcrypt_queue *, int);
-struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
-void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
@@ -145,5 +140,7 @@ int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstatus);
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
new file mode 100644
index 000000000000..c1db64a2db21
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -0,0 +1,1765 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+struct cca_info_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ u16 domain;
+ struct cca_info info;
+};
+
+/* a list with cca_info_list_entry entries */
+static LIST_HEAD(cca_info_list);
+static DEFINE_SPINLOCK(cca_info_list_lock);
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_AES) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_AES);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && t->bitsize != keybitsize) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d != %d\n",
+ __func__, (int) t->bitsize, keybitsize);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaeskeytoken);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport)
+{
+ struct cipherkeytoken *t = (struct cipherkeytoken *) token;
+ bool keybitsizeok = true;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_VLSC) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_VLSC);
+ return -EINVAL;
+ }
+ if (t->algtype != 0x02) {
+ if (dbg)
+ DBF("%s token check failed, algtype 0x%02x != 0x02\n",
+ __func__, (int) t->algtype);
+ return -EINVAL;
+ }
+ if (t->keytype != 0x0001) {
+ if (dbg)
+ DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
+ __func__, (int) t->keytype);
+ return -EINVAL;
+ }
+ if (t->plfver != 0x00 && t->plfver != 0x01) {
+ if (dbg)
+ DBF("%s token check failed, unknown plfver 0x%02x\n",
+ __func__, (int) t->plfver);
+ return -EINVAL;
+ }
+ if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
+ if (dbg)
+ DBF("%s token check failed, unknown wpllen %d\n",
+ __func__, (int) t->wpllen);
+ return -EINVAL;
+ }
+ if (keybitsize > 0) {
+ switch (keybitsize) {
+ case 128:
+ if (t->wpllen != (t->plfver ? 640 : 512))
+ keybitsizeok = false;
+ break;
+ case 192:
+ if (t->wpllen != (t->plfver ? 640 : 576))
+ keybitsizeok = false;
+ break;
+ case 256:
+ if (t->wpllen != 640)
+ keybitsizeok = false;
+ break;
+ default:
+ keybitsizeok = false;
+ break;
+ }
+ if (!keybitsizeok) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ }
+ if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
+ if (dbg)
+ DBF("%s token check failed, XPRT_CPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaescipherkey);
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+ u8 **pcprbmem,
+ struct CPRBX **preqCPRB,
+ struct CPRBX **prepCPRB)
+{
+ u8 *cprbmem;
+ size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ struct CPRBX *preqcblk, *prepcblk;
+
+ /*
+ * allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block
+ */
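+ /*
+ * Layout: request CPRB directly followed by its parameter block
+ * of paramblen bytes, then the reply CPRB, again followed by its
+ * parameter block.
+ */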
+ cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
+ if (!cprbmem)
+ return -ENOMEM;
+
+ preqcblk = (struct CPRBX *) cprbmem;
+ prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+
+ /* fill request cprb struct */
+ preqcblk->cprb_len = sizeof(struct CPRBX);
+ preqcblk->cprb_ver_id = 0x02;
+ memcpy(preqcblk->func_id, "T2", 2);
+ preqcblk->rpl_msgbl = cprbplusparamblen;
+ if (paramblen) {
+ preqcblk->req_parmb =
+ ((u8 *) preqcblk) + sizeof(struct CPRBX);
+ preqcblk->rpl_parmb =
+ ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ }
+
+ *pcprbmem = cprbmem;
+ *preqCPRB = preqcblk;
+ *prepCPRB = prepcblk;
+
+ return 0;
+}
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If the scrub value is not zero, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+{
+ if (scrub)
+ memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+ kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+ u16 cardnr,
+ struct CPRBX *preqcblk,
+ struct CPRBX *prepcblk)
+{
+ memset(pxcrb, 0, sizeof(*pxcrb));
+ pxcrb->agent_ID = 0x4341; /* 'CA' */
+ pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+ pxcrb->request_control_blk_length =
+ preqcblk->cprb_len + preqcblk->req_parml;
+ pxcrb->request_control_blk_addr = (void __user *) preqcblk;
+ pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+ pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+}
+
+/*
+ * Helper function which calls zcrypt_send_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user called within this
+ * function does in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_cprb(xcrb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain,
+ u32 keybitsize, u8 seckey[SECKEYBLOBSIZE])
+{
+ int i, rc, keysize;
+ int seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct kgreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ char key_form[8];
+ char key_length[8];
+ char key_type1[8];
+ char key_type2[8];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid[6];
+ } lv2;
+ } __packed * preqparm;
+ struct kgrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with KG request */
+ preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "KG", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
+ preqparm->lv2.len = sizeof(struct lv2);
+ for (i = 0; i < 6; i++) {
+ preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+ preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+ }
+ preqcblk->req_parml = sizeof(struct kgreqparm);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_genseckey);
+
+/*
+ * Generate a CCA AES DATA secure key from a given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
+{
+ int rc, keysize, seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct cmreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 clrkey[0];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid;
+ } lv2;
+ } __packed * preqparm;
+ struct lv2 *plv2;
+ struct cmrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with CM request */
+ preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "CM", 2);
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->lv1.len = sizeof(struct lv1) + keysize;
+ memcpy(preqparm->lv1.clrkey, clrkey, keysize);
+ plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+ plv2->len = sizeof(struct lv2);
+ plv2->keyid.len = sizeof(struct keyid);
+ plv2->keyid.attr = 0x30;
+ preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ if (seckey)
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 1);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2seckey);
+
+/*
+ * Derive a protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct uskreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ } lv1;
+ struct lv2 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ u8 token[0]; /* cca secure key token */
+ } lv2;
+ } __packed * preqparm;
+ struct uskrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 len;
+ u8 key[64]; /* the key (len bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with USK request */
+ preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "US", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+ preqparm->lv1.attr_flags = 0x0001;
+ preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_len = sizeof(struct lv2)
+ - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_flags = 0x0000;
+ memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE);
+ preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+
+ /* check the returned keyblock */
+ if (prepparm->lv3.keyblock.version != 0x01) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+ __func__, (int) prepparm->lv3.keyblock.version);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
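+ /*
+ * The returned protected key is apparently the effective AES key
+ * followed by a 32 byte verification pattern, hence the 16+32,
+ * 24+32 and 32+32 len values below.
+ */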
+ switch (prepparm->lv3.keyblock.len) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.keyblock.len);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->lv3.keyblock.key, prepparm->lv3.keyblock.len);
+ if (protkeylen)
+ *protkeylen = prepparm->lv3.keyblock.len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_sec2protkey);
+
+/*
+ * AES cipher key skeleton created with CSNBKTB2 with these flags:
+ * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
+ * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
+ * used by cca_gencipherkey() and cca_clr2cipherkey().
+ */
+static const u8 aes_cipher_key_skeleton[] = {
+ 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
+ 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
+#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct gkreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[2*8];
+ struct {
+ u16 len;
+ u8 key_type_1[8];
+ u8 key_type_2[8];
+ u16 clear_key_bit_len;
+ u16 key_name_1_len;
+ u16 key_name_2_len;
+ u16 user_data_1_len;
+ u16 user_data_2_len;
+ u8 key_name_1[0];
+ u8 key_name_2[0];
+ u8 user_data_1[0];
+ u8 user_data_2[0];
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_1[0];
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_2[0];
+ } tlv2;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1[SIZEOF_SKELETON];
+ } tlv3;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1_label[0];
+ } tlv4;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2[0];
+ } tlv5;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2_label[0];
+ } tlv6;
+ } kb;
+ } __packed * preqparm;
+ struct gkrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key[0]; /* 120-136 bytes */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = sizeof(struct gkreqparm);
+
+ /* prepare request param block with GK request */
+ preqparm = (struct gkreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "GK", 2);
+ preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preqparm->rule_array, "AES OP ", 2*8);
+
+ /* prepare vud block */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->vud.clear_key_bit_len = keybitsize;
+ memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
+ memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
+
+ /* prepare kb block */
+ preqparm->kb.len = sizeof(preqparm->kb);
+ preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
+ preqparm->kb.tlv1.flag = 0x0030;
+ preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
+ preqparm->kb.tlv2.flag = 0x0030;
+ preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
+ preqparm->kb.tlv3.flag = 0x0030;
+ memcpy(preqparm->kb.tlv3.gen_key_id_1,
+ aes_cipher_key_skeleton, SIZEOF_SKELETON);
+ preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
+ preqparm->kb.tlv4.flag = 0x0030;
+ preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
+ preqparm->kb.tlv5.flag = 0x0030;
+ preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
+ preqparm->kb.tlv6.flag = 0x0030;
+
+ /* patch the skeleton key token export flags inside the kb block */
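+ /*
+ * Convention: keygenflags bits 8-15 are OR-ed into the kmf1
+ * field of the skeleton token, bits 0-7 select kmf1 bits which
+ * get cleared.
+ */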
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* and some checks on the generated key */
+ rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
+ prepparm->kb.tlv1.gen_key,
+ keybitsize, 1);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated vlsc key token */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
+ if (keybuf) {
+ if (*keybufsize >= t->len)
+ memcpy(keybuf, t, t->len);
+ else
+ rc = -EINVAL;
+ }
+ *keybufsize = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_gencipherkey);
+
+/*
+ * Helper function which does the CSNBKPI2 CPRB.
+ */
+static int _ip_cprb_helper(u16 cardnr, u16 domain,
+ const char *rule_array_1,
+ const char *rule_array_2,
+ const char *rule_array_3,
+ const u8 *clr_key_value,
+ int clr_key_bit_size,
+ u8 *key_token,
+ int *key_token_size)
+{
+ int rc, n;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct rule_array_block {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[0];
+ } __packed * preq_ra_block;
+ struct vud_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0064 */
+ u16 clr_key_bit_len;
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0063 */
+ u8 clr_key[0]; /* clear key value bytes */
+ } tlv2;
+ } __packed * preq_vud_block;
+ struct key_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key skeleton */
+ } tlv1;
+ } __packed * preq_key_block;
+ struct iprepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key token */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+ int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = 0;
+
+ /* prepare request param block with IP request */
+ preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
+ memcpy(preq_ra_block->subfunc_code, "IP", 2);
+ preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preq_ra_block->rule_array, rule_array_1, 8);
+ memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
+ preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
+ if (rule_array_3) {
+ preq_ra_block->rule_array_len += 8;
+ memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
+ preqcblk->req_parml += 8;
+ }
+
+ /* prepare vud block */
+ preq_vud_block = (struct vud_block *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = complete ? 0 : (clr_key_bit_size + 7) / 8;
+ preq_vud_block->len = sizeof(struct vud_block) + n;
+ preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
+ preq_vud_block->tlv1.flag = 0x0064;
+ preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
+ preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
+ preq_vud_block->tlv2.flag = 0x0063;
+ if (!complete)
+ memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
+ preqcblk->req_parml += preq_vud_block->len;
+
+ /* prepare key block */
+ preq_key_block = (struct key_block *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = *key_token_size;
+ preq_key_block->len = sizeof(struct key_block) + n;
+ preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
+ preq_key_block->tlv1.flag = 0x0030;
+ memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
+ preqcblk->req_parml += preq_key_block->len;
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* do not check the key here, it may be incomplete */
+
+ /* copy the vlsc key token back */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
+ memcpy(key_token, t, t->len);
+ *key_token_size = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *token;
+ int tokensize;
+ u8 exorbuf[32];
+ struct cipherkeytoken *t;
+
+ /* fill exorbuf with random data */
+ get_random_bytes(exorbuf, sizeof(exorbuf));
+
+ /* allocate space for the key token to build */
+ token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ /* prepare the token with the key skeleton */
+ tokensize = SIZEOF_SKELETON;
+ memcpy(token, aes_cipher_key_skeleton, tokensize);
+
+ /* patch the skeleton key token export flags */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) token;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /*
+ * Do the key import with the clear key value in 4 steps:
+ * 1/4 FIRST import with only random data
+ * 2/4 EXOR the clear key
+ * 3/4 EXOR the very same random data again
+ * 4/4 COMPLETE the secure cipher key import
+ */
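+ /*
+ * Note: the key parts are combined by XOR inside the card, so the
+ * resulting key value is exorbuf ^ clrkey ^ exorbuf = clrkey.
+ */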
+ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ clrkey, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
+ NULL, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* copy the generated key token */
+ if (keybuf) {
+ if (tokensize > *keybufsize)
+ rc = -EINVAL;
+ else
+ memcpy(keybuf, token, tokensize);
+ }
+ *keybufsize = tokensize;
+
+out:
+ kfree(token);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2cipherkey);
+
+/*
+ * Derive a protected key from a CCA AES CIPHER secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[0]; // 64 or more
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[64]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keytoklen = ((struct cipherkeytoken *)ckey)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x01) {
+ DEBUG_ERR(
+ "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
+ __func__, (int) prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x02) {
+ DEBUG_ERR(
+ "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int) prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->vud.ckb.keylen) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ if (protkeylen)
+ *protkeylen = prepparm->vud.ckb.keylen;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_cipher2protkey);
+
+/*
+ * Query cryptographic facility from a CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen)
+{
+ int rc;
+ u16 len;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct fqreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 data[VARDATASIZE];
+ } lv1;
+ u16 dummylen;
+ } __packed * preqparm;
+ size_t parmbsize = sizeof(struct fqreqparm);
+ struct fqrepparm {
+ u8 subfunc_code[2];
+ u8 lvdata[0];
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with FQ request */
+ preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "FQ", 2);
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ preqparm->lv1.len = sizeof(preqparm->lv1);
+ preqparm->dummylen = sizeof(preqparm->dummylen);
+ preqcblk->req_parml = parmbsize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+ ptr = prepparm->lvdata;
+
+ /* check and possibly copy reply rule array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (rarray && rarraylen && *rarraylen > 0) {
+ *rarraylen = (len > *rarraylen ? *rarraylen : len);
+ memcpy(rarray, ptr, *rarraylen);
+ }
+ ptr += len;
+ }
+ /* check and possibly copy reply var array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (varray && varraylen && *varraylen > 0) {
+ *varraylen = (len > *varraylen ? *varraylen : len);
+ memcpy(varray, ptr, *varraylen);
+ }
+ ptr += len;
+ }
+
+out:
+ free_cprbmem(mem, parmbsize, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_query_crypto_facility);
+
+static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc = -ENOENT;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr && ptr->domain == domain) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+
+ return rc;
+}
+
+static void cca_info_cache_update(u16 cardnr, u16 domain,
+ const struct cca_info *ci)
+{
+ int found = 0;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&cca_info_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ ptr->domain = domain;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &cca_info_list);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void cca_info_cache_scrub(u16 cardnr, u16 domain)
+{
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void __exit mkvp_cache_free(void)
+{
+ struct cca_info_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+/*
+ * Fetch cca_info values via query_crypto_facility from adapter.
+ */
+static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray, *pg;
+ struct zcrypt_device_status_ext devstat;
+
+ memset(ci, 0, sizeof(*ci));
+
+ /* get first info from zcrypt device driver about this apqn */
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+
+ /* prep page for rule array and var array use */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+ rarray = pg;
+ varray = pg + PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE/2;
+
+ /* QF for this card/domain */
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
+ memcpy(ci->serial, rarray, 8);
+ ci->new_mk_state = (char) rarray[7*8];
+ ci->cur_mk_state = (char) rarray[8*8];
+ ci->old_mk_state = (char) rarray[9*8];
+ if (ci->old_mk_state == '2')
+ memcpy(&ci->old_mkvp, varray + 172, 8);
+ if (ci->cur_mk_state == '2')
+ memcpy(&ci->cur_mkvp, varray + 184, 8);
+ if (ci->new_mk_state == '3')
+ memcpy(&ci->new_mkvp, varray + 196, 8);
+ found = 1;
+ }
+
+ free_page((unsigned long) pg);
+
+ return found ? 0 : -ENOENT;
+}
+
+/*
+ * Fetch cca information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify)
+{
+ int rc;
+
+ rc = cca_info_cache_fetch(card, dom, ci);
+ if (rc || verify) {
+ rc = fetch_cca_info(card, dom, ci);
+ if (rc == 0)
+ cca_info_cache_update(card, dom, ci);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(cca_get_info);
+
+/*
+ * Search for a matching crypto card based on the
+ * Master Key Verification Pattern given.
+ */
+static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
+ int verify, int minhwtype)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u16 card, dom;
+ struct cca_info ci;
+ int i, rc, oi = -1;
+
+ /* mkvp must not be zero, minhwtype needs to be >= 0 */
+ if (mkvp == 0 || minhwtype < 0)
+ return -EINVAL;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* walk through all crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
+ /* enabled CCA card, check current mkvp from cache */
+ if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
+ ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp) {
+ if (!verify)
+ break;
+ /* verify: refresh card info */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp)
+ break;
+ }
+ }
+ } else {
+ /* Card is offline and/or not a CCA card. */
+ /* del mkvp entry from cache if it exists */
+ cca_info_cache_scrub(card, dom);
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
+ /* nothing found, so this time without cache */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
+ continue;
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* fresh fetch mkvp from adapter */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+ ci.cur_mkvp == mkvp)
+ break;
+ if (ci.hwtype >= minhwtype &&
+ ci.old_mk_state == '2' &&
+ ci.old_mkvp == mkvp &&
+ oi < 0)
+ oi = i;
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
+ /* old mkvp matched, use this card then */
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
+ }
+ }
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
+ if (pcardnr)
+ *pcardnr = card;
+ if (pdomain)
+ *pdomain = dom;
+ rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
+ } else
+ rc = -ENODEV;
+
+ kfree(device_status);
+ return rc;
+}
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key token.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
+{
+ u64 mkvp;
+ int minhwtype = 0;
+ const struct keytoken_header *hdr = (struct keytoken_header *) key;
+
+ if (hdr->type != TOKTYPE_CCA_INTERNAL)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ mkvp = ((struct secaeskeytoken *)key)->mkvp;
+ break;
+ case TOKVER_CCA_VLSC:
+ mkvp = ((struct cipherkeytoken *)key)->mkvp0;
+ minhwtype = AP_DEVICE_TYPE_CEX6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
+}
+EXPORT_SYMBOL(cca_findcard);
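A hedged usage sketch (illustration only, not part of the patch): how a caller such as the pkey layer might use cca_findcard() to locate an APQN for a CCA AES DATA secure key. The example function name and the pr_info() reporting are assumptions for illustration; the return-value convention (< 0 on failure, 0 for a CURRENT MKVP match, 1 for an OLD MKVP match) follows the zcrypt_ccamisc.h comment further below.

/* Hedged usage sketch only; assumes <linux/printk.h> and zcrypt_ccamisc.h. */
static int example_locate_apqn(const u8 *seckey)
{
	u16 cardnr, domain;
	int rc;

	/* verify=0: a cached cca_info entry for the card is good enough */
	rc = cca_findcard(seckey, &cardnr, &domain, 0);
	if (rc < 0)
		return rc;	/* e.g. -ENODEV: no card with a matching MKVP */

	pr_info("key usable on APQN %02x.%04x (%s master key)\n",
		cardnr, domain, rc == 1 ? "OLD" : "CURRENT");
	return 0;
}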
+
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
+{
+ struct zcrypt_device_status_ext *device_status;
+ int i, n, card, dom, curmatch, oldmatch, rc = 0;
+ struct cca_info ci;
+
+ *apqns = NULL;
+ *nr_apqns = 0;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* loop two times: first gather eligible apqns, then store them */
+ while (1) {
+ n = 0;
+ /* walk through all the crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for cca functions */
+ if (!(device_status[i].functions & 0x04))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* get cca info on this apqn */
+ if (cca_get_info(card, dom, &ci, verify))
+ continue;
+ /* current master key needs to be valid */
+ if (ci.cur_mk_state != '2')
+ continue;
+ /* check min hardware type */
+ if (minhwtype > 0 && minhwtype > ci.hwtype)
+ continue;
+ if (cur_mkvp || old_mkvp) {
+ /* check mkvps */
+ curmatch = oldmatch = 0;
+ if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_mk_state == '2' &&
+ old_mkvp == ci.old_mkvp)
+ oldmatch = 1;
+ if ((cur_mkvp || old_mkvp) &&
+ (curmatch + oldmatch < 1))
+ continue;
+ }
+			/* apqn passed all filtering criteria */
+ if (*apqns && n < *nr_apqns)
+ (*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
+ n++;
+ }
+ /* loop 2nd time: array has been filled */
+ if (*apqns)
+ break;
+ /* loop 1st time: have # of eligible apqns in n */
+ if (!n) {
+ rc = -ENODEV; /* no eligible apqns found */
+ break;
+ }
+ *nr_apqns = n;
+ /* allocate array to store n apqns into */
+ *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
+ if (!*apqns) {
+ rc = -ENOMEM;
+ break;
+ }
+ verify = 0;
+ }
+
+ kfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(cca_findcard2);
+
+void __exit zcrypt_ccamisc_exit(void)
+{
+ mkvp_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
new file mode 100644
index 000000000000..77b6cc7b8f82
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_CCAMISC_H_
+#define _ZCRYPT_CCAMISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+/* Key token types */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
+#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */
+
+/* Max size of a cca variable length cipher key token */
+#define MAXCCAVLSCTOKENSIZE 725
+
+/* header part of a CCA key token */
+struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+ u8 res0[1];
+ u16 len; /* vlsc token: total length in bytes */
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+} __packed;
+
+/* inside view of a CCA secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[3];
+ u8 version; /* should be 0x04 */
+ u8 res1[1];
+ u8 flag; /* key flags */
+ u8 res2[1];
+ u64 mkvp; /* master key verification pattern */
+ u8 key[32]; /* key value (encrypted) */
+ u8 cv[8]; /* control vector */
+ u16 bitsize; /* key bit size */
+ u16 keysize; /* key byte size */
+ u8 tvv[4]; /* token validation value */
+} __packed;
+
+/* inside view of a variable length symmetric cipher AES key token */
+struct cipherkeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[1];
+ u16 len; /* total key token length in bytes */
+ u8 version; /* should be 0x05 */
+ u8 res1[3];
+ u8 kms; /* key material state, 0x03 means wrapped with MK */
+ u8 kvpt; /* key verification pattern type, should be 0x01 */
+ u64 mkvp0; /* master key verification pattern, lo part */
+ u64 mkvp1; /* master key verification pattern, hi part (unused) */
+ u8 eskwm; /* encrypted section key wrapping method */
+	u8  hashalg;	/* hash algorithm used for wrapping key */
+	u8  plfver;	/* payload format version */
+ u8 res2[1];
+ u8 adsver; /* associated data section version */
+ u8 res3[1];
+ u16 adslen; /* associated data section length */
+ u8 kllen; /* optional key label length */
+ u8 ieaslen; /* optional extended associated data length */
+ u8 uadlen; /* optional user definable associated data length */
+ u8 res4[1];
+ u16 wpllen; /* wrapped payload length in bits: */
+ /* plfver 0x00 0x01 */
+ /* AES-128 512 640 */
+ /* AES-192 576 640 */
+ /* AES-256 640 640 */
+ u8 res5[1];
+ u8 algtype; /* 0x02 for AES cipher */
+ u16 keytype; /* 0x0001 for 'cipher' */
+ u8 kufc; /* key usage field count */
+ u16 kuf1; /* key usage field 1 */
+ u16 kuf2; /* key usage field 2 */
+ u8 kmfc; /* key management field count */
+ u16 kmf1; /* key management field 1 */
+ u16 kmf2; /* key management field 2 */
+ u16 kmf3; /* key management field 3 */
+ u8 vdata[0]; /* variable part data follows */
+} __packed;
+
+/* Some defines for the CCA AES cipherkeytoken kmf1 field */
+#define KMF1_XPRT_SYM 0x8000
+#define KMF1_XPRT_UASY 0x4000
+#define KMF1_XPRT_AASY 0x2000
+#define KMF1_XPRT_RAW 0x1000
+#define KMF1_XPRT_CPAC 0x0800
+#define KMF1_XPRT_DES 0x0080
+#define KMF1_XPRT_AES 0x0040
+#define KMF1_XPRT_RSA 0x0008
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport);
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
+
+/*
+ * Generate CCA AES DATA secure key with a given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey);
+
+/*
+ * Derive protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen);
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ * Works with CCA AES data and cipher keys.
+ * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+ * 1 if OLD MKVP matches.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+
+/*
+ * Build a list of cca apqns meeting the following constraints:
+ * - apqn is online and is in fact a CCA apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
+ * - if old_mkvp != 0 only apqns where old_mkvp == mkvp
+ * - if verify is enabled and a cur_mkvp and/or old_mkvp
+ * value is given, then refetch the cca_info and make sure the current
+ * cur_mkvp or old_mkvp values of the apqn are used.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
+
+/* struct to hold info for each CCA queue */
+struct cca_info {
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_mk_state; /* '1' invalid, '2' valid */
+ char old_mk_state; /* '1' invalid, '2' valid */
+ u64 new_mkvp; /* truncated sha256 hash of new master key */
+ u64 cur_mkvp; /* truncated sha256 hash of current master key */
+ u64 old_mkvp; /* truncated sha256 hash of old master key */
+ char serial[9]; /* serial number string (8 ascii numbers + 0x00) */
+};
+
+/*
+ * Fetch cca information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify);
+
+void zcrypt_ccamisc_exit(void);
+
+#endif /* _ZCRYPT_CCAMISC_H_ */
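A hedged usage sketch (illustration only, not part of the patch) of the cca_findcard2() contract documented above: the callee kmallocs the apqn array, each 32-bit entry carries the card number in the upper 16 bits and the domain in the lower 16 bits, and the caller frees the array. The 0xFFFF wildcards and AP_DEVICE_TYPE_CEX6 follow the surrounding code; the example function name is an assumption.

/* Hedged usage sketch only; assumes <linux/slab.h> and <linux/printk.h>. */
static int example_list_cca_apqns(u64 cur_mkvp)
{
	u32 *apqns = NULL, nr_apqns = 0, i;
	int rc;

	/* any card, any domain, at least a CEX6, CURRENT master key match */
	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   AP_DEVICE_TYPE_CEX6, cur_mkvp, 0, 0);
	if (rc)
		return rc;	/* -ENODEV if nothing matched, -ENOMEM, ... */

	for (i = 0; i < nr_apqns; i++) {
		u16 card = apqns[i] >> 16;
		u16 dom = apqns[i] & 0xFFFF;

		pr_info("eligible CCA APQN %02x.%04x\n", card, dom);
	}

	kfree(apqns);
	return 0;
}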
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 582ffa7e0f18..f58d8dec19dc 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -18,6 +18,7 @@
#include "zcrypt_msgtype50.h"
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
+#include "zcrypt_ccamisc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
@@ -65,6 +66,85 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cca_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+static DEVICE_ATTR_RO(serialnr);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_group = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct cca_info ci;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cao_state[] = { "invalid", "valid" };
+ static const char * const new_state[] = { "empty", "partial", "full" };
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, zq->online);
+
+ if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
+ n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ else
+ n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+ if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ else
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+ if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ else
+ n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+ return n;
+}
+static DEVICE_ATTR_RO(mkvps);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_group = {
+ .attrs = cca_queue_attrs,
+};
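For orientation only (not part of the patch): given the format strings in mkvps_show() above, reading the new per-queue mkvps sysfs attribute might return output along the following lines; the verification-pattern values are hypothetical.

AES NEW: empty 0x0000000000000000
AES CUR: valid 0x4a27bb66ddf0e275
AES OLD: invalid 0x0000000000000000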
+
/**
* Probe function for CEX4/CEX5/CEX6 card device. It always
* accepts the AP device since the bus_match already checked
@@ -194,8 +274,17 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (rc) {
ac->private = NULL;
zcrypt_card_free(zc);
+ goto out;
}
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_group);
+ if (rc)
+ zcrypt_card_unregister(zc);
+ }
+
+out:
return rc;
}
@@ -205,8 +294,11 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc = ac->private;
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
if (zc)
zcrypt_card_unregister(zc);
}
@@ -251,6 +343,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
} else {
return -ENODEV;
}
+
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
@@ -261,8 +354,17 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (rc) {
aq->private = NULL;
zcrypt_queue_free(zq);
+ goto out;
+ }
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_group);
+ if (rc)
+ zcrypt_queue_unregister(zq);
}
+out:
return rc;
}
@@ -275,6 +377,8 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 12fe9deb265e..a36251d138fb 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ /* fall through - wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 1b4ee570b712..4a8a5373cb35 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1704,6 +1704,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
grp->changed_side = 2;
break;
}
+ /* Else, fall through */
case MPCG_STATE_XID0IOWAIX:
case MPCG_STATE_XID7INITW:
case MPCG_STATE_XID7INITX:
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index e02f295d38a9..1534420a0243 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -357,6 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
/*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
if (callback)
grp->send_qllc_disc = 1;
+ /* Else, fall through */
case MPCG_STATE_XID0IOWAIT:
fsm_deltimer(&grp->timer);
grp->outstanding_xid2 = 0;
@@ -1469,6 +1470,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
break;
+ /* Else, fall through */
default:
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
}
@@ -2089,6 +2091,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
grp->estconnfunc = NULL;
break;
}
+ /* Else, fall through */
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c7ee07ce3615..28db887d38ed 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -629,6 +629,7 @@ struct qeth_seqno {
struct qeth_reply {
struct list_head list;
struct completion received;
+ spinlock_t lock;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d0caeebc802..6502b148541e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
if (reply) {
refcount_set(&reply->refcnt, 1);
init_completion(&reply->received);
+ spin_lock_init(&reply->lock);
}
return reply;
}
@@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
if (!reply->callback) {
rc = 0;
+ goto no_callback;
+ }
+
+ spin_lock_irqsave(&reply->lock, flags);
+ if (reply->rc) {
+ /* Bail out when the requestor has already left: */
+ rc = reply->rc;
} else {
if (cmd) {
reply->offset = (u16)((char *)cmd - (char *)iob->data);
@@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
rc = reply->callback(card, reply, (unsigned long)iob);
}
}
+ spin_unlock_irqrestore(&reply->lock, flags);
+no_callback:
if (rc <= 0)
qeth_notify_reply(reply, rc);
qeth_put_reply(reply);
@@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card,
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply);
+
+ if (reply_cb) {
+ /* Wait until the callback for a late reply has completed: */
+ spin_lock_irq(&reply->lock);
+ if (rc)
+ /* Zap any callback that's still pending: */
+ reply->rc = rc;
+ spin_unlock_irq(&reply->lock);
+ }
+
if (!rc)
rc = reply->rc;
qeth_put_reply(reply);
@@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
get_user(req_len, &ureq->hdr.req_len))
return -EFAULT;
+ /* Sanitize user input, to avoid overflows in iob size calculation: */
+ if (req_len > QETH_BUFSIZE)
+ return -EINVAL;
+
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
if (!iob)
return -ENOMEM;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index fd64bc3f4062..cbead3d1b2fd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -333,7 +333,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
card->osn_info.data_cb(skb);
break;
}
- /* else unknown */
+ /* Else, fall through */
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 1a55e5942d36..957889a42d2e 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,8 @@ struct airq_info {
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static DEFINE_MUTEX(airq_areas_lock);
+
static u8 *summary_indicators;
static inline u8 *get_summary_indicator(struct airq_info *info)
@@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
+ mutex_unlock(&airq_areas_lock);
if (!info)
return 0;
write_lock_irqsave(&info->lock, flags);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 75f66f8ad3ea..1b92f3c19ff3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1523,10 +1523,10 @@ config SCSI_VIRTIO
source "drivers/scsi/csiostor/Kconfig"
-endif # SCSI_LOWLEVEL
-
source "drivers/scsi/pcmcia/Kconfig"
+endif # SCSI_LOWLEVEL
+
source "drivers/scsi/device_handler/Kconfig"
endmenu
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index aea4fd73c862..6c68c2303638 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -603,6 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg)
msgqueue_flush(&info->scsi.msgs);
msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
info->scsi.phase = PHASE_MSGOUT_EXPECT;
+ /* fall through */
case async:
dev->period = info->ifcfg.asyncperiod / 4;
@@ -915,6 +916,7 @@ static void fas216_disconnect_intr(FAS216_Info *info)
fas216_done(info, DID_ABORT);
break;
}
+ /* else, fall through */
default: /* huh? */
printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
@@ -1411,6 +1413,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */
case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */
fas216_stoptransfer(info);
+ /* fall through */
+
case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */
case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */
case STATE(STAT_STATUS, PHASE_COMMAND): /* Command -> Status */
@@ -1422,6 +1426,8 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */
case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */
fas216_stoptransfer(info);
+ /* fall through */
+
case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */
case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */
case STATE(STAT_MESGIN, PHASE_MSGOUT): /* Message Out -> Message In */
@@ -1575,6 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned
fas216_message(info);
break;
}
+ /* else, fall through */
default:
fas216_log(info, 0, "internal phase %s for function done?"
@@ -1957,6 +1964,7 @@ static void fas216_kick(FAS216_Info *info)
switch (where_from) {
case TYPE_QUEUE:
fas216_allocate_tag(info, SCpnt);
+ /* fall through */
case TYPE_OTHER:
fas216_start_command(info, SCpnt);
break;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f0066f8a1786..4971104b1817 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5
+#define ALUA_RTPG_RETRY_DELAY 2
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
case SCSI_ACCESS_STATE_TRANSITIONING:
if (time_before(jiffies, pg->expiry)) {
/* State transition, retry */
- pg->interval = 2;
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
err = SCSI_DH_RETRY;
} else {
struct alua_dh_data *h;
@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
+ if (!pg->interval)
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
+ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 590ec8009f52..1791a393795d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
- unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
+ unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV);
int first = 0;
int mtu_valid;
int found = 0;
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
*/
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
- return (struct fcoe_rport *)(rdata + 1);
+ return container_of(rdata, struct fcoe_rport, rdata);
}
/**
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
*/
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct fip_wwn_desc *wwn = NULL;
struct fip_vn_desc *vn = NULL;
struct fip_size_desc *size = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
case FIP_DT_VN_ID:
if (dlen != sizeof(struct fip_vn_desc))
goto len_err;
vn = (struct fip_vn_desc *)desc;
memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+ frport->rdata.ids.port_name =
+ get_unaligned_be64(&vn->fd_wwpn);
break;
case FIP_DT_FC4F:
if (dlen != sizeof(struct fip_fc4_feat))
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
/**
* fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
-
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
* Probe's REC bit is not set.
* If we don't reply, we will change our address.
*/
- if (fip->lp->wwpn > rdata->ids.port_name &&
+ if (fip->lp->wwpn > frport->rdata.ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"port_id collision\n");
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
case FIP_ST_VNMP_START:
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
-static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
struct fcoe_rport *frport;
u32 port_id;
- port_id = new->ids.port_id;
+ port_id = new->rdata.ids.port_id;
if (port_id == fip->port_id)
return;
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
- if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ if ((ids->port_name != -1 &&
+ ids->port_name != new->rdata.ids.port_name) ||
+ (ids->node_name != -1 &&
+ ids->node_name != new->rdata.ids.node_name)) {
mutex_unlock(&rdata->rp_mutex);
LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
fc_rport_logoff(rdata);
mutex_lock(&rdata->rp_mutex);
}
- ids->port_name = new->ids.port_name;
- ids->node_name = new->ids.node_name;
+ ids->port_name = new->rdata.ids.port_name;
+ ids->node_name = new->rdata.ids.node_name;
mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
port_id, frport->fcoe_len ? "old" : "new",
rdata->rp_state);
- *frport = *fcoe_ctlr_rport(new);
+ frport->fcoe_len = new->fcoe_len;
+ frport->flags = new->flags;
+ frport->login_count = new->login_count;
+ memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
+ memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
frport->time = 0;
}
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
/**
* fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(new);
-
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_id == fip->port_id) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, state %d\n",
fip->state);
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
- if (new->ids.port_id == fip->port_id) {
- if (new->ids.port_name > fip->lp->wwpn) {
+ if (new->rdata.ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_name > fip->lp->wwpn) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
}
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
- new->ids.port_id);
- fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
- min((u32)frport->fcoe_len,
+ new->rdata.ids.port_id);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
+ min((u32)new->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
- "ignoring claim from %x\n", new->ids.port_id);
+ "ignoring claim from %x\n",
+ new->rdata.ids.port_id);
break;
}
}
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_claim_resp() - handle received Claim Response
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Claim Response
+ * @new: newly-parsed FCoE rport from the Claim Response
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n",
- new->ids.port_id, fcoe_ctlr_state(fip->state));
+ new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
fcoe_ctlr_vn_add(fip, new);
}
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_beacon() - handle received beacon.
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Beacon
+ * @new: newly-parsed FCoE rport from the Beacon
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
struct fcoe_rport *frport;
- frport = fcoe_ctlr_rport(new);
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = fc_rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
if (rdata) {
- if (rdata->ids.node_name == new->ids.node_name &&
- rdata->ids.port_name == new->ids.port_name) {
+ if (rdata->ids.node_name == new->rdata.ids.node_name &&
+ rdata->ids.port_name == new->rdata.ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
+
LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
rdata->ids.port_id);
if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
* Don't add the neighbor yet.
*/
LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
- new->ids.port_id);
+ new->rdata.ids.port_id);
if (time_after(jiffies,
fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
fcoe_ctlr_vn_send_claim(fip);
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
}
- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
goto drop;
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_lock(&fip->ctlr_mutex);
switch (sub) {
case FIP_SC_VN_PROBE_REQ:
- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_req(fip, &frport);
break;
case FIP_SC_VN_PROBE_REP:
- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_reply(fip, &frport);
break;
case FIP_SC_VN_CLAIM_NOTIFY:
- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_notify(fip, &frport);
break;
case FIP_SC_VN_CLAIM_REP:
- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_resp(fip, &frport);
break;
case FIP_SC_VN_BEACON:
- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ fcoe_ctlr_vn_beacon(fip, &frport);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2802,22 +2800,18 @@ drop:
*/
static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
struct fip_mac_desc *macd = NULL;
struct fip_wwn_desc *wwn = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
* @fip: The FCoE controller
+ * @frport: The newly-parsed FCoE rport from the Discovery Request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
if (fip->mode == FIP_MODE_VN2VN)
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vlan_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
goto drop;
}
mutex_lock(&fip->ctlr_mutex);
if (sub == FIP_SC_VL_REQ)
- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ fcoe_ctlr_vlan_disc_reply(fip, &frport);
mutex_unlock(&fip->ctlr_mutex);
drop:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 43a6b5350775..1bb6aada93fa 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_SERV_RESPONSE_COMPLETE:
switch (c2->error_data.status) {
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ if (cmd)
+ cmd->result = 0;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0))
+ c2->error_data.status == 0)) {
+ cmd->result = 0;
return hpsa_cmd_free_and_done(h, c, cmd);
+ }
/*
* Any RAID offload error results in retry which will use
@@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_DEVICE_BUSY;
/*
+ * This is necessary because the SML doesn't zero out this field during
+ * error recovery.
+ */
+ cmd->result = 0;
+
+ /*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
*/
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
if (idx != h->last_collision_tag) { /* Print once per tag */
dev_warn(&h->pdev->dev,
"%s: tag collision (tag=%d)\n", __func__, idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
if (scmd)
scsi_print_command(scmd);
h->last_collision_tag = idx;
@@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.rst
+ * Documentation/driver-api/pci/pci.rst
*/
pci_disable_device(h->pdev); /* pci_init 1 */
pci_release_regions(h->pdev); /* pci_init 2 */
@@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */
clean1:
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.rst
+ * Documentation/driver-api/pci/pci.rst
*/
pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index acd16e0d52cf..8cdbac076a1b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
- ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_free_event_pool(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e0f3852fdad1..da6e97d8dc3b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
lockdep_assert_held(&lport->disc.disc_mutex);
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
if (rdata)
return rdata;
- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
/* Fall through */
#endif
+ /* Fall through - only for the #else condition above. */
default:
error = -ENXIO;
pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
+ uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..d65558619ab0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
/*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,256 = Manually specify the maximum nr_hw_queue value to be set.
+ *
+ * Value range is [0,256]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+ "Set the number of SCSI Queues advertised");
+
+/*
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
* will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
+ &dev_attr_lpfc_fcp_mq_threshold,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- shost->nr_hw_queues = phba->cfg_hdw_queue;
- else
- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+ if (!phba->cfg_fcp_mq_threshold ||
+ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+ phba->cfg_fcp_mq_threshold);
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
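To make the new sizing concrete (hypothetical figures, not taken from the patch): on a host with two NUMA nodes, the default lpfc_fcp_mq_threshold of 8 and 16 hardware queues, the threshold stays at 8 and the hunk above advertises nr_hw_queues = min(2 * 2, 8) = 4 to the SCSI layer, rather than scaling with the full hardware-queue or CPU count as before.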
@@ -10776,12 +10778,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
/* This loop sets up all CPUs that are affinitized with a
* irq vector assigned to the driver. All affinitized CPUs
* will get a link to that vectors IRQ and EQ.
+ *
+ * NULL affinity mask handling:
+ * If irq count is greater than one, log an error message.
+ * If the null mask is received for the first irq, find the
+ * first present cpu, and assign the eq index to ensure at
+ * least one EQ is assigned.
*/
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
/* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp)
- continue;
+ if (!maskp) {
+ if (phba->cfg_irq_chann > 1)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3329 No affinity mask found "
+ "for vector %d (%d)\n",
+ idx, phba->cfg_irq_chann);
+ if (!idx) {
+ cpu = cpumask_first(cpu_present_mask);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->eq = idx;
+ cpup->irq = pci_irq_vector(phba->pcidev, idx);
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ }
+ break;
+ }
i = 0;
/* Loop through all CPUs associated with vector idx */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..a81ef0293696 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
#define LPFC_HBA_HDWQ_MAX 128
#define LPFC_HBA_HDWQ_DEF 0
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN 0
+#define LPFC_FCP_MQ_THRESHOLD_MAX 256
+#define LPFC_FCP_MQ_THRESHOLD_DEF 8
+
/* Common buffer size to accomidate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index b2339d04a700..f9f07935556e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev,
(struct megasas_instance *) shost->hostdata;
u32 size;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
@@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev,
}
size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -8763,7 +8766,7 @@ static int __init megasas_init(void)
if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
(event_log_level > MFI_EVT_CLASS_DEAD)) {
- printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
event_log_level = MFI_EVT_CLASS_CRITICAL;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a32b3f0fcd15..120e3c4de8c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return 0;
}
-int
+static int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
u32 max_mpt_cmd, i, j;
@@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
return 0;
}
-int
+
+static int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
struct fusion_context *fusion;
@@ -657,7 +658,7 @@ retry_alloc:
return 0;
}
-int
+static int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
int i, count;
@@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
return 0;
}
-int
+static int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
int i, j, k, msix_count;
@@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
* and is used as SMID of the cmd.
* SMID value range is from 1 to max_fw_cmds.
*/
-int
+static int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
int i;
@@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
*
* This is the main function for initializing firmware.
*/
-u32
+static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct fusion_context *fusion;
@@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance)
* @ext_status : ext status of cmd returned by FW
*/
-void
+static void
map_cmd_status(struct fusion_context *fusion,
struct scsi_cmnd *scmd, u8 status, u8 ext_status,
u32 data_length, u8 *sense)
@@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
*
* Used to set the PD LBA in CDB for FP IOs
*/
-void
+static void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
@@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
* Prepares the io_request and chain elements (sg_frame) for IO
* The IO can be for PD (Fast Path) or LD
*/
-void
+static void
megasas_build_ldio_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
* Invokes helper functions to prepare request frames
* and sets flags appropriate for IO/Non-IO cmd
*/
-int
+static int
megasas_build_io_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
/* megasas_prepare_secondRaid1_IO
* It prepares the raid 1 second IO
*/
-void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd,
- struct megasas_cmd_fusion *r1_cmd)
+static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd,
+ struct megasas_cmd_fusion *r1_cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
struct fusion_context *fusion;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 684662888792..050c0f029ef9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
u64 required_mask, coherent_mask;
struct sysinfo s;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
goto try_32bit;
if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(dma_mask);
else
coherent_mask = DMA_BIT_MASK(32);
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
dma_set_coherent_mask(&pdev->dev, coherent_mask))
goto try_32bit;
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
+ ioc->dma_mask = dma_mask;
goto out;
try_32bit:
@@ -2744,7 +2746,7 @@ static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
- if (ioc->dma_mask == 64) {
+ if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
pci_name(ioc->pdev));
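
The mpt3sas hunks above stop hard-coding a 64-bit DMA mask and instead derive the width from the controller generation. A minimal sketch of that approach, assuming a generic struct device and the common DMA mapping API; the helper name and the 63-bit policy check are illustrative, not the driver's exact code:

#include <linux/dma-mapping.h>

/* Pick a DMA mask width at run time and fall back to 32-bit if refused. */
static int example_config_dma(struct device *dev, bool newer_controller)
{
	/* assumption: newer parts are restricted to 63-bit addressing */
	int width = newer_controller ? 63 : 64;

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(width)))
		return width;

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 32;

	return -ENODEV;
}
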
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index e5760c4a27f0..832af4213046 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -357,10 +357,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-#include <asm/sn/io.h>
-#endif
-
/*
* Compile time Options:
@@ -380,11 +376,6 @@
#define NVRAM_DELAY() udelay(500) /* 500 microseconds */
-#if defined(__ia64__) && !defined(ia64_platform_is)
-#define ia64_platform_is(foo) (!strcmp(x, platform_name))
-#endif
-
-
#define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
@@ -1427,15 +1418,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
ha->flags.reset_active = 0;
ha->flags.abort_isp_active = 0;
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
- if (ia64_platform_is("sn2")) {
- printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
- "dual channel lockup workaround\n", ha->host_no);
- ha->flags.use_pci_vchannel = 1;
- driver_setup.no_nvram = 1;
- }
-#endif
-
/* TODO: implement support for the 1040 nvram format */
if (IS_ISP1040(ha))
driver_setup.no_nvram = 1;
@@ -2251,13 +2233,6 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
mb[1] = nv->firmware_feature.f.enable_fast_posting;
mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
-#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
- if (ia64_platform_is("sn2")) {
- printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
- "workaround\n", ha->host_no);
- mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
- }
-#endif
status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* Retry count and delay. */
@@ -2888,12 +2863,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
break;
dma_handle = sg_dma_address(s);
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
- if (ha->flags.use_pci_vchannel)
- sn_pci_set_vchan(ha->pdev,
- (unsigned long *)&dma_handle,
- SCSI_BUS_32(cmd));
-#endif
*dword_ptr++ =
cpu_to_le32(lower_32_bits(dma_handle));
*dword_ptr++ =
@@ -2950,12 +2919,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
if (cnt == 5)
break;
dma_handle = sg_dma_address(s);
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
- if (ha->flags.use_pci_vchannel)
- sn_pci_set_vchan(ha->pdev,
- (unsigned long *)&dma_handle,
- SCSI_BUS_32(cmd));
-#endif
*dword_ptr++ =
cpu_to_le32(lower_32_bits(dma_handle));
*dword_ptr++ =
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index b496206362a9..a1a8aefc7cc3 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -1055,9 +1055,6 @@ struct scsi_qla_host {
uint32_t reset_active:1; /* 3 */
uint32_t abort_isp_active:1; /* 4 */
uint32_t disable_risc_code_load:1; /* 5 */
-#ifdef __ia64__
- uint32_t use_pci_vchannel:1;
-#endif
} flags;
struct nvram nvram;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vha->gnl.l = NULL;
+
vfree(vha->scan.l);
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4059655639d9..d4c3baec9172 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4575,20 +4575,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
- /*
- * The SN2 does not provide BIOS emulation which means you can't change
- * potentially bogus BIOS settings. Force the use of default settings
- * for link rate and frame size. Hope that the rest of the settings
- * are valid.
- */
- if (ia64_platform_is("sn2")) {
- nv->frame_payload_size = 2048;
- if (IS_QLA23XX(ha))
- nv->special_options[1] = BIT_7;
- }
-#endif
-
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -4877,7 +4863,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
- fcport = NULL;
+ return NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
return 0;
probe_failed:
+ if (base_vha->gnl.l) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+ base_vha->gnl.l = NULL;
+ }
+
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (!atomic_read(&pdev->enable_cnt)) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
-
+ base_vha->gnl.l = NULL;
scsi_host_put(base_vha->host);
kfree(ha);
pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ base_vha->gnl.l = NULL;
+
vfree(base_vha->scan.l);
if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
"Alloc failed for scan database.\n");
dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
vha->gnl.l, vha->gnl.ldma);
+ vha->gnl.l = NULL;
scsi_remove_host(vha->host);
return NULL;
}
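
The qla2xxx hunks above all apply one pattern: clear the cached pointer right after dma_free_coherent() so a later error or teardown path cannot free the same buffer twice. A hedged sketch of that pattern with placeholder structure and field names:

#include <linux/dma-mapping.h>

struct example_gnl {			/* placeholder for any cached coherent buffer */
	void *l;			/* virtual address, NULL when not allocated */
	dma_addr_t ldma;		/* bus address */
	size_t size;
};

static void example_free_gnl(struct device *dev, struct example_gnl *gnl)
{
	if (!gnl->l)
		return;			/* already released on an earlier path */

	dma_free_coherent(dev, gnl->size, gnl->l, gnl->ldma);
	gnl->l = NULL;			/* the next teardown sees nothing left to free */
}
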
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9381171c2fc0..4e88d7e9cf9a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1089,6 +1089,18 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+/*
+ * Only called when the request isn't completed by SCSI, and not freed by
+ * SCSI
+ */
+static void scsi_cleanup_rq(struct request *rq)
+{
+ if (rq->rq_flags & RQF_DONTPREP) {
+ scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ rq->rq_flags &= ~RQF_DONTPREP;
+ }
+}
+
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
{
@@ -1784,8 +1796,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) << SECTOR_SHIFT);
+ if (dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT);
+ }
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
@@ -1819,6 +1833,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
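
The __scsi_init_queue() hunk above fixes a unit mix-up: dma_max_mapping_size() reports bytes while max_sectors counts 512-byte sectors, so the byte value must be shifted right by SECTOR_SHIFT rather than left, and the cap only applies when the device actually has a DMA mask. A small worked example, assuming a 256 KiB mapping limit:

#include <linux/blkdev.h>		/* SECTOR_SHIFT == 9 */

static unsigned int example_cap_sectors(size_t dma_limit_bytes)
{
	/* e.g. 262144 bytes >> 9 == 512 sectors; shifting left would
	 * inflate the limit instead of capping it */
	return dma_limit_bytes >> SECTOR_SHIFT;
}
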
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 74ded5f3c236..3717eea37ecb 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -94,8 +94,7 @@ static int scsi_dev_type_resume(struct device *dev,
if (!err && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
- if (sdev->request_queue->dev)
- blk_set_runtime_active(sdev->request_queue);
+ blk_set_runtime_active(sdev->request_queue);
}
}
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c074631086a4..5b313226f11c 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -372,15 +372,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
-static int always_match(struct device *dev, const void *data)
-{
- return 1;
-}
-
static inline struct device *next_scsi_device(struct device *start)
{
- struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
- always_match);
+ struct device *next = bus_find_next_device(&scsi_bus_type, start);
+
put_device(start);
return next;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 149d406aacc9..4b925552458f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1293,7 +1293,9 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
- return sd_zbc_setup_reset_cmnd(cmd);
+ return sd_zbc_setup_reset_cmnd(cmd, false);
+ case REQ_OP_ZONE_RESET_ALL:
+ return sd_zbc_setup_reset_cmnd(cmd, true);
default:
WARN_ON_ONCE(1);
return BLK_STS_NOTSUPP;
@@ -1959,6 +1961,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 38c50946fc42..1eab779f812b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -209,7 +209,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
@@ -225,7 +225,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
+ bool all)
{
return BLK_STS_TARGET;
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 5d6ff3931632..de4019dc0f0b 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -209,10 +209,11 @@ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
/**
* sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
* @cmd: the command to setup
+ * @all: Whether to reset all zones rather than a single zone.
*
* Called from sd_init_command() for REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL requests.
*/
-blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -234,7 +235,10 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
memset(cmd->cmnd, 0, cmd->cmd_len);
cmd->cmnd[0] = ZBC_OUT;
cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
- put_unaligned_be64(block, &cmd->cmnd[2]);
+ if (all)
+ cmd->cmnd[14] = 0x1;
+ else
+ put_unaligned_be64(block, &cmd->cmnd[2]);
rq->timeout = SD_TIMEOUT;
cmd->sc_data_direction = DMA_NONE;
@@ -261,6 +265,7 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
switch (req_op(rq)) {
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
if (result &&
sshdr->sense_key == ILLEGAL_REQUEST &&
@@ -487,6 +492,9 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
/* The drive satisfies the kernel restrictions: set it up */
blk_queue_chunk_sectors(sdkp->disk->queue,
logical_to_sectors(sdkp->device, zone_blocks));
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
+ blk_queue_required_elevator_features(sdkp->disk->queue,
+ ELEVATOR_F_ZBD_SEQ_WRITE);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/* READ16/WRITE16 is mandatory for ZBC disks */
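
For the zone-reset path above, the ZBC OUT command with the RESET WRITE POINTER action either names a single zone by its start LBA or sets the ALL bit to reset every zone. A hedged sketch of the CDB layout; the opcode and action values are assumptions taken from the ZBC spec, not copied from the driver:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/unaligned.h>		/* put_unaligned_be64() */

static void example_fill_reset_cdb(unsigned char cdb[16], bool all, u64 zone_lba)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x94;			/* ZBC OUT (assumed opcode) */
	cdb[1] = 0x04;			/* RESET WRITE POINTER (assumed action) */
	if (all)
		cdb[14] = 0x1;		/* ALL bit: reset every zone */
	else
		put_unaligned_be64(zone_lba, &cdb[2]);	/* one zone's start LBA */
}
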
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
+ if (!vreg)
+ return 0;
+
return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index f3585777324c..29fbab55c3b3 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1338,12 +1338,15 @@ static int of_qcom_slim_ngd_register(struct device *parent,
continue;
ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
- if (!ngd)
+ if (!ngd) {
+ of_node_put(node);
return -ENOMEM;
+ }
ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
if (!ngd->pdev) {
kfree(ngd);
+ of_node_put(node);
return -ENOMEM;
}
ngd->id = id;
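
The qcom-ngd-ctrl hunk above plugs a node reference leak: the child-node iterator takes a reference on each node it visits, so an early return inside the loop must drop that reference explicitly. A minimal sketch of the pattern, assuming a for_each_available_child_of_node() walk with a placeholder allocation:

#include <linux/of.h>
#include <linux/slab.h>

static int example_register_children(struct device_node *parent)
{
	struct device_node *node;
	void *priv;

	for_each_available_child_of_node(parent, node) {
		priv = kzalloc(64, GFP_KERNEL);
		if (!priv) {
			of_node_put(node);	/* drop the iterator's reference */
			return -ENOMEM;
		}
		/* ... register the child here, freeing priv on failure ... */
		kfree(priv);
	}

	return 0;
}
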
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 9be41089edde..b2f013bfe42e 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -439,7 +439,7 @@ static inline bool slim_tid_txn(u8 mt, u8 mc)
(mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
mc == SLIM_MSG_MC_REQUEST_VALUE ||
- mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION));
+ mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
}
static inline bool slim_ec_txn(u8 mt, u8 mc)
diff --git a/drivers/sn/Kconfig b/drivers/sn/Kconfig
deleted file mode 100644
index a6c443d31a3c..000000000000
--- a/drivers/sn/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Miscellaneous SN-specific devices
-#
-
-menu "SN Devices"
- depends on SGI_SN
-
-config SGI_IOC3
- tristate "SGI IOC3 Base IO support"
- default m
- ---help---
- This option enables basic support for the SGI IOC3-based Base IO
- controller card. This option does not enable any specific
- functions on such a card, but provides necessary infrastructure
- for other drivers to utilize.
-
- If you have an SGI Altix with an IOC3-based
- I/O controller or a PCI IOC3 serial card say Y.
- Otherwise say N.
-
-endmenu
diff --git a/drivers/sn/Makefile b/drivers/sn/Makefile
deleted file mode 100644
index f0e809a38b2d..000000000000
--- a/drivers/sn/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Altix device drivers.
-#
-#
-
-obj-$(CONFIG_SGI_IOC3) += ioc3.o
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
deleted file mode 100644
index 358025af4918..000000000000
--- a/drivers/sn/ioc3.c
+++ /dev/null
@@ -1,844 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SGI IOC3 master driver and IRQ demuxer
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- * Heavily based on similar work by:
- * Brent Casavant <bcasavan@sgi.com> - IOC4 master driver
- * Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ioc3.h>
-#include <linux/rwsem.h>
-#include <linux/slab.h>
-
-#define IOC3_PCI_SIZE 0x100000
-
-static LIST_HEAD(ioc3_devices);
-static int ioc3_counter;
-static DECLARE_RWSEM(ioc3_devices_rwsem);
-
-static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES];
-static struct ioc3_submodule *ioc3_ethernet;
-static DEFINE_RWLOCK(ioc3_submodules_lock);
-
-/* NIC probing code */
-
-#define GPCR_MLAN_EN 0x00200000 /* enable MCR to pin 8 */
-
-static inline unsigned mcr_pack(unsigned pulse, unsigned sample)
-{
- return (pulse << 10) | (sample << 2);
-}
-
-static int nic_wait(struct ioc3_driver_data *idd)
-{
- unsigned mcr;
-
- do {
- mcr = readl(&idd->vma->mcr);
- } while (!(mcr & 2));
-
- return mcr & 1;
-}
-
-static int nic_reset(struct ioc3_driver_data *idd)
-{
- int presence;
- unsigned long flags;
-
- local_irq_save(flags);
- writel(mcr_pack(500, 65), &idd->vma->mcr);
- presence = nic_wait(idd);
- local_irq_restore(flags);
-
- udelay(500);
-
- return presence;
-}
-
-static int nic_read_bit(struct ioc3_driver_data *idd)
-{
- int result;
- unsigned long flags;
-
- local_irq_save(flags);
- writel(mcr_pack(6, 13), &idd->vma->mcr);
- result = nic_wait(idd);
- local_irq_restore(flags);
-
- udelay(500);
-
- return result;
-}
-
-static void nic_write_bit(struct ioc3_driver_data *idd, int bit)
-{
- if (bit)
- writel(mcr_pack(6, 110), &idd->vma->mcr);
- else
- writel(mcr_pack(80, 30), &idd->vma->mcr);
-
- nic_wait(idd);
-}
-
-static unsigned nic_read_byte(struct ioc3_driver_data *idd)
-{
- unsigned result = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- result = (result >> 1) | (nic_read_bit(idd) << 7);
-
- return result;
-}
-
-static void nic_write_byte(struct ioc3_driver_data *idd, int byte)
-{
- int i, bit;
-
- for (i = 8; i; i--) {
- bit = byte & 1;
- byte >>= 1;
-
- nic_write_bit(idd, bit);
- }
-}
-
-static unsigned long
-nic_find(struct ioc3_driver_data *idd, int *last, unsigned long addr)
-{
- int a, b, index, disc;
-
- nic_reset(idd);
-
- /* Search ROM. */
- nic_write_byte(idd, 0xF0);
-
- /* Algorithm from ``Book of iButton Standards''. */
- for (index = 0, disc = 0; index < 64; index++) {
- a = nic_read_bit(idd);
- b = nic_read_bit(idd);
-
- if (a && b) {
- printk(KERN_WARNING "IOC3 NIC search failed.\n");
- *last = 0;
- return 0;
- }
-
- if (!a && !b) {
- if (index == *last) {
- addr |= 1UL << index;
- } else if (index > *last) {
- addr &= ~(1UL << index);
- disc = index;
- } else if ((addr & (1UL << index)) == 0)
- disc = index;
- nic_write_bit(idd, (addr>>index)&1);
- continue;
- } else {
- if (a)
- addr |= 1UL << index;
- else
- addr &= ~(1UL << index);
- nic_write_bit(idd, a);
- continue;
- }
- }
- *last = disc;
- return addr;
-}
-
-static void nic_addr(struct ioc3_driver_data *idd, unsigned long addr)
-{
- int index;
-
- nic_reset(idd);
- nic_write_byte(idd, 0xF0);
- for (index = 0; index < 64; index++) {
- nic_read_bit(idd);
- nic_read_bit(idd);
- nic_write_bit(idd, (addr>>index)&1);
- }
-}
-
-static void crc16_byte(unsigned int *crc, unsigned char db)
-{
- int i;
-
- for(i=0;i<8;i++) {
- *crc <<= 1;
- if((db^(*crc>>16)) & 1)
- *crc ^= 0x8005;
- db >>= 1;
- }
- *crc &= 0xFFFF;
-}
-
-static unsigned int crc16_area(unsigned char *dbs, int size, unsigned int crc)
-{
- while(size--)
- crc16_byte(&crc, *(dbs++));
- return crc;
-}
-
-static void crc8_byte(unsigned int *crc, unsigned char db)
-{
- int i,f;
-
- for(i=0;i<8;i++) {
- f = (*crc ^ db) & 1;
- *crc >>= 1;
- db >>= 1;
- if(f)
- *crc ^= 0x8c;
- }
- *crc &= 0xff;
-}
-
-static unsigned int crc8_addr(unsigned long addr)
-{
- int i;
- unsigned int crc = 0x00;
-
- for(i=0;i<8;i++)
- crc8_byte(&crc, addr>>(i<<3));
- return crc;
-}
-
-static void
-read_redir_page(struct ioc3_driver_data *idd, unsigned long addr, int page,
- unsigned char *redir, unsigned char *data)
-{
- int loops = 16, i;
-
- while(redir[page] != 0xFF) {
- page = redir[page]^0xFF;
- loops--;
- if(loops<0) {
- printk(KERN_ERR "IOC3: NIC circular redirection\n");
- return;
- }
- }
- loops = 3;
- while(loops>0) {
- nic_addr(idd, addr);
- nic_write_byte(idd, 0xF0);
- nic_write_byte(idd, (page << 5) & 0xE0);
- nic_write_byte(idd, (page >> 3) & 0x1F);
- for(i=0;i<0x20;i++)
- data[i] = nic_read_byte(idd);
- if(crc16_area(data, 0x20, 0x0000) == 0x800d)
- return;
- loops--;
- }
- printk(KERN_ERR "IOC3: CRC error in data page\n");
- for(i=0;i<0x20;i++)
- data[i] = 0x00;
-}
-
-static void
-read_redir_map(struct ioc3_driver_data *idd, unsigned long addr,
- unsigned char *redir)
-{
- int i,j,loops = 3,crc_ok;
- unsigned int crc;
-
- while(loops>0) {
- crc_ok = 1;
- nic_addr(idd, addr);
- nic_write_byte(idd, 0xAA);
- nic_write_byte(idd, 0x00);
- nic_write_byte(idd, 0x01);
- for(i=0;i<64;i+=8) {
- for(j=0;j<8;j++)
- redir[i+j] = nic_read_byte(idd);
- crc = crc16_area(redir+i, 8, (i==0)?0x8707:0x0000);
- crc16_byte(&crc, nic_read_byte(idd));
- crc16_byte(&crc, nic_read_byte(idd));
- if(crc != 0x800d)
- crc_ok = 0;
- }
- if(crc_ok)
- return;
- loops--;
- }
- printk(KERN_ERR "IOC3: CRC error in redirection page\n");
- for(i=0;i<64;i++)
- redir[i] = 0xFF;
-}
-
-static void read_nic(struct ioc3_driver_data *idd, unsigned long addr)
-{
- unsigned char redir[64];
- unsigned char data[64],part[32];
- int i,j;
-
- /* read redirections */
- read_redir_map(idd, addr, redir);
- /* read data pages */
- read_redir_page(idd, addr, 0, redir, data);
- read_redir_page(idd, addr, 1, redir, data+32);
- /* assemble the part # */
- j=0;
- for(i=0;i<19;i++)
- if(data[i+11] != ' ')
- part[j++] = data[i+11];
- for(i=0;i<6;i++)
- if(data[i+32] != ' ')
- part[j++] = data[i+32];
- part[j] = 0;
- /* skip Octane power supplies */
- if(!strncmp(part, "060-0035-", 9))
- return;
- if(!strncmp(part, "060-0038-", 9))
- return;
- strcpy(idd->nic_part, part);
- /* assemble the serial # */
- j=0;
- for(i=0;i<10;i++)
- if(data[i+1] != ' ')
- idd->nic_serial[j++] = data[i+1];
- idd->nic_serial[j] = 0;
-}
-
-static void read_mac(struct ioc3_driver_data *idd, unsigned long addr)
-{
- int i, loops = 3;
- unsigned char data[13];
-
- while(loops>0) {
- nic_addr(idd, addr);
- nic_write_byte(idd, 0xF0);
- nic_write_byte(idd, 0x00);
- nic_write_byte(idd, 0x00);
- nic_read_byte(idd);
- for(i=0;i<13;i++)
- data[i] = nic_read_byte(idd);
- if(crc16_area(data, 13, 0x0000) == 0x800d) {
- for(i=10;i>4;i--)
- idd->nic_mac[10-i] = data[i];
- return;
- }
- loops--;
- }
- printk(KERN_ERR "IOC3: CRC error in MAC address\n");
- for(i=0;i<6;i++)
- idd->nic_mac[i] = 0x00;
-}
-
-static void probe_nic(struct ioc3_driver_data *idd)
-{
- int save = 0, loops = 3;
- unsigned long first, addr;
-
- writel(GPCR_MLAN_EN, &idd->vma->gpcr_s);
-
- while(loops>0) {
- idd->nic_part[0] = 0;
- idd->nic_serial[0] = 0;
- addr = first = nic_find(idd, &save, 0);
- if(!first)
- return;
- while(1) {
- if(crc8_addr(addr))
- break;
- else {
- switch(addr & 0xFF) {
- case 0x0B:
- read_nic(idd, addr);
- break;
- case 0x09:
- case 0x89:
- case 0x91:
- read_mac(idd, addr);
- break;
- }
- }
- addr = nic_find(idd, &save, addr);
- if(addr == first)
- return;
- }
- loops--;
- }
- printk(KERN_ERR "IOC3: CRC error in NIC address\n");
-}
-
-/* Interrupts */
-
-static void write_ireg(struct ioc3_driver_data *idd, uint32_t val, int which)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&idd->ir_lock, flags);
- switch (which) {
- case IOC3_W_IES:
- writel(val, &idd->vma->sio_ies);
- break;
- case IOC3_W_IEC:
- writel(val, &idd->vma->sio_iec);
- break;
- }
- spin_unlock_irqrestore(&idd->ir_lock, flags);
-}
-static inline uint32_t get_pending_intrs(struct ioc3_driver_data *idd)
-{
- unsigned long flag;
- uint32_t intrs = 0;
-
- spin_lock_irqsave(&idd->ir_lock, flag);
- intrs = readl(&idd->vma->sio_ir);
- intrs &= readl(&idd->vma->sio_ies);
- spin_unlock_irqrestore(&idd->ir_lock, flag);
- return intrs;
-}
-
-static irqreturn_t ioc3_intr_io(int irq, void *arg)
-{
- unsigned long flags;
- struct ioc3_driver_data *idd = arg;
- int handled = 1, id;
- unsigned int pending;
-
- read_lock_irqsave(&ioc3_submodules_lock, flags);
-
- if(idd->dual_irq && readb(&idd->vma->eisr)) {
- /* send Ethernet IRQ to the driver */
- if(ioc3_ethernet && idd->active[ioc3_ethernet->id] &&
- ioc3_ethernet->intr) {
- handled = handled && !ioc3_ethernet->intr(ioc3_ethernet,
- idd, 0);
- }
- }
- pending = get_pending_intrs(idd); /* look at the IO IRQs */
-
- for(id=0;id<IOC3_MAX_SUBMODULES;id++) {
- if(idd->active[id] && ioc3_submodules[id]
- && (pending & ioc3_submodules[id]->irq_mask)
- && ioc3_submodules[id]->intr) {
- write_ireg(idd, ioc3_submodules[id]->irq_mask,
- IOC3_W_IEC);
- if(!ioc3_submodules[id]->intr(ioc3_submodules[id],
- idd, pending & ioc3_submodules[id]->irq_mask))
- pending &= ~ioc3_submodules[id]->irq_mask;
- if (ioc3_submodules[id]->reset_mask)
- write_ireg(idd, ioc3_submodules[id]->irq_mask,
- IOC3_W_IES);
- }
- }
- read_unlock_irqrestore(&ioc3_submodules_lock, flags);
- if(pending) {
- printk(KERN_WARNING
- "IOC3: Pending IRQs 0x%08x discarded and disabled\n",pending);
- write_ireg(idd, pending, IOC3_W_IEC);
- handled = 1;
- }
- return handled?IRQ_HANDLED:IRQ_NONE;
-}
-
-static irqreturn_t ioc3_intr_eth(int irq, void *arg)
-{
- unsigned long flags;
- struct ioc3_driver_data *idd = (struct ioc3_driver_data *)arg;
- int handled = 1;
-
- if(!idd->dual_irq)
- return IRQ_NONE;
- read_lock_irqsave(&ioc3_submodules_lock, flags);
- if(ioc3_ethernet && idd->active[ioc3_ethernet->id]
- && ioc3_ethernet->intr)
- handled = handled && !ioc3_ethernet->intr(ioc3_ethernet, idd, 0);
- read_unlock_irqrestore(&ioc3_submodules_lock, flags);
- return handled?IRQ_HANDLED:IRQ_NONE;
-}
-
-void ioc3_enable(struct ioc3_submodule *is,
- struct ioc3_driver_data *idd, unsigned int irqs)
-{
- write_ireg(idd, irqs & is->irq_mask, IOC3_W_IES);
-}
-
-void ioc3_ack(struct ioc3_submodule *is, struct ioc3_driver_data *idd,
- unsigned int irqs)
-{
- writel(irqs & is->irq_mask, &idd->vma->sio_ir);
-}
-
-void ioc3_disable(struct ioc3_submodule *is,
- struct ioc3_driver_data *idd, unsigned int irqs)
-{
- write_ireg(idd, irqs & is->irq_mask, IOC3_W_IEC);
-}
-
-void ioc3_gpcr_set(struct ioc3_driver_data *idd, unsigned int val)
-{
- unsigned long flags;
- spin_lock_irqsave(&idd->gpio_lock, flags);
- writel(val, &idd->vma->gpcr_s);
- spin_unlock_irqrestore(&idd->gpio_lock, flags);
-}
-
-/* Keep it simple, stupid! */
-static int find_slot(void **tab, int max)
-{
- int i;
- for(i=0;i<max;i++)
- if(!(tab[i]))
- return i;
- return -1;
-}
-
-/* Register an IOC3 submodule */
-int ioc3_register_submodule(struct ioc3_submodule *is)
-{
- struct ioc3_driver_data *idd;
- int alloc_id;
- unsigned long flags;
-
- write_lock_irqsave(&ioc3_submodules_lock, flags);
- alloc_id = find_slot((void **)ioc3_submodules, IOC3_MAX_SUBMODULES);
- if(alloc_id != -1) {
- ioc3_submodules[alloc_id] = is;
- if(is->ethernet) {
- if(ioc3_ethernet==NULL)
- ioc3_ethernet=is;
- else
- printk(KERN_WARNING
- "IOC3 Ethernet module already registered!\n");
- }
- }
- write_unlock_irqrestore(&ioc3_submodules_lock, flags);
-
- if(alloc_id == -1) {
- printk(KERN_WARNING "Increase IOC3_MAX_SUBMODULES!\n");
- return -ENOMEM;
- }
-
- is->id=alloc_id;
-
- /* Initialize submodule for each IOC3 */
- if (!is->probe)
- return 0;
-
- down_read(&ioc3_devices_rwsem);
- list_for_each_entry(idd, &ioc3_devices, list) {
- /* set to 1 for IRQs in probe */
- idd->active[alloc_id] = 1;
- idd->active[alloc_id] = !is->probe(is, idd);
- }
- up_read(&ioc3_devices_rwsem);
-
- return 0;
-}
-
-/* Unregister an IOC3 submodule */
-void ioc3_unregister_submodule(struct ioc3_submodule *is)
-{
- struct ioc3_driver_data *idd;
- unsigned long flags;
-
- write_lock_irqsave(&ioc3_submodules_lock, flags);
- if(ioc3_submodules[is->id]==is)
- ioc3_submodules[is->id]=NULL;
- else
- printk(KERN_WARNING
- "IOC3 submodule %s has wrong ID.\n",is->name);
- if(ioc3_ethernet==is)
- ioc3_ethernet = NULL;
- write_unlock_irqrestore(&ioc3_submodules_lock, flags);
-
- /* Remove submodule for each IOC3 */
- down_read(&ioc3_devices_rwsem);
- list_for_each_entry(idd, &ioc3_devices, list)
- if(idd->active[is->id]) {
- if(is->remove)
- if(is->remove(is, idd))
- printk(KERN_WARNING
- "%s: IOC3 submodule %s remove failed "
- "for pci_dev %s.\n",
- __func__, module_name(is->owner),
- pci_name(idd->pdev));
- idd->active[is->id] = 0;
- if(is->irq_mask)
- write_ireg(idd, is->irq_mask, IOC3_W_IEC);
- }
- up_read(&ioc3_devices_rwsem);
-}
-
-/*********************
- * Device management *
- *********************/
-
-static char *ioc3_class_names[] = { "unknown", "IP27 BaseIO", "IP30 system",
- "MENET 1/2/3", "MENET 4", "CADduo", "Altix Serial" };
-
-static int ioc3_class(struct ioc3_driver_data *idd)
-{
- int res = IOC3_CLASS_NONE;
- /* NIC-based logic */
- if(!strncmp(idd->nic_part, "030-0891-", 9))
- res = IOC3_CLASS_BASE_IP30;
- if(!strncmp(idd->nic_part, "030-1155-", 9))
- res = IOC3_CLASS_CADDUO;
- if(!strncmp(idd->nic_part, "030-1657-", 9))
- res = IOC3_CLASS_SERIAL;
- if(!strncmp(idd->nic_part, "030-1664-", 9))
- res = IOC3_CLASS_SERIAL;
- /* total random heuristics */
-#ifdef CONFIG_SGI_IP27
- if(!idd->nic_part[0])
- res = IOC3_CLASS_BASE_IP27;
-#endif
- /* print educational message */
- printk(KERN_INFO "IOC3 part: [%s], serial: [%s] => class %s\n",
- idd->nic_part, idd->nic_serial, ioc3_class_names[res]);
- return res;
-}
-/* Adds a new instance of an IOC3 card */
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
- struct ioc3_driver_data *idd;
- uint32_t pcmd;
- int ret, id;
-
- /* Enable IOC3 and take ownership of it */
- if ((ret = pci_enable_device(pdev))) {
- printk(KERN_WARNING
- "%s: Failed to enable IOC3 device for pci_dev %s.\n",
- __func__, pci_name(pdev));
- goto out;
- }
- pci_set_master(pdev);
-
-#ifdef USE_64BIT_DMA
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!ret) {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret < 0) {
- printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA "
- "for consistent allocations\n",
- __func__);
- }
- }
-#endif
-
- /* Set up per-IOC3 data */
- idd = kzalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL);
- if (!idd) {
- printk(KERN_WARNING
- "%s: Failed to allocate IOC3 data for pci_dev %s.\n",
- __func__, pci_name(pdev));
- ret = -ENODEV;
- goto out_idd;
- }
- spin_lock_init(&idd->ir_lock);
- spin_lock_init(&idd->gpio_lock);
- idd->pdev = pdev;
-
- /* Map all IOC3 registers. These are shared between subdevices
- * so the main IOC3 module manages them.
- */
- idd->pma = pci_resource_start(pdev, 0);
- if (!idd->pma) {
- printk(KERN_WARNING
- "%s: Unable to find IOC3 resource "
- "for pci_dev %s.\n",
- __func__, pci_name(pdev));
- ret = -ENODEV;
- goto out_pci;
- }
- if (!request_mem_region(idd->pma, IOC3_PCI_SIZE, "ioc3")) {
- printk(KERN_WARNING
- "%s: Unable to request IOC3 region "
- "for pci_dev %s.\n",
- __func__, pci_name(pdev));
- ret = -ENODEV;
- goto out_pci;
- }
- idd->vma = ioremap(idd->pma, IOC3_PCI_SIZE);
- if (!idd->vma) {
- printk(KERN_WARNING
- "%s: Unable to remap IOC3 region "
- "for pci_dev %s.\n",
- __func__, pci_name(pdev));
- ret = -ENODEV;
- goto out_misc_region;
- }
-
- /* Track PCI-device specific data */
- pci_set_drvdata(pdev, idd);
- down_write(&ioc3_devices_rwsem);
- list_add_tail(&idd->list, &ioc3_devices);
- idd->id = ioc3_counter++;
- up_write(&ioc3_devices_rwsem);
-
- idd->gpdr_shadow = readl(&idd->vma->gpdr);
-
- /* Read IOC3 NIC contents */
- probe_nic(idd);
-
- /* Detect IOC3 class */
- idd->class = ioc3_class(idd);
-
- /* Initialize IOC3 */
- pci_read_config_dword(pdev, PCI_COMMAND, &pcmd);
- pci_write_config_dword(pdev, PCI_COMMAND,
- pcmd | PCI_COMMAND_MEMORY |
- PCI_COMMAND_PARITY | PCI_COMMAND_SERR |
- PCI_SCR_DROP_MODE_EN);
-
- write_ireg(idd, ~0, IOC3_W_IEC);
- writel(~0, &idd->vma->sio_ir);
-
- /* Set up IRQs */
- if(idd->class == IOC3_CLASS_BASE_IP30
- || idd->class == IOC3_CLASS_BASE_IP27) {
- writel(0, &idd->vma->eier);
- writel(~0, &idd->vma->eisr);
-
- idd->dual_irq = 1;
- if (!request_irq(pdev->irq, ioc3_intr_eth, IRQF_SHARED,
- "ioc3-eth", (void *)idd)) {
- idd->irq_eth = pdev->irq;
- } else {
- printk(KERN_WARNING
- "%s : request_irq fails for IRQ 0x%x\n ",
- __func__, pdev->irq);
- }
- if (!request_irq(pdev->irq+2, ioc3_intr_io, IRQF_SHARED,
- "ioc3-io", (void *)idd)) {
- idd->irq_io = pdev->irq+2;
- } else {
- printk(KERN_WARNING
- "%s : request_irq fails for IRQ 0x%x\n ",
- __func__, pdev->irq+2);
- }
- } else {
- if (!request_irq(pdev->irq, ioc3_intr_io, IRQF_SHARED,
- "ioc3", (void *)idd)) {
- idd->irq_io = pdev->irq;
- } else {
- printk(KERN_WARNING
- "%s : request_irq fails for IRQ 0x%x\n ",
- __func__, pdev->irq);
- }
- }
-
- /* Add this IOC3 to all submodules */
- for(id=0;id<IOC3_MAX_SUBMODULES;id++)
- if(ioc3_submodules[id] && ioc3_submodules[id]->probe) {
- idd->active[id] = 1;
- idd->active[id] = !ioc3_submodules[id]->probe
- (ioc3_submodules[id], idd);
- }
-
- printk(KERN_INFO "IOC3 Master Driver loaded for %s\n", pci_name(pdev));
-
- return 0;
-
-out_misc_region:
- release_mem_region(idd->pma, IOC3_PCI_SIZE);
-out_pci:
- kfree(idd);
-out_idd:
- pci_disable_device(pdev);
-out:
- return ret;
-}
-
-/* Removes a particular instance of an IOC3 card. */
-static void ioc3_remove(struct pci_dev *pdev)
-{
- int id;
- struct ioc3_driver_data *idd;
-
- idd = pci_get_drvdata(pdev);
-
- /* Remove this IOC3 from all submodules */
- for(id=0;id<IOC3_MAX_SUBMODULES;id++)
- if(idd->active[id]) {
- if(ioc3_submodules[id] && ioc3_submodules[id]->remove)
- if(ioc3_submodules[id]->remove(ioc3_submodules[id],
- idd))
- printk(KERN_WARNING
- "%s: IOC3 submodule 0x%s remove failed "
- "for pci_dev %s.\n",
- __func__,
- module_name(ioc3_submodules[id]->owner),
- pci_name(pdev));
- idd->active[id] = 0;
- }
-
- /* Clear and disable all IRQs */
- write_ireg(idd, ~0, IOC3_W_IEC);
- writel(~0, &idd->vma->sio_ir);
-
- /* Release resources */
- free_irq(idd->irq_io, (void *)idd);
- if(idd->dual_irq)
- free_irq(idd->irq_eth, (void *)idd);
- iounmap(idd->vma);
- release_mem_region(idd->pma, IOC3_PCI_SIZE);
-
- /* Disable IOC3 and relinquish */
- pci_disable_device(pdev);
-
- /* Remove and free driver data */
- down_write(&ioc3_devices_rwsem);
- list_del(&idd->list);
- up_write(&ioc3_devices_rwsem);
- kfree(idd);
-}
-
-static struct pci_device_id ioc3_id_table[] = {
- {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID},
- {0}
-};
-
-static struct pci_driver ioc3_driver = {
- .name = "IOC3",
- .id_table = ioc3_id_table,
- .probe = ioc3_probe,
- .remove = ioc3_remove,
-};
-
-MODULE_DEVICE_TABLE(pci, ioc3_id_table);
-
-/*********************
- * Module management *
- *********************/
-
-/* Module load */
-static int __init ioc3_init(void)
-{
- if (ia64_platform_is("sn2"))
- return pci_register_driver(&ioc3_driver);
- return -ENODEV;
-}
-
-/* Module unload */
-static void __exit ioc3_exit(void)
-{
- pci_unregister_driver(&ioc3_driver);
-}
-
-module_init(ioc3_init);
-module_exit(ioc3_exit);
-
-MODULE_AUTHOR("Stanislaw Skowronek <skylark@linux-mips.org>");
-MODULE_DESCRIPTION("PCI driver for SGI IOC3");
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL_GPL(ioc3_register_submodule);
-EXPORT_SYMBOL_GPL(ioc3_unregister_submodule);
-EXPORT_SYMBOL_GPL(ioc3_ack);
-EXPORT_SYMBOL_GPL(ioc3_gpcr_set);
-EXPORT_SYMBOL_GPL(ioc3_disable);
-EXPORT_SYMBOL_GPL(ioc3_enable);
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 23bfb8ef4fdb..bc2c912949bd 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -37,6 +37,17 @@ config MESON_GX_PM_DOMAINS
Say yes to expose Amlogic Meson GX Power Domains as
Generic Power Domains.
+config MESON_EE_PM_DOMAINS
+ bool "Amlogic Meson Everything-Else Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson Everything-Else Power Domains as
+ Generic Power Domains.
+
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index f2e4ed171297..de79d044b545 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_MESON_CLK_MEASURE) += meson-clk-measure.o
obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
+obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 19d4cbc93a17..0fa47d77577d 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -11,6 +11,8 @@
#include <linux/debugfs.h>
#include <linux/regmap.h>
+static DEFINE_MUTEX(measure_lock);
+
#define MSR_CLK_DUTY 0x0
#define MSR_CLK_REG0 0x4
#define MSR_CLK_REG1 0x8
@@ -322,6 +324,8 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(84, "co_tx"),
CLK_MSR_ID(89, "hdmi_todig"),
CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(91, "sys_cpub_div16"),
+ CLK_MSR_ID(92, "sys_pll_cpub_div16"),
CLK_MSR_ID(94, "eth_phy_rx"),
CLK_MSR_ID(95, "eth_phy_pll"),
CLK_MSR_ID(96, "vpu_b"),
@@ -353,6 +357,136 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(122, "audio_pdm_dclk"),
};
+static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(40, "arm_ring_osc_out_4"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(60, "arm_ring_osc_out_5"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "arm_ring_osc_out_6"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(76, "arm_ring_osc_out_7"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(85, "arm_ring_osc_out_8"),
+ CLK_MSR_ID(86, "arm_ring_osc_out_9"),
+ CLK_MSR_ID(87, "mipi_dsi_phy"),
+ CLK_MSR_ID(88, "cis2_adapt"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(91, "nna_core"),
+ CLK_MSR_ID(92, "nna_axi"),
+ CLK_MSR_ID(93, "vad"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "arm_ring_osc_out_10"),
+ CLK_MSR_ID(100, "arm_ring_osc_out_11"),
+ CLK_MSR_ID(101, "arm_ring_osc_out_12"),
+ CLK_MSR_ID(102, "arm_ring_osc_out_13"),
+ CLK_MSR_ID(103, "arm_ring_osc_out_14"),
+ CLK_MSR_ID(104, "arm_ring_osc_out_15"),
+ CLK_MSR_ID(105, "arm_ring_osc_out_16"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+ CLK_MSR_ID(123, "audio_resampled"),
+ CLK_MSR_ID(124, "earcrx_pll"),
+ CLK_MSR_ID(125, "earcrx_pll_test"),
+ CLK_MSR_ID(126, "csi_phy0"),
+ CLK_MSR_ID(127, "csi2_data"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int duration)
{
@@ -360,6 +494,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int val;
int ret;
+ ret = mutex_lock_interruptible(&measure_lock);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, MSR_CLK_REG0, 0);
/* Set measurement duration */
@@ -377,8 +515,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
val, !(val & MSR_BUSY), 10, 10000);
- if (ret)
+ if (ret) {
+ mutex_unlock(&measure_lock);
return ret;
+ }
/* Disable */
regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
@@ -386,6 +526,8 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
/* Get the value in multiple of gate time counts */
regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+ mutex_unlock(&measure_lock);
+
if (val >= MSR_VAL_MASK)
return -EINVAL;
@@ -533,6 +675,10 @@ static const struct of_device_id meson_msr_match_table[] = {
.compatible = "amlogic,meson-g12a-clk-measure",
.data = (void *)clk_msr_g12a,
},
+ {
+ .compatible = "amlogic,meson-sm1-clk-measure",
+ .data = (void *)clk_msr_sm1,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
new file mode 100644
index 000000000000..5823f5b67d16
--- /dev/null
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/reset-controller.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <dt-bindings/power/meson-g12a-power.h>
+#include <dt-bindings/power/meson-sm1-power.h>
+
+/* AO Offsets */
+
+#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+#define AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+
+/* HHI Offsets */
+
+#define HHI_MEM_PD_REG0 (0x40 << 2)
+#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
+#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+#define HHI_VPU_MEM_PD_REG3 (0x43 << 2)
+#define HHI_VPU_MEM_PD_REG4 (0x44 << 2)
+#define HHI_AUDIO_MEM_PD_REG0 (0x45 << 2)
+#define HHI_NANOQ_MEM_PD_REG0 (0x46 << 2)
+#define HHI_NANOQ_MEM_PD_REG1 (0x47 << 2)
+#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
+
+struct meson_ee_pwrc;
+struct meson_ee_pwrc_domain;
+
+struct meson_ee_pwrc_mem_domain {
+ unsigned int reg;
+ unsigned int mask;
+};
+
+struct meson_ee_pwrc_top_domain {
+ unsigned int sleep_reg;
+ unsigned int sleep_mask;
+ unsigned int iso_reg;
+ unsigned int iso_mask;
+};
+
+struct meson_ee_pwrc_domain_desc {
+ char *name;
+ unsigned int reset_names_count;
+ unsigned int clk_names_count;
+ struct meson_ee_pwrc_top_domain *top_pd;
+ unsigned int mem_pd_count;
+ struct meson_ee_pwrc_mem_domain *mem_pd;
+ bool (*get_power)(struct meson_ee_pwrc_domain *pwrc_domain);
+};
+
+struct meson_ee_pwrc_domain_data {
+ unsigned int count;
+ struct meson_ee_pwrc_domain_desc *domains;
+};
+
+/* TOP Power Domains */
+
+static struct meson_ee_pwrc_top_domain g12a_pwrc_vpu = {
+ .sleep_reg = AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(8),
+ .iso_reg = AO_RTI_GEN_PWR_SLEEP0,
+ .iso_mask = BIT(9),
+};
+
+#define SM1_EE_PD(__bit) \
+ { \
+ .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, \
+ .sleep_mask = BIT(__bit), \
+ .iso_reg = AO_RTI_GEN_PWR_ISO0, \
+ .iso_mask = BIT(__bit), \
+ }
+
+static struct meson_ee_pwrc_top_domain sm1_pwrc_vpu = SM1_EE_PD(8);
+static struct meson_ee_pwrc_top_domain sm1_pwrc_nna = SM1_EE_PD(16);
+static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17);
+static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18);
+static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
+
+/* Memory PD Domains */
+
+#define VPU_MEMPD(__reg) \
+ { __reg, GENMASK(1, 0) }, \
+ { __reg, GENMASK(3, 2) }, \
+ { __reg, GENMASK(5, 4) }, \
+ { __reg, GENMASK(7, 6) }, \
+ { __reg, GENMASK(9, 8) }, \
+ { __reg, GENMASK(11, 10) }, \
+ { __reg, GENMASK(13, 12) }, \
+ { __reg, GENMASK(15, 14) }, \
+ { __reg, GENMASK(17, 16) }, \
+ { __reg, GENMASK(19, 18) }, \
+ { __reg, GENMASK(21, 20) }, \
+ { __reg, GENMASK(23, 22) }, \
+ { __reg, GENMASK(25, 24) }, \
+ { __reg, GENMASK(27, 26) }, \
+ { __reg, GENMASK(29, 28) }, \
+ { __reg, GENMASK(31, 30) }
+
+#define VPU_HHI_MEMPD(__reg) \
+ { __reg, BIT(8) }, \
+ { __reg, BIT(9) }, \
+ { __reg, BIT(10) }, \
+ { __reg, BIT(11) }, \
+ { __reg, BIT(12) }, \
+ { __reg, BIT(13) }, \
+ { __reg, BIT(14) }, \
+ { __reg, BIT(15) }
+
+static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
+static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_eth[] = {
+ { HHI_MEM_PD_REG0, GENMASK(3, 2) },
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG3),
+ { HHI_VPU_MEM_PD_REG4, GENMASK(1, 0) },
+ { HHI_VPU_MEM_PD_REG4, GENMASK(3, 2) },
+ { HHI_VPU_MEM_PD_REG4, GENMASK(5, 4) },
+ { HHI_VPU_MEM_PD_REG4, GENMASK(7, 6) },
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_nna[] = {
+ { HHI_NANOQ_MEM_PD_REG0, 0xff },
+ { HHI_NANOQ_MEM_PD_REG1, 0xff },
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_usb[] = {
+ { HHI_MEM_PD_REG0, GENMASK(31, 30) },
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_pcie[] = {
+ { HHI_MEM_PD_REG0, GENMASK(29, 26) },
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = {
+ { HHI_MEM_PD_REG0, GENMASK(25, 18) },
+};
+
+static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+ { HHI_MEM_PD_REG0, GENMASK(5, 4) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(3, 2) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(5, 4) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(7, 6) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(13, 12) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(15, 14) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(17, 16) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(19, 18) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(21, 20) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(23, 22) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(25, 24) },
+ { HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) },
+};
+
+#define VPU_PD(__name, __top_pd, __mem, __get_power, __resets, __clks) \
+ { \
+ .name = __name, \
+ .reset_names_count = __resets, \
+ .clk_names_count = __clks, \
+ .top_pd = __top_pd, \
+ .mem_pd_count = ARRAY_SIZE(__mem), \
+ .mem_pd = __mem, \
+ .get_power = __get_power, \
+ }
+
+#define TOP_PD(__name, __top_pd, __mem, __get_power) \
+ { \
+ .name = __name, \
+ .top_pd = __top_pd, \
+ .mem_pd_count = ARRAY_SIZE(__mem), \
+ .mem_pd = __mem, \
+ .get_power = __get_power, \
+ }
+
+#define MEM_PD(__name, __mem) \
+ TOP_PD(__name, NULL, __mem, NULL)
+
+static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
+
+static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
+ [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &g12a_pwrc_vpu, g12a_pwrc_mem_vpu,
+ pwrc_ee_get_power, 11, 2),
+ [PWRC_G12A_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
+ [PWRC_SM1_VPU_ID] = VPU_PD("VPU", &sm1_pwrc_vpu, sm1_pwrc_mem_vpu,
+ pwrc_ee_get_power, 11, 2),
+ [PWRC_SM1_NNA_ID] = TOP_PD("NNA", &sm1_pwrc_nna, sm1_pwrc_mem_nna,
+ pwrc_ee_get_power),
+ [PWRC_SM1_USB_ID] = TOP_PD("USB", &sm1_pwrc_usb, sm1_pwrc_mem_usb,
+ pwrc_ee_get_power),
+ [PWRC_SM1_PCIE_ID] = TOP_PD("PCI", &sm1_pwrc_pci, sm1_pwrc_mem_pcie,
+ pwrc_ee_get_power),
+ [PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
+ pwrc_ee_get_power),
+ [PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
+ [PWRC_SM1_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+};
+
+struct meson_ee_pwrc_domain {
+ struct generic_pm_domain base;
+ bool enabled;
+ struct meson_ee_pwrc *pwrc;
+ struct meson_ee_pwrc_domain_desc desc;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rstc;
+ int num_rstc;
+};
+
+struct meson_ee_pwrc {
+ struct regmap *regmap_ao;
+ struct regmap *regmap_hhi;
+ struct meson_ee_pwrc_domain *domains;
+ struct genpd_onecell_data xlate;
+};
+
+static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain)
+{
+ u32 reg;
+
+ regmap_read(pwrc_domain->pwrc->regmap_ao,
+ pwrc_domain->desc.top_pd->sleep_reg, &reg);
+
+ return (reg & pwrc_domain->desc.top_pd->sleep_mask);
+}
+
+static int meson_ee_pwrc_off(struct generic_pm_domain *domain)
+{
+ struct meson_ee_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_ee_pwrc_domain, base);
+ int i;
+
+ if (pwrc_domain->desc.top_pd)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
+ pwrc_domain->desc.top_pd->sleep_reg,
+ pwrc_domain->desc.top_pd->sleep_mask,
+ pwrc_domain->desc.top_pd->sleep_mask);
+ udelay(20);
+
+ for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_hhi,
+ pwrc_domain->desc.mem_pd[i].reg,
+ pwrc_domain->desc.mem_pd[i].mask,
+ pwrc_domain->desc.mem_pd[i].mask);
+
+ udelay(20);
+
+ if (pwrc_domain->desc.top_pd)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
+ pwrc_domain->desc.top_pd->iso_reg,
+ pwrc_domain->desc.top_pd->iso_mask,
+ pwrc_domain->desc.top_pd->iso_mask);
+
+ if (pwrc_domain->num_clks) {
+ msleep(20);
+ clk_bulk_disable_unprepare(pwrc_domain->num_clks,
+ pwrc_domain->clks);
+ }
+
+ return 0;
+}
+
+static int meson_ee_pwrc_on(struct generic_pm_domain *domain)
+{
+ struct meson_ee_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_ee_pwrc_domain, base);
+ int i, ret;
+
+ if (pwrc_domain->desc.top_pd)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
+ pwrc_domain->desc.top_pd->sleep_reg,
+ pwrc_domain->desc.top_pd->sleep_mask, 0);
+ udelay(20);
+
+ for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_hhi,
+ pwrc_domain->desc.mem_pd[i].reg,
+ pwrc_domain->desc.mem_pd[i].mask, 0);
+
+ udelay(20);
+
+ ret = reset_control_assert(pwrc_domain->rstc);
+ if (ret)
+ return ret;
+
+ if (pwrc_domain->desc.top_pd)
+ regmap_update_bits(pwrc_domain->pwrc->regmap_ao,
+ pwrc_domain->desc.top_pd->iso_reg,
+ pwrc_domain->desc.top_pd->iso_mask, 0);
+
+ ret = reset_control_deassert(pwrc_domain->rstc);
+ if (ret)
+ return ret;
+
+ return clk_bulk_prepare_enable(pwrc_domain->num_clks,
+ pwrc_domain->clks);
+}
+
+static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
+ struct meson_ee_pwrc *pwrc,
+ struct meson_ee_pwrc_domain *dom)
+{
+ dom->pwrc = pwrc;
+ dom->num_rstc = dom->desc.reset_names_count;
+ dom->num_clks = dom->desc.clk_names_count;
+
+ if (dom->num_rstc) {
+ int count = reset_control_get_count(&pdev->dev);
+
+ if (count != dom->num_rstc)
+ dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n",
+ count, dom->desc.name);
+
+ dom->rstc = devm_reset_control_array_get(&pdev->dev, false,
+ false);
+ if (IS_ERR(dom->rstc))
+ return PTR_ERR(dom->rstc);
+ }
+
+ if (dom->num_clks) {
+ int ret = devm_clk_bulk_get_all(&pdev->dev, &dom->clks);
+ if (ret < 0)
+ return ret;
+
+ if (dom->num_clks != ret) {
+ dev_warn(&pdev->dev, "Invalid clocks count %d for domain %s\n",
+ ret, dom->desc.name);
+ dom->num_clks = ret;
+ }
+ }
+
+ dom->base.name = dom->desc.name;
+ dom->base.power_on = meson_ee_pwrc_on;
+ dom->base.power_off = meson_ee_pwrc_off;
+
+ /*
+ * TOFIX: This is a special case for the VPU power domain, which may
+ * already have been enabled by the bootloader. In that case the VPU
+ * pipeline may be functional but no driver may ever attach to this
+ * power domain, and if the domain is disabled it could
+ * cause system errors. This is why the pm_domain_always_on_gov
+ * is used here.
+ * For the same reason, the clocks should be enabled in case
+ * we need to power the domain off, otherwise the internal clocks
+ * prepare/enable counters won't be in sync.
+ */
+ if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) {
+ int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks);
+ if (ret)
+ return ret;
+
+ pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false);
+ } else
+ pm_genpd_init(&dom->base, NULL,
+ (dom->desc.get_power ?
+ dom->desc.get_power(dom) : true));
+
+ return 0;
+}
+
+static int meson_ee_pwrc_probe(struct platform_device *pdev)
+{
+ const struct meson_ee_pwrc_domain_data *match;
+ struct regmap *regmap_ao, *regmap_hhi;
+ struct meson_ee_pwrc *pwrc;
+ int i, ret;
+
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->xlate.domains),
+ GFP_KERNEL);
+ if (!pwrc->xlate.domains)
+ return -ENOMEM;
+
+ pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->domains), GFP_KERNEL);
+ if (!pwrc->domains)
+ return -ENOMEM;
+
+ pwrc->xlate.num_domains = match->count;
+
+ regmap_hhi = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ if (IS_ERR(regmap_hhi)) {
+ dev_err(&pdev->dev, "failed to get HHI regmap\n");
+ return PTR_ERR(regmap_hhi);
+ }
+
+ regmap_ao = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "amlogic,ao-sysctrl");
+ if (IS_ERR(regmap_ao)) {
+ dev_err(&pdev->dev, "failed to get AO regmap\n");
+ return PTR_ERR(regmap_ao);
+ }
+
+ pwrc->regmap_ao = regmap_ao;
+ pwrc->regmap_hhi = regmap_hhi;
+
+ platform_set_drvdata(pdev, pwrc);
+
+ for (i = 0 ; i < match->count ; ++i) {
+ struct meson_ee_pwrc_domain *dom = &pwrc->domains[i];
+
+ memcpy(&dom->desc, &match->domains[i], sizeof(dom->desc));
+
+ ret = meson_ee_pwrc_init_domain(pdev, pwrc, dom);
+ if (ret)
+ return ret;
+
+ pwrc->xlate.domains[i] = &dom->base;
+ }
+
+ of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
+
+ return 0;
+}
+
+static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
+{
+ struct meson_ee_pwrc *pwrc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0 ; i < pwrc->xlate.num_domains ; ++i) {
+ struct meson_ee_pwrc_domain *dom = &pwrc->domains[i];
+
+ if (dom->desc.get_power && !dom->desc.get_power(dom))
+ meson_ee_pwrc_off(&dom->base);
+ }
+}
+
+static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
+ .count = ARRAY_SIZE(g12a_pwrc_domains),
+ .domains = g12a_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
+ .count = ARRAY_SIZE(sm1_pwrc_domains),
+ .domains = sm1_pwrc_domains,
+};
+
+static const struct of_device_id meson_ee_pwrc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-g12a-pwrc",
+ .data = &meson_ee_g12a_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson-sm1-pwrc",
+ .data = &meson_ee_sm1_pwrc_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_ee_pwrc_driver = {
+ .probe = meson_ee_pwrc_probe,
+ .shutdown = meson_ee_pwrc_shutdown,
+ .driver = {
+ .name = "meson_ee_pwrc",
+ .of_match_table = meson_ee_pwrc_match_table,
+ },
+};
+builtin_platform_driver(meson_ee_pwrc_driver);
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index bca34954518e..6d0d04f163cb 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -39,6 +39,7 @@ static const struct meson_gx_soc_id {
{ "TXHD", 0x27 },
{ "G12A", 0x28 },
{ "G12B", 0x29 },
+ { "SM1", 0x2b },
};
static const struct meson_gx_package_id {
@@ -65,6 +66,8 @@ static const struct meson_gx_package_id {
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
{ "S922X", 0x29, 0x40, 0xf0 },
+ { "A311D", 0x29, 0x10, 0xf0 },
+ { "S905X3", 0x2b, 0x5, 0xf },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
@@ -138,8 +141,10 @@ static int __init meson_gx_socinfo_init(void)
}
/* check if chip-id is available */
- if (!of_property_read_bool(np, "amlogic,has-chip-id"))
+ if (!of_property_read_bool(np, "amlogic,has-chip-id")) {
+ of_node_put(np);
return -ENODEV;
+ }
/* node should be a syscon */
regmap = syscon_node_to_regmap(np);
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
index 9168d8ddc932..27243f706f37 100644
--- a/drivers/soc/fsl/dpaa2-console.c
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -73,7 +73,7 @@ static u64 get_mc_fw_base_address(void)
mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
if (!mcfbaregs) {
- pr_err("could not map MC Firmaware Base registers\n");
+ pr_err("could not map MC Firmware Base registers\n");
return 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index b9539ef2c3cd..518a8e081b49 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -305,8 +305,6 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
list_del(&ctx->node);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
- if (dev)
- device_link_remove(dev, d->dev);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 1ef8068c8dd3..34810f9bb2ee 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -102,6 +102,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
.svr = 0x87360000,
.mask = 0xff3f0000,
},
+ /* Die: LS1028A, SoC: LS1028A */
+ { .die = "LS1028A",
+ .svr = 0x870b0000,
+ .mask = 0xff3f0000,
+ },
{ },
};
@@ -224,6 +229,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1012a-dcfg", },
{ .compatible = "fsl,ls1046a-dcfg", },
{ .compatible = "fsl,lx2160a-dcfg", },
+ { .compatible = "fsl,ls1028a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index f84ab596bde8..f4fb527d8301 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -635,30 +635,31 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
return 0;
}
-static int bm_shutdown_pool(u32 bpid)
+int bm_shutdown_pool(u32 bpid)
{
+ int err = 0;
struct bm_mc_command *bm_cmd;
union bm_mc_result *bm_res;
+
+ struct bman_portal *p = get_affine_portal();
while (1) {
- struct bman_portal *p = get_affine_portal();
/* Acquire buffers until empty */
bm_cmd = bm_mc_start(&p->p);
bm_cmd->bpid = bpid;
bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
if (!bm_mc_result_timeout(&p->p, &bm_res)) {
- put_affine_portal();
pr_crit("BMan Acquire Command timedout\n");
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto done;
}
if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
- put_affine_portal();
/* Pool is empty */
- return 0;
+ goto done;
}
- put_affine_portal();
}
-
+done:
+ put_affine_portal();
return 0;
}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 7c3cc968053c..cb24a08be084 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -97,17 +97,40 @@ static void bm_get_version(u16 *id, u8 *major, u8 *minor)
/* signal transactions for FBPRs with higher priority */
#define FBPR_AR_RPRIO_HI BIT(30)
-static void bm_set_memory(u64 ba, u32 size)
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+
+static int bm_set_memory(u64 ba, u32 size)
{
+ u32 bar, bare;
u32 exp = ilog2(size);
/* choke if size isn't within range */
DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
is_power_of_2(size));
/* choke if '[e]ba' has lower-alignment than 'size' */
DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if BMan has already been initialized */
+ bar = bm_ccsr_in(REG_FBPR_BAR);
+ if (bar) {
+ /* Make sure ba == what was programmed */
+ bare = bm_ccsr_in(REG_FBPR_BARE);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ pr_info("BMan BAR already configured\n");
+ __bman_requires_cleanup = 1;
+ return 1;
+ }
+
bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
bm_ccsr_out(REG_FBPR_AR, exp - 1);
+ return 0;
}
/*
@@ -120,7 +143,6 @@ static void bm_set_memory(u64 ba, u32 size)
*/
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
-static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem)
{
@@ -173,6 +195,16 @@ int bman_is_probed(void)
}
EXPORT_SYMBOL_GPL(bman_is_probed);
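+/*
+ * Non-zero when BMan was left initialized by a previous kernel (e.g. after
+ * kexec) and its buffer pools still need to be drained.
+ */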
+int bman_requires_cleanup(void)
+{
+ return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+ __bman_requires_cleanup = 0;
+}
+
static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index cf4f10d6f590..923c44063a9a 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -100,7 +100,7 @@ static int bman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct bm_portal_config *pcfg;
struct resource *addr_phys[2];
- int irq, cpu, err;
+ int irq, cpu, err, i;
err = bman_is_probed();
if (!err)
@@ -135,10 +135,8 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "Can't get %pOF IRQ'\n", node);
+ if (irq <= 0)
goto err_ioremap1;
- }
pcfg->irq = irq;
pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -178,6 +176,22 @@ static int bman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
bman_offline_cpu(cpu);
+ if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+ /*
+ * BMan wasn't reset prior to boot (kexec, for example).
+ * Empty all the buffer pools so they are back in their reset state.
+ */
+ for (i = 0; i < BM_POOL_MAX; i++) {
+ err = bm_shutdown_pool(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown bpool %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ bman_done_cleanup();
+ }
+
return 0;
err_portal_init:
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
index 751ce90383b7..aa3981e04965 100644
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -76,3 +76,8 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index e6d48dccb8d5..9dd8bb571dbc 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -37,42 +37,53 @@
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
size_t *size)
{
- int ret;
struct device_node *mem_node;
- u64 size64;
+ struct reserved_mem *rmem;
+ struct property *prop;
+ int len, err;
+ __be32 *res_array;
- ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
- if (ret) {
- dev_err(dev,
- "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
- idx, ret);
- return -ENODEV;
- }
- mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (mem_node) {
- ret = of_property_read_u64(mem_node, "size", &size64);
- if (ret) {
- dev_err(dev, "of_address_to_resource fails 0x%x\n",
- ret);
- return -ENODEV;
- }
- *size = size64;
- } else {
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!mem_node) {
dev_err(dev, "No memory-region found for index %d\n", idx);
return -ENODEV;
}
- if (!dma_alloc_coherent(dev, *size, addr, 0)) {
- dev_err(dev, "DMA Alloc memory failed\n");
+ rmem = of_reserved_mem_lookup(mem_node);
+ if (!rmem) {
+ dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
return -ENODEV;
}
+ *addr = rmem->base;
+ *size = rmem->size;
/*
- * Disassociate the reserved memory area from the device
- * because a device can only have one DMA memory area. This
- * should be fine since the memory is allocated and initialized
- * and only ever accessed by the QBMan device from now on
+ * Check if the reg property exists - if not, insert it so that
+ * upon kexec() the same memory region address will be preserved.
+ * This is needed because QBMan HW does not allow the base address/
+ * size to be modified once set.
*/
- of_reserved_mem_device_release(dev);
+ prop = of_find_property(mem_node, "reg", &len);
+ if (!prop) {
+ prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+ prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
+ GFP_KERNEL);
+ if (!prop->value)
+ return -ENOMEM;
+ res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+ res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+ res_array[2] = cpu_to_be32(upper_32_bits(*size));
+ res_array[3] = cpu_to_be32(lower_32_bits(*size));
+ prop->length = sizeof(__be32) * 4;
+ prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+ if (!prop->name)
+ return -ENOMEM;
+ err = of_add_property(mem_node, prop);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 636f83f781f5..bf68d86d80ee 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1018,6 +1018,20 @@ static inline void put_affine_portal(void)
put_cpu_var(qman_affine_portal);
}
+
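+/*
+ * Find the affine portal servicing the given dedicated channel;
+ * returns NULL if no affine portal matches.
+ */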
+static inline struct qman_portal *get_portal_for_channel(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i] &&
+ affine_portals[i]->config->channel == channel)
+ return affine_portals[i];
+ }
+
+ return NULL;
+}
+
static struct workqueue_struct *qm_portal_wq;
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
@@ -1070,6 +1084,20 @@ int qman_wq_alloc(void)
return 0;
}
+
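+/* Clear any stale interrupt status and uninhibit interrupts on all affine portals */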
+void qman_enable_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i]) {
+ qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
+ qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
+ }
+
+ }
+}
+
/*
* This is what everything can wait on, even if it migrates to a different cpu
* to the one whose affine portal it is waiting on.
@@ -1164,6 +1192,7 @@ static int drain_mr_fqrni(struct qm_portal *p)
{
const union qm_mr_entry *msg;
loop:
+ qm_mr_pvb_update(p);
msg = qm_mr_current(p);
if (!msg) {
/*
@@ -1180,7 +1209,8 @@ loop:
* entries well before the ring has been fully consumed, so
* we're being *really* paranoid here.
*/
- msleep(1);
+ mdelay(1);
+ qm_mr_pvb_update(p);
msg = qm_mr_current(p);
if (!msg)
return 0;
@@ -1267,8 +1297,8 @@ static int qman_create_portal(struct qman_portal *portal,
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
qm_out(p, QM_REG_IER, 0);
- qm_out(p, QM_REG_ISR, 0xffffffff);
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ qm_out(p, QM_REG_IIR, 1);
if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
dev_err(c->dev, "request_irq() failed\n");
goto fail_irq;
@@ -1288,7 +1318,7 @@ static int qman_create_portal(struct qman_portal *portal,
isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
qm_out(p, QM_REG_ISDR, isdr);
if (qm_dqrr_current(p)) {
- dev_err(c->dev, "DQRR unclean\n");
+ dev_dbg(c->dev, "DQRR unclean\n");
qm_dqrr_cdc_consume_n(p, 0xffff);
}
if (qm_mr_current(p) && drain_mr_fqrni(p)) {
@@ -1301,8 +1331,10 @@ static int qman_create_portal(struct qman_portal *portal,
}
/* Success */
portal->config = c;
+ qm_out(p, QM_REG_ISR, 0xffffffff);
qm_out(p, QM_REG_ISDR, 0);
- qm_out(p, QM_REG_IIR, 0);
+ if (!qman_requires_cleanup())
+ qm_out(p, QM_REG_IIR, 0);
/* Write a sane SDQCR */
qm_dqrr_sdqcr_set(p, portal->sdqcr);
return 0;
@@ -2581,9 +2613,9 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
#define qm_dqrr_drain_nomatch(p) \
_qm_dqrr_consume_and_match(p, 0, 0, false)
-static int qman_shutdown_fq(u32 fqid)
+int qman_shutdown_fq(u32 fqid)
{
- struct qman_portal *p;
+ struct qman_portal *p, *channel_portal;
struct device *dev;
union qm_mc_command *mcc;
union qm_mc_result *mcr;
@@ -2623,17 +2655,28 @@ static int qman_shutdown_fq(u32 fqid)
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+ if (channel < qm_channel_pool1) {
+ channel_portal = get_portal_for_channel(channel);
+ if (channel_portal == NULL) {
+ dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
+ channel);
+ ret = -EIO;
+ goto out;
+ }
+ } else
+ channel_portal = p;
+
switch (state) {
case QM_MCR_NP_STATE_TEN_SCHED:
case QM_MCR_NP_STATE_TRU_SCHED:
case QM_MCR_NP_STATE_ACTIVE:
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
- mcc = qm_mc_start(&p->p);
+ mcc = qm_mc_start(&channel_portal->p);
qm_fqid_set(&mcc->fq, fqid);
- qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
- if (!qm_mc_result_timeout(&p->p, &mcr)) {
- dev_err(dev, "QUERYFQ_NP timeout\n");
+ qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
+ dev_err(dev, "ALTER_RETIRE timeout\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -2641,6 +2684,9 @@ static int qman_shutdown_fq(u32 fqid)
QM_MCR_VERB_ALTER_RETIRE);
res = mcr->result; /* Make a copy as we reuse MCR below */
+ if (res == QM_MCR_RESULT_OK)
+ drain_mr_fqrni(&channel_portal->p);
+
if (res == QM_MCR_RESULT_PENDING) {
/*
* Need to wait for the FQRN in the message ring, which
@@ -2670,21 +2716,25 @@ static int qman_shutdown_fq(u32 fqid)
}
/* Set the sdqcr to drain this channel */
if (channel < qm_channel_pool1)
- qm_dqrr_sdqcr_set(&p->p,
+ qm_dqrr_sdqcr_set(&channel_portal->p,
QM_SDQCR_TYPE_ACTIVE |
QM_SDQCR_CHANNELS_DEDICATED);
else
- qm_dqrr_sdqcr_set(&p->p,
+ qm_dqrr_sdqcr_set(&channel_portal->p,
QM_SDQCR_TYPE_ACTIVE |
QM_SDQCR_CHANNELS_POOL_CONV
(channel));
do {
/* Keep draining DQRR while checking the MR*/
- qm_dqrr_drain_nomatch(&p->p);
+ qm_dqrr_drain_nomatch(&channel_portal->p);
/* Process message ring too */
- found_fqrn = qm_mr_drain(&p->p, FQRN);
+ found_fqrn = qm_mr_drain(&channel_portal->p,
+ FQRN);
cpu_relax();
} while (!found_fqrn);
+ /* Restore SDQCR */
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ channel_portal->sdqcr);
}
if (res != QM_MCR_RESULT_OK &&
@@ -2715,9 +2765,8 @@ static int qman_shutdown_fq(u32 fqid)
* Wait for a dequeue and process the dequeues,
* making sure to empty the ring completely
*/
- } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
}
- qm_dqrr_sdqcr_set(&p->p, 0);
while (!orl_empty) {
/* Wait for the ORL to have been completely drained */
@@ -2754,7 +2803,7 @@ static int qman_shutdown_fq(u32 fqid)
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
QM_MCR_VERB_ALTER_OOS);
- if (mcr->result) {
+ if (mcr->result != QM_MCR_RESULT_OK) {
dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
fqid, mcr->result);
ret = -EIO;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index a6bb43007d03..157659fd033a 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -274,6 +274,7 @@ static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
static int __qman_probed;
+static int __qman_requires_cleanup;
static inline u32 qm_ccsr_in(u32 offset)
{
@@ -340,19 +341,55 @@ static void qm_get_version(u16 *id, u8 *major, u8 *minor)
}
#define PFDR_AR_EN BIT(31)
-static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
+ void *ptr;
u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
u32 exp = ilog2(size);
+ u32 bar, bare;
/* choke if size isn't within range */
DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
is_power_of_2(size));
/* choke if 'ba' has lower-alignment than 'size' */
DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if QMan has already been initialized */
+ bar = qm_ccsr_in(offset + REG_offset_BAR);
+ if (bar) {
+ /* Make sure ba == what was programmed */
+ bare = qm_ccsr_in(offset);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ __qman_requires_cleanup = 1;
+ /* Return 1 to indicate memory was previously programmed */
+ return 1;
+ }
+ /* Need to temporarily map the area to make sure it is zeroed */
+ ptr = memremap(ba, size, MEMREMAP_WB);
+ if (!ptr) {
+ pr_crit("memremap() of QMan private memory failed\n");
+ return -ENOMEM;
+ }
+ memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+ /*
+ * PPC doesn't appear to flush the cache on memunmap(), but the
+ * cache must be flushed since QMan does non-coherent accesses
+ * to this memory.
+ */
+ flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
+#endif
+ memunmap(ptr);
+
qm_ccsr_out(offset, upper_32_bits(ba));
qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+ return 0;
}
static void qm_set_pfdr_threshold(u32 th, u8 k)
@@ -455,7 +492,7 @@ RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
#endif
-static unsigned int qm_get_fqid_maxcnt(void)
+unsigned int qm_get_fqid_maxcnt(void)
{
return fqd_sz / 64;
}
@@ -571,12 +608,19 @@ static int qman_init_ccsr(struct device *dev)
int i, err;
/* FQD memory */
- qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ if (err < 0)
+ return err;
/* PFDR memory */
- qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
- err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
- if (err)
+ err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ if (err < 0)
return err;
+ /* Only initialize PFDRs if the QMan was not initialized before */
+ if (err == 0) {
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ }
/* thresholds */
qm_set_pfdr_threshold(512, 64);
qm_set_sfdr_threshold(128);
@@ -693,6 +737,18 @@ int qman_is_probed(void)
}
EXPORT_SYMBOL_GPL(qman_is_probed);
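+/*
+ * Non-zero when QMan was left initialized by a previous kernel (e.g. after
+ * kexec) and its frame queues still need to be shut down.
+ */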
+int qman_requires_cleanup(void)
+{
+ return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+ qman_enable_irqs();
+ __qman_requires_cleanup = 0;
+}
+
+
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index e2186b681d87..5685b6706893 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -233,7 +233,7 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- int irq, cpu, err;
+ int irq, cpu, err, i;
u32 val;
err = qman_is_probed();
@@ -275,10 +275,8 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "Can't get %pOF IRQ\n", node);
+ if (irq <= 0)
goto err_ioremap1;
- }
pcfg->irq = irq;
pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -325,6 +323,22 @@ static int qman_portal_probe(struct platform_device *pdev)
if (!cpu_online(cpu))
qman_offline_cpu(cpu);
+ if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
+ /*
+ * QMan wasn't reset prior to boot (Kexec for example)
+ * Empty all the frame queues so they are in reset state
+ */
+ for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
+ err = qman_shutdown_fq(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown frame queue %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ qman_done_cleanup();
+ }
+
return 0;
err_portal_init:
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 04515718cfd9..fd1cf543fb81 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -272,3 +272,11 @@ extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
+
+unsigned int qm_get_fqid_maxcnt(void);
+
+int qman_shutdown_fq(u32 fqid);
+
+int qman_requires_cleanup(void);
+void qman_done_cleanup(void);
+void qman_enable_irqs(void);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 62c6ba17991a..417df7e19281 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -10,6 +10,7 @@
* General Purpose functions for the global management of the
* QUICC Engine (QE).
*/
+#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -39,29 +40,32 @@ static DEFINE_SPINLOCK(qe_lock);
DEFINE_SPINLOCK(cmxgcr_lock);
EXPORT_SYMBOL(cmxgcr_lock);
-/* QE snum state */
-enum qe_snum_state {
- QE_SNUM_STATE_USED,
- QE_SNUM_STATE_FREE
-};
-
-/* QE snum */
-struct qe_snum {
- u8 num;
- enum qe_snum_state state;
-};
-
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
struct qe_immap __iomem *qe_immr;
EXPORT_SYMBOL(qe_immr);
-static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM);
static unsigned int qe_num_of_snum;
static phys_addr_t qebase = -1;
+static struct device_node *qe_get_device_node(void)
+{
+ struct device_node *qe;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (qe)
+ return qe;
+ return of_find_node_by_type(NULL, "qe");
+}
+
static phys_addr_t get_qe_base(void)
{
struct device_node *qe;
@@ -71,12 +75,9 @@ static phys_addr_t get_qe_base(void)
if (qebase != -1)
return qebase;
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!qe) {
- qe = of_find_node_by_type(NULL, "qe");
- if (!qe)
- return qebase;
- }
+ qe = qe_get_device_node();
+ if (!qe)
+ return qebase;
ret = of_address_to_resource(qe, 0, &res);
if (!ret)
@@ -170,12 +171,9 @@ unsigned int qe_get_brg_clk(void)
if (brg_clk)
return brg_clk;
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!qe) {
- qe = of_find_node_by_type(NULL, "qe");
- if (!qe)
- return brg_clk;
- }
+ qe = qe_get_device_node();
+ if (!qe)
+ return brg_clk;
prop = of_get_property(qe, "brg-frequency", &size);
if (prop && size == sizeof(*prop))
@@ -281,7 +279,6 @@ EXPORT_SYMBOL(qe_clock_source);
*/
static void qe_snums_init(void)
{
- int i;
static const u8 snum_init_76[] = {
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
@@ -302,19 +299,39 @@ static void qe_snums_init(void)
0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
};
- static const u8 *snum_init;
+ struct device_node *qe;
+ const u8 *snum_init;
+ int i;
- qe_num_of_snum = qe_get_num_of_snums();
+ bitmap_zero(snum_state, QE_NUM_OF_SNUM);
+ qe_num_of_snum = 28; /* The default number of snum for threads is 28 */
+ qe = qe_get_device_node();
+ if (qe) {
+ i = of_property_read_variable_u8_array(qe, "fsl,qe-snums",
+ snums, 1, QE_NUM_OF_SNUM);
+ if (i > 0) {
+ of_node_put(qe);
+ qe_num_of_snum = i;
+ return;
+ }
+ /*
+ * Fall back to legacy binding of using the value of
+ * fsl,qe-num-snums to choose one of the static arrays
+ * above.
+ */
+ of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum);
+ of_node_put(qe);
+ }
- if (qe_num_of_snum == 76)
+ if (qe_num_of_snum == 76) {
snum_init = snum_init_76;
- else
+ } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) {
snum_init = snum_init_46;
-
- for (i = 0; i < qe_num_of_snum; i++) {
- snums[i].num = snum_init[i];
- snums[i].state = QE_SNUM_STATE_FREE;
+ } else {
+ pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum);
+ return;
}
+ memcpy(snums, snum_init, qe_num_of_snum);
}
int qe_get_snum(void)
@@ -324,12 +341,10 @@ int qe_get_snum(void)
int i;
spin_lock_irqsave(&qe_lock, flags);
- for (i = 0; i < qe_num_of_snum; i++) {
- if (snums[i].state == QE_SNUM_STATE_FREE) {
- snums[i].state = QE_SNUM_STATE_USED;
- snum = snums[i].num;
- break;
- }
+ i = find_first_zero_bit(snum_state, qe_num_of_snum);
+ if (i < qe_num_of_snum) {
+ set_bit(i, snum_state);
+ snum = snums[i];
}
spin_unlock_irqrestore(&qe_lock, flags);
@@ -339,14 +354,10 @@ EXPORT_SYMBOL(qe_get_snum);
void qe_put_snum(u8 snum)
{
- int i;
+ const u8 *p = memchr(snums, snum, qe_num_of_snum);
- for (i = 0; i < qe_num_of_snum; i++) {
- if (snums[i].num == snum) {
- snums[i].state = QE_SNUM_STATE_FREE;
- break;
- }
- }
+ if (p)
+ clear_bit(p - snums, snum_state);
}
EXPORT_SYMBOL(qe_put_snum);
@@ -419,7 +430,7 @@ static void qe_upload_microcode(const void *base,
/*
* Upload a microcode to the I-RAM at a specific address.
*
- * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
* uploading.
*
* Currently, only version 1 is supported, so the 'version' field must be
@@ -572,16 +583,9 @@ struct qe_firmware_info *qe_get_firmware_info(void)
initialized = 1;
- /*
- * Newer device trees have an "fsl,qe" compatible property for the QE
- * node, but we still need to support older device trees.
- */
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!qe) {
- qe = of_find_node_by_type(NULL, "qe");
- if (!qe)
- return NULL;
- }
+ qe = qe_get_device_node();
+ if (!qe)
+ return NULL;
/* Find the 'firmware' child node */
fw = of_get_child_by_name(qe, "firmware");
@@ -627,16 +631,9 @@ unsigned int qe_get_num_of_risc(void)
unsigned int num_of_risc = 0;
const u32 *prop;
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!qe) {
- /* Older devices trees did not have an "fsl,qe"
- * compatible property, so we need to look for
- * the QE node by name.
- */
- qe = of_find_node_by_type(NULL, "qe");
- if (!qe)
- return num_of_risc;
- }
+ qe = qe_get_device_node();
+ if (!qe)
+ return num_of_risc;
prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
if (prop && size == sizeof(*prop))
@@ -650,37 +647,7 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
unsigned int qe_get_num_of_snums(void)
{
- struct device_node *qe;
- int size;
- unsigned int num_of_snums;
- const u32 *prop;
-
- num_of_snums = 28; /* The default number of snum for threads is 28 */
- qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!qe) {
- /* Older devices trees did not have an "fsl,qe"
- * compatible property, so we need to look for
- * the QE node by name.
- */
- qe = of_find_node_by_type(NULL, "qe");
- if (!qe)
- return num_of_snums;
- }
-
- prop = of_get_property(qe, "fsl,qe-num-snums", &size);
- if (prop && size == sizeof(*prop)) {
- num_of_snums = *prop;
- if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
- /* No QE ever has fewer than 28 SNUMs */
- pr_err("QE: number of snum is invalid\n");
- of_node_put(qe);
- return -EINVAL;
- }
- }
-
- of_node_put(qe);
-
- return num_of_snums;
+ return qe_num_of_snum;
}
EXPORT_SYMBOL(qe_get_num_of_snums);
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 31b8d002d855..b0dffb06c05d 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -198,7 +198,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
err = regulator_disable(domain->regulator);
if (err)
dev_err(domain->dev,
- "failed to disable regulator: %d\n", ret);
+ "failed to disable regulator: %d\n", err);
/* Preserve earlier error code */
ret = ret ?: err;
}
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
index 676f612f6488..50831ebf126a 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -27,6 +27,40 @@ struct imx_sc_msg_misc_get_soc_id {
} data;
} __packed;
+struct imx_sc_msg_misc_get_soc_uid {
+ struct imx_sc_rpc_msg hdr;
+ u32 uid_low;
+ u32 uid_high;
+} __packed;
+
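+/*
+ * Query the 64-bit unique SoC ID from the SCU via the MISC unique-id RPC
+ * call and expose it through sysfs.
+ */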
+static ssize_t soc_uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct imx_sc_msg_misc_get_soc_uid msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ u64 soc_uid;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
+ hdr->size = 1;
+
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+ if (ret) {
+ pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ soc_uid = msg.uid_high;
+ soc_uid <<= 32;
+ soc_uid |= msg.uid_low;
+
+ return sprintf(buf, "%016llX\n", soc_uid);
+}
+
+static DEVICE_ATTR_RO(soc_uid);
+
static int imx_scu_soc_id(void)
{
struct imx_sc_msg_misc_get_soc_id msg;
@@ -102,6 +136,11 @@ static int imx_scu_soc_probe(struct platform_device *pdev)
goto free_revision;
}
+ ret = device_create_file(soc_device_to_device(soc_dev),
+ &dev_attr_soc_uid);
+ if (ret)
+ goto free_revision;
+
return 0;
free_revision:
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index f924ae8c6514..b9831576dd25 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -16,6 +16,9 @@
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+#define OCOTP_UID_LOW 0x410
+#define OCOTP_UID_HIGH 0x420
+
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
@@ -24,6 +27,16 @@ struct imx8_soc_data {
u32 (*soc_revision)(void);
};
+static u64 soc_uid;
+
+static ssize_t soc_uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%016llX\n", soc_uid);
+}
+
+static DEVICE_ATTR_RO(soc_uid);
+
static u32 __init imx8mq_soc_revision(void)
{
struct device_node *np;
@@ -42,6 +55,10 @@ static u32 __init imx8mq_soc_revision(void)
if (magic == IMX8MQ_SW_MAGIC_B1)
rev = REV_B1;
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+
iounmap(ocotp_base);
out:
@@ -49,6 +66,26 @@ out:
return rev;
}
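+/* Read the 64-bit unique ID from the i.MX8MM OCOTP fuse block */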
+static void __init imx8mm_soc_uid(void)
+{
+ void __iomem *ocotp_base;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
+ if (!np)
+ return;
+
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+
+ iounmap(ocotp_base);
+ of_node_put(np);
+}
+
static u32 __init imx8mm_soc_revision(void)
{
struct device_node *np;
@@ -66,6 +103,9 @@ static u32 __init imx8mm_soc_revision(void)
iounmap(anatop_base);
of_node_put(np);
+
+ imx8mm_soc_uid();
+
return rev;
}
@@ -140,6 +180,11 @@ static int __init imx8_soc_init(void)
goto free_rev;
}
+ ret = device_create_file(soc_device_to_device(soc_dev),
+ &dev_attr_soc_uid);
+ if (ret)
+ goto free_rev;
+
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index de2e62c3310a..e3eb19b85fa4 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
menu "IXP4xx SoC drivers"
config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
and is automatically selected by Ethernet and HSS drivers.
endmenu
+
+endif
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index ff9fef5a032b..7aa0517ff2f3 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -136,7 +136,7 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
return 0;
}
-int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset)
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
(subsys << CMDQ_SUBSYS_SHIFT);
@@ -145,8 +145,8 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset)
}
EXPORT_SYMBOL(cmdq_pkt_write);
-int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
- u32 subsys, u32 offset, u32 mask)
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
{
u32 offset_mask = offset;
int err = 0;
@@ -161,7 +161,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event)
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
u32 arg_b;
@@ -181,7 +181,7 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event)
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
-int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event)
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a6d1bfb17279..661e47acc354 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -175,6 +175,14 @@ config QCOM_SMSM
Say yes here to support the Qualcomm Shared Memory State Machine.
The state machine is represented by bits in shared memory.
+config QCOM_SOCINFO
+ tristate "Qualcomm socinfo driver"
+ depends on QCOM_SMEM
+ select SOC_BUS
+ help
+ Say yes here to support the Qualcomm socinfo driver, providing
+ information about the SoC to user space.
+
config QCOM_WCNSS_CTRL
tristate "Qualcomm WCNSS control driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index eeb088beb15f..162788701a77 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index d5cf953b4337..7d622ea1274e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
struct geni_wrapper *wrapper = se->wrapper;
u32 val;
+ if (!wrapper)
+ return -EINVAL;
+
*iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
struct geni_wrapper *wrapper = se->wrapper;
u32 val;
+ if (!wrapper)
+ return -EINVAL;
+
*iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(wrapper->dev, *iova))
return -EIO;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 5f885196f4d0..33a27e6c6d67 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -10,6 +10,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
@@ -40,6 +42,17 @@
/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
#define QMP_MSG_LEN 64
+#define QMP_NUM_COOLING_RESOURCES 2
+
+static bool qmp_cdev_init_state = 1;
+
+struct qmp_cooling_device {
+ struct thermal_cooling_device *cdev;
+ struct qmp *qmp;
+ char *name;
+ bool state;
+};
+
/**
* struct qmp - driver state for QMP implementation
* @msgram: iomem referencing the message RAM used for communication
@@ -69,6 +82,7 @@ struct qmp {
struct clk_hw qdss_clk;
struct genpd_onecell_data pd_data;
+ struct qmp_cooling_device *cooling_devs;
};
struct qmp_pd {
@@ -385,6 +399,118 @@ static void qmp_pd_remove(struct qmp *qmp)
pm_genpd_remove(data->domains[i]);
}
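+/* Thermal cooling device callbacks backed by QMP messages to the AOSS */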
+static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = qmp_cdev_init_state;
+ return 0;
+}
+
+static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+
+ *state = qmp_cdev->state;
+ return 0;
+}
+
+static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+ char buf[QMP_MSG_LEN] = {};
+ bool cdev_state;
+ int ret;
+
+ /* Normalize state */
+ cdev_state = !!state;
+
+ if (qmp_cdev->state == state)
+ return 0;
+
+ snprintf(buf, sizeof(buf),
+ "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
+ qmp_cdev->name,
+ cdev_state ? "off" : "on");
+
+ ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));
+
+ if (!ret)
+ qmp_cdev->state = cdev_state;
+
+ return ret;
+}
+
+static struct thermal_cooling_device_ops qmp_cooling_device_ops = {
+ .get_max_state = qmp_cdev_get_max_state,
+ .get_cur_state = qmp_cdev_get_cur_state,
+ .set_cur_state = qmp_cdev_set_cur_state,
+};
+
+static int qmp_cooling_device_add(struct qmp *qmp,
+ struct qmp_cooling_device *qmp_cdev,
+ struct device_node *node)
+{
+ char *cdev_name = (char *)node->name;
+
+ qmp_cdev->qmp = qmp;
+ qmp_cdev->state = qmp_cdev_init_state;
+ qmp_cdev->name = cdev_name;
+ qmp_cdev->cdev = devm_thermal_of_cooling_device_register
+ (qmp->dev, node,
+ cdev_name,
+ qmp_cdev, &qmp_cooling_device_ops);
+
+ if (IS_ERR(qmp_cdev->cdev))
+ dev_err(qmp->dev, "unable to register %s cooling device\n",
+ cdev_name);
+
+ return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
+}
+
+static int qmp_cooling_devices_register(struct qmp *qmp)
+{
+ struct device_node *np, *child;
+ int count = QMP_NUM_COOLING_RESOURCES;
+ int ret;
+
+ np = qmp->dev->of_node;
+
+ qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
+ sizeof(*qmp->cooling_devs),
+ GFP_KERNEL);
+
+ if (!qmp->cooling_devs)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "#cooling-cells", NULL))
+ continue;
+ ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
+ child);
+ if (ret)
+ goto unroll;
+ }
+
+ return 0;
+
+unroll:
+ while (--count >= 0)
+ thermal_cooling_device_unregister
+ (qmp->cooling_devs[count].cdev);
+
+ return ret;
+}
+
+static void qmp_cooling_devices_remove(struct qmp *qmp)
+{
+ int i;
+
+ for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
+ thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
+}
+
static int qmp_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -433,6 +559,10 @@ static int qmp_probe(struct platform_device *pdev)
if (ret)
goto err_remove_qdss_clk;
+ ret = qmp_cooling_devices_register(qmp);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
+
platform_set_drvdata(pdev, qmp);
return 0;
@@ -453,6 +583,7 @@ static int qmp_remove(struct platform_device *pdev)
qmp_qdss_clk_remove(qmp);
qmp_pd_remove(qmp);
+ qmp_cooling_devices_remove(qmp);
qmp_close(qmp);
mbox_free_channel(qmp->mbox_chan);
@@ -461,7 +592,9 @@ static int qmp_remove(struct platform_device *pdev)
}
static const struct of_device_id qmp_dt_match[] = {
+ { .compatible = "qcom,sc7180-aoss-qmp", },
{ .compatible = "qcom,sdm845-aoss-qmp", },
+ { .compatible = "qcom,sm8150-aoss-qmp", },
{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index f27c00d82ae4..28c19bcb2f20 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -84,7 +84,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 10
+#define SMEM_HOST_COUNT 11
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -268,6 +268,7 @@ struct qcom_smem {
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
size_t cacheline[SMEM_HOST_COUNT];
u32 item_count;
+ struct platform_device *socinfo;
unsigned num_regions;
struct smem_region regions[];
@@ -963,11 +964,19 @@ static int qcom_smem_probe(struct platform_device *pdev)
__smem = smem;
+ smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
+ PLATFORM_DEVID_NONE, NULL,
+ 0);
+ if (IS_ERR(smem->socinfo))
+ dev_dbg(&pdev->dev, "failed to register socinfo device\n");
+
return 0;
}
static int qcom_smem_remove(struct platform_device *pdev)
{
+ platform_device_unregister(__smem->socinfo);
+
hwspin_lock_free(__smem->hwlock);
__smem = NULL;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
new file mode 100644
index 000000000000..a39ea5061dc5
--- /dev/null
+++ b/drivers/soc/qcom/socinfo.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
+#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+
+/*
+ * SMEM item id, used to acquire a handle to the respective
+ * SMEM region.
+ */
+#define SMEM_HW_SW_BUILD_ID 137
+
+#ifdef CONFIG_DEBUG_FS
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_OEM_SIZE 32
+
+/*
+ * SMEM Image table indices
+ */
+#define SMEM_IMAGE_TABLE_BOOT_INDEX 0
+#define SMEM_IMAGE_TABLE_TZ_INDEX 1
+#define SMEM_IMAGE_TABLE_RPM_INDEX 3
+#define SMEM_IMAGE_TABLE_APPS_INDEX 10
+#define SMEM_IMAGE_TABLE_MPSS_INDEX 11
+#define SMEM_IMAGE_TABLE_ADSP_INDEX 12
+#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
+#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
+#define SMEM_IMAGE_VERSION_TABLE 469
+
+/*
+ * SMEM Image table names
+ */
+static const char *const socinfo_image_names[] = {
+ [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp",
+ [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps",
+ [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot",
+ [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss",
+ [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss",
+ [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
+ [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
+ [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
+};
+
+static const char *const pmic_models[] = {
+ [0] = "Unknown PMIC model",
+ [9] = "PM8994",
+ [11] = "PM8916",
+ [13] = "PM8058",
+ [14] = "PM8028",
+ [15] = "PM8901",
+ [16] = "PM8027",
+ [17] = "ISL9519",
+ [18] = "PM8921",
+ [19] = "PM8018",
+ [20] = "PM8015",
+ [21] = "PM8014",
+ [22] = "PM8821",
+ [23] = "PM8038",
+ [24] = "PM8922",
+ [25] = "PM8917",
+};
+#endif /* CONFIG_DEBUG_FS */
+
+/* Socinfo SMEM item structure */
+struct socinfo {
+ __le32 fmt;
+ __le32 id;
+ __le32 ver;
+ char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH];
+ /* Version 2 */
+ __le32 raw_id;
+ __le32 raw_ver;
+ /* Version 3 */
+ __le32 hw_plat;
+ /* Version 4 */
+ __le32 plat_ver;
+ /* Version 5 */
+ __le32 accessory_chip;
+ /* Version 6 */
+ __le32 hw_plat_subtype;
+ /* Version 7 */
+ __le32 pmic_model;
+ __le32 pmic_die_rev;
+ /* Version 8 */
+ __le32 pmic_model_1;
+ __le32 pmic_die_rev_1;
+ __le32 pmic_model_2;
+ __le32 pmic_die_rev_2;
+ /* Version 9 */
+ __le32 foundry_id;
+ /* Version 10 */
+ __le32 serial_num;
+ /* Version 11 */
+ __le32 num_pmics;
+ __le32 pmic_array_offset;
+ /* Version 12 */
+ __le32 chip_family;
+ __le32 raw_device_family;
+ __le32 raw_device_num;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct socinfo_params {
+ u32 raw_device_family;
+ u32 hw_plat_subtype;
+ u32 accessory_chip;
+ u32 raw_device_num;
+ u32 chip_family;
+ u32 foundry_id;
+ u32 plat_ver;
+ u32 raw_ver;
+ u32 hw_plat;
+ u32 fmt;
+};
+
+struct smem_image_version {
+ char name[SMEM_IMAGE_VERSION_NAME_SIZE];
+ char variant[SMEM_IMAGE_VERSION_VARIANT_SIZE];
+ char pad;
+ char oem[SMEM_IMAGE_VERSION_OEM_SIZE];
+};
+#endif /* CONFIG_DEBUG_FS */
+
+struct qcom_socinfo {
+ struct soc_device *soc_dev;
+ struct soc_device_attribute attr;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbg_root;
+ struct socinfo_params info;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+struct soc_id {
+ unsigned int id;
+ const char *name;
+};
+
+static const struct soc_id soc_id[] = {
+ { 87, "MSM8960" },
+ { 109, "APQ8064" },
+ { 122, "MSM8660A" },
+ { 123, "MSM8260A" },
+ { 124, "APQ8060A" },
+ { 126, "MSM8974" },
+ { 130, "MPQ8064" },
+ { 138, "MSM8960AB" },
+ { 139, "APQ8060AB" },
+ { 140, "MSM8260AB" },
+ { 141, "MSM8660AB" },
+ { 178, "APQ8084" },
+ { 184, "APQ8074" },
+ { 185, "MSM8274" },
+ { 186, "MSM8674" },
+ { 194, "MSM8974PRO" },
+ { 206, "MSM8916" },
+ { 208, "APQ8074-AA" },
+ { 209, "APQ8074-AB" },
+ { 210, "APQ8074PRO" },
+ { 211, "MSM8274-AA" },
+ { 212, "MSM8274-AB" },
+ { 213, "MSM8274PRO" },
+ { 214, "MSM8674-AA" },
+ { 215, "MSM8674-AB" },
+ { 216, "MSM8674PRO" },
+ { 217, "MSM8974-AA" },
+ { 218, "MSM8974-AB" },
+ { 246, "MSM8996" },
+ { 247, "APQ8016" },
+ { 248, "MSM8216" },
+ { 249, "MSM8116" },
+ { 250, "MSM8616" },
+ { 291, "APQ8096" },
+ { 305, "MSM8996SG" },
+ { 310, "MSM8996AU" },
+ { 311, "APQ8096AU" },
+ { 312, "APQ8096SG" },
+};
+
+static const char *socinfo_machine(struct device *dev, unsigned int id)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(soc_id); idx++) {
+ if (soc_id[idx].id == id)
+ return soc_id[idx].name;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
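+/* Helpers declaring single_open() debugfs file_operations around a seq_file show callback */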
+#define QCOM_OPEN(name, _func) \
+static int qcom_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _func, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_ ##name## _ops = { \
+ .open = qcom_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEBUGFS_ADD(info, name) \
+ debugfs_create_file(__stringify(name), 0400, \
+ qcom_socinfo->dbg_root, \
+ info, &qcom_ ##name## _ops)
+
+
+static int qcom_show_build_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->build_id);
+
+ return 0;
+}
+
+static int qcom_show_pmic_model(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ int model = SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_model));
+
+ if (model < 0)
+ return -EINVAL;
+
+ seq_printf(seq, "%s\n", pmic_models[model]);
+
+ return 0;
+}
+
+static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%u.%u\n",
+ SOCINFO_MAJOR(le32_to_cpu(socinfo->pmic_die_rev)),
+ SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_die_rev)));
+
+ return 0;
+}
+
+QCOM_OPEN(build_id, qcom_show_build_id);
+QCOM_OPEN(pmic_model, qcom_show_pmic_model);
+QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+
+#define DEFINE_IMAGE_OPS(type) \
+static int show_image_##type(struct seq_file *seq, void *p) \
+{ \
+ struct smem_image_version *image_version = seq->private; \
+ seq_puts(seq, image_version->type); \
+ seq_puts(seq, "\n"); \
+ return 0; \
+} \
+static int open_image_##type(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, show_image_##type, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_image_##type##_ops = { \
+ .open = open_image_##type, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+DEFINE_IMAGE_OPS(name);
+DEFINE_IMAGE_OPS(variant);
+DEFINE_IMAGE_OPS(oem);
+
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info)
+{
+ struct smem_image_version *versions;
+ struct dentry *dentry;
+ size_t size;
+ int i;
+
+ qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL);
+
+ qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+
+ switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 12):
+ qcom_socinfo->info.chip_family =
+ __le32_to_cpu(info->chip_family);
+ qcom_socinfo->info.raw_device_family =
+ __le32_to_cpu(info->raw_device_family);
+ qcom_socinfo->info.raw_device_num =
+ __le32_to_cpu(info->raw_device_num);
+
+ debugfs_create_x32("chip_family", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.chip_family);
+ debugfs_create_x32("raw_device_family", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_family);
+ debugfs_create_x32("raw_device_number", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_num);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 11):
+ case SOCINFO_VERSION(0, 10):
+ case SOCINFO_VERSION(0, 9):
+ qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id);
+
+ debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.foundry_id);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 8):
+ case SOCINFO_VERSION(0, 7):
+ DEBUGFS_ADD(info, pmic_model);
+ DEBUGFS_ADD(info, pmic_die_rev);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 6):
+ qcom_socinfo->info.hw_plat_subtype =
+ __le32_to_cpu(info->hw_plat_subtype);
+
+ debugfs_create_u32("hardware_platform_subtype", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat_subtype);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 5):
+ qcom_socinfo->info.accessory_chip =
+ __le32_to_cpu(info->accessory_chip);
+
+ debugfs_create_u32("accessory_chip", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.accessory_chip);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 4):
+ qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
+
+ debugfs_create_u32("platform_version", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.plat_ver);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 3):
+ qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
+
+ debugfs_create_u32("hardware_platform", 0400,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 2):
+ qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
+
+ debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_ver);
+ /* Fall through */
+ case SOCINFO_VERSION(0, 1):
+ DEBUGFS_ADD(info, build_id);
+ break;
+ }
+
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE,
+ &size);
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) {
+ if (!socinfo_image_names[i])
+ continue;
+
+ dentry = debugfs_create_dir(socinfo_image_names[i],
+ qcom_socinfo->dbg_root);
+ debugfs_create_file("name", 0400, dentry, &versions[i],
+ &qcom_image_name_ops);
+ debugfs_create_file("variant", 0400, dentry, &versions[i],
+ &qcom_image_variant_ops);
+ debugfs_create_file("oem", 0400, dentry, &versions[i],
+ &qcom_image_oem_ops);
+ }
+}
+
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo)
+{
+ debugfs_remove_recursive(qcom_socinfo->dbg_root);
+}
+#else
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info)
+{
+}
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int qcom_socinfo_probe(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs;
+ struct socinfo *info;
+ size_t item_size;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID,
+ &item_size);
+ if (IS_ERR(info)) {
+ dev_err(&pdev->dev, "Couldn't find socinfo\n");
+ return PTR_ERR(info);
+ }
+
+ qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+
+ qs->attr.family = "Snapdragon";
+ qs->attr.machine = socinfo_machine(&pdev->dev,
+ le32_to_cpu(info->id));
+ qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
+ SOCINFO_MAJOR(le32_to_cpu(info->ver)),
+ SOCINFO_MINOR(le32_to_cpu(info->ver)));
+ if (offsetof(struct socinfo, serial_num) <= item_size)
+ qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%u",
+ le32_to_cpu(info->serial_num));
+
+ qs->soc_dev = soc_device_register(&qs->attr);
+ if (IS_ERR(qs->soc_dev))
+ return PTR_ERR(qs->soc_dev);
+
+ socinfo_debugfs_init(qs, info);
+
+ /* Feed the SoC-specific unique data into the entropy pool */
+ add_device_randomness(info, item_size);
+
+ platform_set_drvdata(pdev, qs->soc_dev);
+
+ return 0;
+}
+
+static int qcom_socinfo_remove(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs = platform_get_drvdata(pdev);
+
+ soc_device_unregister(qs->soc_dev);
+
+ socinfo_debugfs_exit(qs);
+
+ return 0;
+}
+
+static struct platform_driver qcom_socinfo_driver = {
+ .probe = qcom_socinfo_probe,
+ .remove = qcom_socinfo_remove,
+ .driver = {
+ .name = "qcom-socinfo",
+ },
+};
+
+module_platform_driver(qcom_socinfo_driver);
+
+MODULE_DESCRIPTION("Qualcomm SoCinfo driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-socinfo");
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 2bbf49e5d441..3c5e017bacba 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -55,6 +55,7 @@ config ARCH_EMEV2
config ARCH_R7S72100
bool "RZ/A1H (R7S72100)"
+ select ARM_ERRATA_754322
select PM
select PM_GENERIC_DOMAINS
select RENESAS_OSTM
@@ -72,12 +73,14 @@ config ARCH_R8A73A4
bool "R-Mobile APE6 (R8A73A40)"
select ARCH_RMOBILE
select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
select HAVE_ARM_ARCH_TIMER
select RENESAS_IRQC
config ARCH_R8A7740
bool "R-Mobile A1 (R8A77400)"
select ARCH_RMOBILE
+ select ARM_ERRATA_754322
select RENESAS_INTC_IRQPIN
config ARCH_R8A7743
@@ -95,20 +98,24 @@ config ARCH_R8A7744
config ARCH_R8A7745
bool "RZ/G1E (R8A77450)"
select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
select SYSC_R8A7745
config ARCH_R8A77470
bool "RZ/G1C (R8A77470)"
select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
select SYSC_R8A77470
config ARCH_R8A7778
bool "R-Car M1A (R8A77781)"
select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
config ARCH_R8A7779
bool "R-Car H1 (R8A77790)"
select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select SYSC_R8A7779
@@ -117,6 +124,7 @@ config ARCH_R8A7790
bool "R-Car H2 (R8A77900)"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
select I2C
select SYSC_R8A7790
@@ -143,15 +151,18 @@ config ARCH_R8A7793
config ARCH_R8A7794
bool "R-Car E2 (R8A77940)"
select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
select SYSC_R8A7794
config ARCH_R9A06G032
bool "RZ/N1D (R9A06G032)"
select ARCH_RZN1
+ select ARM_ERRATA_814220
config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
select ARCH_RMOBILE
+ select ARM_ERRATA_754322
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select RENESAS_INTC_IRQPIN
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 0c80fab4f8de..59b5e6b10272 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -170,7 +170,7 @@ struct rcar_sysc_pd {
struct generic_pm_domain genpd;
struct rcar_sysc_ch ch;
unsigned int flags;
- char name[0];
+ char name[];
};
static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
@@ -200,7 +200,6 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
- struct dev_power_governor *gov = &simple_qos_governor;
int error;
if (pd->flags & PD_CPU) {
@@ -254,7 +253,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
rcar_sysc_power(&pd->ch, true);
finalize:
- error = pm_genpd_init(genpd, gov, false);
+ error = pm_genpd_init(genpd, &simple_qos_governor, false);
if (error)
pr_err("Failed to init PM domain %s: %d\n", name, error);
@@ -346,7 +345,7 @@ static int __init rcar_sysc_pd_init(void)
if (info->init) {
error = info->init();
if (error)
- return error;
+ goto out_put;
}
has_cpg_mstp = of_find_compatible_node(NULL, NULL,
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
index 421ae1c887d8..54b616ad4a62 100644
--- a/drivers/soc/renesas/rmobile-sysc.c
+++ b/drivers/soc/renesas/rmobile-sysc.c
@@ -48,12 +48,8 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
{
struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
- unsigned int mask;
+ unsigned int mask = BIT(rmobile_pd->bit_shift);
- if (rmobile_pd->bit_shift == ~0)
- return -EBUSY;
-
- mask = BIT(rmobile_pd->bit_shift);
if (rmobile_pd->suspend) {
int ret = rmobile_pd->suspend();
@@ -80,14 +76,10 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
{
- unsigned int mask;
+ unsigned int mask = BIT(rmobile_pd->bit_shift);
unsigned int retry_count;
int ret = 0;
- if (rmobile_pd->bit_shift == ~0)
- return 0;
-
- mask = BIT(rmobile_pd->bit_shift);
if (__raw_readl(rmobile_pd->base + PSTR) & mask)
return ret;
@@ -122,11 +114,15 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
struct dev_power_governor *gov = rmobile_pd->gov;
genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- genpd->power_off = rmobile_pd_power_down;
- genpd->power_on = rmobile_pd_power_up;
- genpd->attach_dev = cpg_mstp_attach_dev;
- genpd->detach_dev = cpg_mstp_detach_dev;
- __rmobile_pd_power_up(rmobile_pd);
+ genpd->attach_dev = cpg_mstp_attach_dev;
+ genpd->detach_dev = cpg_mstp_detach_dev;
+
+ if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) {
+ genpd->power_off = rmobile_pd_power_down;
+ genpd->power_on = rmobile_pd_power_up;
+ __rmobile_pd_power_up(rmobile_pd);
+ }
+
pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
}
@@ -270,6 +266,11 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
break;
case PD_NORMAL:
+ if (pd->bit_shift == ~0) {
+ /* Top-level always-on domain */
+ pr_debug("PM domain %s is always-on domain\n", name);
+ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
+ }
break;
}
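
A brief aside on the rmobile-sysc change above (a hedged sketch, not part of the patch): instead of installing power_on/power_off callbacks that bail out for the top-level domain, the always-on case is now expressed through GENPD_FLAG_ALWAYS_ON, which the genpd core honours directly. The helper name below is illustrative; only the genpd API is real.

/*
 * Hedged sketch, not part of this patch: registering an always-on
 * generic PM domain.  With GENPD_FLAG_ALWAYS_ON set, the genpd core
 * never powers the domain off, so no power_on/power_off callbacks
 * are needed.
 */
#include <linux/pm_domain.h>

static int example_register_always_on(struct generic_pm_domain *genpd)
{
	genpd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* false = the domain starts (and stays) powered on */
	return pm_genpd_init(genpd, &simple_qos_governor, false);
}
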
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 2186285fda92..33ad0de2de3c 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -7,6 +7,12 @@ menuconfig SOC_SAMSUNG
if SOC_SAMSUNG
+config EXYNOS_CHIPID
+ bool "Exynos Chipid controller driver" if COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select MFD_SYSCON
+ select SOC_BUS
+
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 29f294baac6e..3b6a8797416c 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
new file mode 100644
index 000000000000..c55a47cfe617
--- /dev/null
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS - CHIP ID support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ * Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+#include <linux/sys_soc.h>
+
+static const struct exynos_soc_id {
+ const char *name;
+ unsigned int id;
+} soc_ids[] = {
+ { "EXYNOS3250", 0xE3472000 },
+ { "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
+ { "EXYNOS4210", 0x43210000 },
+ { "EXYNOS4212", 0x43220000 },
+ { "EXYNOS4412", 0xE4412000 },
+ { "EXYNOS5250", 0x43520000 },
+ { "EXYNOS5260", 0xE5260000 },
+ { "EXYNOS5410", 0xE5410000 },
+ { "EXYNOS5420", 0xE5420000 },
+ { "EXYNOS5440", 0xE5440000 },
+ { "EXYNOS5800", 0xE5422000 },
+ { "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS5433", 0xE5433000 },
+};
+
+static const char * __init product_id_to_soc_id(unsigned int product_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(soc_ids); i++)
+ if ((product_id & EXYNOS_MASK) == soc_ids[i].id)
+ return soc_ids[i].name;
+ return NULL;
+}
+
+int __init exynos_chipid_early_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *regmap;
+ u32 product_id;
+ u32 revision;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_compatible("samsung,exynos4210-chipid");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
+ if (ret < 0)
+ return ret;
+
+ revision = product_id & EXYNOS_REV_MASK;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Samsung Exynos";
+
+ root = of_find_node_by_path("/");
+ of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x", revision);
+ soc_dev_attr->soc_id = product_id_to_soc_id(product_id);
+ if (!soc_dev_attr->soc_id) {
+ pr_err("Unknown SoC\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* please note that the actual registration will be deferred */
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err;
+ }
+
+ /* it is too early to use dev_info() here (soc_dev is NULL) */
+ pr_info("soc soc0: Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, product_id, revision);
+
+ return 0;
+
+err:
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+early_initcall(exynos_chipid_early_init);
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index bb77c220b6f8..ccc6d53fe788 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
}
#ifdef CONFIG_SUSPEND
-struct wkup_m3_wakeup_src rtc_wake_src(void)
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
{
u32 i;
@@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void)
return rtc_ext_wakeup;
}
-int am33xx_rtc_only_idle(unsigned long wfi_flags)
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
{
omap_rtc_power_off_program(&omap_rtc->dev);
am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
"omap_rtc_scratch0");
- if (nvmem)
+ if (!IS_ERR(nvmem))
nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
(void *)&rtc_magic_val);
rtc_only_idle = 1;
@@ -278,9 +278,12 @@ static void am33xx_pm_end(void)
struct nvmem_device *nvmem;
nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+ if (IS_ERR(nvmem))
+ return;
+
m3_ipc->ops->finish_low_power(m3_ipc);
if (rtc_only_idle) {
- if (retrigger_irq)
+ if (retrigger_irq) {
/*
* 32 bits of Interrupt Set-Pending correspond to 32
* 32 interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
writel_relaxed(1 << (retrigger_irq & 31),
gic_dist_base + GIC_INT_SET_PENDING_BASE
+ retrigger_irq / 32 * 4);
- nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
- (void *)&val);
+ }
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
}
rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
"omap_rtc_scratch0");
- if (nvmem) {
+ if (!IS_ERR(nvmem)) {
nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
4, (void *)&rtc_magic_val);
if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 97817dd7ba24..8c2a2f23982c 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -15,15 +15,19 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
/**
* struct ti_sci_genpd_dev_data: holds data needed for every device attached
* to this genpd
* @idx: index of the device that identifies it with the system
* control processor.
+ * @exclusive: Permissions for exclusive request or shared request of the
+ * device.
*/
struct ti_sci_genpd_dev_data {
int idx;
+ u8 exclusive;
};
/**
@@ -55,6 +59,14 @@ static int ti_sci_dev_id(struct device *dev)
return sci_dev_data->idx;
}
+static u8 is_ti_sci_dev_exclusive(struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+ struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+ return sci_dev_data->exclusive;
+}
+
/**
* ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
* @dev: pointer to device associated with this genpd
@@ -79,7 +91,10 @@ static int ti_sci_dev_start(struct device *dev)
const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
int idx = ti_sci_dev_id(dev);
- return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+ if (is_ti_sci_dev_exclusive(dev))
+ return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci, idx);
+ else
+ return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
}
/**
@@ -110,7 +125,7 @@ static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
if (ret < 0)
return ret;
- if (pd_args.args_count != 1)
+ if (pd_args.args_count != 1 && pd_args.args_count != 2)
return -EINVAL;
idx = pd_args.args[0];
@@ -128,6 +143,10 @@ static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
return -ENOMEM;
sci_dev_data->idx = idx;
+ /* Enable the exclusive permissions by default */
+ sci_dev_data->exclusive = TI_SCI_PD_EXCLUSIVE;
+ if (pd_args.args_count == 2)
+ sci_dev_data->exclusive = pd_args.args[1] & 0x1;
genpd_data = dev_gpd_data(dev);
genpd_data->data = sci_dev_data;
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
index ea5fd2e5e340..d64feeb51a40 100644
--- a/drivers/soc/ux500/ux500-soc-id.c
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -203,10 +203,13 @@ static int __init ux500_soc_device_init(void)
ux500_setup_id();
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
+ if (!soc_dev_attr) {
+ of_node_put(backupram);
return -ENOMEM;
+ }
soc_info_populate(soc_dev_attr, backupram);
+ of_node_put(backupram);
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 3a01cfd70fdc..f518273cfbe3 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig SOUNDWIRE
- bool "SoundWire support"
+ tristate "SoundWire support"
help
SoundWire is a 2-Pin interface with data and clock line ratified
by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
comment "SoundWire Devices"
-config SOUNDWIRE_BUS
- tristate
- select REGMAP_SOUNDWIRE
-
config SOUNDWIRE_CADENCE
tristate
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- select SOUNDWIRE_BUS
depends on X86 && ACPI && SND_SOC
help
SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index fd99a831b92a..45b7e5001653 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -5,7 +5,7 @@
#Bus Objs
soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
#Cadence Objs
soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ff4badc9b3de..60e8bdee5c75 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
#define CDNS_MCP_INTSET 0x4C
-#define CDNS_SDW_SLAVE_STAT 0x50
-#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT 0x50
+#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
#define CDNS_MCP_SLAVE_INTSTAT0 0x54
#define CDNS_MCP_SLAVE_INTSTAT1 0x58
@@ -96,8 +96,8 @@
#define CDNS_MCP_SLAVE_INTMASK0 0x5C
#define CDNS_MCP_SLAVE_INTMASK1 0x60
-#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
#define CDNS_MCP_PORT_INTSTAT 0x64
#define CDNS_MCP_PDI_STAT 0x6C
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3a1d8f1170de..6f7fdcbb9151 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -433,6 +433,16 @@ config SPI_MT7621
help
This selects a driver for the MediaTek MT7621 SPI Controller.
+config SPI_NPCM_FIU
+ tristate "Nuvoton NPCM FLASH Interface Unit"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Flash Interface Unit SPI controller
+ in master mode.
+ This driver does not support generic SPI; the implementation only
+ supports the spi-mem interface.
+
config SPI_NPCM_PSPI
tristate "Nuvoton NPCM PSPI Controller"
depends on ARCH_NPCM || COMPILE_TEST
@@ -440,13 +450,6 @@ config SPI_NPCM_PSPI
This driver provides support for Nuvoton NPCM BMC
Peripheral SPI controller in master mode.
-config SPI_NUC900
- tristate "Nuvoton NUC900 series SPI"
- depends on ARCH_W90X900
- select SPI_BITBANG
- help
- SPI driver for Nuvoton NUC900 series ARM SoCs
-
config SPI_LANTIQ_SSC
tristate "Lantiq SSC SPI controller"
depends on LANTIQ || COMPILE_TEST
@@ -543,7 +546,7 @@ config SPI_PXA2XX
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
controller. The driver can be configured to use any SSP port and
- additional documentation can be found a Documentation/spi/pxa2xx.
+ additional documentation can be found at Documentation/spi/pxa2xx.rst.
config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI && COMMON_CLK
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 63dcab552bcb..bb49c9e6d0a0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -63,8 +63,8 @@ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
-obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 6a7d7b553d95..fd8007ebb145 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -526,7 +526,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "missing IRQ\n");
err = irq;
goto disable_qspick;
}
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index ea160f117f88..41d71ba7fd32 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -170,7 +170,6 @@ static int altera_spi_probe(struct platform_device *pdev)
{
struct altera_spi *hw;
struct spi_master *master;
- struct resource *res;
int err = -ENODEV;
master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
@@ -189,8 +188,7 @@ static int altera_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
/* find and map our resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->base = devm_ioremap_resource(&pdev->dev, res);
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base)) {
err = PTR_ERR(hw->base);
goto exit;
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 032888344822..e450ee17787f 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -817,7 +817,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
- struct resource *res;
struct spi_master *master;
struct a3700_spi *spi;
u32 num_cs = 0;
@@ -855,8 +854,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
spi->master = master;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi->base = devm_ioremap_resource(dev, res);
+ spi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->base)) {
ret = PTR_ERR(spi->base);
goto error;
@@ -864,7 +862,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "could not get irq: %d\n", irq);
ret = -ENXIO;
goto error;
}
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 032a615e4ccd..eb9a243e9526 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -139,7 +139,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct ath79_spi *sp;
struct ath79_spi_platform_data *pdata;
- struct resource *r;
unsigned long rate;
int ret;
@@ -169,8 +168,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.flags = SPI_CS_HIGH;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sp->base = devm_ioremap_resource(&pdev->dev, r);
+ sp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sp->base)) {
ret = PTR_ERR(sp->base);
goto err_put_master;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f00b367523cd..acf318e7330c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -23,6 +23,7 @@
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <trace/events/spi.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -1409,9 +1410,13 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
msg->actual_length = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
ret = atmel_spi_one_transfer(master, msg, xfer);
if (ret)
goto msg_done;
+
+ trace_spi_transfer_stop(msg, xfer);
}
if (as->use_pdc)
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 3b1833e6c7ad..74842f6019ed 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -460,7 +460,6 @@ static int spi_engine_probe(struct platform_device *pdev)
struct spi_engine *spi_engine;
struct spi_master *master;
unsigned int version;
- struct resource *res;
int irq;
int ret;
@@ -480,8 +479,7 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
+ spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_engine->base)) {
ret = PTR_ERR(spi_engine->base);
goto err_put_master;
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 902bdbfedea8..7a3531856491 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -343,7 +343,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
{
int bpc = 0, bpp = 0;
u8 command = op->cmd.opcode;
- int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
+ int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
int addrlen = op->addr.nbytes;
int flex_mode = 1;
@@ -897,6 +897,7 @@ static int bcm_qspi_transfer_one(struct spi_master *master,
read_from_hw(qspi, slots);
}
+ bcm_qspi_enable_bspi(qspi);
return 0;
}
@@ -981,7 +982,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
if (mspi_read)
return bcm_qspi_mspi_exec_mem_op(spi, op);
- ret = bcm_qspi_bspi_set_mode(qspi, op, -1);
+ ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
if (!ret)
ret = bcm_qspi_bspi_exec_mem_op(spi, op);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 6f243a90c844..b4070c0de3df 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -25,7 +25,9 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h> /* FIXME: using chip internals */
+#include <linux/gpio/driver.h> /* FIXME: using chip internals */
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
@@ -66,6 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
+#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
@@ -92,7 +95,8 @@ MODULE_PARM_DESC(polling_limit_us,
* @rx_prologue: bytes received without DMA if first RX sglist entry's
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
- * @dma_pending: whether a DMA transfer is in progress
+ * @prepare_cs: precalculated CS register value for ->prepare_message()
+ * (uses slave-specific clock polarity and phase settings)
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
* unloading the module
* @count_transfer_polling: count of how often polling mode is used
@@ -102,6 +106,19 @@ MODULE_PARM_DESC(polling_limit_us,
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
+ * @chip_select: SPI slave currently selected
+ * (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
+ * @tx_dma_active: whether a TX DMA descriptor is in progress
+ * @rx_dma_active: whether a RX DMA descriptor is in progress
+ * (used by bcm2835_spi_dma_tx_done() to handle a race)
+ * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
+ * (cyclically copies from zero page to TX FIFO)
+ * @fill_tx_addr: bus address of zero page
+ * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
+ * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
+ * @clear_rx_addr: bus address of @clear_rx_cs
+ * @clear_rx_cs: precalculated CS register value to clear RX FIFO
+ * (uses slave-specific clock polarity and phase settings)
*/
struct bcm2835_spi {
void __iomem *regs;
@@ -115,13 +132,22 @@ struct bcm2835_spi {
int tx_prologue;
int rx_prologue;
unsigned int tx_spillover;
- unsigned int dma_pending;
+ u32 prepare_cs[BCM2835_SPI_NUM_CS];
struct dentry *debugfs_dir;
u64 count_transfer_polling;
u64 count_transfer_irq;
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
+
+ u8 chip_select;
+ unsigned int tx_dma_active;
+ unsigned int rx_dma_active;
+ struct dma_async_tx_descriptor *fill_tx_desc;
+ dma_addr_t fill_tx_addr;
+ struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
+ dma_addr_t clear_rx_addr;
+ u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
};
#if defined(CONFIG_DEBUG_FS)
@@ -319,6 +345,13 @@ static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
BCM2835_SPI_CS_INTD |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_TA);
+ /*
+ * Transmission sometimes breaks unless the DONE bit is written at the
+ * end of every transfer. The spec says it's a RO bit. Either the
+ * spec is wrong and the bit is actually of type RW1C, or it's a
+ * hardware erratum.
+ */
+ cs |= BCM2835_SPI_CS_DONE;
/* and reset RX/TX FIFOS */
cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
@@ -448,14 +481,14 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
bs->rx_prologue = 0;
bs->tx_spillover = false;
- if (!sg_is_last(&tfr->tx_sg.sgl[0]))
+ if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
- if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
+ if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
if (bs->rx_prologue > bs->tx_prologue) {
- if (sg_is_last(&tfr->tx_sg.sgl[0])) {
+ if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
bs->tx_prologue = bs->rx_prologue;
} else {
bs->tx_prologue += 4;
@@ -477,7 +510,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
- bcm2835_spi_reset_hw(ctlr);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
+ | BCM2835_SPI_CS_CLEAR_TX
+ | BCM2835_SPI_CS_DONE);
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
@@ -487,6 +522,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
}
+ if (!bs->tx_buf)
+ return;
+
/*
* Write remaining TX prologue. Adjust first entry in TX sglist.
* Also adjust second entry if prologue spills over to it.
@@ -498,7 +536,8 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
| BCM2835_SPI_CS_DMAEN);
bcm2835_wr_fifo_count(bs, tx_remaining);
bcm2835_wait_tx_fifo_empty(bs);
- bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
+ | BCM2835_SPI_CS_DONE);
}
if (likely(!bs->tx_spillover)) {
@@ -531,6 +570,9 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
}
+ if (!bs->tx_buf)
+ goto out;
+
if (likely(!bs->tx_spillover)) {
sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
@@ -539,32 +581,85 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
}
+out:
+ bs->tx_prologue = 0;
}
-static void bcm2835_spi_dma_done(void *data)
+/**
+ * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
+ * @data: SPI master controller
+ *
+ * Used for bidirectional and RX-only transfers.
+ */
+static void bcm2835_spi_dma_rx_done(void *data)
{
struct spi_controller *ctlr = data;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- /* reset fifo and HW */
- bcm2835_spi_reset_hw(ctlr);
-
- /* and terminate tx-dma as we do not have an irq for it
+ /* terminate tx-dma as we do not have an irq for it
* because when the rx dma will terminate and this callback
* is called the tx-dma must have finished - can't get to this
* situation otherwise...
*/
- if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_async(ctlr->dma_tx);
- bcm2835_spi_undo_prologue(bs);
- }
+ dmaengine_terminate_async(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ bs->rx_dma_active = false;
+ bcm2835_spi_undo_prologue(bs);
+
+ /* reset fifo and HW */
+ bcm2835_spi_reset_hw(ctlr);
/* and mark as completed */;
complete(&ctlr->xfer_completion);
}
+/**
+ * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
+ * @data: SPI master controller
+ *
+ * Used for TX-only transfers.
+ */
+static void bcm2835_spi_dma_tx_done(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* busy-wait for TX FIFO to empty */
+ while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ bs->clear_rx_cs[bs->chip_select]);
+
+ bs->tx_dma_active = false;
+ smp_wmb();
+
+ /*
+ * In case of a very short transfer, RX DMA may not have been
+ * issued yet. The onus is then on bcm2835_spi_transfer_one_dma()
+ * to terminate it immediately after issuing.
+ */
+ if (cmpxchg(&bs->rx_dma_active, true, false))
+ dmaengine_terminate_async(ctlr->dma_rx);
+
+ bcm2835_spi_undo_prologue(bs);
+ bcm2835_spi_reset_hw(ctlr);
+ complete(&ctlr->xfer_completion);
+}
+
+/**
+ * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
+ * @ctlr: SPI master controller
+ * @spi: SPI slave
+ * @tfr: SPI transfer
+ * @bs: BCM2835 SPI controller
+ * @is_tx: whether to submit DMA descriptor for TX or RX sglist
+ *
+ * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
+ * Return 0 on success or a negative error number.
+ */
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
+ struct spi_device *spi,
struct spi_transfer *tfr,
+ struct bcm2835_spi *bs,
bool is_tx)
{
struct dma_chan *chan;
@@ -581,8 +676,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
chan = ctlr->dma_tx;
nents = tfr->tx_sg.nents;
sgl = tfr->tx_sg.sgl;
- flags = 0 /* no tx interrupt */;
-
+ flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
} else {
dir = DMA_DEV_TO_MEM;
chan = ctlr->dma_rx;
@@ -595,10 +689,17 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
if (!desc)
return -EINVAL;
- /* set callback for rx */
+ /*
+ * Completion is signaled by the RX channel for bidirectional and
+ * RX-only transfers; else by the TX channel for TX-only transfers.
+ */
if (!is_tx) {
- desc->callback = bcm2835_spi_dma_done;
+ desc->callback = bcm2835_spi_dma_rx_done;
+ desc->callback_param = ctlr;
+ } else if (!tfr->rx_buf) {
+ desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
+ bs->chip_select = spi->chip_select;
}
/* submit it to DMA-engine */
@@ -607,12 +708,60 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
return dma_submit_error(cookie);
}
+/**
+ * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
+ * @ctlr: SPI master controller
+ * @spi: SPI slave
+ * @tfr: SPI transfer
+ * @cs: CS register
+ *
+ * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
+ * the TX and RX DMA channel to copy between memory and FIFO register.
+ *
+ * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
+ * memory is pointless. However not reading the RX FIFO isn't an option either
+ * because transmission is halted once it's full. As a workaround, cyclically
+ * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
+ *
+ * The CS register value is precalculated in bcm2835_spi_setup(). Normally
+ * this is called only once, on slave registration. A DMA descriptor to write
+ * this value is preallocated in bcm2835_dma_init(). All that's left to do
+ * when performing a TX-only transfer is to submit this descriptor to the RX
+ * DMA channel. Latency is thereby minimized. The descriptor does not
+ * generate any interrupts while running. It must be terminated once the
+ * TX DMA channel is done.
+ *
+ * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
+ * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
+ * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
+ * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
+ * reduction in bus traffic and thus energy consumption is achieved.
+ *
+ * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
+ * copying from the zero page. The DMA descriptor to do this is preallocated
+ * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
+ * done and can then be reused.
+ *
+ * The BCM2835 DMA driver autodetects when a transaction copies from the zero
+ * page and utilizes the DMA controller's ability to synthesize zeroes instead
+ * of copying them from memory. This reduces traffic on the memory bus. The
+ * feature is not available on so-called "lite" channels, but normally TX DMA
+ * is backed by a full-featured channel.
+ *
+ * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
+ * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
+ * has been counted down to zero (hardware erratum). Thus, when the transfer
+ * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
+ * (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
+ * performed at the end of an RX-only transfer.
+ */
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ dma_cookie_t cookie;
int ret;
/* update usage statistics */
@@ -625,16 +774,15 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
/* setup tx-DMA */
- ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
+ if (bs->tx_buf) {
+ ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
+ } else {
+ cookie = dmaengine_submit(bs->fill_tx_desc);
+ ret = dma_submit_error(cookie);
+ }
if (ret)
goto err_reset_hw;
- /* start TX early */
- dma_async_issue_pending(ctlr->dma_tx);
-
- /* mark as dma pending */
- bs->dma_pending = 1;
-
/* set the DMA length */
bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
@@ -642,20 +790,43 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
bcm2835_wr(bs, BCM2835_SPI_CS,
cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+ bs->tx_dma_active = true;
+ smp_wmb();
+
+ /* start TX early */
+ dma_async_issue_pending(ctlr->dma_tx);
+
/* setup rx-DMA late - to run transfers while
* mapping of the rx buffers still takes place
* this saves 10us or more.
*/
- ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
+ if (bs->rx_buf) {
+ ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
+ } else {
+ cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
+ ret = dma_submit_error(cookie);
+ }
if (ret) {
/* need to reset on errors */
dmaengine_terminate_sync(ctlr->dma_tx);
- bs->dma_pending = false;
+ bs->tx_dma_active = false;
goto err_reset_hw;
}
/* start rx dma late */
dma_async_issue_pending(ctlr->dma_rx);
+ bs->rx_dma_active = true;
+ smp_mb();
+
+ /*
+ * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
+ * may run before RX DMA is issued. Terminate RX DMA if so.
+ */
+ if (!bs->rx_buf && !bs->tx_dma_active &&
+ cmpxchg(&bs->rx_dma_active, true, false)) {
+ dmaengine_terminate_async(ctlr->dma_rx);
+ bcm2835_spi_reset_hw(ctlr);
+ }
/* wait for wakeup in framework */
return 1;
@@ -678,26 +849,52 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
return true;
}
-static void bcm2835_dma_release(struct spi_controller *ctlr)
+static void bcm2835_dma_release(struct spi_controller *ctlr,
+ struct bcm2835_spi *bs)
{
+ int i;
+
if (ctlr->dma_tx) {
dmaengine_terminate_sync(ctlr->dma_tx);
+
+ if (bs->fill_tx_desc)
+ dmaengine_desc_free(bs->fill_tx_desc);
+
+ if (bs->fill_tx_addr)
+ dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
+ bs->fill_tx_addr, sizeof(u32),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
}
+
if (ctlr->dma_rx) {
dmaengine_terminate_sync(ctlr->dma_rx);
+
+ for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
+ if (bs->clear_rx_desc[i])
+ dmaengine_desc_free(bs->clear_rx_desc[i]);
+
+ if (bs->clear_rx_addr)
+ dma_unmap_single(ctlr->dma_rx->device->dev,
+ bs->clear_rx_addr,
+ sizeof(bs->clear_rx_cs),
+ DMA_TO_DEVICE);
+
dma_release_channel(ctlr->dma_rx);
ctlr->dma_rx = NULL;
}
}
-static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
+static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
+ struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
- int ret;
+ int ret, i;
/* base address in dma-space */
addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@@ -719,7 +916,11 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
goto err_release;
}
- /* configure DMAs */
+ /*
+ * The TX DMA channel either copies a transfer's TX buffer to the FIFO
+ * or, in case of an RX-only transfer, cyclically copies from the zero
+ * page to the FIFO using a preallocated, reusable descriptor.
+ */
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -727,17 +928,74 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
if (ret)
goto err_config;
+ bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
+ ZERO_PAGE(0), 0, sizeof(u32),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
+ dev_err(dev, "cannot map zero page - not using DMA mode\n");
+ bs->fill_tx_addr = 0;
+ goto err_release;
+ }
+
+ bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
+ bs->fill_tx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!bs->fill_tx_desc) {
+ dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
+ goto err_release;
+ }
+
+ ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
+ if (ret) {
+ dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
+ goto err_release;
+ }
+
+ /*
+ * The RX DMA channel is used bidirectionally: It either reads the
+ * RX FIFO or, in case of a TX-only transfer, cyclically writes a
+ * precalculated value to the CS register to clear the RX FIFO.
+ */
slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
if (ret)
goto err_config;
+ bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+ bs->clear_rx_cs,
+ sizeof(bs->clear_rx_cs),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
+ dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
+ bs->clear_rx_addr = 0;
+ goto err_release;
+ }
+
+ for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
+ bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+ bs->clear_rx_addr + i * sizeof(u32),
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!bs->clear_rx_desc[i]) {
+ dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
+ goto err_release;
+ }
+
+ ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
+ if (ret) {
+ dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
+ goto err_release;
+ }
+ }
+
/* all went well, so set can_dma */
ctlr->can_dma = bcm2835_spi_can_dma;
- /* need to do TX AND RX DMA, so we need dummy buffers */
- ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
return;
@@ -745,7 +1003,7 @@ err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
ret);
err_release:
- bcm2835_dma_release(ctlr);
+ bcm2835_dma_release(ctlr, bs);
err:
return;
}
@@ -812,7 +1070,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
unsigned long hz_per_byte, byte_limit;
- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ u32 cs = bs->prepare_cs[spi->chip_select];
/* set clock */
spi_hz = tfr->speed_hz;
@@ -834,17 +1092,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ if (spi->mode & SPI_3WIRE && tfr->rx_buf)
cs |= BCM2835_SPI_CS_REN;
- else
- cs &= ~BCM2835_SPI_CS_REN;
-
- /*
- * The driver always uses software-controlled GPIO Chip Select.
- * Set the hardware-controlled native Chip Select to an invalid
- * value to prevent it from interfering.
- */
- cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
@@ -881,7 +1130,6 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
int ret;
if (ctlr->can_dma) {
@@ -896,14 +1144,11 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
return ret;
}
- cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
-
- if (spi->mode & SPI_CPOL)
- cs |= BCM2835_SPI_CS_CPOL;
- if (spi->mode & SPI_CPHA)
- cs |= BCM2835_SPI_CS_CPHA;
-
- bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+ /*
+ * Set up clock polarity before spi_transfer_one_message() asserts
+ * chip select to avoid a gratuitous clock signal edge.
+ */
+ bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
return 0;
}
@@ -914,11 +1159,12 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
- if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_sync(ctlr->dma_tx);
- dmaengine_terminate_sync(ctlr->dma_rx);
- bcm2835_spi_undo_prologue(bs);
- }
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ bcm2835_spi_undo_prologue(bs);
+
/* and reset */
bcm2835_spi_reset_hw(ctlr);
}
@@ -930,14 +1176,50 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
static int bcm2835_spi_setup(struct spi_device *spi)
{
- int err;
+ struct spi_controller *ctlr = spi->controller;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct gpio_chip *chip;
+ enum gpio_lookup_flags lflags;
+ u32 cs;
+
+ /*
+ * Precalculate SPI slave's CS register value for ->prepare_message():
+ * The driver always uses software-controlled GPIO chip select, hence
+ * set the hardware-controlled native chip select to an invalid value
+ * to prevent it from interfering.
+ */
+ cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+ bs->prepare_cs[spi->chip_select] = cs;
+
+ /*
+ * Precalculate SPI slave's CS register value to clear RX FIFO
+ * in case of a TX-only DMA transfer.
+ */
+ if (ctlr->dma_rx) {
+ bs->clear_rx_cs[spi->chip_select] = cs |
+ BCM2835_SPI_CS_TA |
+ BCM2835_SPI_CS_DMAEN |
+ BCM2835_SPI_CS_CLEAR_RX;
+ dma_sync_single_for_device(ctlr->dma_rx->device->dev,
+ bs->clear_rx_addr,
+ sizeof(bs->clear_rx_cs),
+ DMA_TO_DEVICE);
+ }
+
/*
* sanity checking the native-chipselects
*/
if (spi->mode & SPI_NO_CS)
return 0;
- if (gpio_is_valid(spi->cs_gpio))
+ /*
+ * The SPI core has successfully requested the CS GPIO line from the
+ * device tree, so we are done.
+ */
+ if (spi->cs_gpiod)
return 0;
if (spi->chip_select > 1) {
/* error in the case of native CS requested with CS > 1
@@ -948,29 +1230,43 @@ static int bcm2835_spi_setup(struct spi_device *spi)
"setup: only two native chip-selects are supported\n");
return -EINVAL;
}
- /* now translate native cs to GPIO */
+
+ /*
+ * Translate native CS to GPIO
+ *
+ * FIXME: poking around in the gpiolib internals like this is
+ * not very good practice. Find a way to locate the real problem
+ * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
+ * sometimes not assigned correctly? Erroneous device trees?
+ */
/* get the gpio chip for the base */
chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
if (!chip)
return 0;
- /* and calculate the real CS */
- spi->cs_gpio = chip->base + 8 - spi->chip_select;
+ /*
+ * Retrieve the corresponding GPIO line used for CS.
+ * The inversion semantics will be handled by the GPIO core
+ * code, so we pass GPIOD_OUT_LOW for "unasserted" and
+ * the correct flag for inversion semantics. The SPI_CS_HIGH
+ * on spi->mode cannot be checked for polarity in this case
+ * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
+ */
+ if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
+ lflags = GPIO_ACTIVE_HIGH;
+ else
+ lflags = GPIO_ACTIVE_LOW;
+ spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
+ DRV_NAME,
+ lflags,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(spi->cs_gpiod))
+ return PTR_ERR(spi->cs_gpiod);
/* and set up the "mode" and level */
- dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
- spi->chip_select, spi->cs_gpio);
-
- /* set up GPIO as output and pull to the correct level */
- err = gpio_direction_output(spi->cs_gpio,
- (spi->mode & SPI_CS_HIGH) ? 0 : 1);
- if (err) {
- dev_err(&spi->dev,
- "could not set CS%i gpio %i as output: %i",
- spi->chip_select, spi->cs_gpio, err);
- return err;
- }
+ dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
+ spi->chip_select);
return 0;
}
@@ -979,18 +1275,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct bcm2835_spi *bs;
- struct resource *res;
int err;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
+ dma_get_cache_alignment()));
if (!ctlr)
return -ENOMEM;
platform_set_drvdata(pdev, ctlr);
+ ctlr->use_gpio_descriptors = true;
ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- ctlr->num_chipselect = 3;
+ ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
ctlr->setup = bcm2835_spi_setup;
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
@@ -999,8 +1296,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs = spi_controller_get_devdata(ctlr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
goto out_controller_put;
@@ -1015,14 +1311,13 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
- dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
goto out_controller_put;
}
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(ctlr, &pdev->dev);
+ bcm2835_dma_init(ctlr, &pdev->dev, bs);
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -1066,7 +1361,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
- bcm2835_dma_release(ctlr);
+ bcm2835_dma_release(ctlr, bs);
return 0;
}
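
To make the long bcm2835_spi_transfer_one_dma() comment above easier to follow, here is a hedged sketch (not part of the patch) of the underlying dmaengine calls for a preallocated, reusable cyclic descriptor; the channel, the DMA address and the helper names are placeholders, while the dmaengine API calls themselves are real.

/*
 * Hedged sketch, not part of this patch: preallocate a reusable cyclic
 * descriptor once, then submit it per transfer and terminate it when
 * the paired channel completes, as described above.
 */
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
example_prep_fill_desc(struct dma_chan *chan, dma_addr_t src)
{
	struct dma_async_tx_descriptor *desc;

	/* cyclically copy one 32-bit word from @src to the device FIFO */
	desc = dmaengine_prep_dma_cyclic(chan, src, sizeof(u32), 0,
					 DMA_MEM_TO_DEV, 0);
	if (!desc)
		return NULL;

	/* mark it reusable so it survives completion and can be resubmitted */
	if (dmaengine_desc_set_reuse(desc))
		return NULL;

	return desc;
}

static void example_start_fill(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *desc)
{
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}

static void example_stop_fill(struct dma_chan *chan)
{
	/* the cyclic descriptor never completes on its own */
	dmaengine_terminate_async(chan);
}
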
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index bb57035c5770..a2162ff56a12 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -491,7 +491,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct bcm2835aux_spi *bs;
- struct resource *res;
unsigned long clk_hz;
int err;
@@ -524,8 +523,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
bs = spi_master_get_devdata(master);
/* the main area */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ bs->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
goto out_master_put;
@@ -540,7 +538,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
- dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
goto out_master_put;
}
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 9a06ffdb73b8..c6836a931dbf 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -330,7 +330,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct bcm63xx_hsspi *bs;
- struct resource *res_mem;
void __iomem *regs;
struct device *dev = &pdev->dev;
struct clk *clk, *pll_clk = NULL;
@@ -338,13 +337,10 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res_mem);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index df1c94a131e6..fdd7eaa0b8ed 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -520,10 +520,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
clk = devm_clk_get(dev, "spi");
if (IS_ERR(clk)) {
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 8c77d1114ad3..7e71a351f3b7 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -23,7 +23,7 @@
* with a battery powered AVR microcontroller and lots of goodies. You
* can use GCC to develop firmware for this.
*
- * See Documentation/spi/butterfly for information about how to build
+ * See Documentation/spi/butterfly.rst for information about how to build
* and use this custom parallel port cable.
*/
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7c41e4e82849..c36587b42e95 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -474,7 +474,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
int ret = 0, irq;
struct spi_master *master;
struct cdns_spi *xspi;
- struct resource *res;
u32 num_cs;
master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
@@ -485,8 +484,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ xspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->regs)) {
ret = PTR_ERR(xspi->regs);
goto remove_master;
@@ -540,7 +538,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
ret = -ENXIO;
- dev_err(&pdev->dev, "irq number is invalid\n");
goto clk_dis_all;
}
diff --git a/drivers/spi/spi-cavium-octeon.c b/drivers/spi/spi-cavium-octeon.c
index ee4703e84622..1a2de6ce9064 100644
--- a/drivers/spi/spi-cavium-octeon.c
+++ b/drivers/spi/spi-cavium-octeon.c
@@ -18,7 +18,6 @@
static int octeon_spi_probe(struct platform_device *pdev)
{
- struct resource *res_mem;
void __iomem *reg_base;
struct spi_master *master;
struct octeon_spi *p;
@@ -30,8 +29,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, master);
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base)) {
err = PTR_ERR(reg_base);
goto fail;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 4daba12ec843..5e900f228919 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -91,7 +91,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
{
struct spi_clps711x_data *hw;
struct spi_master *master;
- struct resource *res;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -125,8 +124,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
goto err_out;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ hw->syncio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->syncio)) {
ret = PTR_ERR(hw->syncio);
goto err_out;
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 5ff48ab2f534..f80e06c87fbe 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -339,7 +339,6 @@ static int mcfqspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct mcfqspi *mcfqspi;
- struct resource *res;
struct mcfqspi_platform_data *pdata;
int status;
@@ -362,8 +361,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
mcfqspi = spi_master_get_devdata(master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+ mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcfqspi->iobase)) {
status = PTR_ERR(mcfqspi->iobase);
goto fail0;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 18c06568805e..bd46fca3f094 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -79,14 +79,12 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
const char *cpu_syscon, u32 if_si_owner_offset)
{
struct dw_spi_mscc *dwsmscc;
- struct resource *res;
dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
if (!dwsmscc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwsmscc->spi_mst = devm_ioremap_resource(&pdev->dev, res);
+ dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwsmscc->spi_mst)) {
dev_err(&pdev->dev, "SPI_MST region map failed\n");
return PTR_ERR(dwsmscc->spi_mst);
@@ -138,7 +136,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
struct dw_spi_mmio *dwsmmio);
struct dw_spi_mmio *dwsmmio;
struct dw_spi *dws;
- struct resource *mem;
int ret;
int num_cs;
@@ -150,18 +147,15 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws = &dwsmmio->dws;
/* Get basic io resource and map it */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+ dws->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dws->regs)) {
dev_err(&pdev->dev, "SPI region map failed\n");
return PTR_ERR(dws->regs);
}
dws->irq = platform_get_irq(pdev, 0);
- if (dws->irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
+ if (dws->irq < 0)
return dws->irq; /* -ENXIO */
- }
dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dwsmmio->clk))
@@ -172,8 +166,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
/* Optional clock needed to access the registers */
dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
- if (IS_ERR(dwsmmio->pclk))
- return PTR_ERR(dwsmmio->pclk);
+ if (IS_ERR(dwsmmio->pclk)) {
+ ret = PTR_ERR(dwsmmio->pclk);
+ goto out_clk;
+ }
ret = clk_prepare_enable(dwsmmio->pclk);
if (ret)
goto out_clk;
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 9651679ee7f7..140644913e6c 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -19,6 +19,7 @@ struct spi_pci_desc {
int (*setup)(struct dw_spi *);
u16 num_cs;
u16 bus_num;
+ u32 max_freq;
};
static struct spi_pci_desc spi_pci_mid_desc_1 = {
@@ -33,6 +34,12 @@ static struct spi_pci_desc spi_pci_mid_desc_2 = {
.bus_num = 1,
};
+static struct spi_pci_desc spi_pci_ehl_desc = {
+ .num_cs = 1,
+ .bus_num = -1,
+ .max_freq = 100000000,
+};
+
static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct dw_spi *dws;
@@ -65,6 +72,7 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (desc) {
dws->num_cs = desc->num_cs;
dws->bus_num = desc->bus_num;
+ dws->max_freq = desc->max_freq;
if (desc->setup) {
ret = desc->setup(dws);
@@ -98,16 +106,14 @@ static void spi_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int spi_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi *dws = pci_get_drvdata(pdev);
+ struct dw_spi *dws = dev_get_drvdata(dev);
return dw_spi_suspend_host(dws);
}
static int spi_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dw_spi *dws = pci_get_drvdata(pdev);
+ struct dw_spi *dws = dev_get_drvdata(dev);
return dw_spi_resume_host(dws);
}
@@ -125,8 +131,14 @@ static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
/* Intel MID platform SPI controller 2 */
{ PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
+ /* Intel Elkhart Lake PSE SPI controllers */
+ { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&spi_pci_ehl_desc},
+ { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&spi_pci_ehl_desc},
{},
};
+MODULE_DEVICE_TABLE(pci, pci_ids);
static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index eb1f2142a335..64d4c441b641 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -400,10 +400,8 @@ static int efm32_spi_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
+ if (ret <= 0)
goto err;
- }
ddata->rxirq = ret;
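The dropped dev_err() calls around platform_get_irq() here and in the drivers below rely on the IRQ lookup itself now logging the failure, so callers only propagate the error; a minimal sketch with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int foo_setup_irq(struct platform_device *pdev, irq_handler_t handler,
			 void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* the lookup already printed an error */

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}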
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 4034e3ec0ba2..4e1ccd4e52b6 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -656,10 +656,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resources\n");
+ if (irq < 0)
return -EBUSY;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e967ac564761..858f0544289e 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -305,12 +305,10 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
}
if (mspi->flags & SPI_CPM1) {
- struct resource *res;
void *pram;
- res = platform_get_resource(to_platform_device(dev),
- IORESOURCE_MEM, 1);
- pram = devm_ioremap_resource(dev, res);
+ pram = devm_platform_ioremap_resource(to_platform_device(dev),
+ 1);
if (IS_ERR(pram))
mspi->pram = NULL;
else
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 53335ccc98f6..bec758e978fb 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -9,26 +9,16 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/time.h>
-#define DRIVER_NAME "fsl-dspi"
+#define DRIVER_NAME "fsl-dspi"
#ifdef CONFIG_M5441x
#define DSPI_FIFO_SIZE 16
@@ -37,101 +27,97 @@
#endif
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
-#define SPI_MCR 0x00
-#define SPI_MCR_MASTER (1 << 31)
-#define SPI_MCR_PCSIS (0x3F << 16)
-#define SPI_MCR_CLR_TXF (1 << 11)
-#define SPI_MCR_CLR_RXF (1 << 10)
-#define SPI_MCR_XSPI (1 << 3)
-
-#define SPI_TCR 0x08
-#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
-
-#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
-#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
-#define SPI_CTAR_CPOL(x) ((x) << 26)
-#define SPI_CTAR_CPHA(x) ((x) << 25)
-#define SPI_CTAR_LSBFE(x) ((x) << 24)
-#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
-#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
-#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
-#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
-#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
-#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
-#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
-#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
-#define SPI_CTAR_SCALE_BITS 0xf
-
-#define SPI_CTAR0_SLAVE 0x0c
-
-#define SPI_SR 0x2c
-#define SPI_SR_EOQF 0x10000000
-#define SPI_SR_TCFQF 0x80000000
-#define SPI_SR_CLEAR 0x9aaf0000
-
-#define SPI_RSER_TFFFE BIT(25)
-#define SPI_RSER_TFFFD BIT(24)
-#define SPI_RSER_RFDFE BIT(17)
-#define SPI_RSER_RFDFD BIT(16)
-
-#define SPI_RSER 0x30
-#define SPI_RSER_EOQFE 0x10000000
-#define SPI_RSER_TCFQE 0x80000000
-
-#define SPI_PUSHR 0x34
-#define SPI_PUSHR_CMD_CONT (1 << 15)
-#define SPI_PUSHR_CONT (SPI_PUSHR_CMD_CONT << 16)
-#define SPI_PUSHR_CMD_CTAS(x) (((x) & 0x0003) << 12)
-#define SPI_PUSHR_CTAS(x) (SPI_PUSHR_CMD_CTAS(x) << 16)
-#define SPI_PUSHR_CMD_EOQ (1 << 11)
-#define SPI_PUSHR_EOQ (SPI_PUSHR_CMD_EOQ << 16)
-#define SPI_PUSHR_CMD_CTCNT (1 << 10)
-#define SPI_PUSHR_CTCNT (SPI_PUSHR_CMD_CTCNT << 16)
-#define SPI_PUSHR_CMD_PCS(x) ((1 << x) & 0x003f)
-#define SPI_PUSHR_PCS(x) (SPI_PUSHR_CMD_PCS(x) << 16)
-#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
-
-#define SPI_PUSHR_SLAVE 0x34
-
-#define SPI_POPR 0x38
-#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)
-
-#define SPI_TXFR0 0x3c
-#define SPI_TXFR1 0x40
-#define SPI_TXFR2 0x44
-#define SPI_TXFR3 0x48
-#define SPI_RXFR0 0x7c
-#define SPI_RXFR1 0x80
-#define SPI_RXFR2 0x84
-#define SPI_RXFR3 0x88
-
-#define SPI_CTARE(x) (0x11c + (((x) & 0x3) * 4))
-#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
-#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
-
-#define SPI_SREX 0x13c
-
-#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
-#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
-#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
-#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
-
-#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
-#define SPI_FRAME_EBITS_MASK SPI_CTARE_FMSZE(1)
+#define SPI_MCR 0x00
+#define SPI_MCR_MASTER BIT(31)
+#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_CLR_TXF BIT(11)
+#define SPI_MCR_CLR_RXF BIT(10)
+#define SPI_MCR_XSPI BIT(3)
+
+#define SPI_TCR 0x08
+#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
+#define SPI_CTAR_CPOL BIT(26)
+#define SPI_CTAR_CPHA BIT(25)
+#define SPI_CTAR_LSBFE BIT(24)
+#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
+#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
+#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
+#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
+#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
+#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
+#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
+#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
+#define SPI_CTAR_SCALE_BITS 0xf
+
+#define SPI_CTAR0_SLAVE 0x0c
+
+#define SPI_SR 0x2c
+#define SPI_SR_TCFQF BIT(31)
+#define SPI_SR_EOQF BIT(28)
+#define SPI_SR_TFUF BIT(27)
+#define SPI_SR_TFFF BIT(25)
+#define SPI_SR_CMDTCF BIT(23)
+#define SPI_SR_SPEF BIT(21)
+#define SPI_SR_RFOF BIT(19)
+#define SPI_SR_TFIWF BIT(18)
+#define SPI_SR_RFDF BIT(17)
+#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_CLEAR (SPI_SR_TCFQF | SPI_SR_EOQF | \
+ SPI_SR_TFUF | SPI_SR_TFFF | \
+ SPI_SR_CMDTCF | SPI_SR_SPEF | \
+ SPI_SR_RFOF | SPI_SR_TFIWF | \
+ SPI_SR_RFDF | SPI_SR_CMDFFF)
+
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
+#define SPI_RSER 0x30
+#define SPI_RSER_TCFQE BIT(31)
+#define SPI_RSER_EOQFE BIT(28)
+
+#define SPI_PUSHR 0x34
+#define SPI_PUSHR_CMD_CONT BIT(15)
+#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12 & GENMASK(14, 12)))
+#define SPI_PUSHR_CMD_EOQ BIT(11)
+#define SPI_PUSHR_CMD_CTCNT BIT(10)
+#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))
+
+#define SPI_PUSHR_SLAVE 0x34
+
+#define SPI_POPR 0x38
+
+#define SPI_TXFR0 0x3c
+#define SPI_TXFR1 0x40
+#define SPI_TXFR2 0x44
+#define SPI_TXFR3 0x48
+#define SPI_RXFR0 0x7c
+#define SPI_RXFR1 0x80
+#define SPI_RXFR2 0x84
+#define SPI_RXFR3 0x88
+
+#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
+#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
+#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)
+
+#define SPI_SREX 0x13c
+
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
/* Register offsets for regmap_pushr */
-#define PUSHR_CMD 0x0
-#define PUSHR_TX 0x2
+#define PUSHR_CMD 0x0
+#define PUSHR_TX 0x2
-#define SPI_CS_INIT 0x01
-#define SPI_CS_ASSERT 0x02
-#define SPI_CS_DROP 0x04
-
-#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
- u32 ctar_val;
- u16 void_write_data;
+ u32 ctar_val;
+ u16 void_write_data;
};
enum dspi_trans_mode {
@@ -141,75 +127,75 @@ enum dspi_trans_mode {
};
struct fsl_dspi_devtype_data {
- enum dspi_trans_mode trans_mode;
- u8 max_clock_factor;
- bool xspi_mode;
+ enum dspi_trans_mode trans_mode;
+ u8 max_clock_factor;
+ bool xspi_mode;
};
static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_DMA_MODE,
- .max_clock_factor = 2,
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
};
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
- .xspi_mode = true,
+ .trans_mode = DSPI_TCFQ_MODE,
+ .max_clock_factor = 8,
+ .xspi_mode = true,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
+ .trans_mode = DSPI_TCFQ_MODE,
+ .max_clock_factor = 8,
};
static const struct fsl_dspi_devtype_data coldfire_data = {
- .trans_mode = DSPI_EOQ_MODE,
- .max_clock_factor = 8,
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
};
struct fsl_dspi_dma {
/* Length of transfer in words of DSPI_FIFO_SIZE */
- u32 curr_xfer_len;
-
- u32 *tx_dma_buf;
- struct dma_chan *chan_tx;
- dma_addr_t tx_dma_phys;
- struct completion cmd_tx_complete;
- struct dma_async_tx_descriptor *tx_desc;
-
- u32 *rx_dma_buf;
- struct dma_chan *chan_rx;
- dma_addr_t rx_dma_phys;
- struct completion cmd_rx_complete;
- struct dma_async_tx_descriptor *rx_desc;
+ u32 curr_xfer_len;
+
+ u32 *tx_dma_buf;
+ struct dma_chan *chan_tx;
+ dma_addr_t tx_dma_phys;
+ struct completion cmd_tx_complete;
+ struct dma_async_tx_descriptor *tx_desc;
+
+ u32 *rx_dma_buf;
+ struct dma_chan *chan_rx;
+ dma_addr_t rx_dma_phys;
+ struct completion cmd_rx_complete;
+ struct dma_async_tx_descriptor *rx_desc;
};
struct fsl_dspi {
- struct spi_master *master;
- struct platform_device *pdev;
-
- struct regmap *regmap;
- struct regmap *regmap_pushr;
- int irq;
- struct clk *clk;
-
- struct spi_transfer *cur_transfer;
- struct spi_message *cur_msg;
- struct chip_data *cur_chip;
- size_t len;
- const void *tx;
- void *rx;
- void *rx_end;
- u16 void_write_data;
- u16 tx_cmd;
- u8 bits_per_word;
- u8 bytes_per_word;
- const struct fsl_dspi_devtype_data *devtype_data;
-
- wait_queue_head_t waitq;
- u32 waitflags;
-
- struct fsl_dspi_dma *dma;
+ struct spi_controller *ctlr;
+ struct platform_device *pdev;
+
+ struct regmap *regmap;
+ struct regmap *regmap_pushr;
+ int irq;
+ struct clk *clk;
+
+ struct spi_transfer *cur_transfer;
+ struct spi_message *cur_msg;
+ struct chip_data *cur_chip;
+ size_t len;
+ const void *tx;
+ void *rx;
+ void *rx_end;
+ u16 void_write_data;
+ u16 tx_cmd;
+ u8 bits_per_word;
+ u8 bytes_per_word;
+ const struct fsl_dspi_devtype_data *devtype_data;
+
+ wait_queue_head_t waitq;
+ u32 waitflags;
+
+ struct fsl_dspi_dma *dma;
};
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
@@ -233,7 +219,7 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
- if (spi_controller_is_slave(dspi->master))
+ if (spi_controller_is_slave(dspi->ctlr))
return data;
if (dspi->len > 0)
@@ -246,7 +232,7 @@ static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
if (!dspi->rx)
return;
- /* Mask of undefined bits */
+ /* Mask off undefined bits */
rxdata &= (1 << dspi->bits_per_word) - 1;
if (dspi->bytes_per_word == 1)
@@ -282,8 +268,8 @@ static void dspi_rx_dma_callback(void *arg)
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
- struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
+ struct fsl_dspi_dma *dma = dspi->dma;
int time_left;
int i;
@@ -332,13 +318,13 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
- if (spi_controller_is_slave(dspi->master)) {
+ if (spi_controller_is_slave(dspi->ctlr)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
return 0;
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
- DMA_COMPLETION_TIMEOUT);
+ DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
dev_err(dev, "DMA tx timeout\n");
dmaengine_terminate_all(dma->chan_tx);
@@ -347,7 +333,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
- DMA_COMPLETION_TIMEOUT);
+ DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
dev_err(dev, "DMA rx timeout\n");
dmaengine_terminate_all(dma->chan_tx);
@@ -360,9 +346,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
- struct fsl_dspi_dma *dma = dspi->dma;
- struct device *dev = &dspi->pdev->dev;
struct spi_message *message = dspi->cur_msg;
+ struct device *dev = &dspi->pdev->dev;
+ struct fsl_dspi_dma *dma = dspi->dma;
int curr_remaining_bytes;
int bytes_per_buffer;
int ret = 0;
@@ -397,9 +383,9 @@ exit:
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
- struct fsl_dspi_dma *dma;
- struct dma_slave_config cfg;
struct device *dev = &dspi->pdev->dev;
+ struct dma_slave_config cfg;
+ struct fsl_dspi_dma *dma;
int ret;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
@@ -421,14 +407,14 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
}
dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->tx_dma_phys, GFP_KERNEL);
+ &dma->tx_dma_phys, GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->rx_dma_phys, GFP_KERNEL);
+ &dma->rx_dma_phys, GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
@@ -485,30 +471,31 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
struct fsl_dspi_dma *dma = dspi->dma;
struct device *dev = &dspi->pdev->dev;
- if (dma) {
- if (dma->chan_tx) {
- dma_unmap_single(dev, dma->tx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
- dma_release_channel(dma->chan_tx);
- }
+ if (!dma)
+ return;
- if (dma->chan_rx) {
- dma_unmap_single(dev, dma->rx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
- dma_release_channel(dma->chan_rx);
- }
+ if (dma->chan_tx) {
+ dma_unmap_single(dev, dma->tx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->chan_tx);
+ }
+
+ if (dma->chan_rx) {
+ dma_unmap_single(dev, dma->rx_dma_phys,
+ DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_release_channel(dma->chan_rx);
}
}
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
- unsigned long clkrate)
+ unsigned long clkrate)
{
/* Valid baud rate pre-scaler values */
int pbr_tbl[4] = {2, 3, 5, 7};
int brs[16] = { 2, 4, 6, 8,
- 16, 32, 64, 128,
- 256, 512, 1024, 2048,
- 4096, 8192, 16384, 32768 };
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768 };
int scale_needed, scale, minscale = INT_MAX;
int i, j;
@@ -538,15 +525,15 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
}
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
- unsigned long clkrate)
+ unsigned long clkrate)
{
- int pscale_tbl[4] = {1, 3, 5, 7};
int scale_needed, scale, minscale = INT_MAX;
- int i, j;
+ int pscale_tbl[4] = {1, 3, 5, 7};
u32 remainder;
+ int i, j;
scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
- &remainder);
+ &remainder);
if (remainder)
scale_needed++;
@@ -601,7 +588,7 @@ static void dspi_tcfq_write(struct fsl_dspi *dspi)
*/
u32 data = dspi_pop_tx(dspi);
- if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE(1)) {
+ if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
/* LSB */
tx_fifo_write(dspi, data & 0xFFFF);
tx_fifo_write(dspi, data >> 16);
@@ -655,19 +642,90 @@ static void dspi_eoq_read(struct fsl_dspi *dspi)
{
int fifo_size = DSPI_FIFO_SIZE;
- /* Read one FIFO entry at and push to rx buffer */
+ /* Read one FIFO entry and push to rx buffer */
while ((dspi->rx < dspi->rx_end) && fifo_size--)
dspi_push_rx(dspi, fifo_read(dspi));
}
-static int dspi_transfer_one_message(struct spi_master *master,
- struct spi_message *message)
+static int dspi_rxtx(struct fsl_dspi *dspi)
{
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_message *msg = dspi->cur_msg;
+ enum dspi_trans_mode trans_mode;
+ u16 spi_tcnt;
+ u32 spi_tcr;
+
+ /* Get transfer counter (in number of SPI transfers). It was
+ * reset to 0 when transfer(s) were started.
+ */
+ regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
+ spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+ /* Update total number of bytes that were transferred */
+ msg->actual_length += spi_tcnt * dspi->bytes_per_word;
+
+ trans_mode = dspi->devtype_data->trans_mode;
+ if (trans_mode == DSPI_EOQ_MODE)
+ dspi_eoq_read(dspi);
+ else if (trans_mode == DSPI_TCFQ_MODE)
+ dspi_tcfq_read(dspi);
+
+ if (!dspi->len)
+ /* Success! */
+ return 0;
+
+ if (trans_mode == DSPI_EOQ_MODE)
+ dspi_eoq_write(dspi);
+ else if (trans_mode == DSPI_TCFQ_MODE)
+ dspi_tcfq_write(dspi);
+
+ return -EINPROGRESS;
+}
+
+static int dspi_poll(struct fsl_dspi *dspi)
+{
+ int tries = 1000;
+ u32 spi_sr;
+
+ do {
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))
+ break;
+ } while (--tries);
+
+ if (!tries)
+ return -ETIMEDOUT;
+
+ return dspi_rxtx(dspi);
+}
+
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+ struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+ u32 spi_sr;
+
+ regmap_read(dspi->regmap, SPI_SR, &spi_sr);
+ regmap_write(dspi->regmap, SPI_SR, spi_sr);
+
+ if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)))
+ return IRQ_NONE;
+
+ if (dspi_rxtx(dspi) == 0) {
+ dspi->waitflags = 1;
+ wake_up_interruptible(&dspi->waitq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dspi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
+ enum dspi_trans_mode trans_mode;
struct spi_transfer *transfer;
int status = 0;
- enum dspi_trans_mode trans_mode;
message->actual_length = 0;
@@ -677,7 +735,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
dspi->cur_chip = spi_get_ctldata(spi);
/* Prepare command word for CMD FIFO */
dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
- SPI_PUSHR_CMD_PCS(spi->chip_select);
+ SPI_PUSHR_CMD_PCS(spi->chip_select);
if (list_is_last(&dspi->cur_transfer->transfer_list,
&dspi->cur_msg->transfers)) {
/* Leave PCS activated after last transfer when
@@ -718,8 +776,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
SPI_FRAME_BITS(transfer->bits_per_word));
if (dspi->devtype_data->xspi_mode)
regmap_write(dspi->regmap, SPI_CTARE(0),
- SPI_FRAME_EBITS(transfer->bits_per_word)
- | SPI_CTARE_DTCP(1));
+ SPI_FRAME_EBITS(transfer->bits_per_word) |
+ SPI_CTARE_DTCP(1));
trans_mode = dspi->devtype_data->trans_mode;
switch (trans_mode) {
@@ -733,8 +791,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
break;
case DSPI_DMA_MODE:
regmap_write(dspi->regmap, SPI_RSER,
- SPI_RSER_TFFFE | SPI_RSER_TFFFD |
- SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
status = dspi_dma_xfer(dspi);
break;
default:
@@ -744,13 +802,18 @@ static int dspi_transfer_one_message(struct spi_master *master,
goto out;
}
- if (trans_mode != DSPI_DMA_MODE) {
- if (wait_event_interruptible(dspi->waitq,
- dspi->waitflags))
- dev_err(&dspi->pdev->dev,
- "wait transfer complete fail!\n");
+ if (!dspi->irq) {
+ do {
+ status = dspi_poll(dspi);
+ } while (status == -EINPROGRESS);
+ } else if (trans_mode != DSPI_DMA_MODE) {
+ status = wait_event_interruptible(dspi->waitq,
+ dspi->waitflags);
dspi->waitflags = 0;
}
+ if (status)
+ dev_err(&dspi->pdev->dev,
+ "Waiting for transfer to complete failed!\n");
if (transfer->delay_usecs)
udelay(transfer->delay_usecs);
@@ -758,19 +821,19 @@ static int dspi_transfer_one_message(struct spi_master *master,
out:
message->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return status;
}
static int dspi_setup(struct spi_device *spi)
{
- struct chip_data *chip;
- struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- struct fsl_dspi_platform_data *pdata;
- u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
+ struct chip_data *chip;
unsigned long clkrate;
/* Only alloc on first setup */
@@ -785,10 +848,10 @@ static int dspi_setup(struct spi_device *spi)
if (!pdata) {
of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
+ &cs_sck_delay);
of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ &sck_cs_delay);
} else {
cs_sck_delay = pdata->cs_sck_delay;
sck_cs_delay = pdata->sck_cs_delay;
@@ -805,18 +868,22 @@ static int dspi_setup(struct spi_device *spi)
/* Set After SCK delay scale values */
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
- chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
- | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
-
- if (!spi_controller_is_slave(dspi->master)) {
- chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
- SPI_LSB_FIRST ? 1 : 0)
- | SPI_CTAR_PCSSCK(pcssck)
- | SPI_CTAR_CSSCK(cssck)
- | SPI_CTAR_PASC(pasc)
- | SPI_CTAR_ASC(asc)
- | SPI_CTAR_PBR(pbr)
- | SPI_CTAR_BR(br);
+ chip->ctar_val = 0;
+ if (spi->mode & SPI_CPOL)
+ chip->ctar_val |= SPI_CTAR_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->ctar_val |= SPI_CTAR_CPHA;
+
+ if (!spi_controller_is_slave(dspi->ctlr)) {
+ chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
+ SPI_CTAR_CSSCK(cssck) |
+ SPI_CTAR_PASC(pasc) |
+ SPI_CTAR_ASC(asc) |
+ SPI_CTAR_PBR(pbr) |
+ SPI_CTAR_BR(br);
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctar_val |= SPI_CTAR_LSBFE;
}
spi_set_ctldata(spi, chip);
@@ -829,68 +896,11 @@ static void dspi_cleanup(struct spi_device *spi)
struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
- spi->master->bus_num, spi->chip_select);
+ spi->controller->bus_num, spi->chip_select);
kfree(chip);
}
-static irqreturn_t dspi_interrupt(int irq, void *dev_id)
-{
- struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
- struct spi_message *msg = dspi->cur_msg;
- enum dspi_trans_mode trans_mode;
- u32 spi_sr, spi_tcr;
- u16 spi_tcnt;
-
- regmap_read(dspi->regmap, SPI_SR, &spi_sr);
- regmap_write(dspi->regmap, SPI_SR, spi_sr);
-
-
- if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
- /* Get transfer counter (in number of SPI transfers). It was
- * reset to 0 when transfer(s) were started.
- */
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
- spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
- /* Update total number of bytes that were transferred */
- msg->actual_length += spi_tcnt * dspi->bytes_per_word;
-
- trans_mode = dspi->devtype_data->trans_mode;
- switch (trans_mode) {
- case DSPI_EOQ_MODE:
- dspi_eoq_read(dspi);
- break;
- case DSPI_TCFQ_MODE:
- dspi_tcfq_read(dspi);
- break;
- default:
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
- trans_mode);
- return IRQ_HANDLED;
- }
-
- if (!dspi->len) {
- dspi->waitflags = 1;
- wake_up_interruptible(&dspi->waitq);
- } else {
- switch (trans_mode) {
- case DSPI_EOQ_MODE:
- dspi_eoq_write(dspi);
- break;
- case DSPI_TCFQ_MODE:
- dspi_tcfq_write(dspi);
- break;
- default:
- dev_err(&dspi->pdev->dev,
- "unsupported trans_mode %u\n",
- trans_mode);
- }
- }
- }
-
- return IRQ_HANDLED;
-}
-
static const struct of_device_id fsl_dspi_dt_ids[] = {
{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
@@ -902,10 +912,10 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
- spi_master_suspend(master);
+ spi_controller_suspend(ctlr);
clk_disable_unprepare(dspi->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -915,8 +925,8 @@ static int dspi_suspend(struct device *dev)
static int dspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
int ret;
pinctrl_pm_select_default_state(dev);
@@ -924,7 +934,7 @@ static int dspi_resume(struct device *dev)
ret = clk_prepare_enable(dspi->clk);
if (ret)
return ret;
- spi_master_resume(master);
+ spi_controller_resume(ctlr);
return 0;
}
@@ -939,16 +949,16 @@ static const struct regmap_range dspi_volatile_ranges[] = {
};
static const struct regmap_access_table dspi_volatile_table = {
- .yes_ranges = dspi_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
+ .yes_ranges = dspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};
static const struct regmap_config dspi_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x88,
- .volatile_table = &dspi_volatile_table,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
+ .volatile_table = &dspi_volatile_table,
};
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
@@ -959,33 +969,34 @@ static const struct regmap_range dspi_xspi_volatile_ranges[] = {
};
static const struct regmap_access_table dspi_xspi_volatile_table = {
- .yes_ranges = dspi_xspi_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
+ .yes_ranges = dspi_xspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};
static const struct regmap_config dspi_xspi_regmap_config[] = {
{
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x13c,
- .volatile_table = &dspi_xspi_volatile_table,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x13c,
+ .volatile_table = &dspi_xspi_volatile_table,
},
{
- .name = "pushr",
- .reg_bits = 16,
- .val_bits = 16,
- .reg_stride = 2,
- .max_register = 0x2,
+ .name = "pushr",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x2,
},
};
static void dspi_init(struct fsl_dspi *dspi)
{
- unsigned int mcr = SPI_MCR_PCSIS |
- (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
+ unsigned int mcr = SPI_MCR_PCSIS;
- if (!spi_controller_is_slave(dspi->master))
+ if (dspi->devtype_data->xspi_mode)
+ mcr |= SPI_MCR_XSPI;
+ if (!spi_controller_is_slave(dspi->ctlr))
mcr |= SPI_MCR_MASTER;
regmap_write(dspi->regmap, SPI_MCR, mcr);
@@ -998,34 +1009,33 @@ static void dspi_init(struct fsl_dspi *dspi)
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ const struct regmap_config *regmap_config;
+ struct fsl_dspi_platform_data *pdata;
+ struct spi_controller *ctlr;
+ int ret, cs_num, bus_num;
struct fsl_dspi *dspi;
struct resource *res;
- const struct regmap_config *regmap_config;
void __iomem *base;
- struct fsl_dspi_platform_data *pdata;
- int ret = 0, cs_num, bus_num;
- master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
- if (!master)
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
+ if (!ctlr)
return -ENOMEM;
- dspi = spi_master_get_devdata(master);
+ dspi = spi_controller_get_devdata(ctlr);
dspi->pdev = pdev;
- dspi->master = master;
+ dspi->ctlr = ctlr;
- master->transfer = NULL;
- master->setup = dspi_setup;
- master->transfer_one_message = dspi_transfer_one_message;
- master->dev.of_node = pdev->dev.of_node;
+ ctlr->setup = dspi_setup;
+ ctlr->transfer_one_message = dspi_transfer_one_message;
+ ctlr->dev.of_node = pdev->dev.of_node;
- master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ ctlr->cleanup = dspi_cleanup;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
- master->num_chipselect = pdata->cs_num;
- master->bus_num = pdata->bus_num;
+ ctlr->num_chipselect = pdata->cs_num;
+ ctlr->bus_num = pdata->bus_num;
dspi->devtype_data = &coldfire_data;
} else {
@@ -1033,38 +1043,38 @@ static int dspi_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
if (ret < 0) {
dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
- goto out_master_put;
+ goto out_ctlr_put;
}
- master->num_chipselect = cs_num;
+ ctlr->num_chipselect = cs_num;
ret = of_property_read_u32(np, "bus-num", &bus_num);
if (ret < 0) {
dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_master_put;
+ goto out_ctlr_put;
}
- master->bus_num = bus_num;
+ ctlr->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
- master->slave = true;
+ ctlr->slave = true;
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");
ret = -EFAULT;
- goto out_master_put;
+ goto out_ctlr_put;
}
}
if (dspi->devtype_data->xspi_mode)
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_master_put;
+ goto out_ctlr_put;
}
if (dspi->devtype_data->xspi_mode)
@@ -1076,7 +1086,7 @@ static int dspi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
ret = PTR_ERR(dspi->regmap);
- goto out_master_put;
+ goto out_ctlr_put;
}
if (dspi->devtype_data->xspi_mode) {
@@ -1088,7 +1098,7 @@ static int dspi_probe(struct platform_device *pdev)
"failed to init pushr regmap: %ld\n",
PTR_ERR(dspi->regmap_pushr));
ret = PTR_ERR(dspi->regmap_pushr);
- goto out_master_put;
+ goto out_ctlr_put;
}
}
@@ -1096,18 +1106,20 @@ static int dspi_probe(struct platform_device *pdev)
if (IS_ERR(dspi->clk)) {
ret = PTR_ERR(dspi->clk);
dev_err(&pdev->dev, "unable to get clock\n");
- goto out_master_put;
+ goto out_ctlr_put;
}
ret = clk_prepare_enable(dspi->clk);
if (ret)
- goto out_master_put;
+ goto out_ctlr_put;
dspi_init(dspi);
+
dspi->irq = platform_get_irq(pdev, 0);
- if (dspi->irq < 0) {
- dev_err(&pdev->dev, "can't get platform irq\n");
- ret = dspi->irq;
- goto out_clk_put;
+ if (dspi->irq <= 0) {
+ dev_info(&pdev->dev,
+ "can't get platform irq, using poll mode\n");
+ dspi->irq = 0;
+ goto poll_mode;
}
ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
@@ -1117,6 +1129,9 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
+ init_waitqueue_head(&dspi->waitq);
+
+poll_mode:
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
@@ -1125,15 +1140,14 @@ static int dspi_probe(struct platform_device *pdev)
}
}
- master->max_speed_hz =
+ ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
- init_waitqueue_head(&dspi->waitq);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, ctlr);
- ret = spi_register_master(master);
+ ret = spi_register_controller(ctlr);
if (ret != 0) {
- dev_err(&pdev->dev, "Problem registering DSPI master\n");
+ dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
goto out_clk_put;
}
@@ -1141,32 +1155,32 @@ static int dspi_probe(struct platform_device *pdev)
out_clk_put:
clk_disable_unprepare(dspi->clk);
-out_master_put:
- spi_master_put(master);
+out_ctlr_put:
+ spi_controller_put(ctlr);
return ret;
}
static int dspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
/* Disconnect from the SPI framework */
dspi_release_dma(dspi);
clk_disable_unprepare(dspi->clk);
- spi_unregister_master(dspi->master);
+ spi_unregister_controller(dspi->ctlr);
return 0;
}
static struct platform_driver fsl_dspi_driver = {
- .driver.name = DRIVER_NAME,
- .driver.of_match_table = fsl_dspi_dt_ids,
- .driver.owner = THIS_MODULE,
- .driver.pm = &dspi_pm,
- .probe = dspi_probe,
- .remove = dspi_remove,
+ .driver.name = DRIVER_NAME,
+ .driver.of_match_table = fsl_dspi_dt_ids,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &dspi_pm,
+ .probe = dspi_probe,
+ .remove = dspi_remove,
};
module_platform_driver(fsl_dspi_driver);
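The register macros rewritten above follow the <linux/bits.h> idiom of spelling fields as BIT()/GENMASK() instead of raw shifted constants; a tiny sketch with hypothetical FOO_* names showing how such a field packs and unpacks:

#include <linux/bits.h>

#define FOO_CTRL_EN		BIT(31)				/* single flag bit */
#define FOO_CTRL_DIV(x)		(((x) << 4) & GENMASK(7, 4))	/* 4-bit field */
#define FOO_CTRL_GET_DIV(v)	(((v) & GENMASK(7, 4)) >> 4)

/*
 * FOO_CTRL_EN | FOO_CTRL_DIV(5) == 0x80000050, and
 * FOO_CTRL_GET_DIV(0x80000050) == 5.
 */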
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 3576167283dc..015a1abb6a84 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -91,9 +91,6 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
struct mpc8xxx_spi_probe_info {
struct fsl_spi_platform_data pdata;
- int ngpios;
- int *gpios;
- bool *alow_flags;
__be32 __iomem *immr_spi_cs;
};
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 41a49b93ca60..c02e24c01136 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -206,7 +206,7 @@ static const struct fsl_qspi_devtype_data imx6sx_data = {
};
static const struct fsl_qspi_devtype_data imx7d_data = {
- .rxfifo = SZ_512,
+ .rxfifo = SZ_128,
.txfifo = SZ_512,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
@@ -860,10 +860,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
/* find the irq */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get the irq: %d\n", ret);
+ if (ret < 0)
goto err_disable_clk;
- }
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 1d9b33aa1a3b..4b80ace1d137 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -28,7 +28,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -481,32 +480,6 @@ static int fsl_spi_setup(struct spi_device *spi)
return retval;
}
- if (mpc8xxx_spi->type == TYPE_GRLIB) {
- if (gpio_is_valid(spi->cs_gpio)) {
- int desel;
-
- retval = gpio_request(spi->cs_gpio,
- dev_name(&spi->dev));
- if (retval)
- return retval;
-
- desel = !(spi->mode & SPI_CS_HIGH);
- retval = gpio_direction_output(spi->cs_gpio, desel);
- if (retval) {
- gpio_free(spi->cs_gpio);
- return retval;
- }
- } else if (spi->cs_gpio != -ENOENT) {
- if (spi->cs_gpio < 0)
- return spi->cs_gpio;
- return -EINVAL;
- }
- /* When spi->cs_gpio == -ENOENT, a hole in the phandle list
- * indicates to use native chipselect if present, or allow for
- * an always selected chip
- */
- }
-
/* Initialize chipselect - might be active for SPI_CS_HIGH mode */
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
@@ -515,12 +488,8 @@ static int fsl_spi_setup(struct spi_device *spi)
static void fsl_spi_cleanup(struct spi_device *spi)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
- if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
- gpio_free(spi->cs_gpio);
-
kfree(cs);
spi_set_ctldata(spi, NULL);
}
@@ -586,8 +555,8 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
u32 slvsel;
u16 cs = spi->chip_select;
- if (gpio_is_valid(spi->cs_gpio)) {
- gpio_set_value(spi->cs_gpio, on);
+ if (spi->cs_gpiod) {
+ gpiod_set_value(spi->cs_gpiod, on);
} else if (cs < mpc8xxx_spi->native_chipselects) {
slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
@@ -718,139 +687,19 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = spi->dev.parent->parent;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
- u16 cs = spi->chip_select;
-
- if (cs < pinfo->ngpios) {
- int gpio = pinfo->gpios[cs];
- bool alow = pinfo->alow_flags[cs];
-
- gpio_set_value(gpio, on ^ alow);
+ if (spi->cs_gpiod) {
+ gpiod_set_value(spi->cs_gpiod, on);
} else {
- if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
+ struct device *dev = spi->dev.parent->parent;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+
+ if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
return;
iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
}
}
-static int of_fsl_spi_get_chipselects(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
- bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
- of_property_read_bool(np, "fsl,spisel_boot");
- int ngpios;
- int i = 0;
- int ret;
-
- ngpios = of_gpio_count(np);
- ngpios = max(ngpios, 0);
- if (ngpios == 0 && !spisel_boot) {
- /*
- * SPI w/o chip-select line. One SPI device is still permitted
- * though.
- */
- pdata->max_chipselect = 1;
- return 0;
- }
-
- pinfo->ngpios = ngpios;
- pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
- GFP_KERNEL);
- if (!pinfo->gpios)
- return -ENOMEM;
- memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
-
- pinfo->alow_flags = kcalloc(ngpios, sizeof(*pinfo->alow_flags),
- GFP_KERNEL);
- if (!pinfo->alow_flags) {
- ret = -ENOMEM;
- goto err_alloc_flags;
- }
-
- for (; i < ngpios; i++) {
- int gpio;
- enum of_gpio_flags flags;
-
- gpio = of_get_gpio_flags(np, i, &flags);
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
- ret = gpio;
- goto err_loop;
- }
-
- ret = gpio_request(gpio, dev_name(dev));
- if (ret) {
- dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
- goto err_loop;
- }
-
- pinfo->gpios[i] = gpio;
- pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;
-
- ret = gpio_direction_output(pinfo->gpios[i],
- pinfo->alow_flags[i]);
- if (ret) {
- dev_err(dev,
- "can't set output direction for gpio #%d: %d\n",
- i, ret);
- goto err_loop;
- }
- }
-
-#if IS_ENABLED(CONFIG_FSL_SOC)
- if (spisel_boot) {
- pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
- if (!pinfo->immr_spi_cs) {
- ret = -ENOMEM;
- i = ngpios - 1;
- goto err_loop;
- }
- }
-#endif
-
- pdata->max_chipselect = ngpios + spisel_boot;
- pdata->cs_control = fsl_spi_cs_control;
-
- return 0;
-
-err_loop:
- while (i >= 0) {
- if (gpio_is_valid(pinfo->gpios[i]))
- gpio_free(pinfo->gpios[i]);
- i--;
- }
-
- kfree(pinfo->alow_flags);
- pinfo->alow_flags = NULL;
-err_alloc_flags:
- kfree(pinfo->gpios);
- pinfo->gpios = NULL;
- return ret;
-}
-
-static int of_fsl_spi_free_chipselects(struct device *dev)
-{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
- int i;
-
- if (!pinfo->gpios)
- return 0;
-
- for (i = 0; i < pdata->max_chipselect; i++) {
- if (gpio_is_valid(pinfo->gpios[i]))
- gpio_free(pinfo->gpios[i]);
- }
-
- kfree(pinfo->gpios);
- kfree(pinfo->alow_flags);
- return 0;
-}
-
static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -866,9 +715,21 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
type = fsl_spi_get_type(&ofdev->dev);
if (type == TYPE_FSL) {
- ret = of_fsl_spi_get_chipselects(dev);
- if (ret)
- goto err;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+#if IS_ENABLED(CONFIG_FSL_SOC)
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
+
+ if (spisel_boot) {
+ pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
+ if (!pinfo->immr_spi_cs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+#endif
+
+ pdata->cs_control = fsl_spi_cs_control;
}
ret = of_address_to_resource(np, 0, &mem);
@@ -891,8 +752,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
err:
irq_dispose_mapping(irq);
- if (type == TYPE_FSL)
- of_fsl_spi_free_chipselects(dev);
return ret;
}
@@ -902,8 +761,6 @@ static int of_fsl_spi_remove(struct platform_device *ofdev)
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
fsl_spi_cpm_free(mpc8xxx_spi);
- if (mpc8xxx_spi->type == TYPE_FSL)
- of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
}
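The spi-fsl-spi rework above hands chip-select GPIO handling to the SPI core's GPIO descriptors; a chip-select hook then only has to toggle the descriptor when one is present, roughly as sketched here (foo_cs_control is hypothetical):

#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

static void foo_cs_control(struct spi_device *spi, bool on)
{
	if (spi->cs_gpiod)		/* descriptor parsed from cs-gpios */
		gpiod_set_value(spi->cs_gpiod, on);
	/* otherwise the controller's native chip-select is used */
}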
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 5f0b0d5bfef4..6f3d64a1a2b3 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -534,18 +534,14 @@ static int spi_geni_probe(struct platform_device *pdev)
int ret, irq;
struct spi_master *spi;
struct spi_geni_master *mas;
- struct resource *res;
void __iomem *base;
struct clk *clk;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Err getting IRQ %d\n", irq);
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index eca9d52ecf65..1d3e23ec20a6 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -290,10 +290,7 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
return PTR_ERR(spi_gpio->miso);
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
- if (IS_ERR(spi_gpio->sck))
- return PTR_ERR(spi_gpio->sck);
-
- return 0;
+ return PTR_ERR_OR_ZERO(spi_gpio->sck);
}
#ifdef CONFIG_OF
@@ -410,6 +407,12 @@ static int spi_gpio_probe(struct platform_device *pdev)
bb = &spi_gpio->bitbang;
bb->master = master;
+ /*
+ * There is some additional business, apart from driving the CS GPIO
+ * line, that we need to do on selection. This makes the local
+ * callback for chipselect always get called.
+ */
+ master->flags |= SPI_MASTER_GPIO_SS;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 8f01858c0ae6..9dfe8b04e688 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -819,22 +819,16 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
}
rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
- if (rx_irq < 0) {
- dev_err(dev, "failed to get %s\n", LTQ_SPI_RX_IRQ_NAME);
+ if (rx_irq < 0)
return -ENXIO;
- }
tx_irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
- if (tx_irq < 0) {
- dev_err(dev, "failed to get %s\n", LTQ_SPI_TX_IRQ_NAME);
+ if (tx_irq < 0)
return -ENXIO;
- }
err_irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
- if (err_irq < 0) {
- dev_err(dev, "failed to get %s\n", LTQ_SPI_ERR_IRQ_NAME);
+ if (err_irq < 0)
return -ENXIO;
- }
master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
if (!master)
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f18f912c9dea..174dba29b1dd 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -34,7 +34,7 @@
* available (on page 4) here:
* http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
*
- * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
+ * Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
* (heavily) based on spi-butterfly by David Brownell.
*
* The LM70 LLP connects to the PC parallel port in the following manner:
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
index f50779fd329c..2d436541d6c2 100644
--- a/drivers/spi/spi-lp8841-rtc.c
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -185,7 +185,6 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct spi_lp8841_rtc *data;
- void *iomem;
master = spi_alloc_master(&pdev->dev, sizeof(*data));
if (!master)
@@ -207,8 +206,7 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
data = spi_master_get_devdata(master);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
+ data->iomem = devm_platform_ioremap_resource(pdev, 0);
ret = PTR_ERR_OR_ZERO(data->iomem);
if (ret) {
dev_err(&pdev->dev, "failed to get IO address\n");
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7fe4488ace57..f3f10443f9e2 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -503,7 +503,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spicc_device *spicc;
- struct resource *res;
int ret, irq, rate;
master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
@@ -517,8 +516,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
spicc->pdev = pdev;
platform_set_drvdata(pdev, spicc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spicc->base = devm_ioremap_resource(&pdev->dev, res);
+ spicc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spicc->base)) {
dev_err(&pdev->dev, "io resource mapping failed\n");
ret = PTR_ERR(spicc->base);
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index f7fe9b13d122..c7b039980291 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -286,7 +286,6 @@ static int meson_spifc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spifc *spifc;
- struct resource *res;
void __iomem *base;
unsigned int rate;
int ret = 0;
@@ -300,8 +299,7 @@ static int meson_spifc_probe(struct platform_device *pdev)
spifc = spi_master_get_devdata(master);
spifc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(spifc->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_err;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 45d8a7048b6c..6888a4dcff6d 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -17,6 +17,7 @@
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/dma-mapping.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
@@ -28,6 +29,8 @@
#define SPI_STATUS0_REG 0x001c
#define SPI_PAD_SEL_REG 0x0024
#define SPI_CFG2_REG 0x0028
+#define SPI_TX_SRC_REG_64 0x002c
+#define SPI_RX_DST_REG_64 0x0030
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
@@ -73,6 +76,10 @@
#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_32BITS_MASK (0xffffffff)
+
+#define DMA_ADDR_EXT_BITS (36)
+#define DMA_ADDR_DEF_BITS (32)
struct mtk_spi_compatible {
bool need_pad_sel;
@@ -80,6 +87,8 @@ struct mtk_spi_compatible {
bool must_tx;
/* some IC design adjust cfg register to enhance time accuracy */
bool enhance_timing;
+ /* some IC support DMA addr extension */
+ bool dma_ext;
};
struct mtk_spi {
@@ -102,6 +111,13 @@ static const struct mtk_spi_compatible mt2712_compat = {
.must_tx = true,
};
+static const struct mtk_spi_compatible mt6765_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+ .dma_ext = true,
+};
+
static const struct mtk_spi_compatible mt7622_compat = {
.must_tx = true,
.enhance_timing = true,
@@ -137,6 +153,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,mt6589-spi",
.data = (void *)&mtk_common_compat,
},
+ { .compatible = "mediatek,mt6765-spi",
+ .data = (void *)&mt6765_compat,
+ },
{ .compatible = "mediatek,mt7622-spi",
.data = (void *)&mt7622_compat,
},
@@ -371,10 +390,25 @@ static void mtk_spi_setup_dma_addr(struct spi_master *master,
{
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (mdata->tx_sgl)
- writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
- if (mdata->rx_sgl)
- writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
+ if (mdata->tx_sgl) {
+ writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(xfer->tx_dma >> 32),
+ mdata->base + SPI_TX_SRC_REG_64);
+#endif
+ }
+
+ if (mdata->rx_sgl) {
+ writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(xfer->rx_dma >> 32),
+ mdata->base + SPI_RX_DST_REG_64);
+#endif
+ }
}
static int mtk_spi_fifo_transfer(struct spi_master *master,
@@ -586,7 +620,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
struct mtk_spi *mdata;
const struct of_device_id *of_id;
struct resource *res;
- int i, irq, ret;
+ int i, irq, ret, addr_bits;
master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
if (!master) {
@@ -664,7 +698,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
ret = irq;
goto err_put_master;
}
@@ -753,6 +786,15 @@ static int mtk_spi_probe(struct platform_device *pdev)
}
}
+ if (mdata->dev_comp->dma_ext)
+ addr_bits = DMA_ADDR_EXT_BITS;
+ else
+ addr_bits = DMA_ADDR_DEF_BITS;
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
+ if (ret)
+ dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ addr_bits, ret);
+
return 0;
err_disable_runtime_pm:
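The spi-mt65xx change above widens the streaming DMA mask only on variants that expose the 64-bit address registers; the selection boils down to something like the following sketch (foo_* names hypothetical):

#include <linux/dma-mapping.h>

static int foo_set_dma_mask(struct device *dev, bool has_addr_ext)
{
	/*
	 * 36-bit addressing when the IP provides the high-address registers,
	 * plain 32-bit otherwise.
	 */
	return dma_set_mask(dev, DMA_BIT_MASK(has_addr_ext ? 36 : 32));
}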
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index ff85982464d2..2c3b7a2a1ec7 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -327,7 +327,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
- struct resource *r;
int status = 0;
struct clk *clk;
int ret;
@@ -336,8 +335,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
if (!match)
return -EINVAL;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, r);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 7bf53cfc25d6..996c1c8a9c71 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -532,7 +532,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct mxs_spi *spi;
struct mxs_ssp *ssp;
- struct resource *iores;
struct clk *clk;
void __iomem *base;
int devid, clk_freq;
@@ -545,12 +544,11 @@ static int mxs_spi_probe(struct platform_device *pdev)
*/
const int clk_freq_default = 160000000;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
if (irq_err < 0)
return irq_err;
- base = devm_ioremap_resource(&pdev->dev, iores);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
new file mode 100644
index 000000000000..cb52fd8008d0
--- /dev/null
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mfd/syscon.h>
+
+/* NPCM7xx GCR module */
+#define NPCM7XX_INTCR3_OFFSET 0x9C
+#define NPCM7XX_INTCR3_FIU_FIX BIT(6)
+
+/* Flash Interface Unit (FIU) Registers */
+#define NPCM_FIU_DRD_CFG 0x00
+#define NPCM_FIU_DWR_CFG 0x04
+#define NPCM_FIU_UMA_CFG 0x08
+#define NPCM_FIU_UMA_CTS 0x0C
+#define NPCM_FIU_UMA_CMD 0x10
+#define NPCM_FIU_UMA_ADDR 0x14
+#define NPCM_FIU_PRT_CFG 0x18
+#define NPCM_FIU_UMA_DW0 0x20
+#define NPCM_FIU_UMA_DW1 0x24
+#define NPCM_FIU_UMA_DW2 0x28
+#define NPCM_FIU_UMA_DW3 0x2C
+#define NPCM_FIU_UMA_DR0 0x30
+#define NPCM_FIU_UMA_DR1 0x34
+#define NPCM_FIU_UMA_DR2 0x38
+#define NPCM_FIU_UMA_DR3 0x3C
+#define NPCM_FIU_MAX_REG_LIMIT 0x80
+
+/* FIU Direct Read Configuration Register */
+#define NPCM_FIU_DRD_CFG_LCK BIT(31)
+#define NPCM_FIU_DRD_CFG_R_BURST GENMASK(25, 24)
+#define NPCM_FIU_DRD_CFG_ADDSIZ GENMASK(17, 16)
+#define NPCM_FIU_DRD_CFG_DBW GENMASK(13, 12)
+#define NPCM_FIU_DRD_CFG_ACCTYPE GENMASK(9, 8)
+#define NPCM_FIU_DRD_CFG_RDCMD GENMASK(7, 0)
+#define NPCM_FIU_DRD_ADDSIZ_SHIFT 16
+#define NPCM_FIU_DRD_DBW_SHIFT 12
+#define NPCM_FIU_DRD_ACCTYPE_SHIFT 8
+
+/* FIU Direct Write Configuration Register */
+#define NPCM_FIU_DWR_CFG_LCK BIT(31)
+#define NPCM_FIU_DWR_CFG_W_BURST GENMASK(25, 24)
+#define NPCM_FIU_DWR_CFG_ADDSIZ GENMASK(17, 16)
+#define NPCM_FIU_DWR_CFG_ABPCK GENMASK(11, 10)
+#define NPCM_FIU_DWR_CFG_DBPCK GENMASK(9, 8)
+#define NPCM_FIU_DWR_CFG_WRCMD GENMASK(7, 0)
+#define NPCM_FIU_DWR_ADDSIZ_SHIFT 16
+#define NPCM_FIU_DWR_ABPCK_SHIFT 10
+#define NPCM_FIU_DWR_DBPCK_SHIFT 8
+
+/* FIU UMA Configuration Register */
+#define NPCM_FIU_UMA_CFG_LCK BIT(31)
+#define NPCM_FIU_UMA_CFG_CMMLCK BIT(30)
+#define NPCM_FIU_UMA_CFG_RDATSIZ GENMASK(28, 24)
+#define NPCM_FIU_UMA_CFG_DBSIZ GENMASK(23, 21)
+#define NPCM_FIU_UMA_CFG_WDATSIZ GENMASK(20, 16)
+#define NPCM_FIU_UMA_CFG_ADDSIZ GENMASK(13, 11)
+#define NPCM_FIU_UMA_CFG_CMDSIZ BIT(10)
+#define NPCM_FIU_UMA_CFG_RDBPCK GENMASK(9, 8)
+#define NPCM_FIU_UMA_CFG_DBPCK GENMASK(7, 6)
+#define NPCM_FIU_UMA_CFG_WDBPCK GENMASK(5, 4)
+#define NPCM_FIU_UMA_CFG_ADBPCK GENMASK(3, 2)
+#define NPCM_FIU_UMA_CFG_CMBPCK GENMASK(1, 0)
+#define NPCM_FIU_UMA_CFG_ADBPCK_SHIFT 2
+#define NPCM_FIU_UMA_CFG_WDBPCK_SHIFT 4
+#define NPCM_FIU_UMA_CFG_DBPCK_SHIFT 6
+#define NPCM_FIU_UMA_CFG_RDBPCK_SHIFT 8
+#define NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT 11
+#define NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT 16
+#define NPCM_FIU_UMA_CFG_DBSIZ_SHIFT 21
+#define NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT 24
+
+/* FIU UMA Control and Status Register */
+#define NPCM_FIU_UMA_CTS_RDYIE BIT(25)
+#define NPCM_FIU_UMA_CTS_RDYST BIT(24)
+#define NPCM_FIU_UMA_CTS_SW_CS BIT(16)
+#define NPCM_FIU_UMA_CTS_DEV_NUM GENMASK(9, 8)
+#define NPCM_FIU_UMA_CTS_EXEC_DONE BIT(0)
+#define NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT 8
+
+/* FIU UMA Command Register */
+#define NPCM_FIU_UMA_CMD_DUM3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_CMD_DUM2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_CMD_DUM1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_CMD_CMD GENMASK(7, 0)
+
+/* FIU UMA Address Register */
+#define NPCM_FIU_UMA_ADDR_UMA_ADDR GENMASK(31, 0)
+#define NPCM_FIU_UMA_ADDR_AB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_ADDR_AB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_ADDR_AB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_ADDR_AB0 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DW0_WB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW0_WB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW0_WB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW0_WB0 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DW1_WB7 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW1_WB6 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW1_WB5 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW1_WB4 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DW2_WB11 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW2_WB10 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW2_WB9 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW2_WB8 GENMASK(7, 0)
+
+/* FIU UMA Write Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DW3_WB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DW3_WB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DW3_WB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DW3_WB12 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 0-3 Register */
+#define NPCM_FIU_UMA_DR0_RB3 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR0_RB2 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR0_RB1 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR0_RB0 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 4-7 Register */
+#define NPCM_FIU_UMA_DR1_RB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR1_RB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR1_RB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR1_RB12 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 8-11 Register */
+#define NPCM_FIU_UMA_DR2_RB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR2_RB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR2_RB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR2_RB12 GENMASK(7, 0)
+
+/* FIU UMA Read Data Bytes 12-15 Register */
+#define NPCM_FIU_UMA_DR3_RB15 GENMASK(31, 24)
+#define NPCM_FIU_UMA_DR3_RB14 GENMASK(23, 16)
+#define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8)
+#define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0)
+
+/* FIU Read Mode */
+enum {
+ DRD_SINGLE_WIRE_MODE = 0,
+ DRD_DUAL_IO_MODE = 1,
+ DRD_QUAD_IO_MODE = 2,
+ DRD_SPI_X_MODE = 3,
+};
+
+enum {
+ DWR_ABPCK_BIT_PER_CLK = 0,
+ DWR_ABPCK_2_BIT_PER_CLK = 1,
+ DWR_ABPCK_4_BIT_PER_CLK = 2,
+};
+
+enum {
+ DWR_DBPCK_BIT_PER_CLK = 0,
+ DWR_DBPCK_2_BIT_PER_CLK = 1,
+ DWR_DBPCK_4_BIT_PER_CLK = 2,
+};
+
+#define NPCM_FIU_DRD_16_BYTE_BURST 0x3000000
+#define NPCM_FIU_DWR_16_BYTE_BURST 0x3000000
+
+#define MAP_SIZE_128MB 0x8000000
+#define MAP_SIZE_16MB 0x1000000
+#define MAP_SIZE_8MB 0x800000
+
+#define NUM_BITS_IN_BYTE 8
+#define FIU_DRD_MAX_DUMMY_NUMBER 3
+#define NPCM_MAX_CHIP_NUM 4
+#define CHUNK_SIZE 16
+#define UMA_MICRO_SEC_TIMEOUT 150
+
+enum {
+ FIU0 = 0,
+ FIU3,
+ FIUX,
+};
+
+struct npcm_fiu_info {
+ char *name;
+ u32 fiu_id;
+ u32 max_map_size;
+ u32 max_cs;
+};
+
+struct fiu_data {
+ const struct npcm_fiu_info *npcm_fiu_data_info;
+ int fiu_max;
+};
+
+static const struct npcm_fiu_info npcm7xx_fiu_info[] = {
+ {.name = "FIU0", .fiu_id = FIU0,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 2},
+ {.name = "FIU3", .fiu_id = FIU3,
+ .max_map_size = MAP_SIZE_128MB, .max_cs = 4},
+ {.name = "FIUX", .fiu_id = FIUX,
+ .max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
+
+static const struct fiu_data npcm7xx_fiu_data = {
+ .npcm_fiu_data_info = npcm7xx_fiu_info,
+ .fiu_max = 3,
+};
+
+struct npcm_fiu_spi;
+
+struct npcm_fiu_chip {
+ void __iomem *flash_region_mapped_ptr;
+ struct npcm_fiu_spi *fiu;
+ unsigned long clkrate;
+ u32 chipselect;
+};
+
+struct npcm_fiu_spi {
+ struct npcm_fiu_chip chip[NPCM_MAX_CHIP_NUM];
+ const struct npcm_fiu_info *info;
+ struct spi_mem_op drd_op;
+ struct resource *res_mem;
+ struct regmap *regmap;
+ unsigned long clkrate;
+ struct device *dev;
+ struct clk *clk;
+ bool spix_mode;
+};
+
+static const struct regmap_config npcm_mtd_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NPCM_FIU_MAX_REG_LIMIT,
+};
+
+static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
+ const struct spi_mem_op *op)
+{
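+ /* ilog2(buswidth) maps 1/2/4 data lines to the DRD access-type encoding 0/1/2 */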
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ACCTYPE,
+ ilog2(op->addr.buswidth) <<
+ NPCM_FIU_DRD_ACCTYPE_SHIFT);
+ fiu->drd_op.addr.buswidth = op->addr.buswidth;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_DBW,
+ ((op->dummy.nbytes * ilog2(op->addr.buswidth))
+ / NUM_BITS_IN_BYTE) << NPCM_FIU_DRD_DBW_SHIFT);
+ fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
+ fiu->drd_op.cmd.opcode = op->cmd.opcode;
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ADDSIZ,
+ (op->addr.nbytes - 3) << NPCM_FIU_DRD_ADDSIZ_SHIFT);
+ fiu->drd_op.addr.nbytes = op->addr.nbytes;
+}
+
+static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
+ offs);
+ u8 *buf_rx = buf;
+ u32 i;
+
+ if (fiu->spix_mode) {
+ for (i = 0; i < len; i++)
+ *(buf_rx + i) = ioread8(src + i);
+ } else {
+ if (desc->info.op_tmpl.addr.buswidth != fiu->drd_op.addr.buswidth ||
+ desc->info.op_tmpl.dummy.nbytes != fiu->drd_op.dummy.nbytes ||
+ desc->info.op_tmpl.cmd.opcode != fiu->drd_op.cmd.opcode ||
+ desc->info.op_tmpl.addr.nbytes != fiu->drd_op.addr.nbytes)
+ npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+
+ memcpy_fromio(buf_rx, src, len);
+ }
+
+ return len;
+}
+
+static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
+ offs);
+ const u8 *buf_tx = buf;
+ u32 i;
+
+ if (fiu->spix_mode)
+ for (i = 0; i < len; i++)
+ iowrite8(*(buf_tx + i), dst + i);
+ else
+ memcpy_toio(dst, buf_tx, len);
+
+ return len;
+}
+
+static int npcm_fiu_uma_read(struct spi_mem *mem,
+ const struct spi_mem_op *op, u32 addr,
+ bool is_address_size, u8 *data, u32 data_size)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
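+ /* BIT(10) is NPCM_FIU_UMA_CFG_CMDSIZ: transfer a single command byte */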
+ u32 uma_cfg = BIT(10);
+ u32 data_reg[4];
+ int ret;
+ u32 val;
+ u32 i;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+ NPCM_FIU_UMA_CMD_CMD, op->cmd.opcode);
+
+ if (is_address_size) {
+ uma_cfg |= ilog2(op->cmd.buswidth);
+ uma_cfg |= ilog2(op->addr.buswidth)
+ << NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+ uma_cfg |= ilog2(op->dummy.buswidth)
+ << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ uma_cfg |= ilog2(op->data.buswidth)
+ << NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
+ uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
+ uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, addr);
+ } else {
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+ }
+
+ uma_cfg |= data_size << NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+ regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_EXEC_DONE,
+ NPCM_FIU_UMA_CTS_EXEC_DONE);
+ ret = regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+ (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+ UMA_MICRO_SEC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+ regmap_read(fiu->regmap, NPCM_FIU_UMA_DR0 + (i * 4),
+ &data_reg[i]);
+ memcpy(data, data_reg, data_size);
+ }
+
+ return 0;
+}
+
+static int npcm_fiu_uma_write(struct spi_mem *mem,
+ const struct spi_mem_op *op, u8 cmd,
+ bool is_address_size, u8 *data, u32 data_size)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ u32 uma_cfg = BIT(10);
+ u32 data_reg[4] = {0};
+ u32 val;
+ u32 i;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
+ NPCM_FIU_UMA_CMD_CMD, cmd);
+
+ if (data_size) {
+ memcpy(data_reg, data, data_size);
+ for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_DW0 + (i * 4),
+ data_reg[i]);
+ }
+
+ if (is_address_size) {
+ uma_cfg |= ilog2(op->cmd.buswidth);
+ uma_cfg |= ilog2(op->addr.buswidth) <<
+ NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
+ uma_cfg |= ilog2(op->data.buswidth) <<
+ NPCM_FIU_UMA_CFG_WDBPCK_SHIFT;
+ uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, op->addr.val);
+ } else {
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
+ }
+
+ uma_cfg |= (data_size << NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT);
+ regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
+
+ regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_EXEC_DONE,
+ NPCM_FIU_UMA_CTS_EXEC_DONE);
+
+ return regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
+ (!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
+ UMA_MICRO_SEC_TIMEOUT);
+}
+
+static int npcm_fiu_manualwrite(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ u8 *data = (u8 *)op->data.buf.out;
+ u32 num_data_chunks;
+ u32 remain_data;
+ u32 idx = 0;
+ int ret;
+
+ num_data_chunks = op->data.nbytes / CHUNK_SIZE;
+ remain_data = op->data.nbytes % CHUNK_SIZE;
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_DEV_NUM,
+ (mem->spi->chip_select <<
+ NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
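+ /* Assert chip-select by software so command, address and data chunks share one CS frame */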
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_SW_CS, 0);
+
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, true, NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Write the data in CHUNK_SIZE (16-byte) chunks */
+ for (idx = 0; idx < num_data_chunks; ++idx) {
+ ret = npcm_fiu_uma_write(mem, op, data[0], false,
+ &data[1], CHUNK_SIZE - 1);
+ if (ret)
+ return ret;
+
+ data += CHUNK_SIZE;
+ }
+
+ /* Handle the remaining partial chunk */
+ if (remain_data > 0) {
+ ret = npcm_fiu_uma_write(mem, op, data[0], false,
+ &data[1], remain_data - 1);
+ if (ret)
+ return ret;
+ }
+
+ regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
+ NPCM_FIU_UMA_CTS_SW_CS, NPCM_FIU_UMA_CTS_SW_CS);
+
+ return 0;
+}
+
+static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ u8 *data = op->data.buf.in;
+ int i, readlen, currlen;
+ u8 *buf_ptr;
+ u32 addr;
+ int ret;
+
+ i = 0;
+ currlen = op->data.nbytes;
+
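+ /* The UMA data registers hold at most 16 bytes, so read in 16-byte chunks */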
+ do {
+ addr = ((u32)op->addr.val + i);
+ if (currlen < 16)
+ readlen = currlen;
+ else
+ readlen = 16;
+
+ buf_ptr = data + i;
+ ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
+ readlen);
+ if (ret)
+ return ret;
+
+ i += readlen;
+ currlen -= 16;
+ } while (currlen > 0);
+
+ return 0;
+}
+
+static void npcm_fiux_set_direct_wr(struct npcm_fiu_spi *fiu)
+{
+ regmap_write(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_16_BYTE_BURST);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_CFG_ABPCK,
+ DWR_ABPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_ABPCK_SHIFT);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
+ NPCM_FIU_DWR_CFG_DBPCK,
+ DWR_DBPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_DBPCK_SHIFT);
+}
+
+static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
+{
+ u32 rx_dummy = 0;
+
+ regmap_write(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_16_BYTE_BURST);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_ACCTYPE,
+ DRD_SPI_X_MODE << NPCM_FIU_DRD_ACCTYPE_SHIFT);
+ regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
+ NPCM_FIU_DRD_CFG_DBW,
+ rx_dummy << NPCM_FIU_DRD_DBW_SHIFT);
+}
+
+static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[mem->spi->chip_select];
+ int ret = 0;
+ u8 *buf;
+
+ dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth, op->addr.val,
+ op->data.nbytes);
+
+ if (fiu->spix_mode || op->addr.nbytes > 4)
+ return -ENOTSUPP;
+
+ if (fiu->clkrate != chip->clkrate) {
+ ret = clk_set_rate(fiu->clk, chip->clkrate);
+ if (ret < 0)
+ dev_warn(fiu->dev, "Failed setting %lu frequency, stay at %lu frequency\n",
+ chip->clkrate, fiu->clkrate);
+ else
+ fiu->clkrate = chip->clkrate;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!op->addr.nbytes) {
+ buf = op->data.buf.in;
+ ret = npcm_fiu_uma_read(mem, op, op->addr.val, false,
+ buf, op->data.nbytes);
+ } else {
+ ret = npcm_fiu_read(mem, op);
+ }
+ } else {
+ if (!op->addr.nbytes && !op->data.nbytes)
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ NULL, 0);
+ if (op->addr.nbytes && !op->data.nbytes) {
+ int i;
+ u8 buf_addr[4];
+ u32 addr = op->addr.val;
+
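+ /* Transmit the address bytes MSB first as the write payload */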
+ for (i = op->addr.nbytes - 1; i >= 0; i--) {
+ buf_addr[i] = addr & 0xff;
+ addr >>= 8;
+ }
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ buf_addr, op->addr.nbytes);
+ }
+ if (!op->addr.nbytes && op->data.nbytes)
+ ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
+ (u8 *)op->data.buf.out,
+ op->data.nbytes);
+ if (op->addr.nbytes && op->data.nbytes)
+ ret = npcm_fiu_manualwrite(mem, op);
+ }
+
+ return ret;
+}
+
+static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct npcm_fiu_spi *fiu =
+ spi_controller_get_devdata(desc->mem->spi->master);
+ struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
+ struct regmap *gcr_regmap;
+
+ if (!fiu->res_mem) {
+ dev_warn(fiu->dev, "Reserved memory not defined, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+
+ if (!fiu->spix_mode &&
+ desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) {
+ desc->nodirmap = true;
+ return 0;
+ }
+
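+ /* Map this chip-select's slice of the reserved flash memory window */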
+ if (!chip->flash_region_mapped_ptr) {
+ chip->flash_region_mapped_ptr =
+ devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
+ (fiu->info->max_map_size *
+ desc->mem->spi->chip_select)),
+ (u32)desc->info.length);
+ if (!chip->flash_region_mapped_ptr) {
+ dev_warn(fiu->dev, "Error mapping memory region, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+ }
+
+ if (of_device_is_compatible(fiu->dev->of_node, "nuvoton,npcm750-fiu")) {
+ gcr_regmap =
+ syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(gcr_regmap)) {
+ dev_warn(fiu->dev, "Didn't find nuvoton,npcm750-gcr, direct read disabled\n");
+ desc->nodirmap = true;
+ return 0;
+ }
+ regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_FIU_FIX,
+ NPCM7XX_INTCR3_FIU_FIX);
+ }
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
+ if (!fiu->spix_mode)
+ npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
+ else
+ npcm_fiux_set_direct_rd(fiu);
+
+ } else {
+ npcm_fiux_set_direct_wr(fiu);
+ }
+
+ return 0;
+}
+
+static int npcm_fiu_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
+ struct npcm_fiu_chip *chip;
+
+ chip = &fiu->chip[spi->chip_select];
+ chip->fiu = fiu;
+ chip->chipselect = spi->chip_select;
+ chip->clkrate = spi->max_speed_hz;
+
+ fiu->clkrate = clk_get_rate(fiu->clk);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
+ .exec_op = npcm_fiu_exec_op,
+ .dirmap_create = npcm_fiu_dirmap_create,
+ .dirmap_read = npcm_fiu_direct_read,
+ .dirmap_write = npcm_fiu_direct_write,
+};
+
+static const struct of_device_id npcm_fiu_dt_ids[] = {
+ { .compatible = "nuvoton,npcm750-fiu", .data = &npcm7xx_fiu_data },
+ { /* sentinel */ }
+};
+
+static int npcm_fiu_probe(struct platform_device *pdev)
+{
+ const struct fiu_data *fiu_data_match;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct npcm_fiu_spi *fiu;
+ void __iomem *regbase;
+ struct resource *res;
+ int ret;
+ int id;
+
+ ctrl = spi_alloc_master(dev, sizeof(*fiu));
+ if (!ctrl)
+ return -ENOMEM;
+
+ fiu = spi_controller_get_devdata(ctrl);
+
+ match = of_match_device(npcm_fiu_dt_ids, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "No compatible OF match\n");
+ return -ENODEV;
+ }
+
+ fiu_data_match = match->data;
+ id = of_alias_get_id(dev->of_node, "fiu");
+ if (id < 0 || id >= fiu_data_match->fiu_max) {
+ dev_err(dev, "Invalid platform device id: %d\n", id);
+ return -EINVAL;
+ }
+
+ fiu->info = &fiu_data_match->npcm_fiu_data_info[id];
+
+ platform_set_drvdata(pdev, fiu);
+ fiu->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ fiu->regmap = devm_regmap_init_mmio(dev, regbase,
+ &npcm_mtd_regmap_config);
+ if (IS_ERR(fiu->regmap)) {
+ dev_err(dev, "Failed to create regmap\n");
+ return PTR_ERR(fiu->regmap);
+ }
+
+ fiu->res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "memory");
+ fiu->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(fiu->clk))
+ return PTR_ERR(fiu->clk);
+
+ fiu->spix_mode = of_property_read_bool(dev->of_node,
+ "nuvoton,spix-mode");
+
+ clk_prepare_enable(fiu->clk);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
+ | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->setup = npcm_fiu_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &npcm_fiu_mem_ops;
+ ctrl->num_chipselect = fiu->info->max_cs;
+ ctrl->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_master(dev, ctrl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int npcm_fiu_remove(struct platform_device *pdev)
+{
+ struct npcm_fiu_spi *fiu = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(fiu->clk);
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, npcm_fiu_dt_ids);
+
+static struct platform_driver npcm_fiu_driver = {
+ .driver = {
+ .name = "NPCM-FIU",
+ .bus = &platform_bus_type,
+ .of_match_table = npcm_fiu_dt_ids,
+ },
+ .probe = npcm_fiu_probe,
+ .remove = npcm_fiu_remove,
+};
+module_platform_driver(npcm_fiu_driver);
+
+MODULE_DESCRIPTION("Nuvoton FLASH Interface Unit SPI Controller Driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 734a2b956959..b191d57d1dc0 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -341,7 +341,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
{
struct npcm_pspi *priv;
struct spi_master *master;
- struct resource *res;
unsigned long clk_hz;
struct device_node *np = pdev->dev.of_node;
int num_cs, i;
@@ -368,8 +367,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
priv->is_save_param = false;
priv->id = pdev->id;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
@@ -388,7 +386,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
ret = irq;
goto out_disable_clk;
}
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
deleted file mode 100644
index 37e2034ad4d5..000000000000
--- a/drivers/spi/spi-nuc900.c
+++ /dev/null
@@ -1,429 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009 Nuvoton technology.
- * Wan ZongShun <mcuos.com@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-
-#include <linux/platform_data/spi-nuc900.h>
-
-/* usi registers offset */
-#define USI_CNT 0x00
-#define USI_DIV 0x04
-#define USI_SSR 0x08
-#define USI_RX0 0x10
-#define USI_TX0 0x10
-
-/* usi register bit */
-#define ENINT (0x01 << 17)
-#define ENFLG (0x01 << 16)
-#define SLEEP (0x0f << 12)
-#define TXNUM (0x03 << 8)
-#define TXBITLEN (0x1f << 3)
-#define TXNEG (0x01 << 2)
-#define RXNEG (0x01 << 1)
-#define LSB (0x01 << 10)
-#define SELECTLEV (0x01 << 2)
-#define SELECTPOL (0x01 << 31)
-#define SELECTSLAVE 0x01
-#define GOBUSY 0x01
-
-struct nuc900_spi {
- struct spi_bitbang bitbang;
- struct completion done;
- void __iomem *regs;
- int irq;
- int len;
- int count;
- const unsigned char *tx;
- unsigned char *rx;
- struct clk *clk;
- struct spi_master *master;
- struct nuc900_spi_info *pdata;
- spinlock_t lock;
-};
-
-static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
-{
- return spi_master_get_devdata(sdev->master);
-}
-
-static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr)
-{
- struct nuc900_spi *hw = to_hw(spi);
- unsigned int val;
- unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0;
- unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_SSR);
-
- if (!cs)
- val &= ~SELECTLEV;
- else
- val |= SELECTLEV;
-
- if (!ssr)
- val &= ~SELECTSLAVE;
- else
- val |= SELECTSLAVE;
-
- __raw_writel(val, hw->regs + USI_SSR);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- if (!cpol)
- val &= ~SELECTPOL;
- else
- val |= SELECTPOL;
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_spi_chipsel(struct spi_device *spi, int value)
-{
- switch (value) {
- case BITBANG_CS_INACTIVE:
- nuc900_slave_select(spi, 0);
- break;
-
- case BITBANG_CS_ACTIVE:
- nuc900_slave_select(spi, 1);
- break;
- }
-}
-
-static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
-
- if (txnum)
- val |= txnum << 0x08;
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-
-}
-
-static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
- unsigned int txbitlen)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
-
- val |= (txbitlen << 0x03);
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_spi_gobusy(struct nuc900_spi *hw)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- val |= GOBUSY;
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
-{
- return hw->tx ? hw->tx[count] : 0;
-}
-
-static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
-{
- struct nuc900_spi *hw = to_hw(spi);
-
- hw->tx = t->tx_buf;
- hw->rx = t->rx_buf;
- hw->len = t->len;
- hw->count = 0;
-
- __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0);
-
- nuc900_spi_gobusy(hw);
-
- wait_for_completion(&hw->done);
-
- return hw->count;
-}
-
-static irqreturn_t nuc900_spi_irq(int irq, void *dev)
-{
- struct nuc900_spi *hw = dev;
- unsigned int status;
- unsigned int count = hw->count;
-
- status = __raw_readl(hw->regs + USI_CNT);
- __raw_writel(status, hw->regs + USI_CNT);
-
- if (status & ENFLG) {
- hw->count++;
-
- if (hw->rx)
- hw->rx[count] = __raw_readl(hw->regs + USI_RX0);
- count++;
-
- if (count < hw->len) {
- __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0);
- nuc900_spi_gobusy(hw);
- } else {
- complete(&hw->done);
- }
-
- return IRQ_HANDLED;
- }
-
- complete(&hw->done);
- return IRQ_HANDLED;
-}
-
-static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- if (edge)
- val |= TXNEG;
- else
- val &= ~TXNEG;
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- if (edge)
- val |= RXNEG;
- else
- val &= ~RXNEG;
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- if (lsb)
- val |= LSB;
- else
- val &= ~LSB;
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
-
- if (sleep)
- val |= (sleep << 12);
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_enable_int(struct nuc900_spi *hw)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&hw->lock, flags);
-
- val = __raw_readl(hw->regs + USI_CNT);
-
- val |= ENINT;
-
- __raw_writel(val, hw->regs + USI_CNT);
-
- spin_unlock_irqrestore(&hw->lock, flags);
-}
-
-static void nuc900_set_divider(struct nuc900_spi *hw)
-{
- __raw_writel(hw->pdata->divider, hw->regs + USI_DIV);
-}
-
-static void nuc900_init_spi(struct nuc900_spi *hw)
-{
- clk_enable(hw->clk);
- spin_lock_init(&hw->lock);
-
- nuc900_tx_edge(hw, hw->pdata->txneg);
- nuc900_rx_edge(hw, hw->pdata->rxneg);
- nuc900_send_first(hw, hw->pdata->lsb);
- nuc900_set_sleep(hw, hw->pdata->sleep);
- nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen);
- nuc900_spi_setup_txnum(hw, hw->pdata->txnum);
- nuc900_set_divider(hw);
- nuc900_enable_int(hw);
-}
-
-static int nuc900_spi_probe(struct platform_device *pdev)
-{
- struct nuc900_spi *hw;
- struct spi_master *master;
- struct resource *res;
- int err = 0;
-
- master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
- if (master == NULL) {
- dev_err(&pdev->dev, "No memory for spi_master\n");
- return -ENOMEM;
- }
-
- hw = spi_master_get_devdata(master);
- hw->master = master;
- hw->pdata = dev_get_platdata(&pdev->dev);
-
- if (hw->pdata == NULL) {
- dev_err(&pdev->dev, "No platform data supplied\n");
- err = -ENOENT;
- goto err_pdata;
- }
-
- platform_set_drvdata(pdev, hw);
- init_completion(&hw->done);
-
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- if (hw->pdata->lsb)
- master->mode_bits |= SPI_LSB_FIRST;
- master->num_chipselect = hw->pdata->num_cs;
- master->bus_num = hw->pdata->bus_num;
- hw->bitbang.master = hw->master;
- hw->bitbang.chipselect = nuc900_spi_chipsel;
- hw->bitbang.txrx_bufs = nuc900_spi_txrx;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hw->regs)) {
- err = PTR_ERR(hw->regs);
- goto err_pdata;
- }
-
- hw->irq = platform_get_irq(pdev, 0);
- if (hw->irq < 0) {
- dev_err(&pdev->dev, "No IRQ specified\n");
- err = -ENOENT;
- goto err_pdata;
- }
-
- err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0,
- pdev->name, hw);
- if (err) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_pdata;
- }
-
- hw->clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(hw->clk)) {
- dev_err(&pdev->dev, "No clock for device\n");
- err = PTR_ERR(hw->clk);
- goto err_pdata;
- }
-
- mfp_set_groupg(&pdev->dev, NULL);
- nuc900_init_spi(hw);
-
- err = spi_bitbang_start(&hw->bitbang);
- if (err) {
- dev_err(&pdev->dev, "Failed to register SPI master\n");
- goto err_register;
- }
-
- return 0;
-
-err_register:
- clk_disable(hw->clk);
-err_pdata:
- spi_master_put(hw->master);
- return err;
-}
-
-static int nuc900_spi_remove(struct platform_device *dev)
-{
- struct nuc900_spi *hw = platform_get_drvdata(dev);
-
- spi_bitbang_stop(&hw->bitbang);
- clk_disable(hw->clk);
- spi_master_put(hw->master);
- return 0;
-}
-
-static struct platform_driver nuc900_spi_driver = {
- .probe = nuc900_spi_probe,
- .remove = nuc900_spi_remove,
- .driver = {
- .name = "nuc900-spi",
- },
-};
-module_platform_driver(nuc900_spi_driver);
-
-MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("nuc900 spi driver!");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:nuc900-spi");
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 8894f98cc99c..501b923f2c27 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1007,10 +1007,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the irq */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get the irq: %d\n", ret);
+ if (ret < 0)
goto err_disable_clk;
- }
ret = devm_request_irq(dev, ret,
nxp_fspi_irq_handler, 0, pdev->name, f);
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index bbc4ba66571f..e2331eb7b47a 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -240,7 +240,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
- struct resource *res;
unsigned int i;
int err = -ENODEV;
@@ -264,8 +263,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
/* find and map our resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->base = devm_ioremap_resource(&pdev->dev, res);
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base)) {
err = PTR_ERR(hw->base);
goto exit;
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index b635526ad414..86ad17597f5f 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -570,7 +570,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct pic32_sqi *sqi;
- struct resource *reg;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
@@ -580,8 +579,7 @@ static int pic32_sqi_probe(struct platform_device *pdev)
sqi = spi_master_get_devdata(master);
sqi->master = master;
- reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
+ sqi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sqi->regs)) {
ret = PTR_ERR(sqi->regs);
goto err_free_master;
@@ -590,7 +588,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
/* irq */
sqi->irq = platform_get_irq(pdev, 0);
if (sqi->irq < 0) {
- dev_err(&pdev->dev, "no irq found\n");
ret = sqi->irq;
goto err_free_master;
}
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 10cebeaa1e6b..69f517ec59c6 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -711,22 +711,16 @@ static int pic32_spi_hw_probe(struct platform_device *pdev,
/* get irq resources: err-irq, rx-irq, tx-irq */
pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
- if (pic32s->fault_irq < 0) {
- dev_err(&pdev->dev, "fault-irq not found\n");
+ if (pic32s->fault_irq < 0)
return pic32s->fault_irq;
- }
pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
- if (pic32s->rx_irq < 0) {
- dev_err(&pdev->dev, "rx-irq not found\n");
+ if (pic32s->rx_irq < 0)
return pic32s->rx_irq;
- }
pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
- if (pic32s->tx_irq < 0) {
- dev_err(&pdev->dev, "tx-irq not found\n");
+ if (pic32s->tx_irq < 0)
return pic32s->tx_irq;
- }
/* get clock */
pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fc7ab4b26880..bb6a14d1ab0f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1457,6 +1457,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
+ /* TGL-LP */
+ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
{ },
};
@@ -1831,14 +1839,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
status = devm_spi_register_controller(&pdev->dev, controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi controller\n");
- goto out_error_clock_enabled;
+ goto out_error_pm_runtime_enabled;
}
return status;
-out_error_clock_enabled:
+out_error_pm_runtime_enabled:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+out_error_clock_enabled:
clk_disable_unprepare(ssp->clk);
out_error_dma_irq_alloc:
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index e0f061139c8f..250fd60e1678 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -424,7 +424,6 @@ static int qcom_qspi_probe(struct platform_device *pdev)
{
int ret;
struct device *dev;
- struct resource *res;
struct spi_master *master;
struct qcom_qspi *ctrl;
@@ -440,8 +439,7 @@ static int qcom_qspi_probe(struct platform_device *pdev)
spin_lock_init(&ctrl->lock);
ctrl->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctrl->base = devm_ioremap_resource(dev, res);
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctrl->base)) {
ret = PTR_ERR(ctrl->base);
goto exit_probe_master_put;
@@ -454,10 +452,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
goto exit_probe_master_put;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to get irq %d\n", ret);
+ if (ret < 0)
goto exit_probe_master_put;
- }
ret = devm_request_irq(dev, ret, qcom_qspi_irq,
IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
if (ret) {
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index 51f03d977ad6..4c9620e0d18c 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -135,12 +135,10 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct clk *ahb_clk;
struct rb4xx_spi *rbspi;
- struct resource *r;
int err;
void __iomem *spi_base;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi_base = devm_ioremap_resource(&pdev->dev, r);
+ spi_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_base))
return PTR_ERR(spi_base);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 48d8dff05a3a..2d6e37f25e2d 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -487,7 +487,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
struct s3c2410_spi_info *pdata;
struct s3c24xx_spi *hw;
struct spi_master *master;
- struct resource *res;
int err = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
@@ -536,8 +535,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
/* find and map our resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ hw->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->regs)) {
err = PTR_ERR(hw->regs);
goto err_no_pdata;
@@ -545,7 +543,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq < 0) {
- dev_err(&pdev->dev, "No IRQ specified\n");
err = -ENOENT;
goto err_no_pdata;
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index b50bdbc27e58..8f134735291f 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1297,7 +1297,6 @@ static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
- struct resource *r;
struct spi_controller *ctlr;
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
@@ -1346,13 +1345,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
i = platform_get_irq(pdev, 0);
if (i < 0) {
- dev_err(&pdev->dev, "cannot get IRQ\n");
ret = i;
goto err1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+ p->mapbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->mapbase)) {
ret = PTR_ERR(p->mapbase);
goto err1;
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index f1ee58208216..20bdae5fdf3b 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -437,10 +437,8 @@ static int spi_sh_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq error: %d\n", irq);
+ if (irq < 0)
return irq;
- }
master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
if (master == NULL) {
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
index 93ec2c6cdbfd..35254bdc42c4 100644
--- a/drivers/spi/spi-sifive.c
+++ b/drivers/spi/spi-sifive.c
@@ -292,7 +292,6 @@ sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
static int sifive_spi_probe(struct platform_device *pdev)
{
struct sifive_spi *spi;
- struct resource *res;
int ret, irq, num_cs;
u32 cs_bits, max_bits_per_word;
struct spi_master *master;
@@ -307,8 +306,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
init_completion(&spi->done);
platform_set_drvdata(pdev, master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi->regs = devm_ioremap_resource(&pdev->dev, res);
+ spi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regs)) {
ret = PTR_ERR(spi->regs);
goto put_master;
@@ -323,7 +321,6 @@ static int sifive_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "Unable to find interrupt\n");
ret = irq;
goto put_master;
}
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 71b882ab31b9..e1e639191557 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1070,7 +1070,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_spi *sspi;
struct spi_master *master;
- struct resource *mem_res;
const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
@@ -1097,8 +1096,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
sspi->fifo_size = spi_comp_data->fifo_size;
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ sspi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base)) {
ret = PTR_ERR(sspi->base);
goto free_master;
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index d1075433f6a6..61bc43b0fe57 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -410,7 +410,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
ret = irq;
goto err_put_ctlr;
}
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index df5960bddfe6..9a051286f120 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -86,6 +86,7 @@
#define BIT_WDG_EN BIT(2)
/* Definition of PMIC reset status register */
+#define HWRST_STATUS_SECURITY 0x02
#define HWRST_STATUS_RECOVERY 0x20
#define HWRST_STATUS_NORMAL 0x40
#define HWRST_STATUS_ALARM 0x50
@@ -97,6 +98,8 @@
#define HWRST_STATUS_AUTODLOADER 0xa0
#define HWRST_STATUS_IQMODE 0xb0
#define HWRST_STATUS_SPRDISK 0xc0
+#define HWRST_STATUS_FACTORYTEST 0xe0
+#define HWRST_STATUS_WATCHDOG 0xf0
/* Use default timeout 50 ms that converts to watchdog values */
#define WDG_LOAD_VAL ((50 * 1000) / 32768)
@@ -162,14 +165,16 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
int read_timeout = ADI_READ_TIMEOUT;
unsigned long flags;
u32 val, rd_addr;
- int ret;
-
- ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
- ADI_HWSPINLOCK_TIMEOUT,
- &flags);
- if (ret) {
- dev_err(sadi->dev, "get the hw lock failed\n");
- return ret;
+ int ret = 0;
+
+ if (sadi->hwlock) {
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
}
/*
@@ -216,7 +221,8 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
*read_val = val & RD_VALUE_MASK;
out:
- hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (sadi->hwlock)
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
return ret;
}
@@ -227,12 +233,14 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
unsigned long flags;
int ret;
- ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
- ADI_HWSPINLOCK_TIMEOUT,
- &flags);
- if (ret) {
- dev_err(sadi->dev, "get the hw lock failed\n");
- return ret;
+ if (sadi->hwlock) {
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
}
ret = sprd_adi_drain_fifo(sadi);
@@ -258,7 +266,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
}
out:
- hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (sadi->hwlock)
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
return ret;
}
@@ -307,6 +316,18 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
return 0;
}
+static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
+{
+#ifdef CONFIG_SPRD_WATCHDOG
+ u32 val;
+
+ /* Set default watchdog reboot mode */
+ sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+ val |= HWRST_STATUS_WATCHDOG;
+ sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+#endif
+}
+
static int sprd_adi_restart_handler(struct notifier_block *this,
unsigned long mode, void *cmd)
{
@@ -336,11 +357,16 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
reboot_mode = HWRST_STATUS_IQMODE;
else if (!strncmp(cmd, "sprdisk", 7))
reboot_mode = HWRST_STATUS_SPRDISK;
+ else if (!strncmp(cmd, "tospanic", 8))
+ reboot_mode = HWRST_STATUS_SECURITY;
+ else if (!strncmp(cmd, "factorytest", 11))
+ reboot_mode = HWRST_STATUS_FACTORYTEST;
else
reboot_mode = HWRST_STATUS_NORMAL;
/* Record the reboot mode */
sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+ val &= ~HWRST_STATUS_WATCHDOG;
val |= reboot_mode;
sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
@@ -380,9 +406,6 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
const __be32 *list;
u32 tmp;
- /* Address bits select default 12 bits */
- writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
-
/* Set all channels as default priority */
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
@@ -459,19 +482,30 @@ static int sprd_adi_probe(struct platform_device *pdev)
sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
sadi->ctlr = ctlr;
sadi->dev = &pdev->dev;
- ret = of_hwspin_lock_get_id_byname(np, "adi");
- if (ret < 0) {
- dev_err(&pdev->dev, "can not get the hardware spinlock\n");
- goto put_ctlr;
- }
-
- sadi->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
- if (!sadi->hwlock) {
- ret = -ENXIO;
- goto put_ctlr;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ sadi->hwlock =
+ devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+ } else {
+ switch (ret) {
+ case -ENOENT:
+ dev_info(&pdev->dev, "no hardware spinlock supplied\n");
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "failed to find hwlock id, %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto put_ctlr;
+ }
}
sprd_adi_hw_init(sadi);
+ sprd_adi_set_wdt_rst_mode(sadi);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 1b7eebb72c07..8c9021b7f7a9 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -843,10 +843,8 @@ static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
int ret;
ss->irq = platform_get_irq(pdev, 0);
- if (ss->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
+ if (ss->irq < 0)
return ss->irq;
- }
ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
0, pdev->name, ss);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 840a6bf81336..0c24c494f386 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -298,7 +298,6 @@ static int spi_st_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_master *master;
- struct resource *res;
struct spi_st *spi_st;
int irq, ret = 0;
u32 var;
@@ -331,8 +330,7 @@ static int spi_st_probe(struct platform_device *pdev)
init_completion(&spi_st->done);
/* Get resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+ spi_st->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_st->base)) {
ret = PTR_ERR(spi_st->base);
goto clk_disable;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 655e4afbfb2a..9ac6f9fe13cf 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -570,11 +570,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "IRQ error missing or invalid\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 5194bc07fd60..cbfac6596fad 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -428,7 +428,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun4i_spi *sspi;
- struct resource *res;
int ret = 0, irq;
master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
@@ -440,8 +439,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;
@@ -449,7 +447,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "No spi IRQ specified\n");
ret = -ENXIO;
goto err_free_master;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index ee2bdaf5b856..ec7967be9e2f 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -435,7 +435,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun6i_spi *sspi;
- struct resource *res;
int ret = 0, irq;
master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
@@ -447,8 +446,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;
@@ -456,7 +454,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "No spi IRQ specified\n");
ret = -ENXIO;
goto err_free_master;
}
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index f99abd85c50a..ae17c99cce03 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -670,7 +670,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)
rx_irq = platform_get_irq(pdev, 0);
if (rx_irq <= 0) {
- dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
ret = rx_irq;
goto put_spi;
}
@@ -685,7 +684,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)
tx_irq = platform_get_irq(pdev, 1);
if (tx_irq <= 0) {
- dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
ret = tx_irq;
goto put_spi;
}
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index cd714a4f52c6..a841a7250d14 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -419,7 +419,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_sflash_data *tsd;
- struct resource *r;
int ret;
const struct of_device_id *match;
@@ -451,8 +450,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
&master->max_speed_hz))
master->max_speed_hz = 25000000; /* 25MHz */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tsd->base = devm_ioremap_resource(&pdev->dev, r);
+ tsd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
goto exit_free_master;
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6ca600702470..3cb65371ae3b 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -717,7 +717,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no irq resource?\n");
ret = irq;
goto free_master;
}
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index b32c77df5d49..47cde1864630 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -16,6 +17,7 @@
#include <asm/unaligned.h>
#define SSI_TIMEOUT_MS 2000
+#define SSI_POLL_TIMEOUT_US 200
#define SSI_MAX_CLK_DIVIDER 254
#define SSI_MIN_CLK_DIVIDER 4
@@ -214,6 +216,7 @@ static void uniphier_spi_setup_transfer(struct spi_device *spi,
if (!priv->is_save_param || priv->mode != spi->mode) {
uniphier_spi_set_mode(spi);
priv->mode = spi->mode;
+ priv->is_save_param = false;
}
if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
@@ -226,8 +229,7 @@ static void uniphier_spi_setup_transfer(struct spi_device *spi,
priv->speed_hz = t->speed_hz;
}
- if (!priv->is_save_param)
- priv->is_save_param = true;
+ priv->is_save_param = true;
/* reset FIFOs */
val = SSI_FC_TXFFL | SSI_FC_RXFFL;
@@ -290,21 +292,23 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
- unsigned int tx_count;
+ unsigned int fifo_threshold, fill_bytes;
u32 val;
- tx_count = DIV_ROUND_UP(priv->tx_bytes,
+ fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
bytes_per_word(priv->bits_per_word));
- tx_count = min(tx_count, SSI_FIFO_DEPTH);
+ fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);
+
+ fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);
/* set fifo threshold */
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
- val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
- val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
writel(val, priv->base + SSI_FC);
- while (tx_count--)
+ while (fill_bytes--)
uniphier_spi_send(priv);
}
@@ -323,20 +327,14 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
-static int uniphier_spi_transfer_one(struct spi_master *master,
- struct spi_device *spi,
- struct spi_transfer *t)
+static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
struct device *dev = master->dev.parent;
unsigned long time_left;
- /* Terminate and return success for 0 byte length transfer */
- if (!t->len)
- return 0;
-
- uniphier_spi_setup_transfer(spi, t);
-
reinit_completion(&priv->xfer_done);
uniphier_spi_fill_tx_fifo(priv);
@@ -356,6 +354,59 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
return priv->error;
}
+static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ int loop = SSI_POLL_TIMEOUT_US * 10;
+
+ while (priv->tx_bytes) {
+ uniphier_spi_fill_tx_fifo(priv);
+
+ while ((priv->rx_bytes - priv->tx_bytes) > 0) {
+ while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
+ && loop--)
+ ndelay(100);
+
+ if (loop == -1)
+ goto irq_transfer;
+
+ uniphier_spi_recv(priv);
+ }
+ }
+
+ return 0;
+
+irq_transfer:
+ return uniphier_spi_transfer_one_irq(master, spi, t);
+}
+
+static int uniphier_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ unsigned long threshold;
+
+ /* Terminate and return success for 0 byte length transfer */
+ if (!t->len)
+ return 0;
+
+ uniphier_spi_setup_transfer(spi, t);
+
+ /*
+ * If the transfer operation will take longer than
+ * SSI_POLL_TIMEOUT_US, it should use irq.
+ */
+ threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
+ USEC_PER_SEC * BITS_PER_BYTE);
+ if (t->len > threshold)
+ return uniphier_spi_transfer_one_irq(master, spi, t);
+ else
+ return uniphier_spi_transfer_one_poll(master, spi, t);
+}
+
static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
@@ -419,7 +470,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
- struct resource *res;
unsigned long clk_rate;
int irq;
int ret;
@@ -434,8 +484,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;
@@ -454,7 +503,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
ret = irq;
goto out_disable_clk;
}
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 1dc479fab98c..797ac0ea8fa3 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -370,7 +370,6 @@ static int xlp_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct xlp_spi_priv *xspi;
- struct resource *res;
struct clk *clk;
int irq, err;
@@ -378,16 +377,13 @@ static int xlp_spi_probe(struct platform_device *pdev)
if (!xspi)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xspi->base = devm_ioremap_resource(&pdev->dev, res);
+ xspi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->base))
return PTR_ERR(xspi->base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
+ if (irq < 0)
return irq;
- }
err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
pdev->name, xspi);
if (err) {
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index c6bee67decb5..5cf6993ddce5 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -620,7 +620,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct zynq_qspi *xqspi;
- struct resource *res;
u32 num_cs;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
@@ -630,8 +629,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi = spi_controller_get_devdata(ctlr);
xqspi->dev = dev;
platform_set_drvdata(pdev, xqspi);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
@@ -671,7 +669,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
- dev_err(&pdev->dev, "irq resource not found\n");
goto remove_master;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
@@ -695,7 +692,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->setup = zynq_qspi_setup_op;
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
ctlr->dev.of_node = np;
- ret = spi_register_controller(ctlr);
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");
goto clk_dis_all;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 07a83ca164c2..60c4de4e4485 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1016,7 +1016,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
int ret = 0;
struct spi_master *master;
struct zynqmp_qspi *xqspi;
- struct resource *res;
struct device *dev = &pdev->dev;
eemi_ops = zynqmp_pm_get_eemi_ops();
@@ -1031,8 +1030,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
@@ -1077,7 +1075,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
- dev_err(dev, "irq resource not found\n");
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 75ac046cae52..f9502dbbb5c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1265,8 +1265,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
- unsigned long flags;
+ struct spi_message *msg;
bool was_busy = false;
+ unsigned long flags;
int ret;
/* Lock queue */
@@ -1325,10 +1326,10 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
}
/* Extract head of queue */
- ctlr->cur_msg =
- list_first_entry(&ctlr->queue, struct spi_message, queue);
+ msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+ ctlr->cur_msg = msg;
- list_del_init(&ctlr->cur_msg->queue);
+ list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else
@@ -1361,7 +1362,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
- ctlr->cur_msg->status = ret;
+ msg->status = ret;
spi_finalize_current_message(ctlr);
mutex_unlock(&ctlr->io_mutex);
@@ -1369,28 +1370,28 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
}
}
- trace_spi_message_start(ctlr->cur_msg);
+ trace_spi_message_start(msg);
if (ctlr->prepare_message) {
- ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
+ ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
ret);
- ctlr->cur_msg->status = ret;
+ msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
ctlr->cur_msg_prepared = true;
}
- ret = spi_map_msg(ctlr, ctlr->cur_msg);
+ ret = spi_map_msg(ctlr, msg);
if (ret) {
- ctlr->cur_msg->status = ret;
+ msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
- ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
+ ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
@@ -1434,7 +1435,7 @@ static void spi_pump_messages(struct kthread_work *work)
*/
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
@@ -2105,8 +2106,8 @@ static int match_true(struct device *dev, void *data)
return 1;
}
-static ssize_t spi_slave_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
@@ -2117,9 +2118,8 @@ static ssize_t spi_slave_show(struct device *dev,
child ? to_spi_device(child)->modalias : NULL);
}
-static ssize_t spi_slave_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
@@ -2157,7 +2157,7 @@ static ssize_t spi_slave_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+static DEVICE_ATTR_RW(slave);
static struct attribute *spi_slave_attrs[] = {
&dev_attr_slave.attr,
@@ -2188,8 +2188,10 @@ extern struct class spi_slave_class; /* dummy */
* __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
- * memory is in the driver_data field of the returned device,
- * accessible with spi_controller_get_devdata().
+ * memory is in the driver_data field of the returned device, accessible
+ * with spi_controller_get_devdata(); the memory is cacheline aligned;
+ * drivers granting DMA access to portions of their private data need to
+ * round up @size using ALIGN(size, dma_get_cache_alignment()).
* @slave: flag indicating whether to allocate an SPI master (false) or SPI
* slave (true) controller
* Context: can sleep
@@ -2211,11 +2213,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
unsigned int size, bool slave)
{
struct spi_controller *ctlr;
+ size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
if (!dev)
return NULL;
- ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+ ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;
@@ -2229,14 +2232,14 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
ctlr->dev.class = &spi_master_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
- spi_controller_set_devdata(ctlr, &ctlr[1]);
+ spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);
#ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
int nb, i, *cs;
struct device_node *np = ctlr->dev.of_node;
@@ -2269,7 +2272,7 @@ static int of_spi_register_master(struct spi_controller *ctlr)
return 0;
}
#else
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
return 0;
}
@@ -2456,7 +2459,7 @@ int spi_register_controller(struct spi_controller *ctlr)
ctlr->mode_bits |= SPI_CS_HIGH;
} else {
/* Legacy code path for GPIOs from DT */
- status = of_spi_register_master(ctlr);
+ status = of_spi_get_gpio_numbers(ctlr);
if (status)
return status;
}
@@ -3652,37 +3655,25 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_OF)
-static int __spi_of_device_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/* must call put_device() when done with returned spi_device device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
- struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
- __spi_of_device_match);
+ struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
+
return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
-static int __spi_of_controller_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_master_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, node,
- __spi_of_controller_match);
+ dev = class_find_device_by_of_node(&spi_slave_class, node);
if (!dev)
return NULL;
@@ -3753,11 +3744,6 @@ static int spi_acpi_controller_match(struct device *dev, const void *data)
return ACPI_COMPANION(dev->parent) == data;
}
-static int spi_acpi_device_match(struct device *dev, const void *data)
-{
- return ACPI_COMPANION(dev) == data;
-}
-
static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
struct device *dev;
@@ -3777,8 +3763,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
-
+ dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
return dev ? to_spi_device(dev) : NULL;
}
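The spi.c lookups above drop per-bus match callbacks in favour of the generic by-property helpers (bus_find_device_by_of_node(), class_find_device_by_of_node(), bus_find_device_by_acpi_dev()). Each is conceptually a thin wrapper around bus_find_device()/class_find_device() with a canned match function; a simplified sketch of the idea (not the in-tree implementation):

    static int match_of_node(struct device *dev, const void *np)
    {
            return dev->of_node == np;
    }

    static struct device *find_on_bus_by_of_node(struct bus_type *bus,
                                                 struct device_node *np)
    {
            /* caller must put_device() the result when done with it */
            return bus_find_device(bus, NULL, np, match_of_node);
    }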
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c96a01eef6c..b9c7f0dc653b 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,12 +112,15 @@ source "drivers/staging/gasket/Kconfig"
source "drivers/staging/axis-fifo/Kconfig"
-source "drivers/staging/erofs/Kconfig"
-
source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/isdn/Kconfig"
+source "drivers/staging/wusbcore/Kconfig"
+source "drivers/staging/uwb/Kconfig"
+
+source "drivers/staging/exfat/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fcaac9693b83..4b5962006c4b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -46,7 +46,9 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
-obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_ISDN_CAPI) += isdn/
+obj-$(CONFIG_UWB) += uwb/
+obj-$(CONFIG_USB_WUSB) += wusbcore/
+obj-$(CONFIG_EXFAT_FS) += exfat/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index fbf015cc6d62..767dd98fd92d 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -6,8 +6,6 @@ TODO:
ion/
- - Add dt-bindings for remaining heaps (chunk and carveout heaps). This would
- involve putting appropriate bindings in a memory node for Ion to find.
- Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
- Better test framework (integration with VGEM was suggested)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 92c2914239e3..e6b1ca141b93 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -30,32 +30,6 @@ static struct ion_device *internal_dev;
static int heap_id;
/* this function should only be called while dev->lock is held */
-static void ion_buffer_add(struct ion_device *dev,
- struct ion_buffer *buffer)
-{
- struct rb_node **p = &dev->buffers.rb_node;
- struct rb_node *parent = NULL;
- struct ion_buffer *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_buffer, node);
-
- if (buffer < entry) {
- p = &(*p)->rb_left;
- } else if (buffer > entry) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer already found.", __func__);
- BUG();
- }
- }
-
- rb_link_node(&buffer->node, parent, p);
- rb_insert_color(&buffer->node, &dev->buffers);
-}
-
-/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
@@ -100,9 +74,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
- mutex_lock(&dev->buffer_lock);
- ion_buffer_add(dev, buffer);
- mutex_unlock(&dev->buffer_lock);
return buffer;
err1:
@@ -131,11 +102,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
- struct ion_device *dev = buffer->dev;
-
- mutex_lock(&dev->buffer_lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->buffer_lock);
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
ion_heap_freelist_add(heap, buffer);
@@ -694,8 +660,6 @@ static int ion_device_create(void)
}
idev->debug_root = debugfs_create_dir("ion", NULL);
- idev->buffers = RB_ROOT;
- mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
internal_dev = idev;
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index e291299fd35f..74914a266e25 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -23,7 +23,6 @@
/**
* struct ion_buffer - metadata for a particular buffer
- * @node: node in the ion_device buffers tree
* @list: element in list of deferred freeable buffers
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
@@ -39,10 +38,7 @@
* @attachments: list of devices attached to this buffer
*/
struct ion_buffer {
- union {
- struct rb_node node;
- struct list_head list;
- };
+ struct list_head list;
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
@@ -61,14 +57,10 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
/**
* struct ion_device - the metadata of the ion device node
* @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
* @lock: rwsem protecting the tree of heaps and clients
*/
struct ion_device {
struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
struct dentry *debug_root;
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fd4995fb676e..f85ec5b16b65 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -8,11 +8,14 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <linux/sched/signal.h>
#include "ion.h"
static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
+ if (fatal_signal_pending(current))
+ return NULL;
return alloc_pages(pool->gfp_mask, pool->order);
}
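Checking fatal_signal_pending() before each allocation lets a long pool-refill loop stop promptly once the allocating task has been sent SIGKILL, instead of continuing to call alloc_pages(). The pattern in isolation (a sketch, names illustrative):

    #include <linux/sched/signal.h>

    static struct page *pool_alloc_one(gfp_t gfp_mask, unsigned int order)
    {
            /* abort early if the current task is being killed */
            if (fatal_signal_pending(current))
                    return NULL;
            return alloc_pages(gfp_mask, order);
    }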
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 00a1ec7b9154..1240bb0317d9 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
return -EINVAL;
wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
- hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
+ hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
hrtimer_set_expires_range_ns(&to->timer, wake_time,
current->timer_slack_ns);
-
- hrtimer_init_sleeper(to, current);
}
while (1) {
@@ -460,7 +458,7 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
break;
}
if (to) {
- hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+ hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
if (likely(to->task))
freezable_schedule();
hrtimer_cancel(&to->timer);
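The vsoc change adopts the consolidated hrtimer sleeper API: hrtimer_init_sleeper_on_stack() sets up both the timer and the sleeping task in one call, and hrtimer_sleeper_start_expires() arms it, replacing the older hrtimer_init_on_stack() plus hrtimer_init_sleeper() pair. The resulting wait sequence, condensed (assuming a stack-allocated struct hrtimer_sleeper *to, as in the driver):

    hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    hrtimer_set_expires_range_ns(&to->timer, wake_time, current->timer_slack_ns);
    /* ... per-iteration wait ... */
    hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
    if (likely(to->task))
            freezable_schedule();
    hrtimer_cancel(&to->timer);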
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index aabcda3f9fc8..28603dfadce2 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -665,11 +665,6 @@ static void db2k_initialize_adc(struct comedi_device *dev)
db2k_initialize_tmrs(dev);
}
-static void db2k_initialize_dac(struct comedi_device *dev)
-{
- db2k_dac_disarm(dev);
-}
-
static int db2k_8255_cb(struct comedi_device *dev, int dir, int port, int data,
unsigned long iobase)
{
@@ -719,7 +714,7 @@ static int db2k_auto_attach(struct comedi_device *dev, unsigned long context)
return result;
db2k_initialize_adc(dev);
- db2k_initialize_dac(dev);
+ db2k_dac_disarm(dev);
s = &dev->subdevices[0];
/* ai subdevice */
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee91300..caf4d4df4bd3 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
unsigned int flags)
{
- int divider, base, prescale;
+ unsigned int divider, base, prescale;
- /* This function needs improvment */
+ /* This function needs improvement */
/* Don't know if divider==0 works. */
for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
}
prescale = 15;
- base = timer_base * (1 << prescale);
+ base = timer_base * (prescale + 1);
divider = 65535;
*nanosec = divider * base;
return (prescale << 16) | (divider);
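The dt3000 timer fix matters because plain integer division truncates, so CMDF_ROUND_UP previously rounded the divisor down and could program a shorter period than requested. DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d): for instance DIV_ROUND_UP(1001, 10) is 101, while 1001 / 10 truncates to 100. A sketch of the rounding selection using comedi's standard rounding flags (not a verbatim copy of the driver):

    switch (flags & CMDF_ROUND_MASK) {
    case CMDF_ROUND_NEAREST:
    default:
            divider = DIV_ROUND_CLOSEST(ns, base);
            break;
    case CMDF_ROUND_DOWN:
            divider = ns / base;
            break;
    case CMDF_ROUND_UP:
            divider = DIV_ROUND_UP(ns, base);  /* never shorter than requested */
            break;
    }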
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index c175227009f1..f98e3ae27bff 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -596,7 +596,7 @@ static int ni_request_ao_mite_channel(struct comedi_device *dev)
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
- "failed to reserve mite dma channel for analog outut\n");
+ "failed to reserve mite dma channel for analog output\n");
return -EBUSY;
}
mite_chan->dir = COMEDI_OUTPUT;
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3cc40d2544be..54d7605e909f 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1074,7 +1074,7 @@ static int usbduxsigma_pwm_period(struct comedi_device *dev,
unsigned int period)
{
struct usbduxsigma_private *devpriv = dev->private;
- int fx2delay = 255;
+ int fx2delay;
if (period < MIN_PWM_PERIOD)
return -EAGAIN;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 4f3c2c13a225..147481bf680c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -165,7 +165,7 @@ static void _nbu2ss_create_ep0_packet(struct nbu2ss_udc *udc,
udc->ep0_req.req.buf = p_buf;
udc->ep0_req.req.length = length;
udc->ep0_req.req.dma = 0;
- udc->ep0_req.req.zero = TRUE;
+ udc->ep0_req.req.zero = true;
udc->ep0_req.req.complete = _nbu2ss_ep0_complete;
udc->ep0_req.req.status = -EINPROGRESS;
udc->ep0_req.req.context = udc;
@@ -668,7 +668,7 @@ static int _nbu2ss_ep0_in_transfer(struct nbu2ss_udc *udc,
if ((req->req.actual % EP0_PACKETSIZE) == 0) {
if (req->zero) {
req->zero = false;
- EP0_send_NULL(udc, FALSE);
+ EP0_send_NULL(udc, false);
return 1;
}
}
@@ -695,7 +695,7 @@ static int _nbu2ss_ep0_in_transfer(struct nbu2ss_udc *udc,
i_remain_size -= result;
if (i_remain_size == 0) {
- EP0_send_NULL(udc, FALSE);
+ EP0_send_NULL(udc, false);
return result;
}
@@ -754,7 +754,7 @@ static int _nbu2ss_ep0_out_transfer(struct nbu2ss_udc *udc,
if ((req->req.actual % EP0_PACKETSIZE) == 0) {
if (req->zero) {
req->zero = false;
- EP0_receive_NULL(udc, FALSE);
+ EP0_receive_NULL(udc, false);
return 1;
}
}
@@ -799,7 +799,7 @@ static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
if (req->dma_flag)
return 1; /* DMA is forwarded */
- req->dma_flag = TRUE;
+ req->dma_flag = true;
p_buffer = req->req.dma;
p_buffer += req->req.actual;
@@ -997,7 +997,7 @@ static int _nbu2ss_in_dma(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
if (req->req.actual == 0)
_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
#endif
- req->dma_flag = TRUE;
+ req->dma_flag = true;
/* MAX Packet Size */
mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
@@ -1166,7 +1166,7 @@ static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
{
int nret = -EINVAL;
- req->dma_flag = FALSE;
+ req->dma_flag = false;
req->div_len = 0;
if (req->req.length == 0) {
@@ -1190,7 +1190,7 @@ static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
break;
case EP0_IN_STATUS_PHASE:
- nret = EP0_send_NULL(udc, TRUE);
+ nret = EP0_send_NULL(udc, true);
break;
default:
@@ -1216,7 +1216,7 @@ static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
{
u32 length;
- bool bflag = FALSE;
+ bool bflag = false;
struct nbu2ss_req *req;
req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
@@ -1229,7 +1229,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
length &= EPN_LDATA;
if (length < ep->ep.maxpacket)
- bflag = TRUE;
+ bflag = true;
}
_nbu2ss_start_transfer(ep->udc, ep, req, bflag);
@@ -1280,7 +1280,7 @@ static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
if (bstall) {
/* Set STALL */
- ep->halted = TRUE;
+ ep->halted = true;
if (ep_adrs & USB_DIR_IN)
data = EPN_BCLR | EPN_ISTL;
@@ -1290,7 +1290,7 @@ static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
} else {
/* Clear STALL */
- ep->stalled = FALSE;
+ ep->stalled = false;
if (ep_adrs & USB_DIR_IN) {
_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
, EPN_ISTL);
@@ -1305,9 +1305,9 @@ static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
, data);
}
- ep->stalled = FALSE;
+ ep->stalled = false;
if (ep->halted) {
- ep->halted = FALSE;
+ ep->halted = false;
_nbu2ss_restert_transfer(ep);
}
}
@@ -1533,13 +1533,13 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
/*-------------------------------------------------------------------------*/
static int std_req_clear_feature(struct nbu2ss_udc *udc)
{
- return _nbu2ss_req_feature(udc, FALSE);
+ return _nbu2ss_req_feature(udc, false);
}
/*-------------------------------------------------------------------------*/
static int std_req_set_feature(struct nbu2ss_udc *udc)
{
- return _nbu2ss_req_feature(udc, TRUE);
+ return _nbu2ss_req_feature(udc, true);
}
/*-------------------------------------------------------------------------*/
@@ -1601,7 +1601,7 @@ static inline void _nbu2ss_read_request_data(struct nbu2ss_udc *udc, u32 *pdata)
/*-------------------------------------------------------------------------*/
static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
{
- bool bcall_back = TRUE;
+ bool bcall_back = true;
int nret = -EINVAL;
struct usb_ctrlrequest *p_ctrl;
@@ -1623,22 +1623,22 @@ static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
switch (p_ctrl->bRequest) {
case USB_REQ_GET_STATUS:
nret = std_req_get_status(udc);
- bcall_back = FALSE;
+ bcall_back = false;
break;
case USB_REQ_CLEAR_FEATURE:
nret = std_req_clear_feature(udc);
- bcall_back = FALSE;
+ bcall_back = false;
break;
case USB_REQ_SET_FEATURE:
nret = std_req_set_feature(udc);
- bcall_back = FALSE;
+ bcall_back = false;
break;
case USB_REQ_SET_ADDRESS:
nret = std_req_set_address(udc);
- bcall_back = FALSE;
+ bcall_back = false;
break;
case USB_REQ_SET_CONFIGURATION:
@@ -1655,7 +1655,7 @@ static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
if (nret >= 0) {
/*--------------------------------------*/
/* Status Stage */
- nret = EP0_send_NULL(udc, TRUE);
+ nret = EP0_send_NULL(udc, true);
}
}
@@ -1688,7 +1688,7 @@ static inline int _nbu2ss_ep0_in_data_stage(struct nbu2ss_udc *udc)
nret = _nbu2ss_ep0_in_transfer(udc, req);
if (nret == 0) {
udc->ep0state = EP0_OUT_STATUS_PAHSE;
- EP0_receive_NULL(udc, TRUE);
+ EP0_receive_NULL(udc, true);
}
return 0;
@@ -1708,7 +1708,7 @@ static inline int _nbu2ss_ep0_out_data_stage(struct nbu2ss_udc *udc)
nret = _nbu2ss_ep0_out_transfer(udc, req);
if (nret == 0) {
udc->ep0state = EP0_IN_STATUS_PHASE;
- EP0_send_NULL(udc, TRUE);
+ EP0_send_NULL(udc, true);
} else if (nret < 0) {
_nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, EP0_BCLR);
@@ -1817,7 +1817,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
if (nret < 0) {
/* Send Stall */
- _nbu2ss_set_endpoint_stall(udc, 0, TRUE);
+ _nbu2ss_set_endpoint_stall(udc, 0, true);
}
}
@@ -1925,7 +1925,7 @@ static inline void _nbu2ss_epn_in_dma_int(struct nbu2ss_udc *udc,
preq->actual += req->div_len;
req->div_len = 0;
- req->dma_flag = FALSE;
+ req->dma_flag = false;
#ifdef USE_DMA
_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
@@ -1961,7 +1961,7 @@ static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
if (req->req.actual == req->req.length) {
if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
req->div_len = 0;
- req->dma_flag = FALSE;
+ req->dma_flag = false;
_nbu2ss_ep_done(ep, req, 0);
return;
}
@@ -1990,7 +1990,7 @@ static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
if ((req->req.actual % ep->ep.maxpacket) > 0) {
if (req->req.actual == req->div_len) {
req->div_len = 0;
- req->dma_flag = FALSE;
+ req->dma_flag = false;
_nbu2ss_ep_done(ep, req, 0);
return;
}
@@ -1998,7 +1998,7 @@ static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
req->req.actual += req->div_len;
req->div_len = 0;
- req->dma_flag = FALSE;
+ req->dma_flag = false;
_nbu2ss_epn_out_int(udc, ep, req);
}
@@ -2187,7 +2187,7 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
/* USB Interrupt Enable */
_nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, USB_INT_EN_BIT);
- udc->udc_enabled = TRUE;
+ udc->udc_enabled = true;
return 0;
}
@@ -2203,7 +2203,7 @@ static void _nbu2ss_reset_controller(struct nbu2ss_udc *udc)
static void _nbu2ss_disable_controller(struct nbu2ss_udc *udc)
{
if (udc->udc_enabled) {
- udc->udc_enabled = FALSE;
+ udc->udc_enabled = false;
_nbu2ss_reset_controller(udc);
_nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
}
@@ -2456,8 +2456,8 @@ static int nbu2ss_ep_enable(struct usb_ep *_ep,
ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
ep->ep_type = ep_type;
ep->wedged = 0;
- ep->halted = FALSE;
- ep->stalled = FALSE;
+ ep->halted = false;
+ ep->stalled = false;
ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
@@ -2588,9 +2588,9 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
#ifdef USE_DMA
if ((uintptr_t)req->req.buf & 0x3)
- req->unaligned = TRUE;
+ req->unaligned = true;
else
- req->unaligned = FALSE;
+ req->unaligned = false;
if (req->unaligned) {
if (!ep->virt_buf)
@@ -2616,7 +2616,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
list_add_tail(&req->queue, &ep->queue);
if (bflag && !ep->stalled) {
- result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
+ result = _nbu2ss_start_transfer(udc, ep, req, false);
if (result < 0) {
dev_err(udc->dev, " *** %s, result = %d\n", __func__,
result);
@@ -2704,12 +2704,12 @@ static int nbu2ss_ep_set_halt(struct usb_ep *_ep, int value)
ep_adrs = ep->epnum | ep->direct;
if (value == 0) {
_nbu2ss_set_endpoint_stall(udc, ep_adrs, value);
- ep->stalled = FALSE;
+ ep->stalled = false;
} else {
if (list_empty(&ep->queue))
_nbu2ss_epn_set_stall(udc, ep);
else
- ep->stalled = TRUE;
+ ep->stalled = true;
}
if (value == 0)
@@ -3094,10 +3094,8 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
return PTR_ERR(mmio_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (irq < 0)
return irq;
- }
status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq,
0, driver_name, udc);
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index b8c3dee5626c..9c2671cb32f7 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -19,11 +19,6 @@
#define USE_DMA 1
#define USE_SUSPEND_WAIT 1
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
/*------------ Board dependence(Resource) */
#define VBUS_VALUE GPIO_VBUS
diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt
deleted file mode 100644
index 74cf84ac48a3..000000000000
--- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt
+++ /dev/null
@@ -1,209 +0,0 @@
-Overview
-========
-
-EROFS file-system stands for Enhanced Read-Only File System. Different
-from other read-only file systems, it aims to be designed for flexibility,
-scalability, but be kept simple and high performance.
-
-It is designed as a better filesystem solution for the following scenarios:
- - read-only storage media or
-
- - part of a fully trusted read-only solution, which means it needs to be
- immutable and bit-for-bit identical to the official golden image for
- their releases due to security and other considerations and
-
- - hope to save some extra storage space with guaranteed end-to-end performance
- by using reduced metadata and transparent file compression, especially
- for those embedded devices with limited memory (ex, smartphone);
-
-Here is the main features of EROFS:
- - Little endian on-disk design;
-
- - Currently 4KB block size (nobh) and therefore maximum 16TB address space;
-
- - Metadata & data could be mixed by design;
-
- - 2 inode versions for different requirements:
- v1 v2
- Inode metadata size: 32 bytes 64 bytes
- Max file size: 4 GB 16 EB (also limited by max. vol size)
- Max uids/gids: 65536 4294967296
- File creation time: no yes (64 + 32-bit timestamp)
- Max hardlinks: 65536 4294967296
- Metadata reserved: 4 bytes 14 bytes
-
- - Support extended attributes (xattrs) as an option;
-
- - Support xattr inline and tail-end data inline for all files;
-
- - Support POSIX.1e ACLs by using xattrs;
-
- - Support transparent file compression as an option:
- LZ4 algorithm with 4 KB fixed-output compression for high performance;
-
-The following git tree provides the file system user-space tools under
-development (ex, formatting tool mkfs.erofs):
->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
-
-Bugs and patches are welcome, please kindly help us and send to the following
-linux-erofs mailing list:
->> linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
-
-Note that EROFS is still working in progress as a Linux staging driver,
-Cc the staging mailing list as well is highly recommended:
->> Linux Driver Project Developer List <devel@driverdev.osuosl.org>
-
-Mount options
-=============
-
-fault_injection=%d Enable fault injection in all supported types with
- specified injection rate. Supported injection type:
- Type_Name Type_Value
- FAULT_KMALLOC 0x000000001
- FAULT_READ_IO 0x000000002
-(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled
- by default if CONFIG_EROFS_FS_XATTR is selected.
-(no)acl Setup POSIX Access Control List. Note: acl is enabled
- by default if CONFIG_EROFS_FS_POSIX_ACL is selected.
-
-On-disk details
-===============
-
-Summary
--------
-Different from other read-only file systems, an EROFS volume is designed
-to be as simple as possible:
-
- |-> aligned with the block size
- ____________________________________________________________
- | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data |
- |_|__|_|_____|__________|_____|______|__________|_____|______|
- 0 +1K
-
-All data areas should be aligned with the block size, but metadata areas
-may not. All metadatas can be now observed in two different spaces (views):
- 1. Inode metadata space
- Each valid inode should be aligned with an inode slot, which is a fixed
- value (32 bytes) and designed to be kept in line with v1 inode size.
-
- Each inode can be directly found with the following formula:
- inode offset = meta_blkaddr * block_size + 32 * nid
-
- |-> aligned with 8B
- |-> followed closely
- + meta_blkaddr blocks |-> another slot
- _____________________________________________________________________
- | ... | inode | xattrs | extents | data inline | ... | inode ...
- |________|_______|(optional)|(optional)|__(optional)_|_____|__________
- |-> aligned with the inode slot size
- . .
- . .
- . .
- . .
- . .
- . .
- .____________________________________________________|-> aligned with 4B
- | xattr_ibody_header | shared xattrs | inline xattrs |
- |____________________|_______________|_______________|
- |-> 12 bytes <-|->x * 4 bytes<-| .
- . . .
- . . .
- . . .
- ._______________________________.______________________.
- | id | id | id | id | ... | id | ent | ... | ent| ... |
- |____|____|____|____|______|____|_____|_____|____|_____|
- |-> aligned with 4B
- |-> aligned with 4B
-
- Inode could be 32 or 64 bytes, which can be distinguished from a common
- field which all inode versions have -- i_advise:
-
- __________________ __________________
- | i_advise | | i_advise |
- |__________________| |__________________|
- | ... | | ... |
- | | | |
- |__________________| 32 bytes | |
- | |
- |__________________| 64 bytes
-
- Xattrs, extents, data inline are followed by the corresponding inode with
- proper alignes, and they could be optional for different data mappings,
- _currently_ there are totally 3 valid data mappings supported:
-
- 1) flat file data without data inline (no extent);
- 2) fixed-output size data compression (must have extents);
- 3) flat file data with tail-end data inline (no extent);
-
- The size of the optional xattrs is indicated by i_xattr_count in inode
- header. Large xattrs or xattrs shared by many different files can be
- stored in shared xattrs metadata rather than inlined right after inode.
-
- 2. Shared xattrs metadata space
- Shared xattrs space is similar to the above inode space, started with
- a specific block indicated by xattr_blkaddr, organized one by one with
- proper align.
-
- Each share xattr can also be directly found by the following formula:
- xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
-
- |-> aligned by 4 bytes
- + xattr_blkaddr blocks |-> aligned with 4 bytes
- _________________________________________________________________________
- | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
- |________|_____________|_____________|_____|______________|_______________
-
-Directories
------------
-All directories are now organized in a compact on-disk format. Note that
-each directory block is divided into index and name areas in order to support
-random file lookup, and all directory entries are _strictly_ recorded in
-alphabetical order in order to support improved prefix binary search
-algorithm (could refer to the related source code).
-
- ___________________________
- / |
- / ______________|________________
- / / | nameoff1 | nameoffN-1
- ____________.______________._______________v________________v__________
-| dirent | dirent | ... | dirent | filename | filename | ... | filename |
-|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
- \ ^
- \ | * could have
- \ | trailing '\0'
- \________________________| nameoff0
-
- Directory block
-
-Note that apart from the offset of the first filename, nameoff0 also indicates
-the total number of directory entries in this block since it is no need to
-introduce another on-disk field at all.
-
-Compression
------------
-Currently, EROFS supports 4KB fixed-output clustersize transparent file
-compression, as illustrated below:
-
- |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
- clusterofs clusterofs clusterofs
- | | | logical data
-_________v_______________________________v_____________________v_______________
-... | . | | . | | . | ...
-____|____.________|_____________|________.____|_____________|__.__________|____
- |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
- size size size size size
- . . . .
- . . . .
- . . . .
- _______._____________._____________._____________._____________________
- ... | | | | ... physical data
- _______|_____________|_____________|_____________|_____________________
- |-> cluster <-|-> cluster <-|-> cluster <-|
- size size size
-
-Currently each on-disk physical cluster can contain 4KB (un)compressed data
-at most. For each logical cluster, there is a corresponding on-disk index to
-describe its cluster type, physical cluster address, etc.
-
-See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
-
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
deleted file mode 100644
index d04b798a8efb..000000000000
--- a/drivers/staging/erofs/Kconfig
+++ /dev/null
@@ -1,151 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-config EROFS_FS
- tristate "EROFS filesystem support"
- depends on BLOCK
- help
- EROFS(Enhanced Read-Only File System) is a lightweight
- read-only file system with modern designs (eg. page-sized
- blocks, inline xattrs/data, etc.) for scenarios which need
- high-performance read-only requirements, eg. firmwares in
- mobile phone or LIVECDs.
-
- It also provides VLE compression support, focusing on
- random read improvements, keeping relatively lower
- compression ratios, which is useful for high-performance
- devices with limited memory and ROM space.
-
- If unsure, say N.
-
-config EROFS_FS_DEBUG
- bool "EROFS debugging feature"
- depends on EROFS_FS
- help
- Print EROFS debugging messages and enable more BUG_ONs
- which check the filesystem consistency aggressively.
-
- For daily use, say N.
-
-config EROFS_FS_XATTR
- bool "EROFS extended attributes"
- depends on EROFS_FS
- default y
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
-
- If unsure, say N.
-
-config EROFS_FS_POSIX_ACL
- bool "EROFS Access Control Lists"
- depends on EROFS_FS_XATTR
- select FS_POSIX_ACL
- default y
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
- To learn more about Access Control Lists, visit the POSIX ACLs for
- Linux website <http://acl.bestbits.at/>.
-
- If you don't know what Access Control Lists are, say N.
-
-config EROFS_FS_SECURITY
- bool "EROFS Security Labels"
- depends on EROFS_FS_XATTR
- help
- Security labels provide an access control facility to support Linux
- Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
- Linux. This option enables an extended attribute handler for file
- security labels in the erofs filesystem, so that it requires enabling
- the extended attribute support in advance.
-
- If you are not using a security module, say N.
-
-config EROFS_FS_USE_VM_MAP_RAM
- bool "EROFS VM_MAP_RAM Support"
- depends on EROFS_FS
- help
- use vm_map_ram/vm_unmap_ram instead of vmap/vunmap.
-
- If you don't know what these are, say N.
-
-config EROFS_FAULT_INJECTION
- bool "EROFS fault injection facility"
- depends on EROFS_FS
- help
- Test EROFS to inject faults such as ENOMEM, EIO, and so on.
- If unsure, say N.
-
-config EROFS_FS_IO_MAX_RETRIES
- int "EROFS IO Maximum Retries"
- depends on EROFS_FS
- default "5"
- help
- Maximum retry count of IO Errors.
-
- If unsure, leave the default value (5 retries, 6 IOs at most).
-
-config EROFS_FS_ZIP
- bool "EROFS Data Compresssion Support"
- depends on EROFS_FS
- select LZ4_DECOMPRESS
- help
- Currently we support LZ4 VLE Compression only.
- Play at your own risk.
-
- If you don't want to use compression feature, say N.
-
-config EROFS_FS_CLUSTER_PAGE_LIMIT
- int "EROFS Cluster Pages Hard Limit"
- depends on EROFS_FS_ZIP
- range 1 256
- default "1"
- help
- Indicates VLE compressed pages hard limit of a
- compressed cluster.
-
- For example, if files of a image are compressed
- into 8k-unit, the hard limit should not be less
- than 2. Otherwise, the image cannot be mounted
- correctly on this kernel.
-
-choice
- prompt "EROFS VLE Data Decompression mode"
- depends on EROFS_FS_ZIP
- default EROFS_FS_ZIP_CACHE_BIPOLAR
- help
- EROFS supports three options for VLE decompression.
- "In-place Decompression Only" consumes the minimum memory
- with lowest random read.
-
- "Bipolar Cached Decompression" consumes the maximum memory
- with highest random read.
-
- If unsure, select "Bipolar Cached Decompression"
-
-config EROFS_FS_ZIP_NO_CACHE
- bool "In-place Decompression Only"
- help
- Read compressed data into page cache and do in-place
- decompression directly.
-
-config EROFS_FS_ZIP_CACHE_UNIPOLAR
- bool "Unipolar Cached Decompression"
- help
- For each request, it caches the last compressed page
- for further reading.
- It still decompresses in place for the rest compressed pages.
-
-config EROFS_FS_ZIP_CACHE_BIPOLAR
- bool "Bipolar Cached Decompression"
- help
- For each request, it caches the both end compressed pages
- for further reading.
- It still decompresses in place for the rest compressed pages.
-
- Recommended for performance priority.
-
-endchoice
-
diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile
deleted file mode 100644
index e704d9e51514..000000000000
--- a/drivers/staging/erofs/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-EROFS_VERSION = "1.0pre1"
-
-ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
-
-obj-$(CONFIG_EROFS_FS) += erofs.o
-# staging requirement: to be self-contained in its own directory
-ccflags-y += -I $(srctree)/$(src)/include
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
-erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o zmap.o decompressor.o
-
diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO
deleted file mode 100644
index a8608b2f72bd..000000000000
--- a/drivers/staging/erofs/TODO
+++ /dev/null
@@ -1,46 +0,0 @@
-
-EROFS is still working in progress, thus it is not suitable
-for all productive uses. play at your own risk :)
-
-TODO List:
- - add the missing error handling code
- (mainly existed in xattr and decompression submodules);
-
- - finalize erofs ondisk format design (which means that
- minor on-disk revisions could happen later);
-
- - documentation and detailed technical analysis;
-
- - general code review and clean up
- (including confusing variable names and code snippets);
-
- - support larger compressed clustersizes for selection
- (currently erofs only works as expected with the page-sized
- compressed cluster configuration, usually 4KB);
-
- - support more lossless data compression algorithms
- in addition to LZ4 algorithms in VLE approach;
-
- - data deduplication and other useful features.
-
-The following git tree provides the file system user-space
-tools under development (ex, formatting tool mkfs.erofs):
->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
-
-The open-source development of erofs-utils is at the early stage.
-Contact the original author Li Guifu <bluce.liguifu@huawei.com> and
-the co-maintainer Fang Wei <fangwei1@huawei.com> for the latest news
-and more details.
-
-Code, suggestions, etc, are welcome. Please feel free to
-ask and send patches,
-
-To:
- linux-erofs mailing list <linux-erofs@lists.ozlabs.org>
- Gao Xiang <gaoxiang25@huawei.com>
- Chao Yu <yuchao0@huawei.com>
-
-Cc: (for linux-kernel upstream patches)
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- linux-staging mailing list <devel@driverdev.osuosl.org>
-
diff --git a/drivers/staging/erofs/compress.h b/drivers/staging/erofs/compress.h
deleted file mode 100644
index c43aa3374d28..000000000000
--- a/drivers/staging/erofs/compress.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/drivers/staging/erofs/compress.h
- *
- * Copyright (C) 2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef __EROFS_FS_COMPRESS_H
-#define __EROFS_FS_COMPRESS_H
-
-#include "internal.h"
-
-enum {
- Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
- Z_EROFS_COMPRESSION_RUNTIME_MAX
-};
-
-struct z_erofs_decompress_req {
- struct super_block *sb;
- struct page **in, **out;
-
- unsigned short pageofs_out;
- unsigned int inputsize, outputsize;
-
- /* indicate the algorithm will be used for decompression */
- unsigned int alg;
- bool inplace_io, partial_decoding;
-};
-
-/*
- * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
- * used to mark temporary allocated pages from other
- * file/cached pages and NULL mapping pages.
- */
-#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D)
-
-/* check if a page is marked as staging */
-static inline bool z_erofs_page_is_staging(struct page *page)
-{
- return page->mapping == Z_EROFS_MAPPING_STAGING;
-}
-
-static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
- struct page *page)
-{
- if (!z_erofs_page_is_staging(page))
- return false;
-
- /* staging pages should not be used by others at the same time */
- if (page_ref_count(page) > 1)
- put_page(page);
- else
- list_add(&page->lru, pagepool);
- return true;
-}
-
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool);
-
-#endif
-
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
deleted file mode 100644
index cc31c3e5984c..000000000000
--- a/drivers/staging/erofs/data.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/data.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include "internal.h"
-#include <linux/prefetch.h>
-
-#include <trace/events/erofs.h>
-
-static inline void read_endio(struct bio *bio)
-{
- struct super_block *const sb = bio->bi_private;
- struct bio_vec *bvec;
- blk_status_t err = bio->bi_status;
- struct bvec_iter_all iter_all;
-
- if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
- erofs_show_injection_info(FAULT_READ_IO);
- err = BLK_STS_IOERR;
- }
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
-
- /* page is already locked */
- DBG_BUGON(PageUptodate(page));
-
- if (unlikely(err))
- SetPageError(page);
- else
- SetPageUptodate(page);
-
- unlock_page(page);
- /* page could be reclaimed now */
- }
- bio_put(bio);
-}
-
-/* prio -- true is used for dir */
-struct page *__erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio, bool nofail)
-{
- struct inode *const bd_inode = sb->s_bdev->bd_inode;
- struct address_space *const mapping = bd_inode->i_mapping;
- /* prefer retrying in the allocator to blindly looping below */
- const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
- (nofail ? __GFP_NOFAIL : 0);
- unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
- struct page *page;
- int err;
-
-repeat:
- page = find_or_create_page(mapping, blkaddr, gfp);
- if (unlikely(!page)) {
- DBG_BUGON(nofail);
- return ERR_PTR(-ENOMEM);
- }
- DBG_BUGON(!PageLocked(page));
-
- if (!PageUptodate(page)) {
- struct bio *bio;
-
- bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
- if (IS_ERR(bio)) {
- DBG_BUGON(nofail);
- err = PTR_ERR(bio);
- goto err_out;
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (unlikely(err != PAGE_SIZE)) {
- err = -EFAULT;
- goto err_out;
- }
-
- __submit_bio(bio, REQ_OP_READ,
- REQ_META | (prio ? REQ_PRIO : 0));
-
- lock_page(page);
-
- /* this page has been truncated by others */
- if (unlikely(page->mapping != mapping)) {
-unlock_repeat:
- unlock_page(page);
- put_page(page);
- goto repeat;
- }
-
- /* more likely a read error */
- if (unlikely(!PageUptodate(page))) {
- if (io_retries) {
- --io_retries;
- goto unlock_repeat;
- }
- err = -EIO;
- goto err_out;
- }
- }
- return page;
-
-err_out:
- unlock_page(page);
- put_page(page);
- return ERR_PTR(err);
-}
-
-static int erofs_map_blocks_flatmode(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- int err = 0;
- erofs_blk_t nblocks, lastblk;
- u64 offset = map->m_la;
- struct erofs_vnode *vi = EROFS_V(inode);
-
- trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
-
- nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
- lastblk = nblocks - is_inode_flat_inline(inode);
-
- if (unlikely(offset >= inode->i_size)) {
- /* leave out-of-bound access unmapped */
- map->m_flags = 0;
- map->m_plen = 0;
- goto out;
- }
-
- /* there is no hole in flatmode */
- map->m_flags = EROFS_MAP_MAPPED;
-
- if (offset < blknr_to_addr(lastblk)) {
- map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
- map->m_plen = blknr_to_addr(lastblk) - offset;
- } else if (is_inode_flat_inline(inode)) {
- /* 2 - inode inline B: inode, [xattrs], inline last blk... */
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
- map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
- vi->xattr_isize + erofs_blkoff(map->m_la);
- map->m_plen = inode->i_size - offset;
-
- /* inline data should locate in one meta block */
- if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
- DBG_BUGON(1);
- err = -EIO;
- goto err_out;
- }
-
- map->m_flags |= EROFS_MAP_META;
- } else {
- errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
- vi->nid, inode->i_size, map->m_la);
- DBG_BUGON(1);
- err = -EIO;
- goto err_out;
- }
-
-out:
- map->m_llen = map->m_plen;
-
-err_out:
- trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
- return err;
-}
-
-int erofs_map_blocks(struct inode *inode,
- struct erofs_map_blocks *map, int flags)
-{
- if (unlikely(is_inode_layout_compression(inode))) {
- int err = z_erofs_map_blocks_iter(inode, map, flags);
-
- if (map->mpage) {
- put_page(map->mpage);
- map->mpage = NULL;
- }
- return err;
- }
- return erofs_map_blocks_flatmode(inode, map, flags);
-}
-
-static inline struct bio *erofs_read_raw_page(struct bio *bio,
- struct address_space *mapping,
- struct page *page,
- erofs_off_t *last_block,
- unsigned int nblocks,
- bool ra)
-{
- struct inode *const inode = mapping->host;
- struct super_block *const sb = inode->i_sb;
- erofs_off_t current_block = (erofs_off_t)page->index;
- int err;
-
- DBG_BUGON(!nblocks);
-
- if (PageUptodate(page)) {
- err = 0;
- goto has_updated;
- }
-
- if (cleancache_get_page(page) == 0) {
- err = 0;
- SetPageUptodate(page);
- goto has_updated;
- }
-
- /* note that for readpage case, bio also equals to NULL */
- if (bio &&
- /* not continuous */
- *last_block + 1 != current_block) {
-submit_bio_retry:
- __submit_bio(bio, REQ_OP_READ, 0);
- bio = NULL;
- }
-
- if (!bio) {
- struct erofs_map_blocks map = {
- .m_la = blknr_to_addr(current_block),
- };
- erofs_blk_t blknr;
- unsigned int blkoff;
-
- err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
- if (unlikely(err))
- goto err_out;
-
- /* zero out the holed page */
- if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
- zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
-
- /* imply err = 0, see erofs_map_blocks */
- goto has_updated;
- }
-
- /* for RAW access mode, m_plen must be equal to m_llen */
- DBG_BUGON(map.m_plen != map.m_llen);
-
- blknr = erofs_blknr(map.m_pa);
- blkoff = erofs_blkoff(map.m_pa);
-
- /* deal with inline page */
- if (map.m_flags & EROFS_MAP_META) {
- void *vsrc, *vto;
- struct page *ipage;
-
- DBG_BUGON(map.m_plen > PAGE_SIZE);
-
- ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
-
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto err_out;
- }
-
- vsrc = kmap_atomic(ipage);
- vto = kmap_atomic(page);
- memcpy(vto, vsrc + blkoff, map.m_plen);
- memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
- kunmap_atomic(vto);
- kunmap_atomic(vsrc);
- flush_dcache_page(page);
-
- SetPageUptodate(page);
- /* TODO: could we unlock the page earlier? */
- unlock_page(ipage);
- put_page(ipage);
-
- /* imply err = 0, see erofs_map_blocks */
- goto has_updated;
- }
-
- /* pa must be block-aligned for raw reading */
- DBG_BUGON(erofs_blkoff(map.m_pa));
-
- /* max # of continuous pages */
- if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
- nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
- if (nblocks > BIO_MAX_PAGES)
- nblocks = BIO_MAX_PAGES;
-
- bio = erofs_grab_bio(sb, blknr, nblocks, sb,
- read_endio, false);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- bio = NULL;
- goto err_out;
- }
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- /* out of the extent or bio is full */
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
-
- *last_block = current_block;
-
- /* shift in advance in case of it followed by too many gaps */
- if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
- /* err should reassign to 0 after submitting */
- err = 0;
- goto submit_bio_out;
- }
-
- return bio;
-
-err_out:
- /* for sync reading, set page error immediately */
- if (!ra) {
- SetPageError(page);
- ClearPageUptodate(page);
- }
-has_updated:
- unlock_page(page);
-
- /* if updated manually, continuous pages has a gap */
- if (bio)
-submit_bio_out:
- __submit_bio(bio, REQ_OP_READ, 0);
-
- return unlikely(err) ? ERR_PTR(err) : NULL;
-}
-
-/*
- * since we dont have write or truncate flows, so no inode
- * locking needs to be held at the moment.
- */
-static int erofs_raw_access_readpage(struct file *file, struct page *page)
-{
- erofs_off_t last_block;
- struct bio *bio;
-
- trace_erofs_readpage(page, true);
-
- bio = erofs_read_raw_page(NULL, page->mapping,
- page, &last_block, 1, false);
-
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
- return 0;
-}
-
-static int erofs_raw_access_readpages(struct file *filp,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned int nr_pages)
-{
- erofs_off_t last_block;
- struct bio *bio = NULL;
- gfp_t gfp = readahead_gfp_mask(mapping);
- struct page *page = list_last_entry(pages, struct page, lru);
-
- trace_erofs_readpages(mapping->host, page, nr_pages, true);
-
- for (; nr_pages; --nr_pages) {
- page = list_entry(pages->prev, struct page, lru);
-
- prefetchw(&page->flags);
- list_del(&page->lru);
-
- if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
- bio = erofs_read_raw_page(bio, mapping, page,
- &last_block, nr_pages, true);
-
- /* all the page errors are ignored when readahead */
- if (IS_ERR(bio)) {
- pr_err("%s, readahead error at page %lu of nid %llu\n",
- __func__, page->index,
- EROFS_V(mapping->host)->nid);
-
- bio = NULL;
- }
- }
-
- /* pages could still be locked */
- put_page(page);
- }
- DBG_BUGON(!list_empty(pages));
-
- /* the rare case (end in gaps) */
- if (unlikely(bio))
- __submit_bio(bio, REQ_OP_READ, 0);
- return 0;
-}
-
-/* for uncompressed (aligned) files and raw access for other files */
-const struct address_space_operations erofs_raw_access_aops = {
- .readpage = erofs_raw_access_readpage,
- .readpages = erofs_raw_access_readpages,
-};
-
diff --git a/drivers/staging/erofs/decompressor.c b/drivers/staging/erofs/decompressor.c
deleted file mode 100644
index 1fb0abb98dff..000000000000
--- a/drivers/staging/erofs/decompressor.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/decompressor.c
- *
- * Copyright (C) 2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "compress.h"
-#include <linux/lz4.h>
-
-#ifndef LZ4_DISTANCE_MAX /* history window size */
-#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
-#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
-#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
-#endif
-
-struct z_erofs_decompressor {
- /*
- * if destpages have sparsed pages, fill them with bounce pages.
- * it also check whether destpages indicate continuous physical memory.
- */
- int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool);
- int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
- char *name;
-};
-
-static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nr =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
- unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
- BITS_PER_LONG)] = { 0 };
- void *kaddr = NULL;
- unsigned int i, j, top;
-
- top = 0;
- for (i = j = 0; i < nr; ++i, ++j) {
- struct page *const page = rq->out[i];
- struct page *victim;
-
- if (j >= LZ4_MAX_DISTANCE_PAGES)
- j = 0;
-
- /* 'valid' bounced can only be tested after a complete round */
- if (test_bit(j, bounced)) {
- DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
- DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
- availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
- }
-
- if (page) {
- __clear_bit(j, bounced);
- if (kaddr) {
- if (kaddr + PAGE_SIZE == page_address(page))
- kaddr += PAGE_SIZE;
- else
- kaddr = NULL;
- } else if (!i) {
- kaddr = page_address(page);
- }
- continue;
- }
- kaddr = NULL;
- __set_bit(j, bounced);
-
- if (top) {
- victim = availables[--top];
- get_page(victim);
- } else {
- if (!list_empty(pagepool)) {
- victim = lru_to_page(pagepool);
- list_del(&victim->lru);
- DBG_BUGON(page_ref_count(victim) != 1);
- } else {
- victim = alloc_pages(GFP_KERNEL, 0);
- if (!victim)
- return -ENOMEM;
- }
- victim->mapping = Z_EROFS_MAPPING_STAGING;
- }
- rq->out[i] = victim;
- }
- return kaddr ? 1 : 0;
-}
-
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
- u8 *src, unsigned int pageofs_in)
-{
- /*
- * if in-place decompression is ongoing, those decompressed
- * pages should be copied in order to avoid being overlapped.
- */
- struct page **in = rq->in;
- u8 *const tmp = erofs_get_pcpubuf(0);
- u8 *tmpp = tmp;
- unsigned int inlen = rq->inputsize - pageofs_in;
- unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
-
- while (tmpp < tmp + inlen) {
- if (!src)
- src = kmap_atomic(*in);
- memcpy(tmpp, src + pageofs_in, count);
- kunmap_atomic(src);
- src = NULL;
- tmpp += count;
- pageofs_in = 0;
- count = PAGE_SIZE;
- ++in;
- }
- return tmp;
-}
-
-static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
-{
- unsigned int inputmargin, inlen;
- u8 *src;
- bool copied, support_0padding;
- int ret;
-
- if (rq->inputsize > PAGE_SIZE)
- return -ENOTSUPP;
-
- src = kmap_atomic(*rq->in);
- inputmargin = 0;
- support_0padding = false;
-
- /* in-place decompression is only safe when 0padding is enabled */
- if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) {
- support_0padding = true;
-
- while (!src[inputmargin & ~PAGE_MASK])
- if (!(++inputmargin & ~PAGE_MASK))
- break;
-
- if (inputmargin >= rq->inputsize) {
- kunmap_atomic(src);
- return -EIO;
- }
- }
-
- copied = false;
- inlen = rq->inputsize - inputmargin;
- if (rq->inplace_io) {
- const uint oend = (rq->pageofs_out +
- rq->outputsize) & ~PAGE_MASK;
- const uint nr = PAGE_ALIGN(rq->pageofs_out +
- rq->outputsize) >> PAGE_SHIFT;
-
- if (rq->partial_decoding || !support_0padding ||
- rq->out[nr - 1] != rq->in[0] ||
- rq->inputsize - oend <
- LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
- src = generic_copy_inplace_data(rq, src, inputmargin);
- inputmargin = 0;
- copied = true;
- }
- }
-
- ret = LZ4_decompress_safe_partial(src + inputmargin, out,
- inlen, rq->outputsize,
- rq->outputsize);
- if (ret < 0) {
- errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]",
- __func__, src + inputmargin, inlen, inputmargin,
- out, rq->outputsize);
- WARN_ON(1);
- print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
- 16, 1, src + inputmargin, inlen, true);
- print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
- 16, 1, out, rq->outputsize, true);
- ret = -EIO;
- }
-
- if (copied)
- erofs_put_pcpubuf(src);
- else
- kunmap_atomic(src);
- return ret;
-}
-
-static struct z_erofs_decompressor decompressors[] = {
- [Z_EROFS_COMPRESSION_SHIFTED] = {
- .name = "shifted"
- },
- [Z_EROFS_COMPRESSION_LZ4] = {
- .prepare_destpages = lz4_prepare_destpages,
- .decompress = lz4_decompress,
- .name = "lz4"
- },
-};
-
-static void copy_from_pcpubuf(struct page **out, const char *dst,
- unsigned short pageofs_out,
- unsigned int outputsize)
-{
- const char *end = dst + outputsize;
- const unsigned int righthalf = PAGE_SIZE - pageofs_out;
- const char *cur = dst - pageofs_out;
-
- while (cur < end) {
- struct page *const page = *out++;
-
- if (page) {
- char *buf = kmap_atomic(page);
-
- if (cur >= dst) {
- memcpy(buf, cur, min_t(uint, PAGE_SIZE,
- end - cur));
- } else {
- memcpy(buf + pageofs_out, cur + pageofs_out,
- min_t(uint, righthalf, end - cur));
- }
- kunmap_atomic(buf);
- }
- cur += PAGE_SIZE;
- }
-}
-
-static int decompress_generic(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const struct z_erofs_decompressor *alg = decompressors + rq->alg;
- unsigned int dst_maptype;
- void *dst;
- int ret;
-
- if (nrpages_out == 1 && !rq->inplace_io) {
- DBG_BUGON(!*rq->out);
- dst = kmap_atomic(*rq->out);
- dst_maptype = 0;
- goto dstmap_out;
- }
-
- /*
- * For small output sizes (especially much less than
- * PAGE_SIZE), copying out the decompressed data rather than
- * the compressed data is preferred.
- */
- if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
- dst = erofs_get_pcpubuf(0);
- if (IS_ERR(dst))
- return PTR_ERR(dst);
-
- rq->inplace_io = false;
- ret = alg->decompress(rq, dst);
- if (!ret)
- copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
- rq->outputsize);
-
- erofs_put_pcpubuf(dst);
- return ret;
- }
-
- ret = alg->prepare_destpages(rq, pagepool);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- dst = page_address(*rq->out);
- dst_maptype = 1;
- goto dstmap_out;
- }
-
- dst = erofs_vmap(rq->out, nrpages_out);
- if (!dst)
- return -ENOMEM;
- dst_maptype = 2;
-
-dstmap_out:
- ret = alg->decompress(rq, dst + rq->pageofs_out);
-
- if (!dst_maptype)
- kunmap_atomic(dst);
- else if (dst_maptype == 2)
- erofs_vunmap(dst, nrpages_out);
- return ret;
-}
-
-static int shifted_decompress(const struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- const unsigned int nrpages_out =
- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
- const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
- unsigned char *src, *dst;
-
- if (nrpages_out > 2) {
- DBG_BUGON(1);
- return -EIO;
- }
-
- if (rq->out[0] == *rq->in) {
- DBG_BUGON(nrpages_out != 1);
- return 0;
- }
-
- src = kmap_atomic(*rq->in);
- if (!rq->out[0]) {
- dst = NULL;
- } else {
- dst = kmap_atomic(rq->out[0]);
- memcpy(dst + rq->pageofs_out, src, righthalf);
- }
-
- if (rq->out[1] == *rq->in) {
- memmove(src, src + righthalf, rq->pageofs_out);
- } else if (nrpages_out == 2) {
- if (dst)
- kunmap_atomic(dst);
- DBG_BUGON(!rq->out[1]);
- dst = kmap_atomic(rq->out[1]);
- memcpy(dst, src + righthalf, rq->pageofs_out);
- }
- if (dst)
- kunmap_atomic(dst);
- kunmap_atomic(src);
- return 0;
-}
-
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct list_head *pagepool)
-{
- if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
- return shifted_decompress(rq, pagepool);
- return decompress_generic(rq, pagepool);
-}
-
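The lz4_decompress() path above only decompresses in place when the compressed data sits far enough from the end of the output, using LZ4_DECOMPRESS_INPLACE_MARGIN(); otherwise it copies the input to a per-CPU buffer first. A minimal userspace sketch of the same safety rule (hypothetical helper name, standalone C, not the kernel implementation):

#include <stdbool.h>
#include <stddef.h>

/* margin used by the deleted decompressor.c */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)

/*
 * In-place decompression is only safe when the compressed input is
 * placed at the tail of the destination buffer with at least the
 * margin of slack, so the decompressor never overwrites input bytes
 * it has not consumed yet.
 */
bool lz4_inplace_is_safe(size_t dstsize, size_t srcsize)
{
	return dstsize >= srcsize + LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize);
}
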
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
deleted file mode 100644
index dbf6a151886c..000000000000
--- a/drivers/staging/erofs/dir.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/dir.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include "internal.h"
-
-static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
- [EROFS_FT_UNKNOWN] = DT_UNKNOWN,
- [EROFS_FT_REG_FILE] = DT_REG,
- [EROFS_FT_DIR] = DT_DIR,
- [EROFS_FT_CHRDEV] = DT_CHR,
- [EROFS_FT_BLKDEV] = DT_BLK,
- [EROFS_FT_FIFO] = DT_FIFO,
- [EROFS_FT_SOCK] = DT_SOCK,
- [EROFS_FT_SYMLINK] = DT_LNK,
-};
-
-static void debug_one_dentry(unsigned char d_type, const char *de_name,
- unsigned int de_namelen)
-{
-#ifdef CONFIG_EROFS_FS_DEBUG
- /* the on-disk name may lack a trailing '\0' */
- unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
-
- memcpy(dbg_namebuf, de_name, de_namelen);
- dbg_namebuf[de_namelen] = '\0';
-
- debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
- de_namelen, d_type);
-#endif
-}
-
-static int erofs_fill_dentries(struct dir_context *ctx,
- void *dentry_blk, unsigned int *ofs,
- unsigned int nameoff, unsigned int maxsize)
-{
- struct erofs_dirent *de = dentry_blk + *ofs;
- const struct erofs_dirent *end = dentry_blk + nameoff;
-
- while (de < end) {
- const char *de_name;
- unsigned int de_namelen;
- unsigned char d_type;
-
- if (de->file_type < EROFS_FT_MAX)
- d_type = erofs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
-
- nameoff = le16_to_cpu(de->nameoff);
- de_name = (char *)dentry_blk + nameoff;
-
- /* the last dirent in the block? */
- if (de + 1 >= end)
- de_namelen = strnlen(de_name, maxsize - nameoff);
- else
- de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
-
- /* a corrupted entry is found */
- if (unlikely(nameoff + de_namelen > maxsize ||
- de_namelen > EROFS_NAME_LEN)) {
- DBG_BUGON(1);
- return -EIO;
- }
-
- debug_one_dentry(d_type, de_name, de_namelen);
- if (!dir_emit(ctx, de_name, de_namelen,
- le64_to_cpu(de->nid), d_type))
- /* stopped for some reason */
- return 1;
- ++de;
- *ofs += sizeof(struct erofs_dirent);
- }
- *ofs = maxsize;
- return 0;
-}
-
-static int erofs_readdir(struct file *f, struct dir_context *ctx)
-{
- struct inode *dir = file_inode(f);
- struct address_space *mapping = dir->i_mapping;
- const size_t dirsize = i_size_read(dir);
- unsigned int i = ctx->pos / EROFS_BLKSIZ;
- unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
- int err = 0;
- bool initial = true;
-
- while (ctx->pos < dirsize) {
- struct page *dentry_page;
- struct erofs_dirent *de;
- unsigned int nameoff, maxsize;
-
- dentry_page = read_mapping_page(mapping, i, NULL);
- if (IS_ERR(dentry_page))
- continue;
-
- de = (struct erofs_dirent *)kmap(dentry_page);
-
- nameoff = le16_to_cpu(de->nameoff);
-
- if (unlikely(nameoff < sizeof(struct erofs_dirent) ||
- nameoff >= PAGE_SIZE)) {
- errln("%s, invalid de[0].nameoff %u",
- __func__, nameoff);
-
- err = -EIO;
- goto skip_this;
- }
-
- maxsize = min_t(unsigned int,
- dirsize - ctx->pos + ofs, PAGE_SIZE);
-
- /* search dirents from an arbitrary position */
- if (unlikely(initial)) {
- initial = false;
-
- ofs = roundup(ofs, sizeof(struct erofs_dirent));
- if (unlikely(ofs >= nameoff))
- goto skip_this;
- }
-
- err = erofs_fill_dentries(ctx, de, &ofs, nameoff, maxsize);
-skip_this:
- kunmap(dentry_page);
-
- put_page(dentry_page);
-
- ctx->pos = blknr_to_addr(i) + ofs;
-
- if (unlikely(err))
- break;
- ++i;
- ofs = 0;
- }
- return err < 0 ? err : 0;
-}
-
-const struct file_operations erofs_dir_fops = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate_shared = erofs_readdir,
-};
-
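The readdir code above relies on the dirent block layout: dirents are packed at the front of the block and names follow, so the first entry's nameoff gives the dirent count and each name's length is the distance to the next entry's nameoff. A little-endian userspace sketch of walking one such block (hypothetical function and struct names, no validation):

#include <stdint.h>
#include <stdio.h>

/* on-disk dirent layout from the deleted erofs_fs.h (12 bytes, little endian) */
struct erofs_dirent_disk {
	uint64_t nid;      /* node number */
	uint16_t nameoff;  /* start offset of the file name */
	uint8_t  file_type;
	uint8_t  reserved;
} __attribute__((packed));

/*
 * Walk one directory block: names are packed after the dirent array,
 * so each name's length is the gap to the next entry's nameoff;
 * the last name runs up to the end of the used area.
 */
void walk_dirent_block(const void *blk, unsigned int used)
{
	const struct erofs_dirent_disk *de = blk;
	unsigned int nameoff = de->nameoff;	/* end of the dirent array */
	unsigned int nr = nameoff / sizeof(*de);
	unsigned int i;

	for (i = 0; i < nr; i++) {
		unsigned int off = de[i].nameoff;
		unsigned int end = (i + 1 < nr) ? de[i + 1].nameoff : used;

		printf("%.*s (nid %llu)\n", (int)(end - off),
		       (const char *)blk + off,
		       (unsigned long long)de[i].nid);
	}
}
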
diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h
deleted file mode 100644
index 9f61abb7c1ca..000000000000
--- a/drivers/staging/erofs/erofs_fs.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
- *
- * linux/drivers/staging/erofs/erofs_fs.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is dual-licensed; you may select either the GNU General Public
- * License version 2 or Apache License, Version 2.0. See the file COPYING
- * in the main directory of the Linux distribution for more details.
- */
-#ifndef __EROFS_FS_H
-#define __EROFS_FS_H
-
-/* Enhanced(Extended) ROM File System */
-#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
-#define EROFS_SUPER_OFFSET 1024
-
-/*
- * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be
- * incompatible with this kernel version.
- */
-#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001
-#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING
-
-struct erofs_super_block {
-/* 0 */__le32 magic; /* in the little endian */
-/* 4 */__le32 checksum; /* crc32c(super_block) */
-/* 8 */__le32 features; /* (aka. feature_compat) */
-/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */
-/* 13 */__u8 reserved;
-
-/* 14 */__le16 root_nid;
-/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */
-
-/* 24 */__le64 build_time; /* inode v1 time derivation */
-/* 32 */__le32 build_time_nsec;
-/* 36 */__le32 blocks; /* used for statfs */
-/* 40 */__le32 meta_blkaddr;
-/* 44 */__le32 xattr_blkaddr;
-/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */
-/* 64 */__u8 volume_name[16]; /* volume name */
-/* 80 */__le32 requirements; /* (aka. feature_incompat) */
-
-/* 84 */__u8 reserved2[44];
-} __packed; /* 128 bytes */
-
-/*
- * erofs inode data mapping:
- * 0 - inode plain without inline data A:
- * inode, [xattrs], ... | ... | no-holed data
- * 1 - inode VLE compression B (legacy):
- * inode, [xattrs], extents ... | ...
- * 2 - inode plain with inline data C:
- * inode, [xattrs], last_inline_data, ... | ... | no-holed data
- * 3 - inode compression D:
- * inode, [xattrs], map_header, extents ... | ...
- * 4~7 - reserved
- */
-enum {
- EROFS_INODE_FLAT_PLAIN,
- EROFS_INODE_FLAT_COMPRESSION_LEGACY,
- EROFS_INODE_FLAT_INLINE,
- EROFS_INODE_FLAT_COMPRESSION,
- EROFS_INODE_LAYOUT_MAX
-};
-
-static bool erofs_inode_is_data_compressed(unsigned int datamode)
-{
- if (datamode == EROFS_INODE_FLAT_COMPRESSION)
- return true;
- return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
-}
-
-/* bit definitions of inode i_advise */
-#define EROFS_I_VERSION_BITS 1
-#define EROFS_I_DATA_MAPPING_BITS 3
-
-#define EROFS_I_VERSION_BIT 0
-#define EROFS_I_DATA_MAPPING_BIT 1
-
-struct erofs_inode_v1 {
-/* 0 */__le16 i_advise;
-
-/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
-/* 2 */__le16 i_xattr_icount;
-/* 4 */__le16 i_mode;
-/* 6 */__le16 i_nlink;
-/* 8 */__le32 i_size;
-/* 12 */__le32 i_reserved;
-/* 16 */union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
- } i_u __packed;
-/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */
-/* 24 */__le16 i_uid;
-/* 26 */__le16 i_gid;
-/* 28 */__le32 i_checksum;
-} __packed;
-
-/* 32 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_V1 0
-/* 64 bytes on-disk inode */
-#define EROFS_INODE_LAYOUT_V2 1
-
-struct erofs_inode_v2 {
- __le16 i_advise;
-
- /* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
- __le16 i_xattr_icount;
- __le16 i_mode;
- __le16 i_reserved; /* 8 bytes */
- __le64 i_size; /* 16 bytes */
- union {
- /* file total compressed blocks for data mapping 1 */
- __le32 compressed_blocks;
- __le32 raw_blkaddr;
-
- /* for device files, used to indicate old/new device # */
- __le32 rdev;
- } i_u __packed;
-
- /* only used for 32-bit stat compatibility */
- __le32 i_ino; /* 24 bytes */
-
- __le32 i_uid;
- __le32 i_gid;
- __le64 i_ctime; /* 32 bytes */
- __le32 i_ctime_nsec;
- __le32 i_nlink;
- __u8 i_reserved2[12];
- __le32 i_checksum; /* 64 bytes */
-} __packed;
-
-#define EROFS_MAX_SHARED_XATTRS (128)
-/* h_shared_count values between 129 ... 255 are special */
-#define EROFS_SHARED_XATTR_EXTENT (255)
-
-/*
- * inline xattrs (n == i_xattr_icount):
- * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
- * 12 bytes / \
- * / \
- * /-----------------------\
- * | erofs_xattr_entries+ |
- * +-----------------------+
- * inline xattrs must start with an erofs_xattr_ibody_header,
- * for read-only fs, no need to introduce h_refcount
- */
-struct erofs_xattr_ibody_header {
- __le32 h_checksum;
- __u8 h_shared_count;
- __u8 h_reserved[7];
- __le32 h_shared_xattrs[0]; /* shared xattr id array */
-} __packed;
-
-/* Name indexes */
-#define EROFS_XATTR_INDEX_USER 1
-#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2
-#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
-#define EROFS_XATTR_INDEX_TRUSTED 4
-#define EROFS_XATTR_INDEX_LUSTRE 5
-#define EROFS_XATTR_INDEX_SECURITY 6
-
-/* xattr entry (for both inline & shared xattrs) */
-struct erofs_xattr_entry {
- __u8 e_name_len; /* length of name */
- __u8 e_name_index; /* attribute name index */
- __le16 e_value_size; /* size of attribute value */
- /* followed by e_name and e_value */
- char e_name[0]; /* attribute name */
-} __packed;
-
-#define ondisk_xattr_ibody_size(count) ({\
- u32 __count = le16_to_cpu(count); \
- ((__count) == 0) ? 0 : \
- sizeof(struct erofs_xattr_ibody_header) + \
- sizeof(__u32) * ((__count) - 1); })
-
-#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry))
-#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \
- sizeof(struct erofs_xattr_entry) + \
- (entry)->e_name_len + le16_to_cpu((entry)->e_value_size))
-
-/* available compression algorithm types */
-enum {
- Z_EROFS_COMPRESSION_LZ4,
- Z_EROFS_COMPRESSION_MAX
-};
-
-/*
- * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
- * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
- * (4B) + 2B + (4B) if compacted 2B is on.
- */
-#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0
-
-#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT)
-
-struct z_erofs_map_header {
- __le32 h_reserved1;
- __le16 h_advise;
- /*
- * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
- * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
- */
- __u8 h_algorithmtype;
- /*
- * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
- * bit 3-4 : (physical - logical) cluster bits of head 1:
- * For example, if logical clustersize = 4096, 1 for 8192.
- * bit 5-7 : (physical - logical) cluster bits of head 2.
- */
- __u8 h_clusterbits;
-};
-
-#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
-
-/*
- * Z_EROFS Variable-sized Logical Extent cluster type:
- * 0 - literal (uncompressed) cluster
- * 1 - compressed cluster (for the head logical cluster)
- * 2 - compressed cluster (for the other logical clusters)
- *
- * In detail,
- * 0 - literal (uncompressed) cluster,
- * di_advise = 0
- * di_clusterofs = the literal data offset of the cluster
- * di_blkaddr = the blkaddr of the literal cluster
- *
- * 1 - compressed cluster (for the head logical cluster)
- * di_advise = 1
- * di_clusterofs = the decompressed data offset of the cluster
- * di_blkaddr = the blkaddr of the compressed cluster
- *
- * 2 - compressed cluster (for the other logical clusters)
- * di_advise = 2
- * di_clusterofs =
- * the decompressed data offset in its own head cluster
- * di_u.delta[0] = distance to its corresponding head cluster
- * di_u.delta[1] = distance to its corresponding tail cluster
- * (di_advise could be 0, 1 or 2)
- */
-enum {
- Z_EROFS_VLE_CLUSTER_TYPE_PLAIN,
- Z_EROFS_VLE_CLUSTER_TYPE_HEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD,
- Z_EROFS_VLE_CLUSTER_TYPE_RESERVED,
- Z_EROFS_VLE_CLUSTER_TYPE_MAX
-};
-
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
-#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
-
-struct z_erofs_vle_decompressed_index {
- __le16 di_advise;
- /* where to decompress in the head cluster */
- __le16 di_clusterofs;
-
- union {
- /* for the head cluster */
- __le32 blkaddr;
- /*
- * for the remaining clusters
- * e.g. for 4k page-sized clusters, maximum 4K * 64K = 256M
- * [0] - pointing to the head cluster
- * [1] - pointing to the tail cluster
- */
- __le16 delta[2];
- } di_u __packed; /* 8 bytes */
-} __packed;
-
-#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
- (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
- sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
-
-/* dirents are sorted in alphabetical order, thus we can do binary search */
-struct erofs_dirent {
- __le64 nid; /* 0, node number */
- __le16 nameoff; /* 8, start offset of file name */
- __u8 file_type; /* 10, file type */
- __u8 reserved; /* 11, reserved */
-} __packed;
-
-/* file types used in inode_info->flags */
-enum {
- EROFS_FT_UNKNOWN,
- EROFS_FT_REG_FILE,
- EROFS_FT_DIR,
- EROFS_FT_CHRDEV,
- EROFS_FT_BLKDEV,
- EROFS_FT_FIFO,
- EROFS_FT_SOCK,
- EROFS_FT_SYMLINK,
- EROFS_FT_MAX
-};
-
-#define EROFS_NAME_LEN 255
-
-/* check the EROFS on-disk layout strictly at compile time */
-static inline void erofs_check_ondisk_layout_definitions(void)
-{
- BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
- BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32);
- BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64);
- BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
- BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
- BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
- BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
- BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
-
- BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
- Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
-}
-
-#endif
-
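ondisk_xattr_ibody_size() above encodes the rule that i_xattr_icount counts 4-byte slots: zero means no inline xattrs, otherwise the size is one 12-byte header plus (count - 1) * 4 bytes of payload. A standalone sketch of the same arithmetic with a couple of worked values (hypothetical names):

#include <assert.h>
#include <stdint.h>

#define EROFS_XATTR_IBODY_HEADER_SIZE 12u	/* struct erofs_xattr_ibody_header */

/* i_xattr_icount == 0 means no inline xattr area at all */
static inline uint32_t xattr_ibody_size(uint16_t icount)
{
	return icount ? EROFS_XATTR_IBODY_HEADER_SIZE + 4u * (icount - 1) : 0;
}

int main(void)
{
	assert(xattr_ibody_size(0) == 0);
	assert(xattr_ibody_size(1) == 12);	/* header only */
	assert(xattr_ibody_size(4) == 24);	/* header + 3 * 4 bytes */
	return 0;
}
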
diff --git a/drivers/staging/erofs/include/linux/tagptr.h b/drivers/staging/erofs/include/linux/tagptr.h
deleted file mode 100644
index ccd106dbd48e..000000000000
--- a/drivers/staging/erofs/include/linux/tagptr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Tagged pointer implementation
- *
- * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
- */
-#ifndef _LINUX_TAGPTR_H
-#define _LINUX_TAGPTR_H
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/*
- * the names of the tagged pointer types are tagptr{1, 2, 3...}_t;
- * avoid using the internal structs __tagptr{1, 2, 3...} directly
- */
-#define __MAKE_TAGPTR(n) \
-typedef struct __tagptr##n { \
- uintptr_t v; \
-} tagptr##n##_t;
-
-__MAKE_TAGPTR(1)
-__MAKE_TAGPTR(2)
-__MAKE_TAGPTR(3)
-__MAKE_TAGPTR(4)
-
-#undef __MAKE_TAGPTR
-
-extern void __compiletime_error("bad tagptr tags")
- __bad_tagptr_tags(void);
-
-extern void __compiletime_error("bad tagptr type")
- __bad_tagptr_type(void);
-
-/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
-#define __tagptr_mask_1(ptr, n) \
- __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
- (1UL << (n)) - 1 :
-
-#define __tagptr_mask(ptr) (\
- __tagptr_mask_1(ptr, 1) ( \
- __tagptr_mask_1(ptr, 2) ( \
- __tagptr_mask_1(ptr, 3) ( \
- __tagptr_mask_1(ptr, 4) ( \
- __bad_tagptr_type(), 0)))))
-
-/* generate a tagged pointer from a raw value */
-#define tagptr_init(type, val) \
- ((typeof(type)){ .v = (uintptr_t)(val) })
-
-/*
- * directly cast a tagged pointer to the native pointer type, which
- * could be used for backward compatibility of existing code.
- */
-#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
-
-/* encode tagged pointers */
-#define tagptr_fold(type, ptr, _tags) ({ \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
- __bad_tagptr_tags(); \
-tagptr_init(type, (uintptr_t)(ptr) | tags); })
-
-/* decode tagged pointers */
-#define tagptr_unfold_ptr(tptr) \
- ((void *)((tptr).v & ~__tagptr_mask(tptr)))
-
-#define tagptr_unfold_tags(tptr) \
- ((tptr).v & __tagptr_mask(tptr))
-
-/* operations for the tagged pointer */
-#define tagptr_eq(_tptr1, _tptr2) ({ \
- typeof(_tptr1) tptr1 = (_tptr1); \
- typeof(_tptr2) tptr2 = (_tptr2); \
- (void)(&tptr1 == &tptr2); \
-(tptr1).v == (tptr2).v; })
-
-/* lock-free CAS operation */
-#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- typeof(_o) o = (_o); \
- typeof(_n) n = (_n); \
- (void)(&o == &n); \
- (void)(&o == ptptr); \
-tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
-
-/* wrap WRITE_ONCE if atomic update is needed */
-#define tagptr_replace_tags(_ptptr, tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
-*ptptr; })
-
-#define tagptr_set_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v |= tags; \
-*ptptr; })
-
-#define tagptr_clear_tags(_ptptr, _tags) ({ \
- typeof(_ptptr) ptptr = (_ptptr); \
- const typeof(_tags) tags = (_tags); \
- if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
- __bad_tagptr_tags(); \
- ptptr->v &= ~tags; \
-*ptptr; })
-
-#endif
-
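The tagptr macros above pack a small tag into the low, alignment-guaranteed bits of a pointer: tagptrN_t reserves N tag bits. A userspace analogue of the fold/unfold pair for two tag bits (hypothetical names; the real header dispatches on the tag width with __builtin_types_compatible_p and compile-time errors instead of runtime asserts):

#include <assert.h>
#include <stdint.h>

/* analogue of tagptr2_t: 2 tag bits kept in the pointer's low bits */
typedef struct { uintptr_t v; } tagptr2_t;

#define TAG2_MASK ((uintptr_t)3)

static inline tagptr2_t tagptr2_fold(void *ptr, uintptr_t tags)
{
	assert(((uintptr_t)ptr & TAG2_MASK) == 0);	/* pointer must be 4-byte aligned */
	assert((tags & ~TAG2_MASK) == 0);		/* tags must fit in 2 bits */
	return (tagptr2_t){ .v = (uintptr_t)ptr | tags };
}

static inline void *tagptr2_unfold_ptr(tagptr2_t t)
{
	return (void *)(t.v & ~TAG2_MASK);
}

static inline uintptr_t tagptr2_unfold_tags(tagptr2_t t)
{
	return t.v & TAG2_MASK;
}
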
diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h
deleted file mode 100644
index 660c92fc1803..000000000000
--- a/drivers/staging/erofs/include/trace/events/erofs.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM erofs
-
-#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EROFS_H
-
-#include <linux/tracepoint.h>
-
-#define show_dev(dev) MAJOR(dev), MINOR(dev)
-#define show_dev_nid(entry) show_dev(entry->dev), entry->nid
-
-#define show_file_type(type) \
- __print_symbolic(type, \
- { 0, "FILE" }, \
- { 1, "DIR" })
-
-#define show_map_flags(flags) __print_flags(flags, "|", \
- { EROFS_GET_BLOCKS_RAW, "RAW" })
-
-#define show_mflags(flags) __print_flags(flags, "", \
- { EROFS_MAP_MAPPED, "M" }, \
- { EROFS_MAP_META, "I" }, \
- { EROFS_MAP_ZIPPED, "Z" })
-
-TRACE_EVENT(erofs_lookup,
-
- TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),
-
- TP_ARGS(dir, dentry, flags),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(const char *, name )
- __field(unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = dir->i_sb->s_dev;
- __entry->nid = EROFS_V(dir)->nid;
- __entry->name = dentry->d_name.name;
- __entry->flags = flags;
- ),
-
- TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
- show_dev_nid(__entry),
- __entry->name,
- __entry->flags)
-);
-
-TRACE_EVENT(erofs_fill_inode,
- TP_PROTO(struct inode *inode, int isdir),
- TP_ARGS(inode, isdir),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(erofs_blk_t, blkaddr )
- __field(unsigned int, ofs )
- __field(int, isdir )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->isdir = isdir;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
- show_dev_nid(__entry),
- __entry->blkaddr, __entry->ofs,
- __entry->isdir)
-);
-
-TRACE_EVENT(erofs_readpage,
-
- TP_PROTO(struct page *page, bool raw),
-
- TP_ARGS(page, raw),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(int, dir )
- __field(pgoff_t, index )
- __field(int, uptodate)
- __field(bool, raw )
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_V(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
- __entry->raw = raw;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d "
- "raw = %d",
- show_dev_nid(__entry),
- show_file_type(__entry->dir),
- (unsigned long)__entry->index,
- __entry->uptodate,
- __entry->raw)
-);
-
-TRACE_EVENT(erofs_readpages,
-
- TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage,
- bool raw),
-
- TP_ARGS(inode, page, nrpage, raw),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(erofs_nid_t, nid )
- __field(pgoff_t, start )
- __field(unsigned int, nrpage )
- __field(bool, raw )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->start = page->index;
- __entry->nrpage = nrpage;
- __entry->raw = raw;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d",
- show_dev_nid(__entry),
- (unsigned long)__entry->start,
- __entry->nrpage,
- __entry->raw)
-);
-
-DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- __field( erofs_off_t, la )
- __field( u64, llen )
- __field( unsigned int, flags )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->la = map->m_la;
- __entry->llen = map->m_llen;
- __entry->flags = flags;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s",
- show_dev_nid(__entry),
- __entry->la, __entry->llen,
- __entry->flags ? show_map_flags(__entry->flags) : "NULL")
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- __field( unsigned int, flags )
- __field( erofs_off_t, la )
- __field( erofs_off_t, pa )
- __field( u64, llen )
- __field( u64, plen )
- __field( unsigned int, mflags )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- __entry->flags = flags;
- __entry->la = map->m_la;
- __entry->pa = map->m_pa;
- __entry->llen = map->m_llen;
- __entry->plen = map->m_plen;
- __entry->mflags = map->m_flags;
- __entry->ret = ret;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu, flags %s "
- "la %llu pa %llu llen %llu plen %llu mflags %s ret %d",
- show_dev_nid(__entry),
- __entry->flags ? show_map_flags(__entry->flags) : "NULL",
- __entry->la, __entry->pa, __entry->llen, __entry->plen,
- show_mflags(__entry->mflags), __entry->ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-TRACE_EVENT(erofs_destroy_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_V(inode)->nid;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
-);
-
-#endif /* _TRACE_EROFS_H */
-
- /* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
deleted file mode 100644
index 4c3d8bf8d249..000000000000
--- a/drivers/staging/erofs/inode.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/inode.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include "xattr.h"
-
-#include <trace/events/erofs.h>
-
-/* no locking */
-static int read_inode(struct inode *inode, void *data)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
- struct erofs_inode_v1 *v1 = data;
- const unsigned int advise = le16_to_cpu(v1->i_advise);
- erofs_blk_t nblks = 0;
-
- vi->datamode = __inode_data_mapping(advise);
-
- if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) {
- errln("unsupported data mapping %u of nid %llu",
- vi->datamode, vi->nid);
- DBG_BUGON(1);
- return -EIO;
- }
-
- if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) {
- struct erofs_inode_v2 *v2 = data;
-
- vi->inode_isize = sizeof(struct erofs_inode_v2);
- vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount);
-
- inode->i_mode = le16_to_cpu(v2->i_mode);
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
- vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr);
- } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(v2->i_u.rdev));
- } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- inode->i_rdev = 0;
- } else {
- return -EIO;
- }
-
- i_uid_write(inode, le32_to_cpu(v2->i_uid));
- i_gid_write(inode, le32_to_cpu(v2->i_gid));
- set_nlink(inode, le32_to_cpu(v2->i_nlink));
-
- /* ns timestamp */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- le64_to_cpu(v2->i_ctime);
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- le32_to_cpu(v2->i_ctime_nsec);
-
- inode->i_size = le64_to_cpu(v2->i_size);
-
- /* total blocks for compressed files */
- if (is_inode_layout_compression(inode))
- nblks = le32_to_cpu(v2->i_u.compressed_blocks);
- } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) {
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
-
- vi->inode_isize = sizeof(struct erofs_inode_v1);
- vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount);
-
- inode->i_mode = le16_to_cpu(v1->i_mode);
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
- vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr);
- } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(v1->i_u.rdev));
- } else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- inode->i_rdev = 0;
- } else {
- return -EIO;
- }
-
- i_uid_write(inode, le16_to_cpu(v1->i_uid));
- i_gid_write(inode, le16_to_cpu(v1->i_gid));
- set_nlink(inode, le16_to_cpu(v1->i_nlink));
-
- /* use build time to derive all file times */
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
- sbi->build_time;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
- sbi->build_time_nsec;
-
- inode->i_size = le32_to_cpu(v1->i_size);
- if (is_inode_layout_compression(inode))
- nblks = le32_to_cpu(v1->i_u.compressed_blocks);
- } else {
- errln("unsupported on-disk inode version %u of nid %llu",
- __inode_version(advise), vi->nid);
- DBG_BUGON(1);
- return -EIO;
- }
-
- if (!nblks)
- /* measure inode.i_blocks as generic filesystems do */
- inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
- else
- inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
- return 0;
-}
-
-/*
- * try_lock can be required since the locking order is:
- *   file data(fs_inode)
- *        meta(bd_inode)
- * but the majority of the callers is "iget", in which
- * case we are pretty sure there is no deadlock since
- * no data operations exist. However, try_lock is still
- * preferred since it adds little overhead and
- * will succeed immediately.
- */
-static int fill_inline_data(struct inode *inode, void *data,
- unsigned int m_pofs)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
- struct erofs_sb_info *sbi = EROFS_I_SB(inode);
-
- /* should be inode inline C */
- if (!is_inode_flat_inline(inode))
- return 0;
-
- /* fast symlink (following ext4) */
- if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) {
- char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL);
-
- if (unlikely(!lnk))
- return -ENOMEM;
-
- m_pofs += vi->inode_isize + vi->xattr_isize;
-
- /* inline symlink data shouldn't cross the page boundary either */
- if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
- DBG_BUGON(1);
- kfree(lnk);
- return -EIO;
- }
-
- /* get in-page inline data */
- memcpy(lnk, data + m_pofs, inode->i_size);
- lnk[inode->i_size] = '\0';
-
- inode->i_link = lnk;
- set_inode_fast_symlink(inode);
- }
- return 0;
-}
-
-static int fill_inode(struct inode *inode, int isdir)
-{
- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
- struct erofs_vnode *vi = EROFS_V(inode);
- struct page *page;
- void *data;
- int err;
- erofs_blk_t blkaddr;
- unsigned int ofs;
-
- trace_erofs_fill_inode(inode, isdir);
-
- blkaddr = erofs_blknr(iloc(sbi, vi->nid));
- ofs = erofs_blkoff(iloc(sbi, vi->nid));
-
- debugln("%s, reading inode nid %llu at %u of blkaddr %u",
- __func__, vi->nid, ofs, blkaddr);
-
- page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir);
-
- if (IS_ERR(page)) {
- errln("failed to get inode (nid: %llu) page, err %ld",
- vi->nid, PTR_ERR(page));
- return PTR_ERR(page);
- }
-
- DBG_BUGON(!PageUptodate(page));
- data = page_address(page);
-
- err = read_inode(inode, data + ofs);
- if (!err) {
- /* setup the new inode */
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &erofs_generic_iops;
- inode->i_fop = &generic_ro_fops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &erofs_dir_iops;
- inode->i_fop = &erofs_dir_fops;
- } else if (S_ISLNK(inode->i_mode)) {
- /* by default, page_get_link is used for symlink */
- inode->i_op = &erofs_symlink_iops;
- inode_nohighmem(inode);
- } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
- S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- inode->i_op = &erofs_generic_iops;
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
- goto out_unlock;
- } else {
- err = -EIO;
- goto out_unlock;
- }
-
- if (is_inode_layout_compression(inode)) {
- err = z_erofs_fill_inode(inode);
- goto out_unlock;
- }
-
- inode->i_mapping->a_ops = &erofs_raw_access_aops;
-
- /* fill last page if inline data is available */
- err = fill_inline_data(inode, data, ofs);
- }
-
-out_unlock:
- unlock_page(page);
- put_page(page);
- return err;
-}
-
-/*
- * erofs nid is 64 bits, but i_ino is 'unsigned long', therefore
- * we need to do more on 32-bit platforms to find the right inode.
- */
-#if BITS_PER_LONG == 32
-static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
-{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
-
- return EROFS_V(inode)->nid == nid;
-}
-
-static int erofs_iget_set_actor(struct inode *inode, void *opaque)
-{
- const erofs_nid_t nid = *(erofs_nid_t *)opaque;
-
- inode->i_ino = erofs_inode_hash(nid);
- return 0;
-}
-#endif
-
-static inline struct inode *erofs_iget_locked(struct super_block *sb,
- erofs_nid_t nid)
-{
- const unsigned long hashval = erofs_inode_hash(nid);
-
-#if BITS_PER_LONG >= 64
- /* it is safe to use iget_locked on >= 64-bit platforms */
- return iget_locked(sb, hashval);
-#else
- return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
- erofs_iget_set_actor, &nid);
-#endif
-}
-
-struct inode *erofs_iget(struct super_block *sb,
- erofs_nid_t nid,
- bool isdir)
-{
- struct inode *inode = erofs_iget_locked(sb, nid);
-
- if (unlikely(!inode))
- return ERR_PTR(-ENOMEM);
-
- if (inode->i_state & I_NEW) {
- int err;
- struct erofs_vnode *vi = EROFS_V(inode);
-
- vi->nid = nid;
-
- err = fill_inode(inode, isdir);
- if (likely(!err))
- unlock_new_inode(inode);
- else {
- iget_failed(inode);
- inode = ERR_PTR(err);
- }
- }
- return inode;
-}
-
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
-{
- struct inode *const inode = d_inode(path->dentry);
-
- if (is_inode_layout_compression(inode))
- stat->attributes |= STATX_ATTR_COMPRESSED;
-
- stat->attributes |= STATX_ATTR_IMMUTABLE;
- stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
- STATX_ATTR_IMMUTABLE);
-
- generic_fillattr(inode, stat);
- return 0;
-}
-
-const struct inode_operations erofs_generic_iops = {
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
-const struct inode_operations erofs_symlink_iops = {
- .get_link = page_get_link,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
-const struct inode_operations erofs_fast_symlink_iops = {
- .get_link = simple_get_link,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
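read_inode() above fills i_blocks for uncompressed files as roundup(i_size, EROFS_BLKSIZ) >> 9, i.e. the number of 512-byte sectors covering the block-aligned size. A small sketch of that conversion assuming 4K blocks (hypothetical helper name):

#include <stdint.h>

#define EROFS_BLKSIZ 4096u	/* block size == PAGE_SIZE on 4K-page platforms */

/*
 * Round the byte size up to a whole block, then express the result
 * in 512-byte sectors, matching generic filesystem conventions.
 */
static inline uint64_t erofs_i_blocks(uint64_t i_size)
{
	return ((i_size + EROFS_BLKSIZ - 1) / EROFS_BLKSIZ) * (EROFS_BLKSIZ / 512);
}
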
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
deleted file mode 100644
index 963cc1b8b896..000000000000
--- a/drivers/staging/erofs/internal.h
+++ /dev/null
@@ -1,642 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * linux/drivers/staging/erofs/internal.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#ifndef __INTERNAL_H
-#define __INTERNAL_H
-
-#include <linux/fs.h>
-#include <linux/dcache.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bio.h>
-#include <linux/buffer_head.h>
-#include <linux/cleancache.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "erofs_fs.h"
-
-/* redefine pr_fmt "erofs: " */
-#undef pr_fmt
-#define pr_fmt(fmt) "erofs: " fmt
-
-#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__)
-#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__)
-#ifdef CONFIG_EROFS_FS_DEBUG
-#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
-
-#define dbg_might_sleep might_sleep
-#define DBG_BUGON BUG_ON
-#else
-#define debugln(x, ...) ((void)0)
-
-#define dbg_might_sleep() ((void)0)
-#define DBG_BUGON(x) ((void)(x))
-#endif
-
-enum {
- FAULT_KMALLOC,
- FAULT_READ_IO,
- FAULT_MAX,
-};
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-extern const char *erofs_fault_name[FAULT_MAX];
-#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
-
-struct erofs_fault_info {
- atomic_t inject_ops;
- unsigned int inject_rate;
- unsigned int inject_type;
-};
-#endif
-
-#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
-#define EROFS_FS_ZIP_CACHE_LVL (2)
-#elif defined(EROFS_FS_ZIP_CACHE_UNIPOLAR)
-#define EROFS_FS_ZIP_CACHE_LVL (1)
-#else
-#define EROFS_FS_ZIP_CACHE_LVL (0)
-#endif
-
-#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
-#define EROFS_FS_HAS_MANAGED_CACHE
-#endif
-
-/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
-#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
-
-typedef u64 erofs_nid_t;
-
-struct erofs_sb_info {
- /* list for all registered superblocks, mainly for shrinker */
- struct list_head list;
- struct mutex umount_mutex;
-
- u32 blocks;
- u32 meta_blkaddr;
-#ifdef CONFIG_EROFS_FS_XATTR
- u32 xattr_blkaddr;
-#endif
-
- /* inode slot unit size in bit shift */
- unsigned char islotbits;
-#ifdef CONFIG_EROFS_FS_ZIP
- /* cluster size in bit shift */
- unsigned char clusterbits;
-
- /* the dedicated workstation for compression */
- struct radix_tree_root workstn_tree;
-
- /* threshold for synchronous decompression */
- unsigned int max_sync_decompress_pages;
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct inode *managed_cache;
-#endif
-
-#endif
-
- u32 build_time_nsec;
- u64 build_time;
-
- /* what we really care about is nid, rather than ino */
- erofs_nid_t root_nid;
- /* used for statfs, f_files - f_favail */
- u64 inos;
-
- u8 uuid[16]; /* 128-bit uuid for volume */
- u8 volume_name[16]; /* volume name */
- u32 requirements;
-
- char *dev_name;
-
- unsigned int mount_opt;
- unsigned int shrinker_run_no;
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
- struct erofs_fault_info fault_info; /* For fault injection */
-#endif
-};
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-#define erofs_show_injection_info(type) \
- infoln("inject %s in %s of %pS", erofs_fault_name[type], \
- __func__, __builtin_return_address(0))
-
-static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
-{
- struct erofs_fault_info *ffi = &sbi->fault_info;
-
- if (!ffi->inject_rate)
- return false;
-
- if (!IS_FAULT_SET(ffi, type))
- return false;
-
- atomic_inc(&ffi->inject_ops);
- if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
- atomic_set(&ffi->inject_ops, 0);
- return true;
- }
- return false;
-}
-#else
-static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
-{
- return false;
-}
-
-static inline void erofs_show_injection_info(int type)
-{
-}
-#endif
-
-static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
- size_t size, gfp_t flags)
-{
- if (time_to_inject(sbi, FAULT_KMALLOC)) {
- erofs_show_injection_info(FAULT_KMALLOC);
- return NULL;
- }
- return kmalloc(size, flags);
-}
-
-#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
-#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
-
-/* Mount flags set via mount options or defaults */
-#define EROFS_MOUNT_XATTR_USER 0x00000010
-#define EROFS_MOUNT_POSIX_ACL 0x00000020
-#define EROFS_MOUNT_FAULT_INJECTION 0x00000040
-
-#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
-#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
-#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
-
-#ifdef CONFIG_EROFS_FS_ZIP
-#define erofs_workstn_lock(sbi) xa_lock(&(sbi)->workstn_tree)
-#define erofs_workstn_unlock(sbi) xa_unlock(&(sbi)->workstn_tree)
-
-/* basic unit of the workstation of a super_block */
-struct erofs_workgroup {
- /* the workgroup index in the workstation */
- pgoff_t index;
-
- /* overall workgroup reference count */
- atomic_t refcount;
-};
-
-#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-
-#if defined(CONFIG_SMP)
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- /*
- * other observers should notice all modifications
- * in the freezing period.
- */
- smp_mb();
- atomic_set(&grp->refcount, orig_val);
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- return atomic_cond_read_relaxed(&grp->refcount,
- VAL != EROFS_LOCKED_MAGIC);
-}
-#else
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- /* no need to spin on UP platforms, let's just disable preemption. */
- if (val != atomic_read(&grp->refcount)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- int v = atomic_read(&grp->refcount);
-
- /* a workgroup is never frozen on uniprocessor systems */
- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
- return v;
-}
-#endif
-
-int erofs_workgroup_put(struct erofs_workgroup *grp);
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag);
-int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp, bool tag);
-unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink, bool cleanup);
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct address_space *mapping,
- struct page *page);
-
-#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page)
-{
- return page->mapping == MNGD_MAPPING(sbi);
-}
-#else
-#define MNGD_MAPPING(sbi) (NULL)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page) { return false; }
-#endif
-
-#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
-
-static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
- unsigned int nr)
-{
- return nr <= sbi->max_sync_decompress_pages;
-}
-
-int __init z_erofs_init_zip_subsystem(void);
-void z_erofs_exit_zip_subsystem(void);
-#else
-/* dummy initializer/finalizer for the decompression subsystem */
-static inline int z_erofs_init_zip_subsystem(void) { return 0; }
-static inline void z_erofs_exit_zip_subsystem(void) {}
-#endif
-
-/* we strictly follow PAGE_SIZE and do not use buffer heads yet */
-#define LOG_BLOCK_SIZE PAGE_SHIFT
-
-#undef LOG_SECTORS_PER_BLOCK
-#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
-
-#undef SECTORS_PER_BLOCK
-#define SECTORS_PER_BLOCK (1 << LOG_SECTORS_PER_BLOCK)
-
-#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
-
-#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
-#error erofs cannot be used in this platform
-#endif
-
-#define ROOT_NID(sb) ((sb)->root_nid)
-
-#ifdef CONFIG_EROFS_FS_ZIP
-/* hard limit of pages per compressed cluster */
-#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
-
-/* page count of a compressed cluster */
-#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
-
-#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
-#else
-#define EROFS_PCPUBUF_NR_PAGES 0
-#endif
-
-typedef u64 erofs_off_t;
-
-/* data type for filesystem-wide blocks number */
-typedef u32 erofs_blk_t;
-
-#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
-#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
-#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
-
-static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
-{
- return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
-}
-
-/* atomic flag definitions */
-#define EROFS_V_EA_INITED_BIT 0
-#define EROFS_V_Z_INITED_BIT 1
-
-/* bitlock definitions (arranged in reverse order) */
-#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
-#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2)
-
-struct erofs_vnode {
- erofs_nid_t nid;
-
- /* atomic flags (including bitlocks) */
- unsigned long flags;
-
- unsigned char datamode;
- unsigned char inode_isize;
- unsigned short xattr_isize;
-
- unsigned xattr_shared_count;
- unsigned *xattr_shared_xattrs;
-
- union {
- erofs_blk_t raw_blkaddr;
-#ifdef CONFIG_EROFS_FS_ZIP
- struct {
- unsigned short z_advise;
- unsigned char z_algorithmtype[2];
- unsigned char z_logical_clusterbits;
- unsigned char z_physical_clusterbits[2];
- };
-#endif
- };
- /* the corresponding vfs inode */
- struct inode vfs_inode;
-};
-
-#define EROFS_V(ptr) \
- container_of(ptr, struct erofs_vnode, vfs_inode)
-
-#define __inode_advise(x, bit, bits) \
- (((x) >> (bit)) & ((1 << (bits)) - 1))
-
-#define __inode_version(advise) \
- __inode_advise(advise, EROFS_I_VERSION_BIT, \
- EROFS_I_VERSION_BITS)
-
-#define __inode_data_mapping(advise) \
- __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
- EROFS_I_DATA_MAPPING_BITS)
-
-static inline unsigned long inode_datablocks(struct inode *inode)
-{
- /* since i_size cannot be changed */
- return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
-}
-
-static inline bool is_inode_layout_compression(struct inode *inode)
-{
- return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode);
-}
-
-static inline bool is_inode_flat_inline(struct inode *inode)
-{
- return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE;
-}
-
-extern const struct super_operations erofs_sops;
-
-extern const struct address_space_operations erofs_raw_access_aops;
-#ifdef CONFIG_EROFS_FS_ZIP
-extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
-#endif
-
-/*
- * Logical to physical block mapping, used by erofs_map_blocks()
- *
- * Different from other filesystems, it is used in 2 access modes:
- *
- * 1) RAW access mode:
- *
- * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
- * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
- *
- * Note that m_lblk in the RAW access mode refers to the number of
- * the compressed ondisk block rather than the uncompressed
- * in-memory block for the compressed file.
- *
- * m_pofs equals m_lofs except for the inline data page.
- *
- * 2) Normal access mode:
- *
- * If the inode is not compressed, it has no difference with
- * the RAW access mode. However, if the inode is compressed,
- * users should pass a valid (m_lblk, m_lofs) pair, and get
- * the needed m_pblk, m_pofs, m_len to get the compressed data
- * and the updated m_lblk, m_lofs which indicates the start
- * of the corresponding uncompressed data in the file.
- */
-enum {
- BH_Zipped = BH_PrivateStart,
- BH_FullMapped,
-};
-
-/* Has a disk mapping */
-#define EROFS_MAP_MAPPED (1 << BH_Mapped)
-/* Located in metadata (could be copied from bd_inode) */
-#define EROFS_MAP_META (1 << BH_Meta)
-/* The extent has been compressed */
-#define EROFS_MAP_ZIPPED (1 << BH_Zipped)
-/* The length of extent is full */
-#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
-
-struct erofs_map_blocks {
- erofs_off_t m_pa, m_la;
- u64 m_plen, m_llen;
-
- unsigned int m_flags;
-
- struct page *mpage;
-};
-
-/* Flags used by erofs_map_blocks() */
-#define EROFS_GET_BLOCKS_RAW 0x0001
-
-/* zmap.c */
-#ifdef CONFIG_EROFS_FS_ZIP
-int z_erofs_fill_inode(struct inode *inode);
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags);
-#else
-static inline int z_erofs_fill_inode(struct inode *inode) { return -ENOTSUPP; }
-static inline int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- return -ENOTSUPP;
-}
-#endif
-
-/* data.c */
-static inline struct bio *
-erofs_grab_bio(struct super_block *sb,
- erofs_blk_t blkaddr, unsigned int nr_pages, void *bi_private,
- bio_end_io_t endio, bool nofail)
-{
- const gfp_t gfp = GFP_NOIO;
- struct bio *bio;
-
- do {
- if (nr_pages == 1) {
- bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
- if (unlikely(!bio)) {
- DBG_BUGON(nofail);
- return ERR_PTR(-ENOMEM);
- }
- break;
- }
- bio = bio_alloc(gfp, nr_pages);
- nr_pages /= 2;
- } while (unlikely(!bio));
-
- bio->bi_end_io = endio;
- bio_set_dev(bio, sb->s_bdev);
- bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
- bio->bi_private = bi_private;
- return bio;
-}
-
-static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags)
-{
- bio_set_op_attrs(bio, op, op_flags);
- submit_bio(bio);
-}
-
-#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
-#define EROFS_IO_MAX_RETRIES_NOFAIL 0
-#else
-#define EROFS_IO_MAX_RETRIES_NOFAIL CONFIG_EROFS_FS_IO_MAX_RETRIES
-#endif
-
-struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
- bool prio, bool nofail);
-
-static inline struct page *erofs_get_meta_page(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio)
-{
- return __erofs_get_meta_page(sb, blkaddr, prio, false);
-}
-
-static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
- erofs_blk_t blkaddr, bool prio)
-{
- return __erofs_get_meta_page(sb, blkaddr, prio, true);
-}
-
-int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
-
-static inline struct page *
-erofs_get_inline_page(struct inode *inode,
- erofs_blk_t blkaddr)
-{
- return erofs_get_meta_page(inode->i_sb,
- blkaddr, S_ISDIR(inode->i_mode));
-}
-
-/* inode.c */
-static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
-{
-#if BITS_PER_LONG == 32
- return (nid >> 32) ^ (nid & 0xffffffff);
-#else
- return nid;
-#endif
-}
-
-extern const struct inode_operations erofs_generic_iops;
-extern const struct inode_operations erofs_symlink_iops;
-extern const struct inode_operations erofs_fast_symlink_iops;
-
-static inline void set_inode_fast_symlink(struct inode *inode)
-{
- inode->i_op = &erofs_fast_symlink_iops;
-}
-
-static inline bool is_inode_fast_symlink(struct inode *inode)
-{
- return inode->i_op == &erofs_fast_symlink_iops;
-}
-
-struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags);
-
-/* namei.c */
-extern const struct inode_operations erofs_dir_iops;
-
-int erofs_namei(struct inode *dir, struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type);
-
-/* dir.c */
-extern const struct file_operations erofs_dir_fops;
-
-static inline void *erofs_vmap(struct page **pages, unsigned int count)
-{
-#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
- int i = 0;
-
- while (1) {
- void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
- /* retry two more times (3 times in total) */
- if (addr || ++i >= 3)
- return addr;
- vm_unmap_aliases();
- }
- return NULL;
-#else
- return vmap(pages, count, VM_MAP, PAGE_KERNEL);
-#endif
-}
-
-static inline void erofs_vunmap(const void *mem, unsigned int count)
-{
-#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
- vm_unmap_ram(mem, count);
-#else
- vunmap(mem);
-#endif
-}
-
-/* utils.c */
-extern struct shrinker erofs_shrinker_info;
-
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
-
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-void *erofs_get_pcpubuf(unsigned int pagenr);
-#define erofs_put_pcpubuf(buf) do { \
- (void)&(buf); \
- preempt_enable(); \
-} while (0)
-#else
-static inline void *erofs_get_pcpubuf(unsigned int pagenr)
-{
- return ERR_PTR(-ENOTSUPP);
-}
-
-#define erofs_put_pcpubuf(buf) do {} while (0)
-#endif
-
-void erofs_register_super(struct super_block *sb);
-void erofs_unregister_super(struct super_block *sb);
-
-#ifndef lru_to_page
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-#endif
-
-#endif
-
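iloc() above turns an nid into a byte address inside the metadata area: meta_blkaddr scaled by the block size plus nid shifted by the per-filesystem inode slot size (islotbits). A standalone sketch with hypothetical example values (metadata at block 2, 32-byte slots, i.e. islotbits = 5):

#include <stdint.h>
#include <stdio.h>

#define EROFS_BLKSIZ 4096u	/* fixed to PAGE_SIZE in the deleted internal.h */

/* every on-disk inode lives in a fixed-size slot in the metadata area */
static uint64_t iloc(uint32_t meta_blkaddr, unsigned char islotbits, uint64_t nid)
{
	return (uint64_t)meta_blkaddr * EROFS_BLKSIZ + (nid << islotbits);
}

int main(void)
{
	uint64_t addr = iloc(2, 5, 37);	/* nid 37 -> 8192 + 37 * 32 = 9376 */

	printf("blkaddr %llu ofs %llu\n",
	       (unsigned long long)(addr / EROFS_BLKSIZ),
	       (unsigned long long)(addr % EROFS_BLKSIZ));
	return 0;
}
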
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
deleted file mode 100644
index fd3ae78d0ba5..000000000000
--- a/drivers/staging/erofs/namei.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/namei.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include "internal.h"
-#include "xattr.h"
-
-#include <trace/events/erofs.h>
-
-struct erofs_qstr {
- const unsigned char *name;
- const unsigned char *end;
-};
-
-/* assumes the end of qn is accurate and that it has a trailing '\0' */
-static inline int dirnamecmp(const struct erofs_qstr *qn,
- const struct erofs_qstr *qd,
- unsigned int *matched)
-{
- unsigned int i = *matched;
-
- /*
- * on-disk error, let's only BUG_ON in debugging mode.
- * otherwise, return 1 to just skip the invalid name
- * and go on (in consideration of lookup performance).
- */
- DBG_BUGON(qd->name > qd->end);
-
- /* qd may not have a trailing '\0' */
- /* however it is absolutely safe to access if < qd->end */
- while (qd->name + i < qd->end && qd->name[i] != '\0') {
- if (qn->name[i] != qd->name[i]) {
- *matched = i;
- return qn->name[i] > qd->name[i] ? 1 : -1;
- }
- ++i;
- }
- *matched = i;
- /* See comments in __d_alloc on the terminating NUL character */
- return qn->name[i] == '\0' ? 0 : 1;
-}
-
-#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
-
-static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
- u8 *data,
- unsigned int dirblksize,
- const int ndirents)
-{
- int head, back;
- unsigned int startprfx, endprfx;
- struct erofs_dirent *const de = (struct erofs_dirent *)data;
-
- /* since the 1st dirent has been evaluated previously */
- head = 1;
- back = ndirents - 1;
- startprfx = endprfx = 0;
-
- while (head <= back) {
- const int mid = head + (back - head) / 2;
- const int nameoff = nameoff_from_disk(de[mid].nameoff,
- dirblksize);
- unsigned int matched = min(startprfx, endprfx);
- struct erofs_qstr dname = {
- .name = data + nameoff,
- .end = unlikely(mid >= ndirents - 1) ?
- data + dirblksize :
- data + nameoff_from_disk(de[mid + 1].nameoff,
- dirblksize)
- };
-
- /* string comparison without already matched prefix */
- int ret = dirnamecmp(name, &dname, &matched);
-
- if (unlikely(!ret)) {
- return de + mid;
- } else if (ret > 0) {
- head = mid + 1;
- startprfx = matched;
- } else {
- back = mid - 1;
- endprfx = matched;
- }
- }
-
- return ERR_PTR(-ENOENT);
-}
-
-static struct page *find_target_block_classic(struct inode *dir,
- struct erofs_qstr *name,
- int *_ndirents)
-{
- unsigned int startprfx, endprfx;
- int head, back;
- struct address_space *const mapping = dir->i_mapping;
- struct page *candidate = ERR_PTR(-ENOENT);
-
- startprfx = endprfx = 0;
- head = 0;
- back = inode_datablocks(dir) - 1;
-
- while (head <= back) {
- const int mid = head + (back - head) / 2;
- struct page *page = read_mapping_page(mapping, mid, NULL);
-
- if (!IS_ERR(page)) {
- struct erofs_dirent *de = kmap_atomic(page);
- const int nameoff = nameoff_from_disk(de->nameoff,
- EROFS_BLKSIZ);
- const int ndirents = nameoff / sizeof(*de);
- int diff;
- unsigned int matched;
- struct erofs_qstr dname;
-
- if (unlikely(!ndirents)) {
- DBG_BUGON(1);
- kunmap_atomic(de);
- put_page(page);
- page = ERR_PTR(-EIO);
- goto out;
- }
-
- matched = min(startprfx, endprfx);
-
- dname.name = (u8 *)de + nameoff;
- if (ndirents == 1)
- dname.end = (u8 *)de + EROFS_BLKSIZ;
- else
- dname.end = (u8 *)de +
- nameoff_from_disk(de[1].nameoff,
- EROFS_BLKSIZ);
-
- /* string comparison without already matched prefix */
- diff = dirnamecmp(name, &dname, &matched);
- kunmap_atomic(de);
-
- if (unlikely(!diff)) {
- *_ndirents = 0;
- goto out;
- } else if (diff > 0) {
- head = mid + 1;
- startprfx = matched;
-
- if (!IS_ERR(candidate))
- put_page(candidate);
- candidate = page;
- *_ndirents = ndirents;
- } else {
- put_page(page);
-
- back = mid - 1;
- endprfx = matched;
- }
- continue;
- }
-out: /* free if the candidate is valid */
- if (!IS_ERR(candidate))
- put_page(candidate);
- return page;
- }
- return candidate;
-}
-
-int erofs_namei(struct inode *dir,
- struct qstr *name,
- erofs_nid_t *nid, unsigned int *d_type)
-{
- int ndirents;
- struct page *page;
- void *data;
- struct erofs_dirent *de;
- struct erofs_qstr qn;
-
- if (unlikely(!dir->i_size))
- return -ENOENT;
-
- qn.name = name->name;
- qn.end = name->name + name->len;
-
- ndirents = 0;
- page = find_target_block_classic(dir, &qn, &ndirents);
-
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- data = kmap_atomic(page);
- /* the target page has been mapped */
- if (ndirents)
- de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
- else
- de = (struct erofs_dirent *)data;
-
- if (!IS_ERR(de)) {
- *nid = le64_to_cpu(de->nid);
- *d_type = de->file_type;
- }
-
- kunmap_atomic(data);
- put_page(page);
-
- return PTR_ERR_OR_ZERO(de);
-}
-
-/* NOTE: i_mutex is already held by vfs */
-static struct dentry *erofs_lookup(struct inode *dir,
- struct dentry *dentry,
- unsigned int flags)
-{
- int err;
- erofs_nid_t nid;
- unsigned int d_type;
- struct inode *inode;
-
- DBG_BUGON(!d_really_is_negative(dentry));
- /* dentry must be unhashed in lookup, no need to worry about it here */
- DBG_BUGON(!d_unhashed(dentry));
-
- trace_erofs_lookup(dir, dentry, flags);
-
- /* file name exceeds fs limit */
- if (unlikely(dentry->d_name.len > EROFS_NAME_LEN))
- return ERR_PTR(-ENAMETOOLONG);
-
- /* false uninitialized warnings on gcc 4.8.x */
- err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
-
- if (err == -ENOENT) {
- /* negative dentry */
- inode = NULL;
- } else if (unlikely(err)) {
- inode = ERR_PTR(err);
- } else {
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- }
- return d_splice_alias(inode, dentry);
-}
-
-const struct inode_operations erofs_dir_iops = {
- .lookup = erofs_lookup,
- .getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
- .listxattr = erofs_listxattr,
-#endif
- .get_acl = erofs_get_acl,
-};
-
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
deleted file mode 100644
index 54494412eba4..000000000000
--- a/drivers/staging/erofs/super.c
+++ /dev/null
@@ -1,701 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/super.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/statfs.h>
-#include <linux/parser.h>
-#include <linux/seq_file.h>
-#include "internal.h"
-#include "xattr.h"
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/erofs.h>
-
-static struct kmem_cache *erofs_inode_cachep __read_mostly;
-
-static void init_once(void *ptr)
-{
- struct erofs_vnode *vi = ptr;
-
- inode_init_once(&vi->vfs_inode);
-}
-
-static int __init erofs_init_inode_cache(void)
-{
- erofs_inode_cachep = kmem_cache_create("erofs_inode",
- sizeof(struct erofs_vnode), 0,
- SLAB_RECLAIM_ACCOUNT,
- init_once);
-
- return erofs_inode_cachep ? 0 : -ENOMEM;
-}
-
-static void erofs_exit_inode_cache(void)
-{
- kmem_cache_destroy(erofs_inode_cachep);
-}
-
-static struct inode *alloc_inode(struct super_block *sb)
-{
- struct erofs_vnode *vi =
- kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
-
- if (!vi)
- return NULL;
-
- /* zero out everything except vfs_inode */
- memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
- return &vi->vfs_inode;
-}
-
-static void free_inode(struct inode *inode)
-{
- struct erofs_vnode *vi = EROFS_V(inode);
-
- /* be careful of the RCU symlink path (see ext4_inode_info->i_data)! */
- if (is_inode_fast_symlink(inode))
- kfree(inode->i_link);
-
- kfree(vi->xattr_shared_xattrs);
-
- kmem_cache_free(erofs_inode_cachep, vi);
-}
-
-static bool check_layout_compatibility(struct super_block *sb,
- struct erofs_super_block *layout)
-{
- const unsigned int requirements = le32_to_cpu(layout->requirements);
-
- EROFS_SB(sb)->requirements = requirements;
-
- /* check if current kernel meets all mandatory requirements */
- if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
- errln("unidentified requirements %x, please upgrade kernel version",
- requirements & ~EROFS_ALL_REQUIREMENTS);
- return false;
- }
- return true;
-}
-
-static int superblock_read(struct super_block *sb)
-{
- struct erofs_sb_info *sbi;
- struct buffer_head *bh;
- struct erofs_super_block *layout;
- unsigned int blkszbits;
- int ret;
-
- bh = sb_bread(sb, 0);
-
- if (!bh) {
- errln("cannot read erofs superblock");
- return -EIO;
- }
-
- sbi = EROFS_SB(sb);
- layout = (struct erofs_super_block *)((u8 *)bh->b_data
- + EROFS_SUPER_OFFSET);
-
- ret = -EINVAL;
- if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
- errln("cannot find valid erofs superblock");
- goto out;
- }
-
- blkszbits = layout->blkszbits;
- /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
- if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
- errln("blksize %u isn't supported on this platform",
- 1 << blkszbits);
- goto out;
- }
-
- if (!check_layout_compatibility(sb, layout))
- goto out;
-
- sbi->blocks = le32_to_cpu(layout->blocks);
- sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
-#ifdef CONFIG_EROFS_FS_XATTR
- sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
-#endif
- sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
-#ifdef CONFIG_EROFS_FS_ZIP
- /* TODO: clusterbits should be related to inode */
- sbi->clusterbits = blkszbits;
-
- if (1 << (sbi->clusterbits - PAGE_SHIFT) > Z_EROFS_CLUSTER_MAX_PAGES)
- errln("clusterbits %u is not supported on this kernel",
- sbi->clusterbits);
-#endif
-
- sbi->root_nid = le16_to_cpu(layout->root_nid);
- sbi->inos = le64_to_cpu(layout->inos);
-
- sbi->build_time = le64_to_cpu(layout->build_time);
- sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);
-
- memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
- memcpy(sbi->volume_name, layout->volume_name,
- sizeof(layout->volume_name));
-
- ret = 0;
-out:
- brelse(bh);
- return ret;
-}
-
-#ifdef CONFIG_EROFS_FAULT_INJECTION
-const char *erofs_fault_name[FAULT_MAX] = {
- [FAULT_KMALLOC] = "kmalloc",
- [FAULT_READ_IO] = "read IO error",
-};
-
-static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
- unsigned int rate)
-{
- struct erofs_fault_info *ffi = &sbi->fault_info;
-
- if (rate) {
- atomic_set(&ffi->inject_ops, 0);
- ffi->inject_rate = rate;
- ffi->inject_type = (1 << FAULT_MAX) - 1;
- } else {
- memset(ffi, 0, sizeof(struct erofs_fault_info));
- }
-
- set_opt(sbi, FAULT_INJECTION);
-}
-
-static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- int rate = 0;
-
- if (args->from && match_int(args, &rate))
- return -EINVAL;
-
- __erofs_build_fault_attr(sbi, rate);
- return 0;
-}
-
-static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
-{
- return sbi->fault_info.inject_rate;
-}
-#else
-static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
- unsigned int rate)
-{
-}
-
-static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
- substring_t *args)
-{
- infoln("fault_injection options not supported");
- return 0;
-}
-
-static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
-{
- return 0;
-}
-#endif
-
-static void default_options(struct erofs_sb_info *sbi)
-{
- /* set up some FS parameters */
-#ifdef CONFIG_EROFS_FS_ZIP
- sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
-#endif
-
-#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(sbi, XATTR_USER);
-#endif
-
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(sbi, POSIX_ACL);
-#endif
-}
-
-enum {
- Opt_user_xattr,
- Opt_nouser_xattr,
- Opt_acl,
- Opt_noacl,
- Opt_fault_injection,
- Opt_err
-};
-
-static match_table_t erofs_tokens = {
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_fault_injection, "fault_injection=%u"},
- {Opt_err, NULL}
-};
-
-static int parse_options(struct super_block *sb, char *options)
-{
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int err;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ","))) {
- int token;
-
- if (!*p)
- continue;
-
- args[0].to = args[0].from = NULL;
- token = match_token(p, erofs_tokens, args);
-
- switch (token) {
-#ifdef CONFIG_EROFS_FS_XATTR
- case Opt_user_xattr:
- set_opt(EROFS_SB(sb), XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt(EROFS_SB(sb), XATTR_USER);
- break;
-#else
- case Opt_user_xattr:
- infoln("user_xattr options not supported");
- break;
- case Opt_nouser_xattr:
- infoln("nouser_xattr options not supported");
- break;
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- case Opt_acl:
- set_opt(EROFS_SB(sb), POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(EROFS_SB(sb), POSIX_ACL);
- break;
-#else
- case Opt_acl:
- infoln("acl options not supported");
- break;
- case Opt_noacl:
- infoln("noacl options not supported");
- break;
-#endif
- case Opt_fault_injection:
- err = erofs_build_fault_attr(EROFS_SB(sb), args);
- if (err)
- return err;
- break;
-
- default:
- errln("Unrecognized mount option \"%s\" "
- "or missing value", p);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-
-static const struct address_space_operations managed_cache_aops;
-
-static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
-{
- int ret = 1; /* 0 - busy */
- struct address_space *const mapping = page->mapping;
-
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
- if (PagePrivate(page))
- ret = erofs_try_to_free_cached_page(mapping, page);
-
- return ret;
-}
-
-static void managed_cache_invalidatepage(struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- const unsigned int stop = length + offset;
-
- DBG_BUGON(!PageLocked(page));
-
- /* Check for potential overflow in debug mode */
- DBG_BUGON(stop > PAGE_SIZE || stop < length);
-
- if (offset == 0 && stop == PAGE_SIZE)
- while (!managed_cache_releasepage(page, GFP_NOFS))
- cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
- .releasepage = managed_cache_releasepage,
- .invalidatepage = managed_cache_invalidatepage,
-};
-
-static struct inode *erofs_init_managed_cache(struct super_block *sb)
-{
- struct inode *inode = new_inode(sb);
-
- if (unlikely(!inode))
- return ERR_PTR(-ENOMEM);
-
- set_nlink(inode, 1);
- inode->i_size = OFFSET_MAX;
-
- inode->i_mapping->a_ops = &managed_cache_aops;
- mapping_set_gfp_mask(inode->i_mapping,
- GFP_NOFS | __GFP_HIGHMEM |
- __GFP_MOVABLE | __GFP_NOFAIL);
- return inode;
-}
-
-#endif
-
-static int erofs_read_super(struct super_block *sb,
- const char *dev_name,
- void *data, int silent)
-{
- struct inode *inode;
- struct erofs_sb_info *sbi;
- int err = -EINVAL;
-
- infoln("read_super, device -> %s", dev_name);
- infoln("options -> %s", (char *)data);
-
- if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
- errln("failed to set erofs blksize");
- goto err;
- }
-
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (unlikely(!sbi)) {
- err = -ENOMEM;
- goto err;
- }
- sb->s_fs_info = sbi;
-
- err = superblock_read(sb);
- if (err)
- goto err_sbread;
-
- sb->s_magic = EROFS_SUPER_MAGIC;
- sb->s_flags |= SB_RDONLY | SB_NOATIME;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_time_gran = 1;
-
- sb->s_op = &erofs_sops;
-
-#ifdef CONFIG_EROFS_FS_XATTR
- sb->s_xattr = erofs_xattr_handlers;
-#endif
-
- /* set erofs default mount options */
- default_options(sbi);
-
- err = parse_options(sb, data);
- if (err)
- goto err_parseopt;
-
- if (!silent)
- infoln("root inode @ nid %llu", ROOT_NID(sbi));
-
- if (test_opt(sbi, POSIX_ACL))
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
-
-#ifdef CONFIG_EROFS_FS_ZIP
- INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
-#endif
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- sbi->managed_cache = erofs_init_managed_cache(sb);
- if (IS_ERR(sbi->managed_cache)) {
- err = PTR_ERR(sbi->managed_cache);
- goto err_init_managed_cache;
- }
-#endif
-
- /* get the root inode */
- inode = erofs_iget(sb, ROOT_NID(sbi), true);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- goto err_iget;
- }
-
- if (!S_ISDIR(inode->i_mode)) {
- errln("rootino(nid %llu) is not a directory(i_mode %o)",
- ROOT_NID(sbi), inode->i_mode);
- err = -EINVAL;
- iput(inode);
- goto err_iget;
- }
-
- sb->s_root = d_make_root(inode);
- if (!sb->s_root) {
- err = -ENOMEM;
- goto err_iget;
- }
-
- /* save the device name to sbi */
- sbi->dev_name = __getname();
- if (!sbi->dev_name) {
- err = -ENOMEM;
- goto err_devname;
- }
-
- snprintf(sbi->dev_name, PATH_MAX, "%s", dev_name);
- sbi->dev_name[PATH_MAX - 1] = '\0';
-
- erofs_register_super(sb);
-
- if (!silent)
- infoln("mounted on %s with opts: %s.", dev_name,
- (char *)data);
- return 0;
- /*
- * please add a label for each exit point and use
- * the following naming convention, so that new features
- * can be integrated easily without renaming labels.
- */
-err_devname:
- dput(sb->s_root);
- sb->s_root = NULL;
-err_iget:
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- iput(sbi->managed_cache);
-err_init_managed_cache:
-#endif
-err_parseopt:
-err_sbread:
- sb->s_fs_info = NULL;
- kfree(sbi);
-err:
- return err;
-}
-
-/*
- * could be triggered after deactivate_locked_super()
- * is called, which covers both umount and failed initialization.
- */
-static void erofs_put_super(struct super_block *sb)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
-
- /* for cases that failed in "read_super" */
- if (!sbi)
- return;
-
- WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
-
- infoln("unmounted for %s", sbi->dev_name);
- __putname(sbi->dev_name);
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- iput(sbi->managed_cache);
-#endif
-
- mutex_lock(&sbi->umount_mutex);
-
-#ifdef CONFIG_EROFS_FS_ZIP
- /* clean up the compression space of this sb */
- erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
-#endif
-
- erofs_unregister_super(sb);
- mutex_unlock(&sbi->umount_mutex);
-
- kfree(sbi);
- sb->s_fs_info = NULL;
-}
-
-
-struct erofs_mount_private {
- const char *dev_name;
- char *options;
-};
-
-/* support mount_bdev() with options */
-static int erofs_fill_super(struct super_block *sb,
- void *_priv, int silent)
-{
- struct erofs_mount_private *priv = _priv;
-
- return erofs_read_super(sb, priv->dev_name,
- priv->options, silent);
-}
-
-static struct dentry *erofs_mount(
- struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct erofs_mount_private priv = {
- .dev_name = dev_name,
- .options = data
- };
-
- return mount_bdev(fs_type, flags, dev_name,
- &priv, erofs_fill_super);
-}
-
-static void erofs_kill_sb(struct super_block *sb)
-{
- kill_block_super(sb);
-}
-
-static struct file_system_type erofs_fs_type = {
- .owner = THIS_MODULE,
- .name = "erofs",
- .mount = erofs_mount,
- .kill_sb = erofs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("erofs");
-
-static int __init erofs_module_init(void)
-{
- int err;
-
- erofs_check_ondisk_layout_definitions();
- infoln("initializing erofs " EROFS_VERSION);
-
- err = erofs_init_inode_cache();
- if (err)
- goto icache_err;
-
- err = register_shrinker(&erofs_shrinker_info);
- if (err)
- goto shrinker_err;
-
- err = z_erofs_init_zip_subsystem();
- if (err)
- goto zip_err;
-
- err = register_filesystem(&erofs_fs_type);
- if (err)
- goto fs_err;
-
- infoln("successfully to initialize erofs");
- return 0;
-
-fs_err:
- z_erofs_exit_zip_subsystem();
-zip_err:
- unregister_shrinker(&erofs_shrinker_info);
-shrinker_err:
- erofs_exit_inode_cache();
-icache_err:
- return err;
-}
-
-static void __exit erofs_module_exit(void)
-{
- unregister_filesystem(&erofs_fs_type);
- z_erofs_exit_zip_subsystem();
- unregister_shrinker(&erofs_shrinker_info);
- erofs_exit_inode_cache();
- infoln("successfully finalize erofs");
-}
-
-/* get filesystem statistics */
-static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
-
- buf->f_type = sb->s_magic;
- buf->f_bsize = EROFS_BLKSIZ;
- buf->f_blocks = sbi->blocks;
- buf->f_bfree = buf->f_bavail = 0;
-
- buf->f_files = ULLONG_MAX;
- buf->f_ffree = ULLONG_MAX - sbi->inos;
-
- buf->f_namelen = EROFS_NAME_LEN;
-
- buf->f_fsid.val[0] = (u32)id;
- buf->f_fsid.val[1] = (u32)(id >> 32);
- return 0;
-}
-
-static int erofs_show_options(struct seq_file *seq, struct dentry *root)
-{
- struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
-
-#ifdef CONFIG_EROFS_FS_XATTR
- if (test_opt(sbi, XATTR_USER))
- seq_puts(seq, ",user_xattr");
- else
- seq_puts(seq, ",nouser_xattr");
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- if (test_opt(sbi, POSIX_ACL))
- seq_puts(seq, ",acl");
- else
- seq_puts(seq, ",noacl");
-#endif
- if (test_opt(sbi, FAULT_INJECTION))
- seq_printf(seq, ",fault_injection=%u",
- erofs_get_fault_rate(sbi));
- return 0;
-}
-
-static int erofs_remount(struct super_block *sb, int *flags, char *data)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- unsigned int org_mnt_opt = sbi->mount_opt;
- unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
- int err;
-
- DBG_BUGON(!sb_rdonly(sb));
- err = parse_options(sb, data);
- if (err)
- goto out;
-
- if (test_opt(sbi, POSIX_ACL))
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
-
- *flags |= SB_RDONLY;
- return 0;
-out:
- __erofs_build_fault_attr(sbi, org_inject_rate);
- sbi->mount_opt = org_mnt_opt;
-
- return err;
-}
-
-const struct super_operations erofs_sops = {
- .put_super = erofs_put_super,
- .alloc_inode = alloc_inode,
- .free_inode = free_inode,
- .statfs = erofs_statfs,
- .show_options = erofs_show_options,
- .remount_fs = erofs_remount,
-};
-
-module_init(erofs_module_init);
-module_exit(erofs_module_exit);
-
-MODULE_DESCRIPTION("Enhanced ROM File System");
-MODULE_AUTHOR("Gao Xiang, Yu Chao, Miao Xie, CONSUMER BG, HUAWEI Inc.");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
deleted file mode 100644
index 7af0ba8d8495..000000000000
--- a/drivers/staging/erofs/unzip_pagevec.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * linux/drivers/staging/erofs/unzip_pagevec.h
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#ifndef __EROFS_UNZIP_PAGEVEC_H
-#define __EROFS_UNZIP_PAGEVEC_H
-
-#include <linux/tagptr.h>
-
-/* page type in pagevec for unzip subsystem */
-enum z_erofs_page_type {
- /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
- Z_EROFS_PAGE_TYPE_EXCLUSIVE,
-
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
-
- Z_EROFS_VLE_PAGE_TYPE_HEAD,
- Z_EROFS_VLE_PAGE_TYPE_MAX
-};
-
-extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
- __bad_page_type_exclusive(void);
-
-/* pagevec tagged pointer */
-typedef tagptr2_t erofs_vtptr_t;
-
-/* pagevec collector */
-struct z_erofs_pagevec_ctor {
- struct page *curr, *next;
- erofs_vtptr_t *pages;
-
- unsigned int nr, index;
-};
-
-static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- if (!ctor->curr)
- return;
-
- if (atomic)
- kunmap_atomic(ctor->pages);
- else
- kunmap(ctor->curr);
-}
-
-static inline struct page *
-z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
- unsigned nr)
-{
- unsigned index;
-
- /* keep away from occupied pages */
- if (ctor->next)
- return ctor->next;
-
- for (index = 0; index < nr; ++index) {
- const erofs_vtptr_t t = ctor->pages[index];
- const unsigned tags = tagptr_unfold_tags(t);
-
- if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
- return tagptr_unfold_ptr(t);
- }
- DBG_BUGON(nr >= ctor->nr);
- return NULL;
-}
-
-static inline void
-z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
- bool atomic)
-{
- struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
-
- z_erofs_pagevec_ctor_exit(ctor, atomic);
-
- ctor->curr = next;
- ctor->next = NULL;
- ctor->pages = atomic ?
- kmap_atomic(ctor->curr) : kmap(ctor->curr);
-
- ctor->nr = PAGE_SIZE / sizeof(struct page *);
- ctor->index = 0;
-}
-
-static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
- unsigned nr,
- erofs_vtptr_t *pages, unsigned i)
-{
- ctor->nr = nr;
- ctor->curr = ctor->next = NULL;
- ctor->pages = pages;
-
- if (i >= nr) {
- i -= nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- while (i > ctor->nr) {
- i -= ctor->nr;
- z_erofs_pagevec_ctor_pagedown(ctor, false);
- }
- }
-
- ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
- ctor->index = i;
-}
-
-static inline bool
-z_erofs_pagevec_ctor_enqueue(struct z_erofs_pagevec_ctor *ctor,
- struct page *page,
- enum z_erofs_page_type type,
- bool *occupied)
-{
- *occupied = false;
- if (unlikely(!ctor->next && type))
- if (ctor->index + 1 == ctor->nr)
- return false;
-
- if (unlikely(ctor->index >= ctor->nr))
- z_erofs_pagevec_ctor_pagedown(ctor, false);
-
- /* exclusive page type must be 0 */
- if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
- __bad_page_type_exclusive();
-
- /* note that collector->next is never equal to 1 or 2 */
- if (type == (uintptr_t)ctor->next) {
- ctor->next = page;
- *occupied = true;
- }
-
- ctor->pages[ctor->index++] =
- tagptr_fold(erofs_vtptr_t, page, type);
- return true;
-}
-
-static inline struct page *
-z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
- enum z_erofs_page_type *type)
-{
- erofs_vtptr_t t;
-
- if (unlikely(ctor->index >= ctor->nr)) {
- DBG_BUGON(!ctor->next);
- z_erofs_pagevec_ctor_pagedown(ctor, true);
- }
-
- t = ctor->pages[ctor->index];
-
- *type = tagptr_unfold_tags(t);
-
- /* should remind that collector->next never equal to 1, 2 */
- if (*type == (uintptr_t)ctor->next)
- ctor->next = tagptr_unfold_ptr(t);
-
- ctor->pages[ctor->index++] =
- tagptr_fold(erofs_vtptr_t, NULL, 0);
-
- return tagptr_unfold_ptr(t);
-}
-
-#endif
-
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
deleted file mode 100644
index f0dab81ff816..000000000000
--- a/drivers/staging/erofs/unzip_vle.c
+++ /dev/null
@@ -1,1591 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/unzip_vle.c
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include "unzip_vle.h"
-#include "compress.h"
-#include <linux/prefetch.h>
-
-#include <trace/events/erofs.h>
-
-/*
- * a compressed_pages[] placeholder in order to avoid
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
-
-/* how to allocate cached pages for a workgroup */
-enum z_erofs_cache_alloctype {
- DONTALLOC, /* don't allocate any cached pages */
- DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
-};
-
-/*
- * tagged pointer with 1-bit tag for all compressed pages
- * tag 1 - the page is just found with an extra page reference
- */
-typedef tagptr1_t compressed_page_t;
-
-#define tag_compressed_page_justfound(page) \
- tagptr_fold(compressed_page_t, page, 1)
-
-static struct workqueue_struct *z_erofs_workqueue __read_mostly;
-static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
-
-void z_erofs_exit_zip_subsystem(void)
-{
- destroy_workqueue(z_erofs_workqueue);
- kmem_cache_destroy(z_erofs_workgroup_cachep);
-}
-
-static inline int init_unzip_workqueue(void)
-{
- const unsigned int onlinecpus = num_possible_cpus();
-
- /*
- * we don't need too many threads; limiting the number of threads
- * could improve scheduling performance.
- */
- z_erofs_workqueue =
- alloc_workqueue("erofs_unzipd",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
- onlinecpus + onlinecpus / 4);
-
- return z_erofs_workqueue ? 0 : -ENOMEM;
-}
-
-static void init_once(void *ptr)
-{
- struct z_erofs_vle_workgroup *grp = ptr;
- struct z_erofs_vle_work *const work =
- z_erofs_vle_grab_primary_work(grp);
- unsigned int i;
-
- mutex_init(&work->lock);
- work->nr_pages = 0;
- work->vcnt = 0;
- for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
- grp->compressed_pages[i] = NULL;
-}
-
-static void init_always(struct z_erofs_vle_workgroup *grp)
-{
- struct z_erofs_vle_work *const work =
- z_erofs_vle_grab_primary_work(grp);
-
- atomic_set(&grp->obj.refcount, 1);
- grp->flags = 0;
-
- DBG_BUGON(work->nr_pages);
- DBG_BUGON(work->vcnt);
-}
-
-int __init z_erofs_init_zip_subsystem(void)
-{
- z_erofs_workgroup_cachep =
- kmem_cache_create("erofs_compress",
- Z_EROFS_WORKGROUP_SIZE, 0,
- SLAB_RECLAIM_ACCOUNT, init_once);
-
- if (z_erofs_workgroup_cachep) {
- if (!init_unzip_workqueue())
- return 0;
-
- kmem_cache_destroy(z_erofs_workgroup_cachep);
- }
- return -ENOMEM;
-}
-
-enum z_erofs_vle_work_role {
- Z_EROFS_VLE_WORK_SECONDARY,
- Z_EROFS_VLE_WORK_PRIMARY,
- /*
- * The current work was the tail of an existing chain, and all the
- * previously processed chained works have been decided to hook up to it.
- * A new chain should be created for the remaining unprocessed works,
- * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
- * the next work cannot reuse the whole page in the following scenario:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (belongs to the next work) | (belongs to the current work) |
- * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
- */
- Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
- /*
- * The current work has been linked with the processed chained works,
- * and could be also linked with the potential remaining works, which
- * means if the processing page is the tail partial page of the work,
- * the current work can safely use the whole page (since the next work
- * is under control) for in-place decompression, as illustrated below:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current work) | (of the previous work) |
- * | PRIMARY_FOLLOWED or | |
- * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
- *
- * [ (*) the above page can be used for the current work itself. ]
- */
- Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
- Z_EROFS_VLE_WORK_MAX
-};
-
-struct z_erofs_vle_work_builder {
- enum z_erofs_vle_work_role role;
- /*
- * 'hosted = false' means that the current workgroup doesn't belong to
- * the owned chained workgroups. In other words, it is none of our
- * business to submit this workgroup.
- */
- bool hosted;
-
- struct z_erofs_vle_workgroup *grp;
- struct z_erofs_vle_work *work;
- struct z_erofs_pagevec_ctor vector;
-
- /* pages used for reading the compressed data */
- struct page **compressed_pages;
- unsigned int compressed_deficit;
-};
-
-#define VLE_WORK_BUILDER_INIT() \
- { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
- struct address_space *mc,
- pgoff_t index,
- unsigned int clusterpages,
- enum z_erofs_cache_alloctype type,
- struct list_head *pagepool,
- gfp_t gfp)
-{
- struct page **const pages = bl->compressed_pages;
- const unsigned int remaining = bl->compressed_deficit;
- bool standalone = true;
- unsigned int i, j = 0;
-
- if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
- return;
-
- gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
-
- index += clusterpages - remaining;
-
- for (i = 0; i < remaining; ++i) {
- struct page *page;
- compressed_page_t t;
-
- /* the compressed page was loaded before */
- if (READ_ONCE(pages[i]))
- continue;
-
- page = find_get_page(mc, index + i);
-
- if (page) {
- t = tag_compressed_page_justfound(page);
- } else if (type == DELAYEDALLOC) {
- t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
- } else { /* DONTALLOC */
- if (standalone)
- j = i;
- standalone = false;
- continue;
- }
-
- if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
- continue;
-
- if (page)
- put_page(page);
- }
- bl->compressed_pages += j;
- bl->compressed_deficit = remaining - j;
-
- if (standalone)
- bl->role = Z_EROFS_VLE_WORK_PRIMARY;
-}
-
-/* called by erofs_shrinker to get rid of all compressed_pages */
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
- struct erofs_workgroup *egrp)
-{
- struct z_erofs_vle_workgroup *const grp =
- container_of(egrp, struct z_erofs_vle_workgroup, obj);
- struct address_space *const mapping = MNGD_MAPPING(sbi);
- const int clusterpages = erofs_clusterpages(sbi);
- int i;
-
- /*
- * the refcount of the workgroup is now frozen at 1,
- * therefore no need to worry about available decompression users.
- */
- for (i = 0; i < clusterpages; ++i) {
- struct page *page = grp->compressed_pages[i];
-
- if (!page || page->mapping != mapping)
- continue;
-
- /* block other users from reclaiming or migrating the page */
- if (!trylock_page(page))
- return -EBUSY;
-
- /* barrier is implied in the following 'unlock_page' */
- WRITE_ONCE(grp->compressed_pages[i], NULL);
-
- set_page_private(page, 0);
- ClearPagePrivate(page);
-
- unlock_page(page);
- put_page(page);
- }
- return 0;
-}
-
-int erofs_try_to_free_cached_page(struct address_space *mapping,
- struct page *page)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
- const unsigned int clusterpages = erofs_clusterpages(sbi);
- struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
- int ret = 0; /* 0 - busy */
-
- if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
- unsigned int i;
-
- for (i = 0; i < clusterpages; ++i) {
- if (grp->compressed_pages[i] == page) {
- WRITE_ONCE(grp->compressed_pages[i], NULL);
- ret = 1;
- break;
- }
- }
- erofs_workgroup_unfreeze(&grp->obj, 1);
-
- if (ret) {
- ClearPagePrivate(page);
- put_page(page);
- }
- }
- return ret;
-}
-#else
-static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
- struct address_space *mc,
- pgoff_t index,
- unsigned int clusterpages,
- enum z_erofs_cache_alloctype type,
- struct list_head *pagepool,
- gfp_t gfp)
-{
- /* nowhere to load compressed pages from */
-}
-#endif
-
-/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool try_to_reuse_as_compressed_page(
- struct z_erofs_vle_work_builder *b,
- struct page *page)
-{
- while (b->compressed_deficit) {
- --b->compressed_deficit;
- if (!cmpxchg(b->compressed_pages++, NULL, page))
- return true;
- }
-
- return false;
-}
-
-/* callers must be with work->lock held */
-static int z_erofs_vle_work_add_page(
- struct z_erofs_vle_work_builder *builder,
- struct page *page,
- enum z_erofs_page_type type)
-{
- int ret;
- bool occupied;
-
- /* give priority for the compressed data storage */
- if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
- type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- try_to_reuse_as_compressed_page(builder, page))
- return 0;
-
- ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
- page, type, &occupied);
- builder->work->vcnt += (unsigned int)ret;
-
- return ret ? 0 : -EAGAIN;
-}
-
-static enum z_erofs_vle_work_role
-try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
-{
- DBG_BUGON(*hosted);
-
- /* let's claim the following types of workgroups */
-retry:
- if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
- /* type 1, nil workgroup */
- if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
- *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
- goto retry;
-
- *owned_head = &grp->next;
- *hosted = true;
- /* lucky, I am the followee :) */
- return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
-
- } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
- /*
- * type 2, link to the end of an existing open chain,
- * be careful that its submission itself is governed
- * by the original owned chain.
- */
- if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
- *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
- goto retry;
- *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
- return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
- }
-
- return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
-}
-
-struct z_erofs_vle_work_finder {
- struct super_block *sb;
- pgoff_t idx;
- unsigned int pageofs;
-
- struct z_erofs_vle_workgroup **grp_ret;
- enum z_erofs_vle_work_role *role;
- z_erofs_vle_owned_workgrp_t *owned_head;
- bool *hosted;
-};
-
-static struct z_erofs_vle_work *
-z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
-{
- bool tag, primary;
- struct erofs_workgroup *egrp;
- struct z_erofs_vle_workgroup *grp;
- struct z_erofs_vle_work *work;
-
- egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
- if (!egrp) {
- *f->grp_ret = NULL;
- return NULL;
- }
-
- grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
- *f->grp_ret = grp;
-
- work = z_erofs_vle_grab_work(grp, f->pageofs);
- /* if multiref is disabled, `primary' is always true */
- primary = true;
-
- DBG_BUGON(work->pageofs != f->pageofs);
-
- /*
- * lock must be taken first to avoid grp->next == NIL between
- * claiming workgroup and adding pages:
- * grp->next != NIL
- * grp->next = NIL
- * mutex_unlock_all
- * mutex_lock(&work->lock)
- * add all pages to pagevec
- *
- * [correct locking case 1]:
- * mutex_lock(grp->work[a])
- * ...
- * mutex_lock(grp->work[b]) mutex_lock(grp->work[c])
- * ... *role = SECONDARY
- * add all pages to pagevec
- * ...
- * mutex_unlock(grp->work[c])
- * mutex_lock(grp->work[c])
- * ...
- * grp->next = NIL
- * mutex_unlock_all
- *
- * [correct locking case 2]:
- * mutex_lock(grp->work[b])
- * ...
- * mutex_lock(grp->work[a])
- * ...
- * mutex_lock(grp->work[c])
- * ...
- * grp->next = NIL
- * mutex_unlock_all
- * mutex_lock(grp->work[a])
- * *role = PRIMARY_OWNER
- * add all pages to pagevec
- * ...
- */
- mutex_lock(&work->lock);
-
- *f->hosted = false;
- if (!primary)
- *f->role = Z_EROFS_VLE_WORK_SECONDARY;
- else /* claim the workgroup if possible */
- *f->role = try_to_claim_workgroup(grp, f->owned_head,
- f->hosted);
- return work;
-}
-
-static struct z_erofs_vle_work *
-z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
- struct erofs_map_blocks *map)
-{
- bool gnew = false;
- struct z_erofs_vle_workgroup *grp = *f->grp_ret;
- struct z_erofs_vle_work *work;
-
- /* if multiref is disabled, grp should never be nullptr */
- if (unlikely(grp)) {
- DBG_BUGON(1);
- return ERR_PTR(-EINVAL);
- }
-
- /* no available workgroup, let's allocate one */
- grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
- if (unlikely(!grp))
- return ERR_PTR(-ENOMEM);
-
- init_always(grp);
- grp->obj.index = f->idx;
- grp->llen = map->m_llen;
-
- z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
- Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
- Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
-
- if (map->m_flags & EROFS_MAP_FULL_MAPPED)
- grp->flags |= Z_EROFS_VLE_WORKGRP_FULL_LENGTH;
-
- /* new workgrps have been claimed as type 1 */
- WRITE_ONCE(grp->next, *f->owned_head);
- /* primary and followed work for all new workgrps */
- *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
- /* it should be submitted by ourselves */
- *f->hosted = true;
-
- gnew = true;
- work = z_erofs_vle_grab_primary_work(grp);
- work->pageofs = f->pageofs;
-
- /*
- * lock all primary followed works before they become visible to others;
- * mutex_trylock *never* fails for a new workgroup.
- */
- mutex_trylock(&work->lock);
-
- if (gnew) {
- int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
-
- if (err) {
- mutex_unlock(&work->lock);
- kmem_cache_free(z_erofs_workgroup_cachep, grp);
- return ERR_PTR(-EAGAIN);
- }
- }
-
- *f->owned_head = &grp->next;
- *f->grp_ret = grp;
- return work;
-}
-
-#define builder_is_hooked(builder) \
- ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
-
-#define builder_is_followed(builder) \
- ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
-
-static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
- struct super_block *sb,
- struct erofs_map_blocks *map,
- z_erofs_vle_owned_workgrp_t *owned_head)
-{
- const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
- struct z_erofs_vle_workgroup *grp;
- const struct z_erofs_vle_work_finder finder = {
- .sb = sb,
- .idx = erofs_blknr(map->m_pa),
- .pageofs = map->m_la & ~PAGE_MASK,
- .grp_ret = &grp,
- .role = &builder->role,
- .owned_head = owned_head,
- .hosted = &builder->hosted
- };
- struct z_erofs_vle_work *work;
-
- DBG_BUGON(builder->work);
-
- /* must be Z_EROFS_VLE_WORKGRP_TAIL or the next chained work */
- DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
- DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
-
- DBG_BUGON(erofs_blkoff(map->m_pa));
-
-repeat:
- work = z_erofs_vle_work_lookup(&finder);
- if (work) {
- unsigned int orig_llen;
-
- /* increase workgroup `llen' if needed */
- while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
- orig_llen != cmpxchg_relaxed(&grp->llen,
- orig_llen, map->m_llen))
- cpu_relax();
- goto got_it;
- }
-
- work = z_erofs_vle_work_register(&finder, map);
- if (unlikely(work == ERR_PTR(-EAGAIN)))
- goto repeat;
-
- if (IS_ERR(work))
- return PTR_ERR(work);
-got_it:
- z_erofs_pagevec_ctor_init(&builder->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- work->pagevec, work->vcnt);
-
- if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
- /* enable possibly in-place decompression */
- builder->compressed_pages = grp->compressed_pages;
- builder->compressed_deficit = clusterpages;
- } else {
- builder->compressed_pages = NULL;
- builder->compressed_deficit = 0;
- }
-
- builder->grp = grp;
- builder->work = work;
- return 0;
-}
-
-/*
- * keep in mind that no referenced workgroups will be freed
- * only after a RCU grace period, so rcu_read_lock() could
- * prevent a workgroup from being freed.
- */
-static void z_erofs_rcu_callback(struct rcu_head *head)
-{
- struct z_erofs_vle_work *work = container_of(head,
- struct z_erofs_vle_work, rcu);
- struct z_erofs_vle_workgroup *grp =
- z_erofs_vle_work_workgroup(work, true);
-
- kmem_cache_free(z_erofs_workgroup_cachep, grp);
-}
-
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
-{
- struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
- struct z_erofs_vle_workgroup, obj);
- struct z_erofs_vle_work *const work = &vgrp->work;
-
- call_rcu(&work->rcu, z_erofs_rcu_callback);
-}
-
-static void
-__z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
- struct z_erofs_vle_work *work __maybe_unused)
-{
- erofs_workgroup_put(&grp->obj);
-}
-
-static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
-{
- struct z_erofs_vle_workgroup *grp =
- z_erofs_vle_work_workgroup(work, true);
-
- __z_erofs_vle_work_release(grp, work);
-}
-
-static inline bool
-z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
-{
- struct z_erofs_vle_work *work = builder->work;
-
- if (!work)
- return false;
-
- z_erofs_pagevec_ctor_exit(&builder->vector, false);
- mutex_unlock(&work->lock);
-
- /*
- * if all pending pages are added, don't hold work reference
- * any longer if the current work isn't hosted by ourselves.
- */
- if (!builder->hosted)
- __z_erofs_vle_work_release(builder->grp, work);
-
- builder->work = NULL;
- builder->grp = NULL;
- return true;
-}
-
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
- gfp_t gfp)
-{
- struct page *page = erofs_allocpage(pagepool, gfp);
-
- if (unlikely(!page))
- return NULL;
-
- page->mapping = Z_EROFS_MAPPING_STAGING;
- return page;
-}
-
-struct z_erofs_vle_frontend {
- struct inode *const inode;
-
- struct z_erofs_vle_work_builder builder;
- struct erofs_map_blocks map;
-
- z_erofs_vle_owned_workgrp_t owned_head;
-
- /* used for applying cache strategy on the fly */
- bool backmost;
- erofs_off_t headoffset;
-};
-
-#define VLE_FRONTEND_INIT(__i) { \
- .inode = __i, \
- .map = { \
- .m_llen = 0, \
- .m_plen = 0, \
- .mpage = NULL \
- }, \
- .builder = VLE_WORK_BUILDER_INIT(), \
- .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
- .backmost = true, }
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-static inline bool
-should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
-{
- if (fe->backmost)
- return true;
-
- if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- return la < fe->headoffset;
-
- return false;
-}
-#else
-static inline bool
-should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
-{
- return false;
-}
-#endif
-
-static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
- struct page *page,
- struct list_head *page_pool)
-{
- struct super_block *const sb = fe->inode->i_sb;
- struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
- struct erofs_map_blocks *const map = &fe->map;
- struct z_erofs_vle_work_builder *const builder = &fe->builder;
- const loff_t offset = page_offset(page);
-
- bool tight = builder_is_hooked(builder);
- struct z_erofs_vle_work *work = builder->work;
-
- enum z_erofs_cache_alloctype cache_strategy;
- enum z_erofs_page_type page_type;
- unsigned int cur, end, spiltted, index;
- int err = 0;
-
- /* register locked file pages as online pages in pack */
- z_erofs_onlinepage_init(page);
-
- spiltted = 0;
- end = PAGE_SIZE;
-repeat:
- cur = end - 1;
-
- /* lucky, within the range of the current map_blocks */
- if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen) {
- /* didn't get a valid unzip work previously (very rare) */
- if (!builder->work)
- goto restart_now;
- goto hitted;
- }
-
- /* go ahead to the next map_blocks */
- debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
-
- if (z_erofs_vle_work_iter_end(builder))
- fe->backmost = false;
-
- map->m_la = offset + cur;
- map->m_llen = 0;
- err = z_erofs_map_blocks_iter(fe->inode, map, 0);
- if (unlikely(err))
- goto err_out;
-
-restart_now:
- if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
- goto hitted;
-
- DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
- DBG_BUGON(erofs_blkoff(map->m_pa));
-
- err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
- if (unlikely(err))
- goto err_out;
-
- /* preload all compressed pages (maybe downgrade role if necessary) */
- if (should_alloc_managed_pages(fe, map->m_la))
- cache_strategy = DELAYEDALLOC;
- else
- cache_strategy = DONTALLOC;
-
- preload_compressed_pages(builder, MNGD_MAPPING(sbi),
- map->m_pa / PAGE_SIZE,
- map->m_plen / PAGE_SIZE,
- cache_strategy, page_pool, GFP_KERNEL);
-
- tight &= builder_is_hooked(builder);
- work = builder->work;
-hitted:
- cur = end - min_t(unsigned int, offset + end - map->m_la, end);
- if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
- zero_user_segment(page, cur, end);
- goto next_part;
- }
-
- /* let's derive page type */
- page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
- (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
- Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
-
- if (cur)
- tight &= builder_is_followed(builder);
-
-retry:
- err = z_erofs_vle_work_add_page(builder, page, page_type);
- /* should allocate an additional staging page for pagevec */
- if (err == -EAGAIN) {
- struct page *const newpage =
- __stagingpage_alloc(page_pool, GFP_NOFS);
-
- err = z_erofs_vle_work_add_page(builder, newpage,
- Z_EROFS_PAGE_TYPE_EXCLUSIVE);
- if (likely(!err))
- goto retry;
- }
-
- if (unlikely(err))
- goto err_out;
-
- index = page->index - map->m_la / PAGE_SIZE;
-
- /* FIXME! avoid the last redundant fixup & endio */
- z_erofs_onlinepage_fixup(page, index, true);
-
- /* bump up the number of spiltted parts of a page */
- ++spiltted;
- /* also update nr_pages */
- work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
-next_part:
- /* can be used for verification */
- map->m_llen = offset + cur - map->m_la;
-
- end = cur;
- if (end > 0)
- goto repeat;
-
-out:
- /* FIXME! avoid the last redundant fixup & endio */
- z_erofs_onlinepage_endio(page);
-
- debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
- __func__, page, spiltted, map->m_llen);
- return err;
-
- /* if some error occurred while processing this page */
-err_out:
- SetPageError(page);
- goto out;
-}
-
-static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, ptr);
- struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
- bool background = tagptr_unfold_tags(t);
-
- if (!background) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
-
- if (!atomic_add_return(bios, &io->pending_bios))
- queue_work(z_erofs_workqueue, &io->u.work);
-}
-
-static inline void z_erofs_vle_read_endio(struct bio *bio)
-{
- struct erofs_sb_info *sbi = NULL;
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
- bool cachemngd = false;
-
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(!page->mapping);
-
- if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
- sbi = EROFS_SB(page->mapping->host->i_sb);
-
- if (time_to_inject(sbi, FAULT_READ_IO)) {
- erofs_show_injection_info(FAULT_READ_IO);
- err = BLK_STS_IOERR;
- }
- }
-
- /* sbi should already be set if the page is managed */
- if (sbi)
- cachemngd = erofs_page_is_managed(sbi, page);
-
- if (unlikely(err))
- SetPageError(page);
- else if (cachemngd)
- SetPageUptodate(page);
-
- if (cachemngd)
- unlock_page(page);
- }
-
- z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
- bio_put(bio);
-}
-
-static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
-static DEFINE_MUTEX(z_pagemap_global_lock);
-
-static int z_erofs_vle_unzip(struct super_block *sb,
- struct z_erofs_vle_workgroup *grp,
- struct list_head *page_pool)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- const unsigned int clusterpages = erofs_clusterpages(sbi);
-
- struct z_erofs_pagevec_ctor ctor;
- unsigned int nr_pages;
- unsigned int sparsemem_pages = 0;
- struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
- struct page **pages, **compressed_pages, *page;
- unsigned int algorithm;
- unsigned int i, outputsize;
-
- enum z_erofs_page_type page_type;
- bool overlapped, partial;
- struct z_erofs_vle_work *work;
- int err;
-
- might_sleep();
- work = z_erofs_vle_grab_primary_work(grp);
- DBG_BUGON(!READ_ONCE(work->nr_pages));
-
- mutex_lock(&work->lock);
- nr_pages = work->nr_pages;
-
- if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
- pages = pages_onstack;
- else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
- mutex_trylock(&z_pagemap_global_lock))
- pages = z_pagemap_global;
- else {
-repeat:
- pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
-
- /* fallback to global pagemap for the lowmem scenario */
- if (unlikely(!pages)) {
- if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
- goto repeat;
- else {
- mutex_lock(&z_pagemap_global_lock);
- pages = z_pagemap_global;
- }
- }
- }
-
- for (i = 0; i < nr_pages; ++i)
- pages[i] = NULL;
-
- z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
- work->pagevec, 0);
-
- for (i = 0; i < work->vcnt; ++i) {
- unsigned int pagenr;
-
- page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
-
- /* all pages in pagevec ought to be valid */
- DBG_BUGON(!page);
- DBG_BUGON(!page->mapping);
-
- if (z_erofs_put_stagingpage(page_pool, page))
- continue;
-
- if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
- pagenr = 0;
- else
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
-
- pages[pagenr] = page;
- }
- sparsemem_pages = i;
-
- z_erofs_pagevec_ctor_exit(&ctor, true);
-
- overlapped = false;
- compressed_pages = grp->compressed_pages;
-
- err = 0;
- for (i = 0; i < clusterpages; ++i) {
- unsigned int pagenr;
-
- page = compressed_pages[i];
-
- /* all compressed pages ought to be valid */
- DBG_BUGON(!page);
- DBG_BUGON(!page->mapping);
-
- if (!z_erofs_page_is_staging(page)) {
- if (erofs_page_is_managed(sbi, page)) {
- if (unlikely(!PageUptodate(page)))
- err = -EIO;
- continue;
- }
-
- /*
- * only non-head pages can be selected
- * for in-place decompression
- */
- pagenr = z_erofs_onlinepage_index(page);
-
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
- ++sparsemem_pages;
- pages[pagenr] = page;
-
- overlapped = true;
- }
-
- /* PG_error needs checking for in-place and staging pages */
- if (unlikely(PageError(page))) {
- DBG_BUGON(PageUptodate(page));
- err = -EIO;
- }
- }
-
- if (unlikely(err))
- goto out;
-
- if (nr_pages << PAGE_SHIFT >= work->pageofs + grp->llen) {
- outputsize = grp->llen;
- partial = !(grp->flags & Z_EROFS_VLE_WORKGRP_FULL_LENGTH);
- } else {
- outputsize = (nr_pages << PAGE_SHIFT) - work->pageofs;
- partial = true;
- }
-
- if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN)
- algorithm = Z_EROFS_COMPRESSION_SHIFTED;
- else
- algorithm = Z_EROFS_COMPRESSION_LZ4;
-
- err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
- .sb = sb,
- .in = compressed_pages,
- .out = pages,
- .pageofs_out = work->pageofs,
- .inputsize = PAGE_SIZE,
- .outputsize = outputsize,
- .alg = algorithm,
- .inplace_io = overlapped,
- .partial_decoding = partial
- }, page_pool);
-
-out:
- /* must handle all compressed pages before ending the pages */
- for (i = 0; i < clusterpages; ++i) {
- page = compressed_pages[i];
-
- if (erofs_page_is_managed(sbi, page))
- continue;
-
- /* recycle all individual staging pages */
- (void)z_erofs_put_stagingpage(page_pool, page);
-
- WRITE_ONCE(compressed_pages[i], NULL);
- }
-
- for (i = 0; i < nr_pages; ++i) {
- page = pages[i];
- if (!page)
- continue;
-
- DBG_BUGON(!page->mapping);
-
- /* recycle all individual staging pages */
- if (z_erofs_put_stagingpage(page_pool, page))
- continue;
-
- if (unlikely(err < 0))
- SetPageError(page);
-
- z_erofs_onlinepage_endio(page);
- }
-
- if (pages == z_pagemap_global)
- mutex_unlock(&z_pagemap_global_lock);
- else if (unlikely(pages != pages_onstack))
- kvfree(pages);
-
- work->nr_pages = 0;
- work->vcnt = 0;
-
- /* all work locks MUST be taken before the following line */
-
- WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
-
- /* all work locks SHOULD be released right now */
- mutex_unlock(&work->lock);
-
- z_erofs_vle_work_release(work);
- return err;
-}
-
-static void z_erofs_vle_unzip_all(struct super_block *sb,
- struct z_erofs_vle_unzip_io *io,
- struct list_head *page_pool)
-{
- z_erofs_vle_owned_workgrp_t owned = io->head;
-
- while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
- struct z_erofs_vle_workgroup *grp;
-
- /* it's impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
- DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
-
- /* it's impossible that 'owned' equals NULL */
- DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
-
- grp = container_of(owned, struct z_erofs_vle_workgroup, next);
- owned = READ_ONCE(grp->next);
-
- z_erofs_vle_unzip(sb, grp, page_pool);
- }
-}
-
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
-{
- struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
- struct z_erofs_vle_unzip_io_sb, io.u.work);
- LIST_HEAD(page_pool);
-
- DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
- z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
-
- put_pages_list(&page_pool);
- kvfree(iosb);
-}
-
-static struct page *
-pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
- unsigned int nr,
- struct list_head *pagepool,
- struct address_space *mc,
- gfp_t gfp)
-{
- /* determined at compile time to avoid too many #ifdefs */
- const bool nocache = __builtin_constant_p(mc) ? !mc : false;
- const pgoff_t index = grp->obj.index;
- bool tocache = false;
-
- struct address_space *mapping;
- struct page *oldpage, *page;
-
- compressed_page_t t;
- int justfound;
-
-repeat:
- page = READ_ONCE(grp->compressed_pages[nr]);
- oldpage = page;
-
- if (!page)
- goto out_allocpage;
-
- /*
- * the cached page has not been allocated and
- * a placeholder is out there, prepare it now.
- */
- if (!nocache && page == PAGE_UNALLOCATED) {
- tocache = true;
- goto out_allocpage;
- }
-
- /* process the target tagged pointer */
- t = tagptr_init(compressed_page_t, page);
- justfound = tagptr_unfold_tags(t);
- page = tagptr_unfold_ptr(t);
-
- mapping = READ_ONCE(page->mapping);
-
- /*
- * if managed cache is disabled, there's no way to
- * get such a cached-like page.
- */
- if (nocache) {
- /* if managed cache is disabled, the page can't possibly be `justfound' */
- DBG_BUGON(justfound);
-
- /* and it should be locked, not uptodate, and not truncated */
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(!mapping);
- goto out;
- }
-
- /*
- * unmanaged (file) pages are all locked solidly,
- * therefore it is impossible for `mapping' to be NULL.
- */
- if (mapping && mapping != mc)
- /* ought to be unmanaged pages */
- goto out;
-
- lock_page(page);
-
- /* only true if page reclaim goes wrong, should never happen */
- DBG_BUGON(justfound && PagePrivate(page));
-
- /* the page is still in the managed cache */
- if (page->mapping == mc) {
- WRITE_ONCE(grp->compressed_pages[nr], page);
-
- ClearPageError(page);
- if (!PagePrivate(page)) {
- /*
- * impossible to be !PagePrivate(page) under
- * the current restriction if
- * the page is already in compressed_pages[].
- */
- DBG_BUGON(!justfound);
-
- justfound = 0;
- set_page_private(page, (unsigned long)grp);
- SetPagePrivate(page);
- }
-
- /* no need to submit io if it is already up-to-date */
- if (PageUptodate(page)) {
- unlock_page(page);
- page = NULL;
- }
- goto out;
- }
-
- /*
- * the managed page has been truncated, it's unsafe to
- * reuse this one, let's allocate a new cache-managed page.
- */
- DBG_BUGON(page->mapping);
- DBG_BUGON(!justfound);
-
- tocache = true;
- unlock_page(page);
- put_page(page);
-out_allocpage:
- page = __stagingpage_alloc(pagepool, gfp);
- if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
- list_add(&page->lru, pagepool);
- cpu_relax();
- goto repeat;
- }
- if (nocache || !tocache)
- goto out;
- if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
- page->mapping = Z_EROFS_MAPPING_STAGING;
- goto out;
- }
-
- set_page_private(page, (unsigned long)grp);
- SetPagePrivate(page);
-out: /* the only exit (for tracing and debugging) */
- return page;
-}
-
-static struct z_erofs_vle_unzip_io *
-jobqueue_init(struct super_block *sb,
- struct z_erofs_vle_unzip_io *io,
- bool foreground)
-{
- struct z_erofs_vle_unzip_io_sb *iosb;
-
- if (foreground) {
- /* waitqueue available for foreground io */
- DBG_BUGON(!io);
-
- init_waitqueue_head(&io->u.wait);
- atomic_set(&io->pending_bios, 0);
- goto out;
- }
-
- iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
- DBG_BUGON(!iosb);
-
- /* initialize fields in the allocated descriptor */
- io = &iosb->io;
- iosb->sb = sb;
- INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
-out:
- io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
- return io;
-}
-
-/* define workgroup jobqueue types */
-enum {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- JQ_BYPASS,
-#endif
- JQ_SUBMIT,
- NR_JOBQUEUES,
-};
-
-static void *jobqueueset_init(struct super_block *sb,
- z_erofs_vle_owned_workgrp_t qtail[],
- struct z_erofs_vle_unzip_io *q[],
- struct z_erofs_vle_unzip_io *fgq,
- bool forcefg)
-{
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- /*
-	 * if managed cache is enabled, a bypass jobqueue is needed:
-	 * workgroups in this queue need no reads from the device.
- */
- q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
- qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-#endif
-
- q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
- qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
-
- return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
-}
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
- z_erofs_vle_owned_workgrp_t qtail[],
- z_erofs_vle_owned_workgrp_t owned_head)
-{
- z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
- z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
-
- DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
- if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
- owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
-
- WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
-
- WRITE_ONCE(*submit_qtail, owned_head);
- WRITE_ONCE(*bypass_qtail, &grp->next);
-
- qtail[JQ_BYPASS] = &grp->next;
-}
-
-static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
- unsigned int nr_bios,
- bool force_fg)
-{
- /*
-	 * although background is preferred, nothing is pending for submission;
-	 * don't issue a workqueue for decompression, just drop it directly.
- */
- if (force_fg || nr_bios)
- return false;
-
- kvfree(container_of(q[JQ_SUBMIT],
- struct z_erofs_vle_unzip_io_sb,
- io));
- return true;
-}
-#else
-static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
- z_erofs_vle_owned_workgrp_t qtail[],
- z_erofs_vle_owned_workgrp_t owned_head)
-{
-	/* impossible to bypass submission with managed cache disabled */
- DBG_BUGON(1);
-}
-
-static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
- unsigned int nr_bios,
- bool force_fg)
-{
- /* bios should be >0 if managed cache is disabled */
- DBG_BUGON(!nr_bios);
- return false;
-}
-#endif
-
-static bool z_erofs_vle_submit_all(struct super_block *sb,
- z_erofs_vle_owned_workgrp_t owned_head,
- struct list_head *pagepool,
- struct z_erofs_vle_unzip_io *fgq,
- bool force_fg)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- const unsigned int clusterpages = erofs_clusterpages(sbi);
- const gfp_t gfp = GFP_NOFS;
-
- z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
- struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
- struct bio *bio;
- void *bi_private;
- /* since bio will be NULL, no need to initialize last_index */
- pgoff_t uninitialized_var(last_index);
- bool force_submit = false;
- unsigned int nr_bios;
-
- if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
- return false;
-
- force_submit = false;
- bio = NULL;
- nr_bios = 0;
- bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
-
- /* by default, all need io submission */
- q[JQ_SUBMIT]->head = owned_head;
-
- do {
- struct z_erofs_vle_workgroup *grp;
- pgoff_t first_index;
- struct page *page;
- unsigned int i = 0, bypass = 0;
- int err;
-
-		/* 'owned_head' cannot equal either of the following */
- DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
- DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
-
- grp = container_of(owned_head,
- struct z_erofs_vle_workgroup, next);
-
-		/* close the main owned chain first */
- owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
- Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
-
- first_index = grp->obj.index;
- force_submit |= (first_index != last_index + 1);
-
-repeat:
- page = pickup_page_for_submission(grp, i, pagepool,
- MNGD_MAPPING(sbi), gfp);
- if (!page) {
- force_submit = true;
- ++bypass;
- goto skippage;
- }
-
- if (bio && force_submit) {
-submit_bio_retry:
- __submit_bio(bio, REQ_OP_READ, 0);
- bio = NULL;
- }
-
- if (!bio) {
- bio = erofs_grab_bio(sb, first_index + i,
- BIO_MAX_PAGES, bi_private,
- z_erofs_vle_read_endio, true);
- ++nr_bios;
- }
-
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
-
- force_submit = false;
- last_index = first_index + i;
-skippage:
- if (++i < clusterpages)
- goto repeat;
-
- if (bypass < clusterpages)
- qtail[JQ_SUBMIT] = &grp->next;
- else
- move_to_bypass_jobqueue(grp, qtail, owned_head);
- } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
-
- if (bio)
- __submit_bio(bio, REQ_OP_READ, 0);
-
- if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
- return true;
-
- z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
- return true;
-}
-
-static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
- struct list_head *pagepool,
- bool force_fg)
-{
- struct super_block *sb = f->inode->i_sb;
- struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
-
- if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
- return;
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
-#endif
- if (!force_fg)
- return;
-
- /* wait until all bios are completed */
- wait_event(io[JQ_SUBMIT].u.wait,
- !atomic_read(&io[JQ_SUBMIT].pending_bios));
-
-	/* let's do synchronous decompression */
- z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
-}
-
-static int z_erofs_vle_normalaccess_readpage(struct file *file,
- struct page *page)
-{
- struct inode *const inode = page->mapping->host;
- struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
- int err;
- LIST_HEAD(pagepool);
-
- trace_erofs_readpage(page, false);
-
- f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
-
- err = z_erofs_do_read_page(&f, page, &pagepool);
- (void)z_erofs_vle_work_iter_end(&f.builder);
-
- if (err) {
- errln("%s, failed to read, err [%d]", __func__, err);
- goto out;
- }
-
- z_erofs_submit_and_unzip(&f, &pagepool, true);
-out:
- if (f.map.mpage)
- put_page(f.map.mpage);
-
- /* clean up the remaining free pages */
- put_pages_list(&pagepool);
- return 0;
-}
-
-static int z_erofs_vle_normalaccess_readpages(struct file *filp,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned int nr_pages)
-{
- struct inode *const inode = mapping->host;
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
- bool sync = __should_decompress_synchronously(sbi, nr_pages);
- struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
- gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
- struct page *head = NULL;
- LIST_HEAD(pagepool);
-
- trace_erofs_readpages(mapping->host, lru_to_page(pages),
- nr_pages, false);
-
- f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
-
- for (; nr_pages; --nr_pages) {
- struct page *page = lru_to_page(pages);
-
- prefetchw(&page->flags);
- list_del(&page->lru);
-
- /*
- * A pure asynchronous readahead is indicated if
-		 * a PG_readahead-marked page is hit first.
- * Let's also do asynchronous decompression for this case.
- */
- sync &= !(PageReadahead(page) && !head);
-
- if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
- list_add(&page->lru, &pagepool);
- continue;
- }
-
- set_page_private(page, (unsigned long)head);
- head = page;
- }
-
- while (head) {
- struct page *page = head;
- int err;
-
- /* traversal in reverse order */
- head = (void *)page_private(page);
-
- err = z_erofs_do_read_page(&f, page, &pagepool);
- if (err) {
- struct erofs_vnode *vi = EROFS_V(inode);
-
- errln("%s, readahead error at page %lu of nid %llu",
- __func__, page->index, vi->nid);
- }
-
- put_page(page);
- }
-
- (void)z_erofs_vle_work_iter_end(&f.builder);
-
- z_erofs_submit_and_unzip(&f, &pagepool, sync);
-
- if (f.map.mpage)
- put_page(f.map.mpage);
-
- /* clean up the remaining free pages */
- put_pages_list(&pagepool);
- return 0;
-}
-
-const struct address_space_operations z_erofs_vle_normalaccess_aops = {
- .readpage = z_erofs_vle_normalaccess_readpage,
- .readpages = z_erofs_vle_normalaccess_readpages,
-};
-
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
deleted file mode 100644
index ab509d75aefd..000000000000
--- a/drivers/staging/erofs/unzip_vle.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * linux/drivers/staging/erofs/unzip_vle.h
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#ifndef __EROFS_FS_UNZIP_VLE_H
-#define __EROFS_FS_UNZIP_VLE_H
-
-#include "internal.h"
-#include "unzip_pagevec.h"
-
-#define Z_EROFS_NR_INLINE_PAGEVECS 3
-
-/*
- * Structure fields follow one of the following exclusion rules.
- *
- * I: Modifiable by initialization/destruction paths and read-only
- * for everyone else.
- *
- */
-
-struct z_erofs_vle_work {
- struct mutex lock;
-
- /* I: decompression offset in page */
- unsigned short pageofs;
- unsigned short nr_pages;
-
- /* L: queued pages in pagevec[] */
- unsigned vcnt;
-
- union {
- /* L: pagevec */
- erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
- struct rcu_head rcu;
- };
-};
-
-#define Z_EROFS_VLE_WORKGRP_FMT_PLAIN 0
-#define Z_EROFS_VLE_WORKGRP_FMT_LZ4 1
-#define Z_EROFS_VLE_WORKGRP_FMT_MASK 1
-#define Z_EROFS_VLE_WORKGRP_FULL_LENGTH 2
-
-typedef void *z_erofs_vle_owned_workgrp_t;
-
-struct z_erofs_vle_workgroup {
- struct erofs_workgroup obj;
- struct z_erofs_vle_work work;
-
- /* point to next owned_workgrp_t */
- z_erofs_vle_owned_workgrp_t next;
-
- /* compressed pages (including multi-usage pages) */
- struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
- unsigned int llen, flags;
-};
-
-/* let's avoid the valid 32-bit kernel addresses */
-
-/* the chained workgroup hasn't submitted io (still open) */
-#define Z_EROFS_VLE_WORKGRP_TAIL ((void *)0x5F0ECAFE)
-/* the chained workgroup has already submitted io */
-#define Z_EROFS_VLE_WORKGRP_TAIL_CLOSED ((void *)0x5F0EDEAD)
-
-#define Z_EROFS_VLE_WORKGRP_NIL (NULL)
-
-#define z_erofs_vle_workgrp_fmt(grp) \
- ((grp)->flags & Z_EROFS_VLE_WORKGRP_FMT_MASK)
-
-static inline void z_erofs_vle_set_workgrp_fmt(
- struct z_erofs_vle_workgroup *grp,
- unsigned int fmt)
-{
- grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
-}
-
-
-/* definitions if multiref is disabled */
-#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
-#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
-#define z_erofs_vle_work_workgroup(wrk, primary) \
- ((primary) ? container_of(wrk, \
- struct z_erofs_vle_workgroup, work) : \
- ({ BUG(); (void *)NULL; }))
-
-
-#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
-
-struct z_erofs_vle_unzip_io {
- atomic_t pending_bios;
- z_erofs_vle_owned_workgrp_t head;
-
- union {
- wait_queue_head_t wait;
- struct work_struct work;
- } u;
-};
-
-struct z_erofs_vle_unzip_io_sb {
- struct z_erofs_vle_unzip_io io;
- struct super_block *sb;
-};
-
-#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
-#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
-#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
-
-/*
- * waiters (aka. ongoing_packs): # to unlock the page
- * sub-index: 0 - for partial page, >= 1 full page sub-index
- */
-typedef atomic_t z_erofs_onlinepage_t;
-
-/* type punning */
-union z_erofs_onlinepage_converter {
- z_erofs_onlinepage_t *o;
- unsigned long *v;
-};
-
-static inline unsigned z_erofs_onlinepage_index(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-}
-
-static inline void z_erofs_onlinepage_init(struct page *page)
-{
- union {
- z_erofs_onlinepage_t o;
- unsigned long v;
- /* keep from being unlocked in advance */
- } u = { .o = ATOMIC_INIT(1) };
-
- set_page_private(page, u.v);
- smp_wmb();
- SetPagePrivate(page);
-}
-
-static inline void z_erofs_onlinepage_fixup(struct page *page,
- uintptr_t index, bool down)
-{
- unsigned long *p, o, v, id;
-repeat:
- p = &page_private(page);
- o = READ_ONCE(*p);
-
- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (id) {
- if (!index)
- return;
-
- DBG_BUGON(id != index);
- }
-
- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned)down);
- if (cmpxchg(p, o, v) != o)
- goto repeat;
-}
-
-static inline void z_erofs_onlinepage_endio(struct page *page)
-{
- union z_erofs_onlinepage_converter u;
- unsigned v;
-
- DBG_BUGON(!PagePrivate(page));
- u.v = &page_private(page);
-
- v = atomic_dec_return(u.o);
- if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
- ClearPagePrivate(page);
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- }
-
- debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
-}
-
-#define Z_EROFS_VLE_VMAP_ONSTACK_PAGES \
- min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
-#define Z_EROFS_VLE_VMAP_GLOBAL_PAGES 2048
-
-#endif
-
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
deleted file mode 100644
index 4bbd3bf34acd..000000000000
--- a/drivers/staging/erofs/utils.c
+++ /dev/null
@@ -1,353 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/utils.c
- *
- * Copyright (C) 2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-
-#include "internal.h"
-#include <linux/pagevec.h>
-
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
-{
- struct page *page;
-
- if (!list_empty(pool)) {
- page = lru_to_page(pool);
- list_del(&page->lru);
- } else {
- page = alloc_pages(gfp | __GFP_NOFAIL, 0);
- }
- return page;
-}
-
-#if (EROFS_PCPUBUF_NR_PAGES > 0)
-static struct {
- u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
-} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
-
-void *erofs_get_pcpubuf(unsigned int pagenr)
-{
- preempt_disable();
- return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
-}
-#endif
-
-/* global shrink count (for all mounted EROFS instances) */
-static atomic_long_t erofs_global_shrink_cnt;
-
-#ifdef CONFIG_EROFS_FS_ZIP
-#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
-#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
-
-static int erofs_workgroup_get(struct erofs_workgroup *grp)
-{
- int o;
-
-repeat:
- o = erofs_wait_on_workgroup_freezed(grp);
- if (unlikely(o <= 0))
- return -1;
-
- if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
- goto repeat;
-
-	/* decrease the global shrink count; paired with erofs_workgroup_put */
- if (unlikely(o == 1))
- atomic_long_dec(&erofs_global_shrink_cnt);
- return 0;
-}
-
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_workgroup *grp;
-
-repeat:
- rcu_read_lock();
- grp = radix_tree_lookup(&sbi->workstn_tree, index);
- if (grp) {
- *tag = xa_pointer_tag(grp);
- grp = xa_untag_pointer(grp);
-
- if (erofs_workgroup_get(grp)) {
- /* prefer to relax rcu read side */
- rcu_read_unlock();
- goto repeat;
- }
-
- DBG_BUGON(index != grp->index);
- }
- rcu_read_unlock();
- return grp;
-}
-
-int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp,
- bool tag)
-{
- struct erofs_sb_info *sbi;
- int err;
-
- /* grp shouldn't be broken or used before */
- if (unlikely(atomic_read(&grp->refcount) != 1)) {
- DBG_BUGON(1);
- return -EINVAL;
- }
-
- err = radix_tree_preload(GFP_NOFS);
- if (err)
- return err;
-
- sbi = EROFS_SB(sb);
- erofs_workstn_lock(sbi);
-
- grp = xa_tag_pointer(grp, tag);
-
- /*
- * Bump up reference count before making this workgroup
- * visible to other users in order to avoid potential UAF
-	 * without being serialized by erofs_workstn_lock.
- */
- __erofs_workgroup_get(grp);
-
- err = radix_tree_insert(&sbi->workstn_tree,
- grp->index, grp);
- if (unlikely(err))
- /*
- * it's safe to decrease since the workgroup isn't visible
-		 * and refcount >= 2 (cannot be frozen).
- */
- __erofs_workgroup_put(grp);
-
- erofs_workstn_unlock(sbi);
- radix_tree_preload_end();
- return err;
-}
-
-static void __erofs_workgroup_free(struct erofs_workgroup *grp)
-{
- atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
-}
-
-int erofs_workgroup_put(struct erofs_workgroup *grp)
-{
- int count = atomic_dec_return(&grp->refcount);
-
- if (count == 1)
- atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count)
- __erofs_workgroup_free(grp);
- return count;
-}
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-/* for cache-managed case, customized reclaim paths exist */
-static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
-{
- erofs_workgroup_unfreeze(grp, 0);
- __erofs_workgroup_free(grp);
-}
-
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
-{
- /*
-	 * with managed cache enabled, the refcount of workgroups
-	 * themselves could be < 0 (frozen). So there is no guarantee
-	 * that all refcounts are > 0 if managed cache is enabled.
- */
- if (!erofs_workgroup_try_to_freeze(grp, 1))
- return false;
-
- /*
- * note that all cached pages should be unlinked
-	 * before deleting the workgroup from the radix tree.
-	 * Otherwise some cached pages of an orphan old workgroup
-	 * could still be linked after the new one is available.
- */
- if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
- erofs_workgroup_unfreeze(grp, 1);
- return false;
- }
-
- /*
-	 * it is impossible to fail after the workgroup is frozen,
- * however in order to avoid some race conditions, add a
- * DBG_BUGON to observe this in advance.
- */
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
-
- /*
-	 * if managed cache is enabled, the last refcount
- * should indicate the related workstation.
- */
- erofs_workgroup_unfreeze_final(grp);
- return true;
-}
-
-#else
-/* for nocache case, no customized reclaim path at all */
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
-{
- int cnt = atomic_read(&grp->refcount);
-
- DBG_BUGON(cnt <= 0);
- DBG_BUGON(cleanup && cnt != 1);
-
- if (cnt > 1)
- return false;
-
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
-
- /* (rarely) could be grabbed again when freeing */
- erofs_workgroup_put(grp);
- return true;
-}
-
-#endif
-
-unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink,
- bool cleanup)
-{
- pgoff_t first_index = 0;
- void *batch[PAGEVEC_SIZE];
- unsigned int freed = 0;
-
- int i, found;
-repeat:
- erofs_workstn_lock(sbi);
-
- found = radix_tree_gang_lookup(&sbi->workstn_tree,
- batch, first_index, PAGEVEC_SIZE);
-
- for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
-
- first_index = grp->index + 1;
-
- /* try to shrink each valid workgroup */
- if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
- continue;
-
- ++freed;
- if (unlikely(!--nr_shrink))
- break;
- }
- erofs_workstn_unlock(sbi);
-
- if (i && nr_shrink)
- goto repeat;
- return freed;
-}
-
-#endif
-
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
-
-/* protects the mounted 'erofs_sb_list' */
-static DEFINE_SPINLOCK(erofs_sb_list_lock);
-static LIST_HEAD(erofs_sb_list);
-
-void erofs_register_super(struct super_block *sb)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
-
- mutex_init(&sbi->umount_mutex);
-
- spin_lock(&erofs_sb_list_lock);
- list_add(&sbi->list, &erofs_sb_list);
- spin_unlock(&erofs_sb_list_lock);
-}
-
-void erofs_unregister_super(struct super_block *sb)
-{
- spin_lock(&erofs_sb_list_lock);
- list_del(&EROFS_SB(sb)->list);
- spin_unlock(&erofs_sb_list_lock);
-}
-
-static unsigned long erofs_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- return atomic_long_read(&erofs_global_shrink_cnt);
-}
-
-static unsigned long erofs_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct erofs_sb_info *sbi;
- struct list_head *p;
-
- unsigned long nr = sc->nr_to_scan;
- unsigned int run_no;
- unsigned long freed = 0;
-
- spin_lock(&erofs_sb_list_lock);
- do
- run_no = ++shrinker_run_no;
- while (run_no == 0);
-
- /* Iterate over all mounted superblocks and try to shrink them */
- p = erofs_sb_list.next;
- while (p != &erofs_sb_list) {
- sbi = list_entry(p, struct erofs_sb_info, list);
-
- /*
- * We move the ones we do to the end of the list, so we stop
- * when we see one we have already done.
- */
- if (sbi->shrinker_run_no == run_no)
- break;
-
- if (!mutex_trylock(&sbi->umount_mutex)) {
- p = p->next;
- continue;
- }
-
- spin_unlock(&erofs_sb_list_lock);
- sbi->shrinker_run_no = run_no;
-
-#ifdef CONFIG_EROFS_FS_ZIP
- freed += erofs_shrink_workstation(sbi, nr, false);
-#endif
-
- spin_lock(&erofs_sb_list_lock);
- /* Get the next list element before we move this one */
- p = p->next;
-
- /*
- * Move this one to the end of the list to provide some
- * fairness.
- */
- list_move_tail(&sbi->list, &erofs_sb_list);
- mutex_unlock(&sbi->umount_mutex);
-
- if (freed >= nr)
- break;
- }
- spin_unlock(&erofs_sb_list_lock);
- return freed;
-}
-
-struct shrinker erofs_shrinker_info = {
- .scan_objects = erofs_shrink_scan,
- .count_objects = erofs_shrink_count,
- .seeks = DEFAULT_SEEKS,
-};
-
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
deleted file mode 100644
index df40654b9fbb..000000000000
--- a/drivers/staging/erofs/xattr.c
+++ /dev/null
@@ -1,704 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/xattr.c
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#include <linux/security.h>
-#include "xattr.h"
-
-struct xattr_iter {
- struct super_block *sb;
- struct page *page;
- void *kaddr;
-
- erofs_blk_t blkaddr;
- unsigned int ofs;
-};
-
-static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
-{
- /* the only user of kunmap() is 'init_inode_xattrs' */
- if (unlikely(!atomic))
- kunmap(it->page);
- else
- kunmap_atomic(it->kaddr);
-
- unlock_page(it->page);
- put_page(it->page);
-}
-
-static inline void xattr_iter_end_final(struct xattr_iter *it)
-{
- if (!it->page)
- return;
-
- xattr_iter_end(it, true);
-}
-
-static int init_inode_xattrs(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct xattr_iter it;
- unsigned int i;
- struct erofs_xattr_ibody_header *ih;
- struct super_block *sb;
- struct erofs_sb_info *sbi;
- bool atomic_map;
- int ret = 0;
-
-	/* in most cases, the xattrs of this inode have already been initialized */
- if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
- return 0;
-
- if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
- return -ERESTARTSYS;
-
- /* someone has initialized xattrs for us? */
- if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
- goto out_unlock;
-
- /*
- * bypass all xattr operations if ->xattr_isize is not greater than
- * sizeof(struct erofs_xattr_ibody_header), in detail:
-	 * 1) if it cannot even hold an erofs_xattr_ibody_header, then
-	 *    ->xattr_isize should be 0 (it means no xattrs);
-	 * 2) if it holds exactly an erofs_xattr_ibody_header, the on-disk
-	 *    layout is undefined right now (may be used later by a new sb feature).
- */
- if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
- errln("xattr_isize %d of nid %llu is not supported yet",
- vi->xattr_isize, vi->nid);
- ret = -ENOTSUPP;
- goto out_unlock;
- } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
- if (unlikely(vi->xattr_isize)) {
- DBG_BUGON(1);
- ret = -EIO;
- goto out_unlock; /* xattr ondisk layout error */
- }
- ret = -ENOATTR;
- goto out_unlock;
- }
-
- sb = inode->i_sb;
- sbi = EROFS_SB(sb);
- it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
- it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
-
- it.page = erofs_get_inline_page(inode, it.blkaddr);
- if (IS_ERR(it.page)) {
- ret = PTR_ERR(it.page);
- goto out_unlock;
- }
-
- /* read in shared xattr array (non-atomic, see kmalloc below) */
- it.kaddr = kmap(it.page);
- atomic_map = false;
-
- ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
-
- vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
- sizeof(uint), GFP_KERNEL);
- if (!vi->xattr_shared_xattrs) {
- xattr_iter_end(&it, atomic_map);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- /* let's skip ibody header */
- it.ofs += sizeof(struct erofs_xattr_ibody_header);
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
- /* cannot be unaligned */
- BUG_ON(it.ofs != EROFS_BLKSIZ);
- xattr_iter_end(&it, atomic_map);
-
- it.page = erofs_get_meta_page(sb, ++it.blkaddr,
- S_ISDIR(inode->i_mode));
- if (IS_ERR(it.page)) {
- kfree(vi->xattr_shared_xattrs);
- vi->xattr_shared_xattrs = NULL;
- ret = PTR_ERR(it.page);
- goto out_unlock;
- }
-
- it.kaddr = kmap_atomic(it.page);
- atomic_map = true;
- it.ofs = 0;
- }
- vi->xattr_shared_xattrs[i] =
- le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
- it.ofs += sizeof(__le32);
- }
- xattr_iter_end(&it, atomic_map);
-
- set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
-
-out_unlock:
- clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
- return ret;
-}
-
-/*
- * the general idea for these return values is
- * if 0 is returned, go on processing the current xattr;
- * if 1 (> 0) is returned, skip this round to process the next xattr;
- * if -err (< 0) is returned, an error (maybe ENOXATTR) occurred
- * and needs to be handled
- */
-struct xattr_iter_handlers {
- int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
- int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
- int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
- void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
-};
-
-static inline int xattr_iter_fixup(struct xattr_iter *it)
-{
- if (it->ofs < EROFS_BLKSIZ)
- return 0;
-
- xattr_iter_end(it, true);
-
- it->blkaddr += erofs_blknr(it->ofs);
-
- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
- if (IS_ERR(it->page)) {
- int err = PTR_ERR(it->page);
-
- it->page = NULL;
- return err;
- }
-
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
- return 0;
-}
-
-static int inline_xattr_iter_begin(struct xattr_iter *it,
- struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
- unsigned int xattr_header_sz, inline_xattr_ofs;
-
- xattr_header_sz = inlinexattr_header_size(inode);
- if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
- BUG_ON(xattr_header_sz > vi->xattr_isize);
- return -ENOATTR;
- }
-
- inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
-
- it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
- it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
-
- it->page = erofs_get_inline_page(inode, it->blkaddr);
- if (IS_ERR(it->page))
- return PTR_ERR(it->page);
-
- it->kaddr = kmap_atomic(it->page);
- return vi->xattr_isize - xattr_header_sz;
-}
-
-/*
- * Regardless of success or failure, `xattr_foreach' will end up with
- * `ofs' pointing to the next xattr item rather than an arbitrary position.
- */
-static int xattr_foreach(struct xattr_iter *it,
- const struct xattr_iter_handlers *op,
- unsigned int *tlimit)
-{
- struct erofs_xattr_entry entry;
- unsigned int value_sz, processed, slice;
- int err;
-
- /* 0. fixup blkaddr, ofs, ipage */
- err = xattr_iter_fixup(it);
- if (err)
- return err;
-
- /*
-	 * 1. read the xattr entry into memory;
-	 *    since entries are EROFS_XATTR_ALIGN-aligned,
-	 *    the whole entry should be within this page
- */
- entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
- if (tlimit) {
- unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);
-
- BUG_ON(*tlimit < entry_sz);
- *tlimit -= entry_sz;
- }
-
- it->ofs += sizeof(struct erofs_xattr_entry);
- value_sz = le16_to_cpu(entry.e_value_size);
-
- /* handle entry */
- err = op->entry(it, &entry);
- if (err) {
- it->ofs += entry.e_name_len + value_sz;
- goto out;
- }
-
- /* 2. handle xattr name (ofs will finally be at the end of name) */
- processed = 0;
-
- while (processed < entry.e_name_len) {
- if (it->ofs >= EROFS_BLKSIZ) {
- BUG_ON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- entry.e_name_len - processed);
-
- /* handle name */
- err = op->name(it, processed, it->kaddr + it->ofs, slice);
- if (err) {
- it->ofs += entry.e_name_len - processed + value_sz;
- goto out;
- }
-
- it->ofs += slice;
- processed += slice;
- }
-
- /* 3. handle xattr value */
- processed = 0;
-
- if (op->alloc_buffer) {
- err = op->alloc_buffer(it, value_sz);
- if (err) {
- it->ofs += value_sz;
- goto out;
- }
- }
-
- while (processed < value_sz) {
- if (it->ofs >= EROFS_BLKSIZ) {
- BUG_ON(it->ofs > EROFS_BLKSIZ);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
- value_sz - processed);
- op->value(it, processed, it->kaddr + it->ofs, slice);
- it->ofs += slice;
- processed += slice;
- }
-
-out:
- /* xattrs should be 4-byte aligned (on-disk constraint) */
- it->ofs = EROFS_XATTR_ALIGN(it->ofs);
- return err < 0 ? err : 0;
-}
-
-struct getxattr_iter {
- struct xattr_iter it;
-
- char *buffer;
- int buffer_size, index;
- struct qstr name;
-};
-
-static int xattr_entrymatch(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return (it->index != entry->e_name_index ||
- it->name.len != entry->e_name_len) ? -ENOATTR : 0;
-}
-
-static int xattr_namematch(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
-}
-
-static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
- int err = it->buffer_size < value_sz ? -ERANGE : 0;
-
- it->buffer_size = value_sz;
- return !it->buffer ? 1 : err;
-}
-
-static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned int processed,
- char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- memcpy(it->buffer + processed, buf, len);
-}
-
-static const struct xattr_iter_handlers find_xattr_handlers = {
- .entry = xattr_entrymatch,
- .name = xattr_namematch,
- .alloc_buffer = xattr_checkbuffer,
- .value = xattr_copyvalue
-};
-
-static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, inode);
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
- if (ret != -ENOATTR)
- break;
- }
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
-static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = -ENOATTR;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
-
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
-
- it->it.page = erofs_get_meta_page(sb, blkaddr, false);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
-
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
-
- ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
- if (ret != -ENOATTR)
- break;
- }
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_size;
-}
-
-static bool erofs_xattr_user_list(struct dentry *dentry)
-{
- return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
-}
-
-static bool erofs_xattr_trusted_list(struct dentry *dentry)
-{
- return capable(CAP_SYS_ADMIN);
-}
-
-int erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
-{
- int ret;
- struct getxattr_iter it;
-
- if (unlikely(!name))
- return -EINVAL;
-
- ret = init_inode_xattrs(inode);
- if (ret)
- return ret;
-
- it.index = index;
-
- it.name.len = strlen(name);
- if (it.name.len > EROFS_NAME_LEN)
- return -ERANGE;
- it.name.name = name;
-
- it.buffer = buffer;
- it.buffer_size = buffer_size;
-
- it.it.sb = inode->i_sb;
- ret = inline_getxattr(inode, &it);
- if (ret == -ENOATTR)
- ret = shared_getxattr(inode, &it);
- return ret;
-}
-
-static int erofs_xattr_generic_get(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, void *buffer, size_t size)
-{
- struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
-
- switch (handler->flags) {
- case EROFS_XATTR_INDEX_USER:
- if (!test_opt(sbi, XATTR_USER))
- return -EOPNOTSUPP;
- break;
- case EROFS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
- case EROFS_XATTR_INDEX_SECURITY:
- break;
- default:
- return -EINVAL;
- }
-
- return erofs_getxattr(inode, handler->flags, name, buffer, size);
-}
-
-const struct xattr_handler erofs_xattr_user_handler = {
- .prefix = XATTR_USER_PREFIX,
- .flags = EROFS_XATTR_INDEX_USER,
- .list = erofs_xattr_user_list,
- .get = erofs_xattr_generic_get,
-};
-
-const struct xattr_handler erofs_xattr_trusted_handler = {
- .prefix = XATTR_TRUSTED_PREFIX,
- .flags = EROFS_XATTR_INDEX_TRUSTED,
- .list = erofs_xattr_trusted_list,
- .get = erofs_xattr_generic_get,
-};
-
-#ifdef CONFIG_EROFS_FS_SECURITY
-const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .flags = EROFS_XATTR_INDEX_SECURITY,
- .get = erofs_xattr_generic_get,
-};
-#endif
-
-const struct xattr_handler *erofs_xattr_handlers[] = {
- &erofs_xattr_user_handler,
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
- &erofs_xattr_trusted_handler,
-#ifdef CONFIG_EROFS_FS_SECURITY
- &erofs_xattr_security_handler,
-#endif
- NULL,
-};
-
-struct listxattr_iter {
- struct xattr_iter it;
-
- struct dentry *dentry;
- char *buffer;
- int buffer_size, buffer_ofs;
-};
-
-static int xattr_entrylist(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
- unsigned int prefix_len;
- const char *prefix;
-
- const struct xattr_handler *h =
- erofs_xattr_handler(entry->e_name_index);
-
- if (!h || (h->list && !h->list(it->dentry)))
- return 1;
-
- prefix = xattr_prefix(h);
- prefix_len = strlen(prefix);
-
- if (!it->buffer) {
- it->buffer_ofs += prefix_len + entry->e_name_len + 1;
- return 1;
- }
-
- if (it->buffer_ofs + prefix_len
- + entry->e_name_len + 1 > it->buffer_size)
- return -ERANGE;
-
- memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
- it->buffer_ofs += prefix_len;
- return 0;
-}
-
-static int xattr_namelist(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
-
- memcpy(it->buffer + it->buffer_ofs, buf, len);
- it->buffer_ofs += len;
- return 0;
-}
-
-static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
-
- it->buffer[it->buffer_ofs++] = '\0';
- return 1;
-}
-
-static const struct xattr_iter_handlers list_xattr_handlers = {
- .entry = xattr_entrylist,
- .name = xattr_namelist,
- .alloc_buffer = xattr_skipvalue,
- .value = NULL
-};
-
-static int inline_listxattr(struct listxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
- if (ret)
- break;
- }
- xattr_iter_end_final(&it->it);
- return ret ? ret : it->buffer_ofs;
-}
-
-static int shared_listxattr(struct listxattr_iter *it)
-{
- struct inode *const inode = d_inode(it->dentry);
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- erofs_blk_t blkaddr =
- xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
-
- it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
- if (!i || blkaddr != it->it.blkaddr) {
- if (i)
- xattr_iter_end(&it->it, true);
-
- it->it.page = erofs_get_meta_page(sb, blkaddr, false);
- if (IS_ERR(it->it.page))
- return PTR_ERR(it->it.page);
-
- it->it.kaddr = kmap_atomic(it->it.page);
- it->it.blkaddr = blkaddr;
- }
-
- ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
- if (ret)
- break;
- }
- if (vi->xattr_shared_count)
- xattr_iter_end_final(&it->it);
-
- return ret ? ret : it->buffer_ofs;
-}
-
-ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
-{
- int ret;
- struct listxattr_iter it;
-
- ret = init_inode_xattrs(d_inode(dentry));
- if (ret)
- return ret;
-
- it.dentry = dentry;
- it.buffer = buffer;
- it.buffer_size = buffer_size;
- it.buffer_ofs = 0;
-
- it.it.sb = dentry->d_sb;
-
- ret = inline_listxattr(&it);
- if (ret < 0 && ret != -ENOATTR)
- return ret;
- return shared_listxattr(&it);
-}
-
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-struct posix_acl *erofs_get_acl(struct inode *inode, int type)
-{
- struct posix_acl *acl;
- int prefix, rc;
- char *value = NULL;
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
- break;
- case ACL_TYPE_DEFAULT:
- prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- rc = erofs_getxattr(inode, prefix, "", NULL, 0);
- if (rc > 0) {
- value = kmalloc(rc, GFP_KERNEL);
- if (!value)
- return ERR_PTR(-ENOMEM);
- rc = erofs_getxattr(inode, prefix, "", value, rc);
- }
-
- if (rc == -ENOATTR)
- acl = NULL;
- else if (rc < 0)
- acl = ERR_PTR(rc);
- else
- acl = posix_acl_from_xattr(&init_user_ns, value, rc);
- kfree(value);
- return acl;
-}
-#endif
-
diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h
deleted file mode 100644
index 35ba5ac2139a..000000000000
--- a/drivers/staging/erofs/xattr.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * linux/drivers/staging/erofs/xattr.h
- *
- * Copyright (C) 2017-2018 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- */
-#ifndef __EROFS_XATTR_H
-#define __EROFS_XATTR_H
-
-#include "internal.h"
-#include <linux/posix_acl_xattr.h>
-#include <linux/xattr.h>
-
-/* Attribute not found */
-#define ENOATTR ENODATA
-
-static inline unsigned inlinexattr_header_size(struct inode *inode)
-{
- return sizeof(struct erofs_xattr_ibody_header)
- + sizeof(u32) * EROFS_V(inode)->xattr_shared_count;
-}
-
-static inline erofs_blk_t
-xattrblock_addr(struct erofs_sb_info *sbi, unsigned xattr_id)
-{
-#ifdef CONFIG_EROFS_FS_XATTR
- return sbi->xattr_blkaddr +
- xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
-#else
- return 0;
-#endif
-}
-
-static inline unsigned
-xattrblock_offset(struct erofs_sb_info *sbi, unsigned xattr_id)
-{
- return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
-}
-
-extern const struct xattr_handler erofs_xattr_user_handler;
-extern const struct xattr_handler erofs_xattr_trusted_handler;
-#ifdef CONFIG_EROFS_FS_SECURITY
-extern const struct xattr_handler erofs_xattr_security_handler;
-#endif
-
-static inline const struct xattr_handler *erofs_xattr_handler(unsigned index)
-{
-static const struct xattr_handler *xattr_handler_map[] = {
- [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
-#endif
- [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
-#ifdef CONFIG_EROFS_FS_SECURITY
- [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
-#endif
-};
- return index && index < ARRAY_SIZE(xattr_handler_map) ?
- xattr_handler_map[index] : NULL;
-}
-
-#ifdef CONFIG_EROFS_FS_XATTR
-extern const struct xattr_handler *erofs_xattr_handlers[];
-
-int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
-ssize_t erofs_listxattr(struct dentry *, char *, size_t);
-#else
-static int __maybe_unused erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
-{
- return -ENOTSUPP;
-}
-
-static ssize_t __maybe_unused erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
-{
- return -ENOTSUPP;
-}
-#endif
-
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-struct posix_acl *erofs_get_acl(struct inode *inode, int type);
-#else
-#define erofs_get_acl (NULL)
-#endif
-
-#endif
-
diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c
deleted file mode 100644
index 9c0bd65c46bf..000000000000
--- a/drivers/staging/erofs/zmap.c
+++ /dev/null
@@ -1,463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/drivers/staging/erofs/zmap.c
- *
- * Copyright (C) 2018-2019 HUAWEI, Inc.
- * http://www.huawei.com/
- * Created by Gao Xiang <gaoxiang25@huawei.com>
- */
-#include "internal.h"
-#include <asm/unaligned.h>
-#include <trace/events/erofs.h>
-
-int z_erofs_fill_inode(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
-
- if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
- vi->z_advise = 0;
- vi->z_algorithmtype[0] = 0;
- vi->z_algorithmtype[1] = 0;
- vi->z_logical_clusterbits = EROFS_SB(sb)->clusterbits;
- vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
- vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
- set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
- }
-
- inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
- return 0;
-}
-
-static int fill_inode_lazy(struct inode *inode)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct super_block *const sb = inode->i_sb;
- int err;
- erofs_off_t pos;
- struct page *page;
- void *kaddr;
- struct z_erofs_map_header *h;
-
- if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
- return 0;
-
- if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
- return -ERESTARTSYS;
-
- err = 0;
- if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
- goto out_unlock;
-
- DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
-
- pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
- vi->xattr_isize, 8);
- page = erofs_get_meta_page(sb, erofs_blknr(pos), false);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto out_unlock;
- }
-
- kaddr = kmap_atomic(page);
-
- h = kaddr + erofs_blkoff(pos);
- vi->z_advise = le16_to_cpu(h->h_advise);
- vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
- vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
-
- if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
- errln("unknown compression format %u for nid %llu, please upgrade kernel",
- vi->z_algorithmtype[0], vi->nid);
- err = -ENOTSUPP;
- goto unmap_done;
- }
-
- vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
- vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
- ((h->h_clusterbits >> 3) & 3);
-
- if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
- errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
- vi->z_physical_clusterbits[0], vi->nid);
- err = -ENOTSUPP;
- goto unmap_done;
- }
-
- vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
- ((h->h_clusterbits >> 5) & 7);
-unmap_done:
- kunmap_atomic(kaddr);
- unlock_page(page);
- put_page(page);
-
- set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
-out_unlock:
- clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
- return err;
-}
-
-struct z_erofs_maprecorder {
- struct inode *inode;
- struct erofs_map_blocks *map;
- void *kaddr;
-
- unsigned long lcn;
- /* compression extent information gathered */
- u8 type;
- u16 clusterofs;
- u16 delta[2];
- erofs_blk_t pblk;
-};
-
-static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
- erofs_blk_t eblk)
-{
- struct super_block *const sb = m->inode->i_sb;
- struct erofs_map_blocks *const map = m->map;
- struct page *mpage = map->mpage;
-
- if (mpage) {
- if (mpage->index == eblk) {
- if (!m->kaddr)
- m->kaddr = kmap_atomic(mpage);
- return 0;
- }
-
- if (m->kaddr) {
- kunmap_atomic(m->kaddr);
- m->kaddr = NULL;
- }
- put_page(mpage);
- }
-
- mpage = erofs_get_meta_page(sb, eblk, false);
- if (IS_ERR(mpage)) {
- map->mpage = NULL;
- return PTR_ERR(mpage);
- }
- m->kaddr = kmap_atomic(mpage);
- unlock_page(mpage);
- map->mpage = mpage;
- return 0;
-}
-
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
-{
- struct inode *const inode = m->inode;
- struct erofs_vnode *const vi = EROFS_V(inode);
- const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
- const erofs_off_t pos =
- Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
- vi->xattr_isize) +
- lcn * sizeof(struct z_erofs_vle_decompressed_index);
- struct z_erofs_vle_decompressed_index *di;
- unsigned int advise, type;
- int err;
-
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
- if (err)
- return err;
-
- m->lcn = lcn;
- di = m->kaddr + erofs_blkoff(pos);
-
- advise = le16_to_cpu(di->di_advise);
- type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
- ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
- switch (type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- m->clusterofs = 1 << vi->z_logical_clusterbits;
- m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
- m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
- break;
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- m->clusterofs = le16_to_cpu(di->di_clusterofs);
- m->pblk = le32_to_cpu(di->di_u.blkaddr);
- break;
- default:
- DBG_BUGON(1);
- return -EIO;
- }
- m->type = type;
- return 0;
-}
-
-static unsigned int decode_compactedbits(unsigned int lobits,
- unsigned int lomask,
- u8 *in, unsigned int pos, u8 *type)
-{
- const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
- const unsigned int lo = v & lomask;
-
- *type = (v >> lobits) & 3;
- return lo;
-}
-
-static int unpack_compacted_index(struct z_erofs_maprecorder *m,
- unsigned int amortizedshift,
- unsigned int eofs)
-{
- struct erofs_vnode *const vi = EROFS_V(m->inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const unsigned int lomask = (1 << lclusterbits) - 1;
- unsigned int vcnt, base, lo, encodebits, nblk;
- int i;
- u8 *in, type;
-
- if (1 << amortizedshift == 4)
- vcnt = 2;
- else if (1 << amortizedshift == 2 && lclusterbits == 12)
- vcnt = 16;
- else
- return -ENOTSUPP;
-
- encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
- base = round_down(eofs, vcnt << amortizedshift);
- in = m->kaddr + base;
-
- i = (eofs - base) >> amortizedshift;
-
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- m->type = type;
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
- m->clusterofs = 1 << lclusterbits;
- if (i + 1 != vcnt) {
- m->delta[0] = lo;
- return 0;
- }
- /*
-		 * the last lcluster in the pack is special:
-		 * its lo saves delta[1] rather than delta[0].
-		 * Hence, get delta[0] indirectly from the previous lcluster.
- */
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * (i - 1), &type);
- if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
- lo = 0;
- m->delta[0] = lo + 1;
- return 0;
- }
- m->clusterofs = lo;
- m->delta[0] = 0;
-	/* figure out blkaddr (pblk) for HEAD lclusters */
- nblk = 1;
- while (i > 0) {
- --i;
- lo = decode_compactedbits(lclusterbits, lomask,
- in, encodebits * i, &type);
- if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
- i -= lo;
-
- if (i >= 0)
- ++nblk;
- }
- in += (vcnt << amortizedshift) - sizeof(__le32);
- m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
- return 0;
-}
-
-static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
-{
- struct inode *const inode = m->inode;
- struct erofs_vnode *const vi = EROFS_V(inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
- vi->inode_isize + vi->xattr_isize, 8) +
- sizeof(struct z_erofs_map_header);
- const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
- unsigned int compacted_4b_initial, compacted_2b;
- unsigned int amortizedshift;
- erofs_off_t pos;
- int err;
-
- if (lclusterbits != 12)
- return -ENOTSUPP;
-
- if (lcn >= totalidx)
- return -EINVAL;
-
- m->lcn = lcn;
-	/* used to reach the 32-byte (compacted_2b) alignment */
- compacted_4b_initial = (32 - ebase % 32) / 4;
- if (compacted_4b_initial == 32 / 4)
- compacted_4b_initial = 0;
-
- if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
- compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
- else
- compacted_2b = 0;
-
- pos = ebase;
- if (lcn < compacted_4b_initial) {
- amortizedshift = 2;
- goto out;
- }
- pos += compacted_4b_initial * 4;
- lcn -= compacted_4b_initial;
-
- if (lcn < compacted_2b) {
- amortizedshift = 1;
- goto out;
- }
- pos += compacted_2b * 2;
- lcn -= compacted_2b;
- amortizedshift = 2;
-out:
- pos += lcn * (1 << amortizedshift);
- err = z_erofs_reload_indexes(m, erofs_blknr(pos));
- if (err)
- return err;
- return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
-}
-
-static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn)
-{
- const unsigned int datamode = EROFS_V(m->inode)->datamode;
-
- if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
- return vle_legacy_load_cluster_from_disk(m, lcn);
-
- if (datamode == EROFS_INODE_FLAT_COMPRESSION)
- return compacted_load_cluster_from_disk(m, lcn);
-
- return -EINVAL;
-}
-
-static int vle_extent_lookback(struct z_erofs_maprecorder *m,
- unsigned int lookback_distance)
-{
- struct erofs_vnode *const vi = EROFS_V(m->inode);
- struct erofs_map_blocks *const map = m->map;
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
- unsigned long lcn = m->lcn;
- int err;
-
- if (lcn < lookback_distance) {
- DBG_BUGON(1);
- return -EIO;
- }
-
- /* load extent head logical cluster if needed */
- lcn -= lookback_distance;
- err = vle_load_cluster_from_disk(m, lcn);
- if (err)
- return err;
-
- switch (m->type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
- return vle_extent_lookback(m, m->delta[0]);
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- map->m_la = (lcn << lclusterbits) | m->clusterofs;
- break;
- default:
- errln("unknown type %u at lcn %lu of nid %llu",
- m->type, lcn, vi->nid);
- DBG_BUGON(1);
- return -EIO;
- }
- return 0;
-}
-
-int z_erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- int flags)
-{
- struct erofs_vnode *const vi = EROFS_V(inode);
- struct z_erofs_maprecorder m = {
- .inode = inode,
- .map = map,
- };
- int err = 0;
- unsigned int lclusterbits, endoff;
- unsigned long long ofs, end;
-
- trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
-
- /* when trying to read beyond EOF, leave it unmapped */
- if (unlikely(map->m_la >= inode->i_size)) {
- map->m_llen = map->m_la + 1 - inode->i_size;
- map->m_la = inode->i_size;
- map->m_flags = 0;
- goto out;
- }
-
- err = fill_inode_lazy(inode);
- if (err)
- goto out;
-
- lclusterbits = vi->z_logical_clusterbits;
- ofs = map->m_la;
- m.lcn = ofs >> lclusterbits;
- endoff = ofs & ((1 << lclusterbits) - 1);
-
- err = vle_load_cluster_from_disk(&m, m.lcn);
- if (err)
- goto unmap_out;
-
- map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */
- end = (m.lcn + 1ULL) << lclusterbits;
-
- switch (m.type) {
- case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
- if (endoff >= m.clusterofs)
- map->m_flags &= ~EROFS_MAP_ZIPPED;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
- if (endoff >= m.clusterofs) {
- map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
- break;
- }
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (unlikely(!m.lcn)) {
- errln("invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EIO;
- goto unmap_out;
- }
- end = (m.lcn << lclusterbits) | m.clusterofs;
- map->m_flags |= EROFS_MAP_FULL_MAPPED;
- m.delta[0] = 1;
- /* fallthrough */
- case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
-		/* get the corresponding first chunk */
- err = vle_extent_lookback(&m, m.delta[0]);
- if (unlikely(err))
- goto unmap_out;
- break;
- default:
- errln("unknown type %u at offset %llu of nid %llu",
- m.type, ofs, vi->nid);
- err = -EIO;
- goto unmap_out;
- }
-
- map->m_llen = end - map->m_la;
- map->m_plen = 1 << lclusterbits;
- map->m_pa = blknr_to_addr(m.pblk);
- map->m_flags |= EROFS_MAP_MAPPED;
-
-unmap_out:
- if (m.kaddr)
- kunmap_atomic(m.kaddr);
-
-out:
- debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
- __func__, map->m_la, map->m_pa,
- map->m_llen, map->m_plen, map->m_flags);
-
- trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
-
- /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
- DBG_BUGON(err < 0 && err != -ENOMEM);
- return err;
-}
-
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
new file mode 100644
index 000000000000..290dbfc7ace1
--- /dev/null
+++ b/drivers/staging/exfat/Kconfig
@@ -0,0 +1,49 @@
+config EXFAT_FS
+ tristate "exFAT fs support"
+ depends on BLOCK
+ select NLS
+ help
+ This adds support for the exFAT file system.
+
+config EXFAT_DONT_MOUNT_VFAT
+ bool "Prohibit mounting of fat/vfat filesysems by exFAT"
+ depends on EXFAT_FS
+ default y
+ help
+ By default, the exFAT driver will only mount exFAT filesystems, and refuse
+ to mount fat/vfat filesystems. Set this to 'n' to allow the exFAT driver
+ to mount these filesystems.
+
+config EXFAT_DISCARD
+ bool "enable discard support"
+ depends on EXFAT_FS
+ default y
+
+config EXFAT_DELAYED_SYNC
+ bool "enable delayed sync"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_KERNEL_DEBUG
+ bool "enable kernel debug features via ioctl"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_DEBUG_MSG
+ bool "print debug messages"
+ depends on EXFAT_FS
+ default n
+
+config EXFAT_DEFAULT_CODEPAGE
+ int "Default codepage for exFAT"
+ default 437
+ depends on EXFAT_FS
+ help
+ This option should be set to the codepage of your exFAT filesystems.
+
+config EXFAT_DEFAULT_IOCHARSET
+ string "Default iocharset for exFAT"
+ default "utf8"
+ depends on EXFAT_FS
+ help
+ Set this to the default input/output character set you'd like exFAT to use.
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
new file mode 100644
index 000000000000..84944dfbae28
--- /dev/null
+++ b/drivers/staging/exfat/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_EXFAT_FS) += exfat.o
+
+exfat-y := exfat_core.o \
+ exfat_super.o \
+ exfat_blkdev.o \
+ exfat_cache.o \
+ exfat_nls.o \
+ exfat_upcase.o
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
new file mode 100644
index 000000000000..a3eb282f9efc
--- /dev/null
+++ b/drivers/staging/exfat/TODO
@@ -0,0 +1,12 @@
+exfat_core.c - ffsReadFile - the goto err_out seems to leak a brelse().
+Same for ffsWriteFile.
+
+exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell.
+There's only one place that calls it with a non-zero argument.
+
+ffsTruncateFile - if (old_size <= new_size) {
+That doesn't look right. How did it ever work? Are they relying on lazy
+block allocation when actual writes happen? If nothing else, it never
+does the 'fid->size = new_size' or the inode update....
+
+ffsSetAttr() is just dangling in the breeze, not wired up at all...
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
new file mode 100644
index 000000000000..6c12f2d79f4d
--- /dev/null
+++ b/drivers/staging/exfat/exfat.h
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef _EXFAT_H
+#define _EXFAT_H
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ /* For Debugging Purpose */
+ /* IOCTL code 'f' used by
+ * - file systems typically #0~0x1F
+ * - embedded terminal devices #128~
+ * - exts for debugging purpose #99
+	 * numbers 100 and 101 are available now but have possible conflicts
+ */
+#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long)
+#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long)
+
+#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01
+#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+#ifdef CONFIG_EXFAT_DEBUG_MSG
+#define DEBUG 1
+#else
+#undef DEBUG
+#endif
+
+#define DENTRY_SIZE 32 /* dir entry size */
+#define DENTRY_SIZE_BITS 5
+
+/* PBR entries */
+#define PBR_SIGNATURE 0xAA55
+#define EXT_SIGNATURE 0xAA550000
+#define VOL_LABEL "NO NAME " /* size should be 11 */
+#define OEM_NAME "MSWIN4.1" /* size should be 8 */
+#define STR_FAT12 "FAT12 " /* size should be 8 */
+#define STR_FAT16 "FAT16 " /* size should be 8 */
+#define STR_FAT32 "FAT32 " /* size should be 8 */
+#define STR_EXFAT "EXFAT " /* size should be 8 */
+#define VOL_CLEAN 0x0000
+#define VOL_DIRTY 0x0002
+
+/* max number of clusters */
+#define FAT12_THRESHOLD 4087 /* 2^12 - 1 + 2 (clu 0 & 1) */
+#define FAT16_THRESHOLD 65527 /* 2^16 - 1 + 2 */
+#define FAT32_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
+#define EXFAT_THRESHOLD 268435457 /* 2^28 - 1 + 2 */
+
+/* file types */
+#define TYPE_UNUSED 0x0000
+#define TYPE_DELETED 0x0001
+#define TYPE_INVALID 0x0002
+#define TYPE_CRITICAL_PRI 0x0100
+#define TYPE_BITMAP 0x0101
+#define TYPE_UPCASE 0x0102
+#define TYPE_VOLUME 0x0103
+#define TYPE_DIR 0x0104
+#define TYPE_FILE 0x011F
+#define TYPE_SYMLINK 0x015F
+#define TYPE_CRITICAL_SEC 0x0200
+#define TYPE_STREAM 0x0201
+#define TYPE_EXTEND 0x0202
+#define TYPE_ACL 0x0203
+#define TYPE_BENIGN_PRI 0x0400
+#define TYPE_GUID 0x0401
+#define TYPE_PADDING 0x0402
+#define TYPE_ACLTAB 0x0403
+#define TYPE_BENIGN_SEC 0x0800
+#define TYPE_ALL 0x0FFF
+
+/* time modes */
+#define TM_CREATE 0
+#define TM_MODIFY 1
+#define TM_ACCESS 2
+
+/* checksum types */
+#define CS_DIR_ENTRY 0
+#define CS_PBR_SECTOR 1
+#define CS_DEFAULT 2
+
+#define CLUSTER_16(x) ((u16)(x))
+#define CLUSTER_32(x) ((u32)(x))
+
+#define START_SECTOR(x) \
+ ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \
+ p_fs->data_start_sector)
+
+#define IS_LAST_SECTOR_IN_CLUSTER(sec) \
+ ((((sec) - p_fs->data_start_sector + 1) & \
+ ((1 << p_fs->sectors_per_clu_bits) - 1)) == 0)
+
+#define GET_CLUSTER_FROM_SECTOR(sec) \
+ ((u32)((((sec) - p_fs->data_start_sector) >> \
+ p_fs->sectors_per_clu_bits) + 2))
+
+#define GET16(p_src) \
+ (((u16)(p_src)[0]) | (((u16)(p_src)[1]) << 8))
+#define GET32(p_src) \
+ (((u32)(p_src)[0]) | (((u32)(p_src)[1]) << 8) | \
+ (((u32)(p_src)[2]) << 16) | (((u32)(p_src)[3]) << 24))
+#define GET64(p_src) \
+ (((u64)(p_src)[0]) | (((u64)(p_src)[1]) << 8) | \
+ (((u64)(p_src)[2]) << 16) | (((u64)(p_src)[3]) << 24) | \
+ (((u64)(p_src)[4]) << 32) | (((u64)(p_src)[5]) << 40) | \
+ (((u64)(p_src)[6]) << 48) | (((u64)(p_src)[7]) << 56))
+
+#define SET16(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u16)(src)) >> 8); \
+ } while (0)
+#define SET32(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u32)(src)) >> 8); \
+ (p_dst)[2] = (u8)(((u32)(src)) >> 16); \
+ (p_dst)[3] = (u8)(((u32)(src)) >> 24); \
+ } while (0)
+#define SET64(p_dst, src) \
+ do { \
+ (p_dst)[0] = (u8)(src); \
+ (p_dst)[1] = (u8)(((u64)(src)) >> 8); \
+ (p_dst)[2] = (u8)(((u64)(src)) >> 16); \
+ (p_dst)[3] = (u8)(((u64)(src)) >> 24); \
+ (p_dst)[4] = (u8)(((u64)(src)) >> 32); \
+ (p_dst)[5] = (u8)(((u64)(src)) >> 40); \
+ (p_dst)[6] = (u8)(((u64)(src)) >> 48); \
+ (p_dst)[7] = (u8)(((u64)(src)) >> 56); \
+ } while (0)
+
+#ifdef __LITTLE_ENDIAN
+#define GET16_A(p_src) (*((u16 *)(p_src)))
+#define GET32_A(p_src) (*((u32 *)(p_src)))
+#define GET64_A(p_src) (*((u64 *)(p_src)))
+#define SET16_A(p_dst, src) (*((u16 *)(p_dst)) = (u16)(src))
+#define SET32_A(p_dst, src) (*((u32 *)(p_dst)) = (u32)(src))
+#define SET64_A(p_dst, src) (*((u64 *)(p_dst)) = (u64)(src))
+#else /* BIG_ENDIAN */
+#define GET16_A(p_src) GET16(p_src)
+#define GET32_A(p_src) GET32(p_src)
+#define GET64_A(p_src) GET64(p_src)
+#define SET16_A(p_dst, src) SET16(p_dst, src)
+#define SET32_A(p_dst, src) SET32(p_dst, src)
+#define SET64_A(p_dst, src) SET64(p_dst, src)
+#endif
+
+/* cache size (in number of sectors) */
+/* (should be a power of 2) */
+#define FAT_CACHE_SIZE 128
+#define FAT_CACHE_HASH_SIZE 64
+#define BUF_CACHE_SIZE 256
+#define BUF_CACHE_HASH_SIZE 64
+
+/* Upcase table macro */
+#define HIGH_INDEX_BIT (8)
+#define HIGH_INDEX_MASK (0xFF00)
+#define LOW_INDEX_BIT (16 - HIGH_INDEX_BIT)
+#define UTBL_ROW_COUNT BIT(LOW_INDEX_BIT)
+#define UTBL_COL_COUNT BIT(HIGH_INDEX_BIT)
+
+static inline u16 get_col_index(u16 i)
+{
+ return i >> LOW_INDEX_BIT;
+}
+
+static inline u16 get_row_index(u16 i)
+{
+ return i & ~HIGH_INDEX_MASK;
+}
+
+#define EXFAT_SUPER_MAGIC (0x2011BAB0L)
+#define EXFAT_ROOT_INO 1
+
+/* FAT types */
+#define FAT12 0x01 /* FAT12 */
+#define FAT16 0x0E /* Win95 FAT16 (LBA) */
+#define FAT32 0x0C /* Win95 FAT32 (LBA) */
+#define EXFAT 0x07 /* exFAT */
+
+/* file name lengths */
+#define MAX_CHARSET_SIZE 3 /* max size of multi-byte character */
+#define MAX_PATH_DEPTH 15 /* max depth of path name */
+#define MAX_NAME_LENGTH 256 /* max len of filename including NULL */
+#define MAX_PATH_LENGTH 260 /* max len of pathname including NULL */
+#define DOS_NAME_LENGTH 11 /* DOS filename length excluding NULL */
+#define DOS_PATH_LENGTH 80 /* DOS pathname length excluding NULL */
+
+/* file attributes */
+#define ATTR_NORMAL 0x0000
+#define ATTR_READONLY 0x0001
+#define ATTR_HIDDEN 0x0002
+#define ATTR_SYSTEM 0x0004
+#define ATTR_VOLUME 0x0008
+#define ATTR_SUBDIR 0x0010
+#define ATTR_ARCHIVE 0x0020
+#define ATTR_SYMLINK 0x0040
+#define ATTR_EXTEND 0x000F
+#define ATTR_RWMASK 0x007E
+
+/* file creation modes */
+#define FM_REGULAR 0x00
+#define FM_SYMLINK 0x40
+
+/* return values */
+#define FFS_SUCCESS 0
+#define FFS_MEDIAERR 1
+#define FFS_FORMATERR 2
+#define FFS_MOUNTED 3
+#define FFS_NOTMOUNTED 4
+#define FFS_ALIGNMENTERR 5
+#define FFS_SEMAPHOREERR 6
+#define FFS_INVALIDPATH 7
+#define FFS_INVALIDFID 8
+#define FFS_NOTFOUND 9
+#define FFS_FILEEXIST 10
+#define FFS_PERMISSIONERR 11
+#define FFS_NOTOPENED 12
+#define FFS_MAXOPENED 13
+#define FFS_FULL 14
+#define FFS_EOF 15
+#define FFS_DIRBUSY 16
+#define FFS_MEMORYERR 17
+#define FFS_NAMETOOLONG 18
+#define FFS_ERROR 19
+
+#define NUM_UPCASE 2918
+
+#define DOS_CUR_DIR_NAME ". "
+#define DOS_PAR_DIR_NAME ".. "
+
+#ifdef __LITTLE_ENDIAN
+#define UNI_CUR_DIR_NAME ".\0"
+#define UNI_PAR_DIR_NAME ".\0.\0"
+#else
+#define UNI_CUR_DIR_NAME "\0."
+#define UNI_PAR_DIR_NAME "\0.\0."
+#endif
+
+struct date_time_t {
+ u16 Year;
+ u16 Month;
+ u16 Day;
+ u16 Hour;
+ u16 Minute;
+ u16 Second;
+ u16 MilliSecond;
+};
+
+struct part_info_t {
+ u32 Offset; /* start sector number of the partition */
+ u32 Size; /* in sectors */
+};
+
+struct dev_info_t {
+ u32 SecSize; /* sector size in bytes */
+ u32 DevSize; /* block device size in sectors */
+};
+
+struct vol_info_t {
+ u32 FatType;
+ u32 ClusterSize;
+ u32 NumClusters;
+ u32 FreeClusters;
+ u32 UsedClusters;
+};
+
+/* directory structure */
+struct chain_t {
+ u32 dir;
+ s32 size;
+ u8 flags;
+};
+
+struct file_id_t {
+ struct chain_t dir;
+ s32 entry;
+ u32 type;
+ u32 attr;
+ u32 start_clu;
+ u64 size;
+ u8 flags;
+ s64 rwoffset;
+ s32 hint_last_off;
+ u32 hint_last_clu;
+};
+
+struct dir_entry_t {
+ char Name[MAX_NAME_LENGTH * MAX_CHARSET_SIZE];
+
+ /* used only for FAT12/16/32, not used for exFAT */
+ char ShortName[DOS_NAME_LENGTH + 2];
+
+ u32 Attr;
+ u64 Size;
+ u32 NumSubdirs;
+ struct date_time_t CreateTimestamp;
+ struct date_time_t ModifyTimestamp;
+ struct date_time_t AccessTimestamp;
+};
+
+struct timestamp_t {
+ u16 sec; /* 0 ~ 59 */
+ u16 min; /* 0 ~ 59 */
+ u16 hour; /* 0 ~ 23 */
+ u16 day; /* 1 ~ 31 */
+ u16 mon; /* 1 ~ 12 */
+ u16 year; /* 0 ~ 127 (since 1980) */
+};
+
+/* MS-DOS FAT partition boot record (512 bytes) */
+struct pbr_sector_t {
+ u8 jmp_boot[3];
+ u8 oem_name[8];
+ u8 bpb[109];
+ u8 boot_code[390];
+ u8 signature[2];
+};
+
+/* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */
+struct bpb16_t {
+ u8 sector_size[2];
+ u8 sectors_per_clu;
+ u8 num_reserved[2];
+ u8 num_fats;
+ u8 num_root_entries[2];
+ u8 num_sectors[2];
+ u8 media_type;
+ u8 num_fat_sectors[2];
+ u8 sectors_in_track[2];
+ u8 num_heads[2];
+ u8 num_hid_sectors[4];
+ u8 num_huge_sectors[4];
+
+ u8 phy_drv_no;
+ u8 reserved;
+ u8 ext_signature;
+ u8 vol_serial[4];
+ u8 vol_label[11];
+ u8 vol_type[8];
+};
+
+/* MS-DOS FAT32 BIOS parameter block (79 bytes) */
+struct bpb32_t {
+ u8 sector_size[2];
+ u8 sectors_per_clu;
+ u8 num_reserved[2];
+ u8 num_fats;
+ u8 num_root_entries[2];
+ u8 num_sectors[2];
+ u8 media_type;
+ u8 num_fat_sectors[2];
+ u8 sectors_in_track[2];
+ u8 num_heads[2];
+ u8 num_hid_sectors[4];
+ u8 num_huge_sectors[4];
+ u8 num_fat32_sectors[4];
+ u8 ext_flags[2];
+ u8 fs_version[2];
+ u8 root_cluster[4];
+ u8 fsinfo_sector[2];
+ u8 backup_sector[2];
+ u8 reserved[12];
+
+ u8 phy_drv_no;
+ u8 ext_reserved;
+ u8 ext_signature;
+ u8 vol_serial[4];
+ u8 vol_label[11];
+ u8 vol_type[8];
+};
+
+/* MS-DOS EXFAT BIOS parameter block (109 bytes) */
+struct bpbex_t {
+ u8 reserved1[53];
+ u8 vol_offset[8];
+ u8 vol_length[8];
+ u8 fat_offset[4];
+ u8 fat_length[4];
+ u8 clu_offset[4];
+ u8 clu_count[4];
+ u8 root_cluster[4];
+ u8 vol_serial[4];
+ u8 fs_version[2];
+ u8 vol_flags[2];
+ u8 sector_size_bits;
+ u8 sectors_per_clu_bits;
+ u8 num_fats;
+ u8 phy_drv_no;
+ u8 perc_in_use;
+ u8 reserved2[7];
+};
+
+/* MS-DOS FAT file system information sector (512 bytes) */
+struct fsi_sector_t {
+ u8 signature1[4];
+ u8 reserved1[480];
+ u8 signature2[4];
+ u8 free_cluster[4];
+ u8 next_cluster[4];
+ u8 reserved2[14];
+ u8 signature3[2];
+};
+
+/* MS-DOS FAT directory entry (32 bytes) */
+struct dentry_t {
+ u8 dummy[32];
+};
+
+struct dos_dentry_t {
+ u8 name[DOS_NAME_LENGTH];
+ u8 attr;
+ u8 lcase;
+ u8 create_time_ms;
+ u8 create_time[2];
+ u8 create_date[2];
+ u8 access_date[2];
+ u8 start_clu_hi[2];
+ u8 modify_time[2];
+ u8 modify_date[2];
+ u8 start_clu_lo[2];
+ u8 size[4];
+};
+
+/* MS-DOS FAT extended directory entry (32 bytes) */
+struct ext_dentry_t {
+ u8 order;
+ u8 unicode_0_4[10];
+ u8 attr;
+ u8 sysid;
+ u8 checksum;
+ u8 unicode_5_10[12];
+ u8 start_clu[2];
+ u8 unicode_11_12[4];
+};
+
+/* MS-DOS EXFAT file directory entry (32 bytes) */
+struct file_dentry_t {
+ u8 type;
+ u8 num_ext;
+ u8 checksum[2];
+ u8 attr[2];
+ u8 reserved1[2];
+ u8 create_time[2];
+ u8 create_date[2];
+ u8 modify_time[2];
+ u8 modify_date[2];
+ u8 access_time[2];
+ u8 access_date[2];
+ u8 create_time_ms;
+ u8 modify_time_ms;
+ u8 access_time_ms;
+ u8 reserved2[9];
+};
+
+/* MS-DOS EXFAT stream extension directory entry (32 bytes) */
+struct strm_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 reserved1;
+ u8 name_len;
+ u8 name_hash[2];
+ u8 reserved2[2];
+ u8 valid_size[8];
+ u8 reserved3[4];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT file name directory entry (32 bytes) */
+struct name_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 unicode_0_14[30];
+};
+
+/* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */
+struct bmap_dentry_t {
+ u8 type;
+ u8 flags;
+ u8 reserved[18];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT up-case table directory entry (32 bytes) */
+struct case_dentry_t {
+ u8 type;
+ u8 reserved1[3];
+ u8 checksum[4];
+ u8 reserved2[12];
+ u8 start_clu[4];
+ u8 size[8];
+};
+
+/* MS-DOS EXFAT volume label directory entry (32 bytes) */
+struct volm_dentry_t {
+ u8 type;
+ u8 label_len;
+ u8 unicode_0_10[22];
+ u8 reserved[8];
+};
+
+/* unused entry hint information */
+struct uentry_t {
+ u32 dir;
+ s32 entry;
+ struct chain_t clu;
+};
+
+/* DOS name structure */
+struct dos_name_t {
+ u8 name[DOS_NAME_LENGTH];
+ u8 name_case;
+};
+
+/* unicode name structure */
+struct uni_name_t {
+ u16 name[MAX_NAME_LENGTH];
+ u16 name_hash;
+ u8 name_len;
+};
+
+struct buf_cache_t {
+ struct buf_cache_t *next;
+ struct buf_cache_t *prev;
+ struct buf_cache_t *hash_next;
+ struct buf_cache_t *hash_prev;
+ s32 drv;
+ sector_t sec;
+ u32 flag;
+ struct buffer_head *buf_bh;
+};
+
+struct fs_func {
+ s32 (*alloc_cluster)(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+ void (*free_cluster)(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+ s32 (*count_used_clusters)(struct super_block *sb);
+
+ s32 (*init_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size);
+ s32 (*init_ext_entry)(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+ s32 (*find_dir_entry)(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+ void (*delete_dir_entry)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ s32 offset, s32 num_entries);
+ void (*get_uni_name_from_ext_entry)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+ s32 (*count_ext_entries)(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ struct dentry_t *p_entry);
+ s32 (*calc_num_entries)(struct uni_name_t *p_uniname);
+
+ u32 (*get_entry_type)(struct dentry_t *p_entry);
+ void (*set_entry_type)(struct dentry_t *p_entry, u32 type);
+ u32 (*get_entry_attr)(struct dentry_t *p_entry);
+ void (*set_entry_attr)(struct dentry_t *p_entry, u32 attr);
+ u8 (*get_entry_flag)(struct dentry_t *p_entry);
+ void (*set_entry_flag)(struct dentry_t *p_entry, u8 flag);
+ u32 (*get_entry_clu0)(struct dentry_t *p_entry);
+ void (*set_entry_clu0)(struct dentry_t *p_entry, u32 clu0);
+ u64 (*get_entry_size)(struct dentry_t *p_entry);
+ void (*set_entry_size)(struct dentry_t *p_entry, u64 size);
+ void (*get_entry_time)(struct dentry_t *p_entry,
+ struct timestamp_t *tp, u8 mode);
+ void (*set_entry_time)(struct dentry_t *p_entry,
+ struct timestamp_t *tp, u8 mode);
+};
+
+struct fs_info_t {
+ u32 drv; /* drive ID */
+ u32 vol_type; /* volume FAT type */
+ u32 vol_id; /* volume serial number */
+
+ u64 num_sectors; /* num of sectors in volume */
+ u32 num_clusters; /* num of clusters in volume */
+ u32 cluster_size; /* cluster size in bytes */
+ u32 cluster_size_bits;
+ u32 sectors_per_clu; /* cluster size in sectors */
+ u32 sectors_per_clu_bits;
+
+ u32 PBR_sector; /* PBR sector */
+ u32 FAT1_start_sector; /* FAT1 start sector */
+ u32 FAT2_start_sector; /* FAT2 start sector */
+ u32 root_start_sector; /* root dir start sector */
+ u32 data_start_sector; /* data area start sector */
+ u32 num_FAT_sectors; /* num of FAT sectors */
+
+ u32 root_dir; /* root dir cluster */
+ u32 dentries_in_root; /* num of dentries in root dir */
+ u32 dentries_per_clu; /* num of dentries per cluster */
+
+ u32 vol_flag; /* volume dirty flag */
+ struct buffer_head *pbr_bh; /* PBR sector */
+
+ u32 map_clu; /* allocation bitmap start cluster */
+ u32 map_sectors; /* num of allocation bitmap sectors */
+ struct buffer_head **vol_amap; /* allocation bitmap */
+
+ u16 **vol_utbl; /* upcase table */
+
+ u32 clu_srch_ptr; /* cluster search pointer */
+ u32 used_clusters; /* number of used clusters */
+ struct uentry_t hint_uentry; /* unused entry hint information */
+
+ u32 dev_ejected; /* block device operation error flag */
+
+ struct fs_func *fs_func;
+ struct semaphore v_sem;
+
+ /* FAT cache */
+ struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
+ struct buf_cache_t FAT_cache_lru_list;
+ struct buf_cache_t FAT_cache_hash_list[FAT_CACHE_HASH_SIZE];
+
+ /* buf cache */
+ struct buf_cache_t buf_cache_array[BUF_CACHE_SIZE];
+ struct buf_cache_t buf_cache_lru_list;
+ struct buf_cache_t buf_cache_hash_list[BUF_CACHE_HASH_SIZE];
+};
+
+#define ES_2_ENTRIES 2
+#define ES_3_ENTRIES 3
+#define ES_ALL_ENTRIES 0
+
+struct entry_set_cache_t {
+ /* sector number that contains file_entry */
+ sector_t sector;
+
+ /* byte offset in the sector */
+ s32 offset;
+
+ /*
+ * flag in stream entry.
+ * 01 for cluster chain,
+	 * 03 for contiguous clusters.
+ */
+ s32 alloc_flag;
+
+ u32 num_entries;
+
+ /* __buf should be the last member */
+ void *__buf;
+};
+
+#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */
+#define EXFAT_ERRORS_PANIC 2 /* panic on error */
+#define EXFAT_ERRORS_RO 3 /* remount r/o on error */
+
+/* ioctl command */
+#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
+
+struct exfat_mount_options {
+ kuid_t fs_uid;
+ kgid_t fs_gid;
+ unsigned short fs_fmask;
+ unsigned short fs_dmask;
+
+ /* permission for setting the [am]time */
+ unsigned short allow_utime;
+
+ /* codepage for shortname conversions */
+ unsigned short codepage;
+
+ /* charset for filename input/display */
+ char *iocharset;
+
+ unsigned char casesensitive;
+
+ /* on error: continue, panic, remount-ro */
+ unsigned char errors;
+#ifdef CONFIG_EXFAT_DISCARD
+	/* flag on if -o discard specified and device supports discard() */
+ unsigned char discard;
+#endif /* CONFIG_EXFAT_DISCARD */
+};
+
+#define EXFAT_HASH_BITS 8
+#define EXFAT_HASH_SIZE BIT(EXFAT_HASH_BITS)
+
+/*
+ * EXFAT file system in-core superblock data
+ */
+struct bd_info_t {
+ s32 sector_size; /* in bytes */
+ s32 sector_size_bits;
+ s32 sector_size_mask;
+
+ /* total number of sectors in this block device */
+ s32 num_sectors;
+
+ /* opened or not */
+ bool opened;
+};
+
+struct exfat_sb_info {
+ struct fs_info_t fs_info;
+ struct bd_info_t bd_info;
+
+ struct exfat_mount_options options;
+
+ int s_dirt;
+ struct mutex s_lock;
+ struct nls_table *nls_disk; /* Codepage used on disk */
+ struct nls_table *nls_io; /* Charset used for input and display */
+
+ struct inode *fat_inode;
+
+ spinlock_t inode_hash_lock;
+ struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ long debug_flags;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+};
+
+/*
+ * EXFAT file system inode data in memory
+ */
+struct exfat_inode_info {
+ struct file_id_t fid;
+ char *target;
+ /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */
+ loff_t mmu_private; /* physically allocated size */
+ loff_t i_pos; /* on-disk position of directory entry or 0 */
+ struct hlist_node i_hash_fat; /* hash by i_location */
+ struct rw_semaphore truncate_lock;
+ struct inode vfs_inode;
+ struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */
+};
+
+#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info))
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+ return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+/* NLS management function */
+u16 nls_upper(struct super_block *sb, u16 a);
+int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b);
+int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
+void nls_uniname_to_dosname(struct super_block *sb,
+ struct dos_name_t *p_dosname,
+ struct uni_name_t *p_uniname, bool *p_lossy);
+void nls_dosname_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
+ struct uni_name_t *p_uniname);
+void nls_cstring_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname, u8 *p_cstring,
+ bool *p_lossy);
+
+/* buffer cache management */
+void buf_init(struct super_block *sb);
+void buf_shutdown(struct super_block *sb);
+int FAT_read(struct super_block *sb, u32 loc, u32 *content);
+s32 FAT_write(struct super_block *sb, u32 loc, u32 content);
+u8 *FAT_getblk(struct super_block *sb, sector_t sec);
+void FAT_modify(struct super_block *sb, sector_t sec);
+void FAT_release_all(struct super_block *sb);
+void FAT_sync(struct super_block *sb);
+u8 *buf_getblk(struct super_block *sb, sector_t sec);
+void buf_modify(struct super_block *sb, sector_t sec);
+void buf_lock(struct super_block *sb, sector_t sec);
+void buf_unlock(struct super_block *sb, sector_t sec);
+void buf_release(struct super_block *sb, sector_t sec);
+void buf_release_all(struct super_block *sb);
+void buf_sync(struct super_block *sb);
+
+/* fs management functions */
+void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
+void fs_error(struct super_block *sb);
+
+/* cluster management functions */
+s32 clear_cluster(struct super_block *sb, u32 clu);
+s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain);
+void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse);
+u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain);
+s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
+s32 fat_count_used_clusters(struct super_block *sb);
+s32 exfat_count_used_clusters(struct super_block *sb);
+void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
+
+/* allocation bitmap management functions */
+s32 load_alloc_bitmap(struct super_block *sb);
+void free_alloc_bitmap(struct super_block *sb);
+s32 set_alloc_bitmap(struct super_block *sb, u32 clu);
+s32 clr_alloc_bitmap(struct super_block *sb, u32 clu);
+u32 test_alloc_bitmap(struct super_block *sb, u32 clu);
+void sync_alloc_bitmap(struct super_block *sb);
+
+/* upcase table management functions */
+s32 load_upcase_table(struct super_block *sb);
+void free_upcase_table(struct super_block *sb);
+
+/* dir entry management functions */
+u32 fat_get_entry_type(struct dentry_t *p_entry);
+u32 exfat_get_entry_type(struct dentry_t *p_entry);
+void fat_set_entry_type(struct dentry_t *p_entry, u32 type);
+void exfat_set_entry_type(struct dentry_t *p_entry, u32 type);
+u32 fat_get_entry_attr(struct dentry_t *p_entry);
+u32 exfat_get_entry_attr(struct dentry_t *p_entry);
+void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
+u8 fat_get_entry_flag(struct dentry_t *p_entry);
+u8 exfat_get_entry_flag(struct dentry_t *p_entry);
+void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
+u32 fat_get_entry_clu0(struct dentry_t *p_entry);
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
+void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
+u64 fat_get_entry_size(struct dentry_t *p_entry);
+u64 exfat_get_entry_size(struct dentry_t *p_entry);
+void fat_set_entry_size(struct dentry_t *p_entry, u64 size);
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
+struct timestamp_t *tm_current(struct timestamp_t *tm);
+void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode);
+s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ u32 type, u32 start_clu, u64 size);
+s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size);
+s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname);
+void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu);
+void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
+ u16 *uniname);
+void init_file_entry(struct file_dentry_t *ep, u32 type);
+void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu,
+ u64 size);
+void init_name_entry(struct name_dentry_t *ep, u16 *uniname);
+void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries);
+
+s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ sector_t *sector, s32 *offset);
+struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
+ s32 offset);
+struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, sector_t *sector);
+struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u32 type,
+ struct dentry_t **file_ep);
+void release_entry_set(struct entry_set_cache_t *es);
+s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es);
+s32 write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ struct dentry_t *ep, u32 count);
+s32 search_deleted_or_unused_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 num_entries);
+s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir,
+ s32 num_entries);
+s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type);
+s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry);
+s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
+ u32 type);
+void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry);
+void update_dir_checksum_with_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es);
+bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
+
+/* name conversion functions */
+s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 *entries,
+ struct dos_name_t *p_dosname);
+void get_uni_name_from_dos_entry(struct super_block *sb,
+ struct dos_dentry_t *ep,
+ struct uni_name_t *p_uniname, u8 mode);
+void fat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname);
+s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep,
+ u16 *uniname, s32 order);
+s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep,
+ u16 *uniname, s32 order);
+s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct dos_name_t *p_dosname);
+void fat_attach_count_to_dos_name(u8 *dosname, s32 count);
+s32 fat_calc_num_entries(struct uni_name_t *p_uniname);
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
+u8 calc_checksum_1byte(void *data, s32 len, u8 chksum);
+u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
+u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type);
+
+/* name resolution functions */
+s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname);
+s32 resolve_name(u8 *name, u8 **arg);
+
+/* file operation functions */
+s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
+s32 create_dir(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 create_file(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid);
+void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry);
+s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 old_entry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid);
+s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
+ struct chain_t *p_newdir, struct uni_name_t *p_uniname,
+ struct file_id_t *fid);
+
+/* sector read/write functions */
+int sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, bool read);
+int sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, bool sync);
+int multi_sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, s32 num_secs, bool read);
+int multi_sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, s32 num_secs, bool sync);
+
+void bdev_open(struct super_block *sb);
+void bdev_close(struct super_block *sb);
+int bdev_read(struct super_block *sb, sector_t secno,
+ struct buffer_head **bh, u32 num_secs, bool read);
+int bdev_write(struct super_block *sb, sector_t secno,
+ struct buffer_head *bh, u32 num_secs, bool sync);
+int bdev_sync(struct super_block *sb);
+
+extern const u8 uni_upcase[];
+#endif /* _EXFAT_H */
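
The GET16/GET32 and SET16/SET32/SET64 helpers above assemble on-disk exFAT fields byte by byte, so the little-endian disk layout is handled correctly on any host; the *_A variants are only a direct-load shortcut when the host itself is little-endian. A minimal user-space sketch (not part of this patch, using simplified copies of the macros) illustrating the byte order they assume:

#include <stdint.h>
#include <stdio.h>

/* simplified copies of the header's byte-wise helpers */
#define GET16(p)	(((uint16_t)(p)[0]) | (((uint16_t)(p)[1]) << 8))
#define SET32(p, v)				\
	do {					\
		(p)[0] = (uint8_t)(v);		\
		(p)[1] = (uint8_t)((v) >> 8);	\
		(p)[2] = (uint8_t)((v) >> 16);	\
		(p)[3] = (uint8_t)((v) >> 24);	\
	} while (0)

int main(void)
{
	uint8_t le16[2] = { 0x34, 0x12 };	/* 0x1234 stored little-endian */
	uint8_t le32[4];

	printf("GET16 -> 0x%04x\n", (unsigned int)GET16(le16));	/* 0x1234 */

	SET32(le32, 0xAABBCCDDu);
	printf("SET32 -> %02x %02x %02x %02x\n",	/* dd cc bb aa */
	       le32[0], le32[1], le32[2], le32[3]);
	return 0;
}
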
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
new file mode 100644
index 000000000000..f086c75e7076
--- /dev/null
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include "exfat.h"
+
+void bdev_open(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bd->opened)
+ return;
+
+ p_bd->sector_size = bdev_logical_block_size(sb->s_bdev);
+ p_bd->sector_size_bits = ilog2(p_bd->sector_size);
+ p_bd->sector_size_mask = p_bd->sector_size - 1;
+ p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >>
+ p_bd->sector_size_bits;
+ p_bd->opened = true;
+}
+
+void bdev_close(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ p_bd->opened = false;
+}
+
+int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
+ u32 num_secs, bool read)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ if (*bh)
+ __brelse(*bh);
+
+ if (read)
+ *bh = __bread(sb->s_bdev, secno,
+ num_secs << p_bd->sector_size_bits);
+ else
+ *bh = __getblk(sb->s_bdev, secno,
+ num_secs << p_bd->sector_size_bits);
+
+ if (*bh)
+ return 0;
+
+ WARN(!p_fs->dev_ejected,
+ "[EXFAT] No bh, device seems wrong or to be ejected.\n");
+
+ return FFS_MEDIAERR;
+}
+
+int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
+ u32 num_secs, bool sync)
+{
+ s32 count;
+ struct buffer_head *bh2;
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ if (secno == bh->b_blocknr) {
+ lock_buffer(bh);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ unlock_buffer(bh);
+ if (sync && (sync_dirty_buffer(bh) != 0))
+ return FFS_MEDIAERR;
+ } else {
+ count = num_secs << p_bd->sector_size_bits;
+
+ bh2 = __getblk(sb->s_bdev, secno, count);
+ if (!bh2)
+ goto no_bh;
+
+ lock_buffer(bh2);
+ memcpy(bh2->b_data, bh->b_data, count);
+ set_buffer_uptodate(bh2);
+ mark_buffer_dirty(bh2);
+ unlock_buffer(bh2);
+ if (sync && (sync_dirty_buffer(bh2) != 0)) {
+ __brelse(bh2);
+ goto no_bh;
+ }
+ __brelse(bh2);
+ }
+
+ return 0;
+
+no_bh:
+ WARN(!p_fs->dev_ejected,
+ "[EXFAT] No bh, device seems wrong or to be ejected.\n");
+
+ return FFS_MEDIAERR;
+}
+
+int bdev_sync(struct super_block *sb)
+{
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ long flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
+ return FFS_MEDIAERR;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ if (!p_bd->opened)
+ return FFS_MEDIAERR;
+
+ return sync_blockdev(sb->s_bdev);
+}
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
new file mode 100644
index 000000000000..1565ce65d39f
--- /dev/null
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include "exfat.h"
+
+#define LOCKBIT 0x01
+#define DIRTYBIT 0x02
+
+/* Local variables */
+static DEFINE_SEMAPHORE(f_sem);
+static DEFINE_SEMAPHORE(b_sem);
+
+static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
+{
+ s32 off;
+ struct buf_cache_t *bp, *hp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ off = (sec +
+ (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->FAT_cache_hash_list[off];
+ for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
+ if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
+ WARN(!bp->buf_bh,
+ "[EXFAT] FAT_cache has no bh. It will make system panic.\n");
+
+ touch_buffer(bp->buf_bh);
+ return bp;
+ }
+ }
+ return NULL;
+}
+
+static void push_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->next = list->next;
+ bp->prev = list;
+ list->next->prev = bp;
+ list->next = bp;
+}
+
+static void push_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev = list->prev;
+ bp->next = list;
+ list->prev->next = bp;
+ list->prev = bp;
+}
+
+static void move_to_mru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev->next = bp->next;
+ bp->next->prev = bp->prev;
+ push_to_mru(bp, list);
+}
+
+static void move_to_lru(struct buf_cache_t *bp, struct buf_cache_t *list)
+{
+ bp->prev->next = bp->next;
+ bp->next->prev = bp->prev;
+ push_to_lru(bp, list);
+}
+
+static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = p_fs->FAT_cache_lru_list.prev;
+
+ move_to_mru(bp, &p_fs->FAT_cache_lru_list);
+ return bp;
+}
+
+static void FAT_cache_insert_hash(struct super_block *sb,
+ struct buf_cache_t *bp)
+{
+ s32 off;
+ struct buf_cache_t *hp;
+ struct fs_info_t *p_fs;
+
+ p_fs = &(EXFAT_SB(sb)->fs_info);
+ off = (bp->sec +
+ (bp->sec >> p_fs->sectors_per_clu_bits)) &
+ (FAT_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->FAT_cache_hash_list[off];
+ bp->hash_next = hp->hash_next;
+ bp->hash_prev = hp;
+ hp->hash_next->hash_prev = bp;
+ hp->hash_next = bp;
+}
+
+static void FAT_cache_remove_hash(struct buf_cache_t *bp)
+{
+ (bp->hash_prev)->hash_next = bp->hash_next;
+ (bp->hash_next)->hash_prev = bp->hash_prev;
+}
+
+static void buf_cache_insert_hash(struct super_block *sb,
+ struct buf_cache_t *bp)
+{
+ s32 off;
+ struct buf_cache_t *hp;
+ struct fs_info_t *p_fs;
+
+ p_fs = &(EXFAT_SB(sb)->fs_info);
+ off = (bp->sec +
+ (bp->sec >> p_fs->sectors_per_clu_bits)) &
+ (BUF_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->buf_cache_hash_list[off];
+ bp->hash_next = hp->hash_next;
+ bp->hash_prev = hp;
+ hp->hash_next->hash_prev = bp;
+ hp->hash_next = bp;
+}
+
+static void buf_cache_remove_hash(struct buf_cache_t *bp)
+{
+ (bp->hash_prev)->hash_next = bp->hash_next;
+ (bp->hash_next)->hash_prev = bp->hash_prev;
+}
+
+void buf_init(struct super_block *sb)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ int i;
+
+ /* LRU list */
+ p_fs->FAT_cache_lru_list.next = &p_fs->FAT_cache_lru_list;
+ p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list;
+
+ for (i = 0; i < FAT_CACHE_SIZE; i++) {
+ p_fs->FAT_cache_array[i].drv = -1;
+ p_fs->FAT_cache_array[i].sec = ~0;
+ p_fs->FAT_cache_array[i].flag = 0;
+ p_fs->FAT_cache_array[i].buf_bh = NULL;
+ p_fs->FAT_cache_array[i].prev = NULL;
+ p_fs->FAT_cache_array[i].next = NULL;
+ push_to_mru(&p_fs->FAT_cache_array[i],
+ &p_fs->FAT_cache_lru_list);
+ }
+
+ p_fs->buf_cache_lru_list.next = &p_fs->buf_cache_lru_list;
+ p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list;
+
+ for (i = 0; i < BUF_CACHE_SIZE; i++) {
+ p_fs->buf_cache_array[i].drv = -1;
+ p_fs->buf_cache_array[i].sec = ~0;
+ p_fs->buf_cache_array[i].flag = 0;
+ p_fs->buf_cache_array[i].buf_bh = NULL;
+ p_fs->buf_cache_array[i].prev = NULL;
+ p_fs->buf_cache_array[i].next = NULL;
+ push_to_mru(&p_fs->buf_cache_array[i],
+ &p_fs->buf_cache_lru_list);
+ }
+
+ /* HASH list */
+ for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
+ p_fs->FAT_cache_hash_list[i].drv = -1;
+ p_fs->FAT_cache_hash_list[i].sec = ~0;
+ p_fs->FAT_cache_hash_list[i].hash_next =
+ &p_fs->FAT_cache_hash_list[i];
+ p_fs->FAT_cache_hash_list[i].hash_prev =
+ &p_fs->FAT_cache_hash_list[i];
+ }
+
+ for (i = 0; i < FAT_CACHE_SIZE; i++)
+ FAT_cache_insert_hash(sb, &p_fs->FAT_cache_array[i]);
+
+ for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
+ p_fs->buf_cache_hash_list[i].drv = -1;
+ p_fs->buf_cache_hash_list[i].sec = ~0;
+ p_fs->buf_cache_hash_list[i].hash_next =
+ &p_fs->buf_cache_hash_list[i];
+ p_fs->buf_cache_hash_list[i].hash_prev =
+ &p_fs->buf_cache_hash_list[i];
+ }
+
+ for (i = 0; i < BUF_CACHE_SIZE; i++)
+ buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
+}
+
+void buf_shutdown(struct super_block *sb)
+{
+}
+
+static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
+{
+ s32 off;
+ u32 _content;
+ sector_t sec;
+ u8 *fat_sector, *fat_entry;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_fs->vol_type == FAT12) {
+ sec = p_fs->FAT1_start_sector +
+ ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
+ off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ _content = (u32)fat_sector[off];
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+
+ _content |= (u32)fat_sector[0] << 8;
+ } else {
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+ _content = GET16(fat_entry);
+ }
+
+ if (loc & 1)
+ _content >>= 4;
+
+ _content &= 0x00000FFF;
+
+ if (_content >= CLUSTER_16(0x0FF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == FAT16) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 1));
+ off = (loc << 1) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ _content = GET16_A(fat_entry);
+
+ _content &= 0x0000FFFF;
+
+ if (_content >= CLUSTER_16(0xFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == FAT32) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ _content = GET32_A(fat_entry);
+
+ _content &= 0x0FFFFFFF;
+
+ if (_content >= CLUSTER_32(0x0FFFFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ } else if (p_fs->vol_type == EXFAT) {
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+ _content = GET32_A(fat_entry);
+
+ if (_content >= CLUSTER_32(0xFFFFFFF8)) {
+ *content = CLUSTER_32(~0);
+ return 0;
+ }
+ *content = CLUSTER_32(_content);
+ return 0;
+ }
+
+ /* Unknown volume type, throw in the towel and go home */
+ *content = CLUSTER_32(~0);
+ return 0;
+}
+
+/* in : sb, loc
+ * out: content
+ * returns 0 on success
+ * -1 on error
+ */
+int FAT_read(struct super_block *sb, u32 loc, u32 *content)
+{
+ s32 ret;
+
+ down(&f_sem);
+ ret = __FAT_read(sb, loc, content);
+ up(&f_sem);
+
+ return ret;
+}
+
+static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
+{
+ s32 off;
+ sector_t sec;
+ u8 *fat_sector, *fat_entry;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_fs->vol_type == FAT12) {
+ content &= 0x00000FFF;
+
+ sec = p_fs->FAT1_start_sector +
+ ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
+ off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ if (loc & 1) { /* odd */
+ content <<= 4;
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector[off] = (u8)(content |
+ (fat_sector[off] &
+ 0x0F));
+ FAT_modify(sb, sec);
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_sector[0] = (u8)(content >> 8);
+ } else {
+ fat_entry = &fat_sector[off];
+ content |= GET16(fat_entry) & 0x000F;
+
+ SET16(fat_entry, content);
+ }
+ } else { /* even */
+ fat_sector[off] = (u8)(content);
+
+ if (off == (p_bd->sector_size - 1)) {
+ fat_sector[off] = (u8)(content);
+ FAT_modify(sb, sec);
+
+ fat_sector = FAT_getblk(sb, ++sec);
+ if (!fat_sector)
+ return -1;
+ fat_sector[0] = (u8)((fat_sector[0] & 0xF0) |
+ (content >> 8));
+ } else {
+ fat_entry = &fat_sector[off];
+ content |= GET16(fat_entry) & 0xF000;
+
+ SET16(fat_entry, content);
+ }
+ }
+ }
+
+ else if (p_fs->vol_type == FAT16) {
+ content &= 0x0000FFFF;
+
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 1));
+ off = (loc << 1) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ SET16_A(fat_entry, content);
+ } else if (p_fs->vol_type == FAT32) {
+ content &= 0x0FFFFFFF;
+
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ content |= GET32_A(fat_entry) & 0xF0000000;
+
+ SET32_A(fat_entry, content);
+ } else { /* p_fs->vol_type == EXFAT */
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
+
+ fat_sector = FAT_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
+
+ fat_entry = &fat_sector[off];
+
+ SET32_A(fat_entry, content);
+ }
+
+ FAT_modify(sb, sec);
+ return 0;
+}
+
+int FAT_write(struct super_block *sb, u32 loc, u32 content)
+{
+ s32 ret;
+
+ down(&f_sem);
+ ret = __FAT_write(sb, loc, content);
+ up(&f_sem);
+
+ return ret;
+}
+
+u8 *FAT_getblk(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = FAT_cache_find(sb, sec);
+ if (bp) {
+ move_to_mru(bp, &p_fs->FAT_cache_lru_list);
+ return bp->buf_bh->b_data;
+ }
+
+ bp = FAT_cache_get(sb, sec);
+
+ FAT_cache_remove_hash(bp);
+
+ bp->drv = p_fs->drv;
+ bp->sec = sec;
+ bp->flag = 0;
+
+ FAT_cache_insert_hash(sb, bp);
+
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ FAT_cache_remove_hash(bp);
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+ bp->buf_bh = NULL;
+
+ move_to_lru(bp, &p_fs->FAT_cache_lru_list);
+ return NULL;
+ }
+
+ return bp->buf_bh->b_data;
+}
+
+void FAT_modify(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ bp = FAT_cache_find(sb, sec);
+ if (bp)
+ sector_write(sb, sec, bp->buf_bh, 0);
+}
+
+void FAT_release_all(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&f_sem);
+
+ bp = p_fs->FAT_cache_lru_list.next;
+ while (bp != &p_fs->FAT_cache_lru_list) {
+ if (bp->drv == p_fs->drv) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+ }
+ bp = bp->next;
+ }
+
+ up(&f_sem);
+}
+
+void FAT_sync(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&f_sem);
+
+ bp = p_fs->FAT_cache_lru_list.next;
+ while (bp != &p_fs->FAT_cache_lru_list) {
+ if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
+ sync_dirty_buffer(bp->buf_bh);
+ bp->flag &= ~(DIRTYBIT);
+ }
+ bp = bp->next;
+ }
+
+ up(&f_sem);
+}
+
+static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
+{
+ s32 off;
+ struct buf_cache_t *bp, *hp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ off = (sec + (sec >> p_fs->sectors_per_clu_bits)) &
+ (BUF_CACHE_HASH_SIZE - 1);
+
+ hp = &p_fs->buf_cache_hash_list[off];
+ for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) {
+ if ((bp->drv == p_fs->drv) && (bp->sec == sec)) {
+ touch_buffer(bp->buf_bh);
+ return bp;
+ }
+ }
+ return NULL;
+}
+
+static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = p_fs->buf_cache_lru_list.prev;
+ while (bp->flag & LOCKBIT)
+ bp = bp->prev;
+
+ move_to_mru(bp, &p_fs->buf_cache_lru_list);
+ return bp;
+}
+
+static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bp = buf_cache_find(sb, sec);
+ if (bp) {
+ move_to_mru(bp, &p_fs->buf_cache_lru_list);
+ return bp->buf_bh->b_data;
+ }
+
+ bp = buf_cache_get(sb, sec);
+
+ buf_cache_remove_hash(bp);
+
+ bp->drv = p_fs->drv;
+ bp->sec = sec;
+ bp->flag = 0;
+
+ buf_cache_insert_hash(sb, bp);
+
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ buf_cache_remove_hash(bp);
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+ bp->buf_bh = NULL;
+
+ move_to_lru(bp, &p_fs->buf_cache_lru_list);
+ return NULL;
+ }
+
+ return bp->buf_bh->b_data;
+}
+
+u8 *buf_getblk(struct super_block *sb, sector_t sec)
+{
+ u8 *buf;
+
+ down(&b_sem);
+ buf = __buf_getblk(sb, sec);
+ up(&b_sem);
+
+ return buf;
+}
+
+void buf_modify(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ sector_write(sb, sec, bp->buf_bh, 0);
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_lock(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ bp->flag |= LOCKBIT;
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_unlock(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp))
+ bp->flag &= ~(LOCKBIT);
+
+ WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
+ (unsigned long long)sec);
+
+ up(&b_sem);
+}
+
+void buf_release(struct super_block *sb, sector_t sec)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = buf_cache_find(sb, sec);
+ if (likely(bp)) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+
+ move_to_lru(bp, &p_fs->buf_cache_lru_list);
+ }
+
+ up(&b_sem);
+}
+
+void buf_release_all(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = p_fs->buf_cache_lru_list.next;
+ while (bp != &p_fs->buf_cache_lru_list) {
+ if (bp->drv == p_fs->drv) {
+ bp->drv = -1;
+ bp->sec = ~0;
+ bp->flag = 0;
+
+ if (bp->buf_bh) {
+ __brelse(bp->buf_bh);
+ bp->buf_bh = NULL;
+ }
+ }
+ bp = bp->next;
+ }
+
+ up(&b_sem);
+}
+
+void buf_sync(struct super_block *sb)
+{
+ struct buf_cache_t *bp;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ down(&b_sem);
+
+ bp = p_fs->buf_cache_lru_list.next;
+ while (bp != &p_fs->buf_cache_lru_list) {
+ if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) {
+ sync_dirty_buffer(bp->buf_bh);
+ bp->flag &= ~(DIRTYBIT);
+ }
+ bp = bp->next;
+ }
+
+ up(&b_sem);
+}
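
FAT_read() above returns 0 on success and -1 on error, storing the next cluster number in *content, with CLUSTER_32(~0) marking the end of a chain; callers such as fat_free_cluster() below therefore walk a file's clusters with repeated FAT_read(sb, clu, &clu) calls. A minimal user-space model of that walking pattern (toy_fat_read(), fat[] and END_OF_CHAIN are made-up stand-ins, not part of this patch):

#include <stdint.h>
#include <stdio.h>

#define END_OF_CHAIN 0xFFFFFFFFu	/* stands in for CLUSTER_32(~0) */

/* toy FAT: cluster 2 -> 5 -> 6 -> end of chain */
static const uint32_t fat[] = { 0, 0, 5, 0, 0, 6, END_OF_CHAIN };

/* models the FAT_read() contract: 0 on success, -1 on error */
static int toy_fat_read(uint32_t loc, uint32_t *content)
{
	if (loc >= sizeof(fat) / sizeof(fat[0]))
		return -1;
	*content = fat[loc];
	return 0;
}

int main(void)
{
	uint32_t clu = 2;	/* start cluster of the file */

	while (clu != END_OF_CHAIN) {
		printf("cluster %u\n", (unsigned int)clu);
		if (toy_fat_read(clu, &clu) < 0)
			break;	/* media error */
	}
	return 0;
}
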
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
new file mode 100644
index 000000000000..b3e9cf725cf5
--- /dev/null
+++ b/drivers/staging/exfat/exfat_core.c
@@ -0,0 +1,3701 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include "exfat.h"
+
+static void __set_sb_dirty(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ sbi->s_dirt = 1;
+}
+
+static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
+
+static char *reserved_names[] = {
+ "AUX ", "CON ", "NUL ", "PRN ",
+ "COM1 ", "COM2 ", "COM3 ", "COM4 ",
+ "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
+ "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
+ "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
+ NULL
+};
+
+static u8 free_bit[] = {
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */
+ 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */
+ 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */
+ 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */
+ 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */
+ 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */
+ 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */
+ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */
+};
+
+static u8 used_bit[] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */
+ 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */
+ 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, /* 80 ~ 99 */
+ 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */
+ 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */
+ 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */
+ 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */
+ 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */
+ 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, /* 220 ~ 239 */
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */
+};
+
+#define BITMAP_LOC(v) ((v) >> 3)
+#define BITMAP_SHIFT(v) ((v) & 0x07)
+
+static inline s32 exfat_bitmap_test(u8 *bitmap, int i)
+{
+ u8 data;
+
+ data = bitmap[BITMAP_LOC(i)];
+ if ((data >> BITMAP_SHIFT(i)) & 0x01)
+ return 1;
+ return 0;
+}
+
+static inline void exfat_bitmap_set(u8 *bitmap, int i)
+{
+ bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i));
+}
+
+static inline void exfat_bitmap_clear(u8 *bitmap, int i)
+{
+ bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i));
+}
+
+/*
+ * File System Management Functions
+ */
+
+void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
+{
+ struct pbr_sector_t *p_pbr;
+ struct bpbex_t *p_bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_fs->vol_flag == new_flag)
+ return;
+
+ p_fs->vol_flag = new_flag;
+
+ if (p_fs->vol_type == EXFAT) {
+ if (!p_fs->pbr_bh) {
+ if (sector_read(sb, p_fs->PBR_sector,
+ &p_fs->pbr_bh, 1) != FFS_SUCCESS)
+ return;
+ }
+
+ p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
+ p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ SET16(p_bpb->vol_flags, (u16)new_flag);
+
+ /* XXX duyoung
+ * what can we do here? (cuz fs_set_vol_flags() is void)
+ */
+ if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
+ else
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
+ }
+}
+
+void fs_error(struct super_block *sb)
+{
+ struct exfat_mount_options *opts = &EXFAT_SB(sb)->options;
+
+ if (opts->errors == EXFAT_ERRORS_PANIC) {
+ panic("[EXFAT] Filesystem panic from previous error\n");
+ } else if ((opts->errors == EXFAT_ERRORS_RO) && !sb_rdonly(sb)) {
+ sb->s_flags |= SB_RDONLY;
+ pr_err("[EXFAT] Filesystem has been set read-only\n");
+ }
+}
+
+/*
+ * Cluster Management Functions
+ */
+
+s32 clear_cluster(struct super_block *sb, u32 clu)
+{
+ sector_t s, n;
+ s32 ret = FFS_SUCCESS;
+ struct buffer_head *tmp_bh = NULL;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */
+ s = p_fs->root_start_sector;
+ n = p_fs->data_start_sector;
+ } else {
+ s = START_SECTOR(clu);
+ n = s + p_fs->sectors_per_clu;
+ }
+
+ for (; s < n; s++) {
+ ret = sector_read(sb, s, &tmp_bh, 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
+ ret = sector_write(sb, s, tmp_bh, 0);
+ if (ret != FFS_SUCCESS)
+ break;
+ }
+
+ brelse(tmp_bh);
+ return ret;
+}
+
+s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain)
+{
+ int i, num_clusters = 0;
+ u32 new_clu, last_clu = CLUSTER_32(~0), read_clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ new_clu = p_chain->dir;
+ if (new_clu == CLUSTER_32(~0))
+ new_clu = p_fs->clu_srch_ptr;
+ else if (new_clu >= p_fs->num_clusters)
+ new_clu = 2;
+
+ __set_sb_dirty(sb);
+
+ p_chain->dir = CLUSTER_32(~0);
+
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ if (FAT_read(sb, new_clu, &read_clu) != 0)
+ return -1;
+
+ if (read_clu == CLUSTER_32(0)) {
+ if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -1;
+ num_clusters++;
+
+ if (p_chain->dir == CLUSTER_32(~0)) {
+ p_chain->dir = new_clu;
+ } else {
+ if (FAT_write(sb, last_clu, new_clu) < 0)
+ return -1;
+ }
+
+ last_clu = new_clu;
+
+ if ((--num_alloc) == 0) {
+ p_fs->clu_srch_ptr = new_clu;
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters += num_clusters;
+
+ return num_clusters;
+ }
+ }
+ if ((++new_clu) >= p_fs->num_clusters)
+ new_clu = 2;
+ }
+
+ p_fs->clu_srch_ptr = new_clu;
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters += num_clusters;
+
+ return num_clusters;
+}
+
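+/*
+ * exFAT cluster allocation: free clusters are located through the
+ * allocation bitmap.  A chain with flags 0x03 is stored contiguously
+ * (no FAT links on disk); once the allocation can no longer stay
+ * contiguous the chain is converted to a FAT-linked chain (flags 0x01)
+ * via exfat_chain_cont_cluster().
+ */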
+s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+ struct chain_t *p_chain)
+{
+ s32 num_clusters = 0;
+ u32 hint_clu, new_clu, last_clu = CLUSTER_32(~0);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ hint_clu = p_chain->dir;
+ if (hint_clu == CLUSTER_32(~0)) {
+ hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr - 2);
+ if (hint_clu == CLUSTER_32(~0))
+ return 0;
+ } else if (hint_clu >= p_fs->num_clusters) {
+ hint_clu = 2;
+ p_chain->flags = 0x01;
+ }
+
+ __set_sb_dirty(sb);
+
+ p_chain->dir = CLUSTER_32(~0);
+
+ while ((new_clu = test_alloc_bitmap(sb, hint_clu - 2)) != CLUSTER_32(~0)) {
+ if (new_clu != hint_clu) {
+ if (p_chain->flags == 0x03) {
+ exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters);
+ p_chain->flags = 0x01;
+ }
+ }
+
+ if (set_alloc_bitmap(sb, new_clu - 2) != FFS_SUCCESS)
+ return -1;
+
+ num_clusters++;
+
+ if (p_chain->flags == 0x01) {
+ if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -1;
+ }
+
+ if (p_chain->dir == CLUSTER_32(~0)) {
+ p_chain->dir = new_clu;
+ } else {
+ if (p_chain->flags == 0x01) {
+ if (FAT_write(sb, last_clu, new_clu) < 0)
+ return -1;
+ }
+ }
+ last_clu = new_clu;
+
+ if ((--num_alloc) == 0) {
+ p_fs->clu_srch_ptr = hint_clu;
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return num_clusters;
+ }
+
+ hint_clu = new_clu + 1;
+ if (hint_clu >= p_fs->num_clusters) {
+ hint_clu = 2;
+
+ if (p_chain->flags == 0x03) {
+ exfat_chain_cont_cluster(sb, p_chain->dir,
+ num_clusters);
+ p_chain->flags = 0x01;
+ }
+ }
+ }
+
+ p_fs->clu_srch_ptr = hint_clu;
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters += num_clusters;
+
+ p_chain->size += num_clusters;
+ return num_clusters;
+}
+
+void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse)
+{
+ s32 num_clusters = 0;
+ u32 clu, prev;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int i;
+ sector_t sector;
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return;
+ __set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ if (p_chain->size <= 0)
+ return;
+
+ do {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector + i);
+ }
+
+ prev = clu;
+ if (FAT_read(sb, clu, &clu) == -1)
+ break;
+
+ if (FAT_write(sb, prev, CLUSTER_32(0)) < 0)
+ break;
+ num_clusters++;
+
+ } while (clu != CLUSTER_32(~0));
+
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters -= num_clusters;
+}
+
+void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+ s32 do_relse)
+{
+ s32 num_clusters = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int i;
+ sector_t sector;
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return;
+
+ if (p_chain->size <= 0) {
+ pr_err("[EXFAT] free_cluster : skip free-req clu:%u, because of zero-size truncation\n",
+ p_chain->dir);
+ return;
+ }
+
+ __set_sb_dirty(sb);
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ do {
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector + i);
+ }
+
+ if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ break;
+ clu++;
+
+ num_clusters++;
+ } while (num_clusters < p_chain->size);
+ } else {
+ do {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (do_relse) {
+ sector = START_SECTOR(clu);
+ for (i = 0; i < p_fs->sectors_per_clu; i++)
+ buf_release(sb, sector + i);
+ }
+
+ if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ break;
+
+ if (FAT_read(sb, clu, &clu) == -1)
+ break;
+ num_clusters++;
+ } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
+ }
+
+ if (p_fs->used_clusters != UINT_MAX)
+ p_fs->used_clusters -= num_clusters;
+}
+
+u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
+{
+ u32 clu, next;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ clu += p_chain->size - 1;
+ } else {
+ while ((FAT_read(sb, clu, &next) == 0) &&
+ (next != CLUSTER_32(~0))) {
+ if (p_fs->dev_ejected)
+ break;
+ clu = next;
+ }
+ }
+
+ return clu;
+}
+
+s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
+{
+ int i, count = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
+ return 0;
+
+ clu = p_chain->dir;
+
+ if (p_chain->flags == 0x03) {
+ count = p_chain->size;
+ } else {
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ count++;
+ if (FAT_read(sb, clu, &clu) != 0)
+ return 0;
+ if (clu == CLUSTER_32(~0))
+ break;
+ }
+ }
+
+ return count;
+}
+
+s32 fat_count_used_clusters(struct super_block *sb)
+{
+ int i, count = 0;
+ u32 clu;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = 2; i < p_fs->num_clusters; i++) {
+ if (FAT_read(sb, i, &clu) != 0)
+ break;
+ if (clu != CLUSTER_32(0))
+ count++;
+ }
+
+ return count;
+}
+
+s32 exfat_count_used_clusters(struct super_block *sb)
+{
+ int i, map_i, map_b, count = 0;
+ u8 k;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ map_i = map_b = 0;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
+ count += used_bit[k];
+
+ if ((++map_b) >= p_bd->sector_size) {
+ map_i++;
+ map_b = 0;
+ }
+ }
+
+ return count;
+}
+
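+/*
+ * Convert a contiguous run of 'len' clusters starting at 'chain' into an
+ * explicit FAT chain: write consecutive links and terminate the last
+ * cluster with the EOF mark.
+ */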
+void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
+{
+ if (len == 0)
+ return;
+
+ while (len > 1) {
+ if (FAT_write(sb, chain, chain + 1) < 0)
+ break;
+ chain++;
+ len--;
+ }
+ FAT_write(sb, chain, CLUSTER_32(~0));
+}
+
+/*
+ * Allocation Bitmap Management Functions
+ */
+
+s32 load_alloc_bitmap(struct super_block *sb)
+{
+ int i, j, ret;
+ u32 map_size;
+ u32 type;
+ sector_t sector;
+ struct chain_t clu;
+ struct bmap_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu.dir = p_fs->root_dir;
+ clu.flags = 0x01;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < p_fs->dentries_per_clu; i++) {
+ ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_BITMAP)
+ continue;
+
+ if (ep->flags == 0x0) {
+ p_fs->map_clu = GET32_A(ep->start_clu);
+ map_size = (u32)GET64_A(ep->size);
+
+ p_fs->map_sectors = ((map_size - 1) >> p_bd->sector_size_bits) + 1;
+
+ p_fs->vol_amap = kmalloc_array(p_fs->map_sectors,
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!p_fs->vol_amap)
+ return FFS_MEMORYERR;
+
+ sector = START_SECTOR(p_fs->map_clu);
+
+ for (j = 0; j < p_fs->map_sectors; j++) {
+ p_fs->vol_amap[j] = NULL;
+ ret = sector_read(sb, sector + j, &(p_fs->vol_amap[j]), 1);
+ if (ret != FFS_SUCCESS) {
+ /* release all buffers and free vol_amap */
+ i = 0;
+ while (i < j)
+ brelse(p_fs->vol_amap[i++]);
+
+ kfree(p_fs->vol_amap);
+ p_fs->vol_amap = NULL;
+ return ret;
+ }
+ }
+
+ p_fs->pbr_bh = NULL;
+ return FFS_SUCCESS;
+ }
+ }
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+
+ return FFS_FORMATERR;
+}
+
+void free_alloc_bitmap(struct super_block *sb)
+{
+ int i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ brelse(p_fs->pbr_bh);
+
+ for (i = 0; i < p_fs->map_sectors; i++)
+ __brelse(p_fs->vol_amap[i]);
+
+ kfree(p_fs->vol_amap);
+ p_fs->vol_amap = NULL;
+}
+
+s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
+
+ sector = START_SECTOR(p_fs->map_clu) + i;
+
+ exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
+
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
+
+s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+#ifdef CONFIG_EXFAT_DISCARD
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+ int ret;
+#endif /* CONFIG_EXFAT_DISCARD */
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
+
+ sector = START_SECTOR(p_fs->map_clu) + i;
+
+ exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
+
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard) {
+ ret = sb_issue_discard(sb, START_SECTOR(clu),
+ (1 << p_fs->sectors_per_clu_bits),
+ GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("discard not supported by device, disabling");
+ opts->discard = 0;
+ }
+ }
+#endif /* CONFIG_EXFAT_DISCARD */
+
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
+
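+/*
+ * Scan the allocation bitmap, starting at 'clu' (given relative to the
+ * bitmap, i.e. cluster number minus 2) and wrapping around, for the first
+ * free cluster, using the free_bit[] table to locate the first zero bit in
+ * each byte; returns CLUSTER_32(~0) when no free cluster is found.
+ */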
+u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu_base = (clu & ~(0x7)) + 2;
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (p_bd->sector_size_bits + 3);
+ map_b = (clu >> 3) & p_bd->sector_size_mask;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < p_fs->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ if (((++map_b) >= p_bd->sector_size) ||
+ (clu_base >= p_fs->num_clusters)) {
+ if ((++map_i) >= p_fs->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUSTER_32(~0);
+}
+
+void sync_alloc_bitmap(struct super_block *sb)
+{
+ int i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (!p_fs->vol_amap)
+ return;
+
+ for (i = 0; i < p_fs->map_sectors; i++)
+ sync_dirty_buffer(p_fs->vol_amap[i]);
+}
+
+/*
+ * Upcase table Management Functions
+ */
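+/*
+ * Read the on-disk up-case table sector by sector, expanding its compressed
+ * form (a 0xFFFF marker followed by the number of entries to skip) into a
+ * two-level column/row table, while accumulating the table checksum that is
+ * compared against 'utbl_checksum'.
+ */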
+static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
+ u32 num_sectors, u32 utbl_checksum)
+{
+ int i, ret = FFS_ERROR;
+ u32 j;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct buffer_head *tmp_bh = NULL;
+ sector_t end_sector = num_sectors + sector;
+
+ bool skip = false;
+ u32 index = 0;
+ u16 uni = 0;
+ u16 **upcase_table;
+
+ u32 checksum = 0;
+
+ upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
+ GFP_KERNEL);
+ if (!upcase_table)
+ return FFS_MEMORYERR;
+ memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+
+ while (sector < end_sector) {
+ ret = sector_read(sb, sector, &tmp_bh, 1);
+ if (ret != FFS_SUCCESS) {
+ pr_debug("sector read (0x%llX)fail\n",
+ (unsigned long long)sector);
+ goto error;
+ }
+ sector++;
+
+ for (i = 0; i < p_bd->sector_size && index <= 0xFFFF; i += 2) {
+ uni = GET16(((u8 *)tmp_bh->b_data) + i);
+
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
+ i);
+ checksum = ((checksum & 1) ? 0x80000000 : 0) +
+ (checksum >> 1) + *(((u8 *)tmp_bh->b_data) +
+ (i + 1));
+
+ if (skip) {
+ pr_debug("skip from 0x%X ", index);
+ index += uni;
+ pr_debug("to 0x%X (amount of 0x%X)\n",
+ index, uni);
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (!upcase_table[col_index]) {
+ pr_debug("alloc = 0x%X\n", col_index);
+ upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
+ sizeof(u16), GFP_KERNEL);
+ if (!upcase_table[col_index]) {
+ ret = FFS_MEMORYERR;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+ }
+ if (index >= 0xFFFF && utbl_checksum == checksum) {
+ if (tmp_bh)
+ brelse(tmp_bh);
+ return FFS_SUCCESS;
+ }
+ ret = FFS_ERROR;
+error:
+ if (tmp_bh)
+ brelse(tmp_bh);
+ free_upcase_table(sb);
+ return ret;
+}
+
+static s32 __load_default_upcase_table(struct super_block *sb)
+{
+ int i, ret = FFS_ERROR;
+ u32 j;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ bool skip = false;
+ u32 index = 0;
+ u16 uni = 0;
+ u16 **upcase_table;
+
+ upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
+ GFP_KERNEL);
+ if (!upcase_table)
+ return FFS_MEMORYERR;
+ memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
+
+ for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) {
+ uni = GET16(uni_upcase + i);
+ if (skip) {
+ pr_debug("skip from 0x%X ", index);
+ index += uni;
+ pr_debug("to 0x%X (amount of 0x%X)\n", index, uni);
+ skip = false;
+ } else if (uni == index) {
+ index++;
+ } else if (uni == 0xFFFF) {
+ skip = true;
+ } else { /* uni != index , uni != 0xFFFF */
+ u16 col_index = get_col_index(index);
+
+ if (!upcase_table[col_index]) {
+ pr_debug("alloc = 0x%X\n", col_index);
+ upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!upcase_table[col_index]) {
+ ret = FFS_MEMORYERR;
+ goto error;
+ }
+
+ for (j = 0; j < UTBL_ROW_COUNT; j++)
+ upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j;
+ }
+
+ upcase_table[col_index][get_row_index(index)] = uni;
+ index++;
+ }
+ }
+
+ if (index >= 0xFFFF)
+ return FFS_SUCCESS;
+
+error:
+ /* FATAL error: default upcase table has error */
+ free_upcase_table(sb);
+ return ret;
+}
+
+s32 load_upcase_table(struct super_block *sb)
+{
+ int i;
+ u32 tbl_clu, tbl_size;
+ sector_t sector;
+ u32 type, num_sectors;
+ struct chain_t clu;
+ struct case_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ clu.dir = p_fs->root_dir;
+ clu.flags = 0x01;
+
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ for (i = 0; i < p_fs->dentries_per_clu; i++) {
+ ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if (type != TYPE_UPCASE)
+ continue;
+
+ tbl_clu = GET32_A(ep->start_clu);
+ tbl_size = (u32)GET64_A(ep->size);
+
+ sector = START_SECTOR(tbl_clu);
+ num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1;
+ if (__load_upcase_table(sb, sector, num_sectors,
+ GET32_A(ep->checksum)) != FFS_SUCCESS)
+ break;
+ return FFS_SUCCESS;
+ }
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+ /* load default upcase table */
+ return __load_default_upcase_table(sb);
+}
+
+void free_upcase_table(struct super_block *sb)
+{
+ u32 i;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ u16 **upcase_table;
+
+ upcase_table = p_fs->vol_utbl;
+ for (i = 0; i < UTBL_COL_COUNT; i++)
+ kfree(upcase_table[i]);
+
+ kfree(p_fs->vol_utbl);
+ p_fs->vol_utbl = NULL;
+}
+
+/*
+ * Directory Entry Management Functions
+ */
+
+u32 fat_get_entry_type(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ if (*(ep->name) == 0x0)
+ return TYPE_UNUSED;
+
+ else if (*(ep->name) == 0xE5)
+ return TYPE_DELETED;
+
+ else if (ep->attr == ATTR_EXTEND)
+ return TYPE_EXTEND;
+
+ else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_VOLUME)
+ return TYPE_VOLUME;
+
+ else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_SUBDIR)
+ return TYPE_DIR;
+
+ return TYPE_FILE;
+}
+
+u32 exfat_get_entry_type(struct dentry_t *p_entry)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ if (ep->type == 0x0) {
+ return TYPE_UNUSED;
+ } else if (ep->type < 0x80) {
+ return TYPE_DELETED;
+ } else if (ep->type == 0x80) {
+ return TYPE_INVALID;
+ } else if (ep->type < 0xA0) {
+ if (ep->type == 0x81) {
+ return TYPE_BITMAP;
+ } else if (ep->type == 0x82) {
+ return TYPE_UPCASE;
+ } else if (ep->type == 0x83) {
+ return TYPE_VOLUME;
+ } else if (ep->type == 0x85) {
+ if (GET16_A(ep->attr) & ATTR_SUBDIR)
+ return TYPE_DIR;
+ else
+ return TYPE_FILE;
+ }
+ return TYPE_CRITICAL_PRI;
+ } else if (ep->type < 0xC0) {
+ if (ep->type == 0xA0)
+ return TYPE_GUID;
+ else if (ep->type == 0xA1)
+ return TYPE_PADDING;
+ else if (ep->type == 0xA2)
+ return TYPE_ACLTAB;
+ return TYPE_BENIGN_PRI;
+ } else if (ep->type < 0xE0) {
+ if (ep->type == 0xC0)
+ return TYPE_STREAM;
+ else if (ep->type == 0xC1)
+ return TYPE_EXTEND;
+ else if (ep->type == 0xC2)
+ return TYPE_ACL;
+ return TYPE_CRITICAL_SEC;
+ }
+
+ return TYPE_BENIGN_SEC;
+}
+
+void fat_set_entry_type(struct dentry_t *p_entry, u32 type)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ if (type == TYPE_UNUSED)
+ *(ep->name) = 0x0;
+
+ else if (type == TYPE_DELETED)
+ *(ep->name) = 0xE5;
+
+ else if (type == TYPE_EXTEND)
+ ep->attr = ATTR_EXTEND;
+
+ else if (type == TYPE_DIR)
+ ep->attr = ATTR_SUBDIR;
+
+ else if (type == TYPE_FILE)
+ ep->attr = ATTR_ARCHIVE;
+
+ else if (type == TYPE_SYMLINK)
+ ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
+}
+
+void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ if (type == TYPE_UNUSED) {
+ ep->type = 0x0;
+ } else if (type == TYPE_DELETED) {
+ ep->type &= ~0x80;
+ } else if (type == TYPE_STREAM) {
+ ep->type = 0xC0;
+ } else if (type == TYPE_EXTEND) {
+ ep->type = 0xC1;
+ } else if (type == TYPE_BITMAP) {
+ ep->type = 0x81;
+ } else if (type == TYPE_UPCASE) {
+ ep->type = 0x82;
+ } else if (type == TYPE_VOLUME) {
+ ep->type = 0x83;
+ } else if (type == TYPE_DIR) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_SUBDIR);
+ } else if (type == TYPE_FILE) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_ARCHIVE);
+ } else if (type == TYPE_SYMLINK) {
+ ep->type = 0x85;
+ SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK);
+ }
+}
+
+u32 fat_get_entry_attr(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ return (u32)ep->attr;
+}
+
+u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ return (u32)GET16_A(ep->attr);
+}
+
+void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ ep->attr = (u8)attr;
+}
+
+void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+{
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ SET16_A(ep->attr, (u16)attr);
+}
+
+u8 fat_get_entry_flag(struct dentry_t *p_entry)
+{
+ return 0x01;
+}
+
+u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ return ep->flags;
+}
+
+void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+{
+}
+
+void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ ep->flags = flags;
+}
+
+u32 fat_get_entry_clu0(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ return ((u32)GET16_A(ep->start_clu_hi) << 16) |
+ GET16_A(ep->start_clu_lo);
+}
+
+u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ return GET32_A(ep->start_clu);
+}
+
+void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
+ SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
+}
+
+void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ SET32_A(ep->start_clu, start_clu);
+}
+
+u64 fat_get_entry_size(struct dentry_t *p_entry)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ return (u64)GET32_A(ep->size);
+}
+
+u64 exfat_get_entry_size(struct dentry_t *p_entry)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ return GET64_A(ep->valid_size);
+}
+
+void fat_set_entry_size(struct dentry_t *p_entry, u64 size)
+{
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ SET32_A(ep->size, (u32)size);
+}
+
+void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+{
+ struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
+
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
+}
+
+void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t = 0x00, d = 0x21;
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ switch (mode) {
+ case TM_CREATE:
+ t = GET16_A(ep->create_time);
+ d = GET16_A(ep->create_date);
+ break;
+ case TM_MODIFY:
+ t = GET16_A(ep->modify_time);
+ d = GET16_A(ep->modify_date);
+ break;
+ }
+
+ tp->sec = (t & 0x001F) << 1;
+ tp->min = (t >> 5) & 0x003F;
+ tp->hour = (t >> 11);
+ tp->day = (d & 0x001F);
+ tp->mon = (d >> 5) & 0x000F;
+ tp->year = (d >> 9);
+}
+
+void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t = 0x00, d = 0x21;
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ switch (mode) {
+ case TM_CREATE:
+ t = GET16_A(ep->create_time);
+ d = GET16_A(ep->create_date);
+ break;
+ case TM_MODIFY:
+ t = GET16_A(ep->modify_time);
+ d = GET16_A(ep->modify_date);
+ break;
+ case TM_ACCESS:
+ t = GET16_A(ep->access_time);
+ d = GET16_A(ep->access_date);
+ break;
+ }
+
+ tp->sec = (t & 0x001F) << 1;
+ tp->min = (t >> 5) & 0x003F;
+ tp->hour = (t >> 11);
+ tp->day = (d & 0x001F);
+ tp->mon = (d >> 5) & 0x000F;
+ tp->year = (d >> 9);
+}
+
+void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t, d;
+ struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
+
+ t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+ d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+ switch (mode) {
+ case TM_CREATE:
+ SET16_A(ep->create_time, t);
+ SET16_A(ep->create_date, d);
+ break;
+ case TM_MODIFY:
+ SET16_A(ep->modify_time, t);
+ SET16_A(ep->modify_date, d);
+ break;
+ }
+}
+
+void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+ u8 mode)
+{
+ u16 t, d;
+ struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
+
+ t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
+ d = (tp->year << 9) | (tp->mon << 5) | tp->day;
+
+ switch (mode) {
+ case TM_CREATE:
+ SET16_A(ep->create_time, t);
+ SET16_A(ep->create_date, d);
+ break;
+ case TM_MODIFY:
+ SET16_A(ep->modify_time, t);
+ SET16_A(ep->modify_date, d);
+ break;
+ case TM_ACCESS:
+ SET16_A(ep->access_time, t);
+ SET16_A(ep->access_date, d);
+ break;
+ }
+}
+
+s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ u32 type, u32 start_clu, u64 size)
+{
+ sector_t sector;
+ struct dos_dentry_t *dos_ep;
+
+ dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!dos_ep)
+ return FFS_MEDIAERR;
+
+ init_dos_entry(dos_ep, type, start_clu);
+ buf_modify(sb, sector);
+
+ return FFS_SUCCESS;
+}
+
+s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, u32 type, u32 start_clu, u64 size)
+{
+ sector_t sector;
+ u8 flags;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+
+ flags = (type == TYPE_FILE) ? 0x01 : 0x03;
+
+ /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return FFS_MEDIAERR;
+
+ strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
+ &sector);
+ if (!strm_ep)
+ return FFS_MEDIAERR;
+
+ init_file_entry(file_ep, type);
+ buf_modify(sb, sector);
+
+ init_strm_entry(strm_ep, flags, start_clu, size);
+ buf_modify(sb, sector);
+
+ return FFS_SUCCESS;
+}
+
+static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i;
+ sector_t sector;
+ u8 chksum;
+ u16 *uniname = p_uniname->name;
+ struct dos_dentry_t *dos_ep;
+ struct ext_dentry_t *ext_ep;
+
+ dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!dos_ep)
+ return FFS_MEDIAERR;
+
+ dos_ep->lcase = p_dosname->name_case;
+ memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
+ buf_modify(sb, sector);
+
+ if ((--num_entries) > 0) {
+ chksum = calc_checksum_1byte((void *)dos_ep->name,
+ DOS_NAME_LENGTH, 0);
+
+ for (i = 1; i < num_entries; i++) {
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb,
+ p_dir,
+ entry - i,
+ &sector);
+ if (!ext_ep)
+ return FFS_MEDIAERR;
+
+ init_ext_entry(ext_ep, i, chksum, uniname);
+ buf_modify(sb, sector);
+ uniname += 13;
+ }
+
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry - i,
+ &sector);
+ if (!ext_ep)
+ return FFS_MEDIAERR;
+
+ init_ext_entry(ext_ep, i + 0x40, chksum, uniname);
+ buf_modify(sb, sector);
+ }
+
+ return FFS_SUCCESS;
+}
+
+static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 num_entries,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i;
+ sector_t sector;
+ u16 *uniname = p_uniname->name;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+ struct name_dentry_t *name_ep;
+
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return FFS_MEDIAERR;
+
+ file_ep->num_ext = (u8)(num_entries - 1);
+ buf_modify(sb, sector);
+
+ strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
+ &sector);
+ if (!strm_ep)
+ return FFS_MEDIAERR;
+
+ strm_ep->name_len = p_uniname->name_len;
+ SET16_A(strm_ep->name_hash, p_uniname->name_hash);
+ buf_modify(sb, sector);
+
+ for (i = 2; i < num_entries; i++) {
+ name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry + i,
+ &sector);
+ if (!name_ep)
+ return FFS_MEDIAERR;
+
+ init_name_entry(name_ep, uniname);
+ buf_modify(sb, sector);
+ uniname += 15;
+ }
+
+ update_dir_checksum(sb, p_dir, entry);
+
+ return FFS_SUCCESS;
+}
+
+void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu)
+{
+ struct timestamp_t tm, *tp;
+
+ fat_set_entry_type((struct dentry_t *)ep, type);
+ SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
+ SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
+ SET32_A(ep->size, 0);
+
+ tp = tm_current(&tm);
+ fat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ fat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ SET16_A(ep->access_date, 0);
+ ep->create_time_ms = 0;
+}
+
+void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
+{
+ int i;
+ bool end = false;
+
+ fat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->order = (u8)order;
+ ep->sysid = 0;
+ ep->checksum = chksum;
+ SET16_A(ep->start_clu, 0);
+
+ for (i = 0; i < 10; i += 2) {
+ if (!end) {
+ SET16(ep->unicode_0_4 + i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16(ep->unicode_0_4 + i, 0xFFFF);
+ }
+ }
+
+ for (i = 0; i < 12; i += 2) {
+ if (!end) {
+ SET16_A(ep->unicode_5_10 + i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16_A(ep->unicode_5_10 + i, 0xFFFF);
+ }
+ }
+
+ for (i = 0; i < 4; i += 2) {
+ if (!end) {
+ SET16_A(ep->unicode_11_12 + i, *uniname);
+ if (*uniname == 0x0)
+ end = true;
+ else
+ uniname++;
+ } else {
+ SET16_A(ep->unicode_11_12 + i, 0xFFFF);
+ }
+ }
+}
+
+void init_file_entry(struct file_dentry_t *ep, u32 type)
+{
+ struct timestamp_t tm, *tp;
+
+ exfat_set_entry_type((struct dentry_t *)ep, type);
+
+ tp = tm_current(&tm);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
+ ep->create_time_ms = 0;
+ ep->modify_time_ms = 0;
+ ep->access_time_ms = 0;
+}
+
+void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
+{
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
+ ep->flags = flags;
+ SET32_A(ep->start_clu, start_clu);
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
+}
+
+void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
+{
+ int i;
+
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->flags = 0x0;
+
+ for (i = 0; i < 30; i++, i++) {
+ SET16_A(ep->unicode_0_14 + i, *uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
+
+void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
+{
+ int i;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = num_entries - 1; i >= order; i--) {
+ ep = get_entry_in_dir(sb, p_dir, entry - i, &sector);
+ if (!ep)
+ return;
+
+ p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ buf_modify(sb, sector);
+ }
+}
+
+void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
+{
+ int i;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = order; i < num_entries; i++) {
+ ep = get_entry_in_dir(sb, p_dir, entry + i, &sector);
+ if (!ep)
+ return;
+
+ p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
+ buf_modify(sb, sector);
+ }
+}
+
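+/*
+ * Recompute the 16-bit checksum over an exFAT entry set (the file entry
+ * plus all of its secondary entries) and store the result in the file
+ * entry's checksum field.
+ */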
+void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry)
+{
+ int i, num_entries;
+ sector_t sector;
+ u16 chksum;
+ struct file_dentry_t *file_ep;
+ struct dentry_t *ep;
+
+ file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ &sector);
+ if (!file_ep)
+ return;
+
+ buf_lock(sb, sector);
+
+ num_entries = (s32)file_ep->num_ext + 1;
+ chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
+ CS_DIR_ENTRY);
+
+ for (i = 1; i < num_entries; i++) {
+ ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
+ if (!ep) {
+ buf_unlock(sb, sector);
+ return;
+ }
+
+ chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
+ CS_DEFAULT);
+ }
+
+ SET16_A(file_ep->checksum, chksum);
+ buf_modify(sb, sector);
+ buf_unlock(sb, sector);
+}
+
+void update_dir_checksum_with_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es)
+{
+ struct dentry_t *ep;
+ u16 chksum = 0;
+ s32 chksum_type = CS_DIR_ENTRY, i;
+
+ ep = (struct dentry_t *)&(es->__buf);
+ for (i = 0; i < es->num_entries; i++) {
+ pr_debug("%s ep %p\n", __func__, ep);
+ chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
+ chksum_type);
+ ep++;
+ chksum_type = CS_DEFAULT;
+ }
+
+ ep = (struct dentry_t *)&(es->__buf);
+ SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
+ write_whole_entry_set(sb, es);
+}
+
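+/*
+ * Resolve the cluster that contains 'byte_offset' within the cluster chain
+ * described by 'p_dir', following FAT links unless the chain is contiguous.
+ */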
+static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
+ s32 byte_offset, u32 *clu)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ s32 clu_offset;
+ u32 cur_clu;
+
+ clu_offset = byte_offset >> p_fs->cluster_size_bits;
+ cur_clu = p_dir->dir;
+
+ if (p_dir->flags == 0x03) {
+ cur_clu += clu_offset;
+ } else {
+ while (clu_offset > 0) {
+ if (FAT_read(sb, cur_clu, &cur_clu) == -1)
+ return FFS_MEDIAERR;
+ clu_offset--;
+ }
+ }
+
+ if (clu)
+ *clu = cur_clu;
+ return FFS_SUCCESS;
+}
+
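+/*
+ * Translate directory entry index 'entry' in 'p_dir' into an absolute
+ * sector number and a byte offset within that sector.
+ */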
+s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+ sector_t *sector, s32 *offset)
+{
+ s32 off, ret;
+ u32 clu = 0;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ off = entry << DENTRY_SIZE_BITS;
+
+ if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */
+ *offset = off & p_bd->sector_size_mask;
+ *sector = off >> p_bd->sector_size_bits;
+ *sector += p_fs->root_start_sector;
+ } else {
+ ret = _walk_fat_chain(sb, p_dir, off, &clu);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ /* byte offset in cluster */
+ off &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ *offset = off & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ *sector = off >> p_bd->sector_size_bits;
+ *sector += START_SECTOR(clu);
+ }
+ return FFS_SUCCESS;
+}
+
+struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
+ s32 offset)
+{
+ u8 *buf;
+
+ buf = buf_getblk(sb, sector);
+
+ if (!buf)
+ return NULL;
+
+ return (struct dentry_t *)(buf + offset);
+}
+
+struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, sector_t *sector)
+{
+ s32 off;
+ sector_t sec;
+ u8 *buf;
+
+ if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS)
+ return NULL;
+
+ buf = buf_getblk(sb, sec);
+
+ if (!buf)
+ return NULL;
+
+ if (sector)
+ *sector = sec;
+ return (struct dentry_t *)(buf + off);
+}
+
+/* returns a set of dentries for a file or dir.
+ * Note that this is a copy (dump) of the dentries, so the caller must call
+ * write_whole_entry_set() (or write_partial_entries_in_entry_set()) to apply
+ * changes made in this entry set to the real device.
+ * in:
+ * sb+p_dir+entry: indicates a file/dir
+ * type: specifies how many dentries should be included.
+ * out:
+ * file_ep: will point to the first dentry (= file dentry) on success
+ * return:
+ * pointer to the entry set on success,
+ * NULL on failure.
+ */
+
+#define ES_MODE_STARTED 0
+#define ES_MODE_GET_FILE_ENTRY 1
+#define ES_MODE_GET_STRM_ENTRY 2
+#define ES_MODE_GET_NAME_ENTRY 3
+#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4
+struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u32 type,
+ struct dentry_t **file_ep)
+{
+ s32 off, ret, byte_offset;
+ u32 clu = 0;
+ sector_t sec;
+ u32 entry_type;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct entry_set_cache_t *es = NULL;
+ struct dentry_t *ep, *pos;
+ u8 *buf;
+ u8 num_entries;
+ s32 mode = ES_MODE_STARTED;
+ size_t bufsize;
+
+ pr_debug("%s entered p_dir dir %u flags %x size %d\n",
+ __func__, p_dir->dir, p_dir->flags, p_dir->size);
+
+ byte_offset = entry << DENTRY_SIZE_BITS;
+ ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
+ if (ret != FFS_SUCCESS)
+ return NULL;
+
+ /* byte offset in cluster */
+ byte_offset &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ off = byte_offset & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ sec = byte_offset >> p_bd->sector_size_bits;
+ sec += START_SECTOR(clu);
+
+ buf = buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+
+ ep = (struct dentry_t *)(buf + off);
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type != TYPE_FILE)
+ && (entry_type != TYPE_DIR))
+ goto err_out;
+
+ if (type == ES_ALL_ENTRIES)
+ num_entries = ((struct file_dentry_t *)ep)->num_ext + 1;
+ else
+ num_entries = type;
+
+ bufsize = offsetof(struct entry_set_cache_t, __buf) + (num_entries) *
+ sizeof(struct dentry_t);
+ pr_debug("%s: trying to kmalloc %zx bytes for %d entries\n", __func__,
+ bufsize, num_entries);
+ es = kmalloc(bufsize, GFP_KERNEL);
+ if (!es)
+ goto err_out;
+
+ es->num_entries = num_entries;
+ es->sector = sec;
+ es->offset = off;
+ es->alloc_flag = p_dir->flags;
+
+ pos = (struct dentry_t *)&es->__buf;
+
+ while (num_entries) {
+ /*
+ * Instead of copying the whole sector, check every entry.
+ * This provides a minimum level of stability and consistency.
+ */
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED))
+ goto err_out;
+
+ switch (mode) {
+ case ES_MODE_STARTED:
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR))
+ mode = ES_MODE_GET_FILE_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_FILE_ENTRY:
+ if (entry_type == TYPE_STREAM)
+ mode = ES_MODE_GET_STRM_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_STRM_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ mode = ES_MODE_GET_NAME_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_NAME_ENTRY:
+ if (entry_type == TYPE_EXTEND)
+ break;
+ else if (entry_type == TYPE_STREAM)
+ goto err_out;
+ else if (entry_type & TYPE_CRITICAL_SEC)
+ mode = ES_MODE_GET_CRITICAL_SEC_ENTRY;
+ else
+ goto err_out;
+ break;
+ case ES_MODE_GET_CRITICAL_SEC_ENTRY:
+ if ((entry_type == TYPE_EXTEND) ||
+ (entry_type == TYPE_STREAM))
+ goto err_out;
+ else if ((entry_type & TYPE_CRITICAL_SEC) !=
+ TYPE_CRITICAL_SEC)
+ goto err_out;
+ break;
+ }
+
+ memcpy(pos, ep, sizeof(struct dentry_t));
+
+ if (--num_entries == 0)
+ break;
+
+ if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) <
+ (off & p_bd->sector_size_mask)) {
+ /* get the next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (FAT_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ buf = buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ off = 0;
+ ep = (struct dentry_t *)(buf);
+ } else {
+ ep++;
+ off += DENTRY_SIZE;
+ }
+ pos++;
+ }
+
+ if (file_ep)
+ *file_ep = (struct dentry_t *)&(es->__buf);
+
+ pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
+ __func__, es, (unsigned long long)es->sector, es->offset,
+ es->alloc_flag, es->num_entries, &es->__buf);
+ return es;
+err_out:
+ pr_debug("%s exited NULL (es %p)\n", __func__, es);
+ kfree(es);
+ return NULL;
+}
+
+void release_entry_set(struct entry_set_cache_t *es)
+{
+ pr_debug("%s es=%p\n", __func__, es);
+ kfree(es);
+}
+
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ sector_t sec, s32 off, u32 count)
+{
+ s32 num_entries, buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&(es->__buf);
+
+ pr_debug("%s entered es %p sec %llu off %d count %d\n",
+ __func__, es, (unsigned long long)sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+ /* write on a per-sector basis */
+ remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
+ copy_entries = min_t(s32,
+ remaining_byte_in_sector >> DENTRY_SIZE_BITS,
+ num_entries);
+ buf = buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
+ pr_debug("copying %d entries from %p to sector %llu\n",
+ copy_entries, (esbuf + buf_off),
+ (unsigned long long)sec);
+ memcpy(buf + off, esbuf + buf_off,
+ copy_entries << DENTRY_SIZE_BITS);
+ buf_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ clu = GET_CLUSTER_FROM_SECTOR(sec);
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (FAT_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ pr_debug("%s exited successfully\n", __func__);
+ return FFS_SUCCESS;
+err_out:
+ pr_debug("%s failed\n", __func__);
+ return FFS_ERROR;
+}
+
+/* write back all entries in entry set */
+s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector,
+ es->offset,
+ es->num_entries);
+}
+
+/* write back some entries in entry set */
+s32 write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es, struct dentry_t *ep, u32 count)
+{
+ s32 ret, byte_offset, off;
+ u32 clu = 0;
+ sector_t sec;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct chain_t dir;
+
+ /* validity check */
+ if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries)
+ return FFS_ERROR;
+
+ dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector);
+ dir.flags = es->alloc_flag;
+ dir.size = 0xffffffff; /* XXX */
+
+ byte_offset = (es->sector - START_SECTOR(dir.dir)) <<
+ p_bd->sector_size_bits;
+ byte_offset += ((void **)ep - &(es->__buf)) + es->offset;
+
+ ret = _walk_fat_chain(sb, &dir, byte_offset, &clu);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ /* byte offset in cluster */
+ byte_offset &= p_fs->cluster_size - 1;
+
+ /* byte offset in sector */
+ off = byte_offset & p_bd->sector_size_mask;
+
+ /* sector offset in cluster */
+ sec = byte_offset >> p_bd->sector_size_bits;
+ sec += START_SECTOR(clu);
+ return __write_partial_entries_in_entry_set(sb, es, sec, off, count);
+}
+
+/* search for "num_entries" contiguous empty (deleted or unused) entries */
+s32 search_deleted_or_unused_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 num_entries)
+{
+ int i, dentry, num_empty = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ if (p_fs->hint_uentry.dir == p_dir->dir) {
+ if (p_fs->hint_uentry.entry == -1)
+ return -1;
+
+ clu.dir = p_fs->hint_uentry.clu.dir;
+ clu.size = p_fs->hint_uentry.clu.size;
+ clu.flags = p_fs->hint_uentry.clu.flags;
+
+ dentry = p_fs->hint_uentry.entry;
+ } else {
+ p_fs->hint_uentry.entry = -1;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ dentry = 0;
+ }
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ i = dentry % dentries_per_clu;
+ else
+ i = dentry & (dentries_per_clu - 1);
+
+ for (; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -1;
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED) {
+ num_empty++;
+ if (p_fs->hint_uentry.entry == -1) {
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = dentry;
+
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = clu.size;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ } else if (type == TYPE_DELETED) {
+ num_empty++;
+ } else {
+ num_empty = 0;
+ }
+
+ if (num_empty >= num_entries) {
+ p_fs->hint_uentry.dir = CLUSTER_32(~0);
+ p_fs->hint_uentry.entry = -1;
+
+ if (p_fs->vol_type == EXFAT)
+ return dentry - (num_entries - 1);
+ else
+ return dentry;
+ }
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
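+/*
+ * Find room for 'num_entries' consecutive directory entries, growing the
+ * directory one cluster at a time (and, on exFAT, updating the directory
+ * size in its parent entry) until the search succeeds.
+ */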
+s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
+{
+ s32 ret, dentry;
+ u32 last_clu;
+ sector_t sector;
+ u64 size = 0;
+ struct chain_t clu;
+ struct dentry_t *ep = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ return search_deleted_or_unused_entry(sb, p_dir, num_entries);
+
+ while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (p_fs->vol_type == EXFAT) {
+ if (p_dir->dir != p_fs->root_dir)
+ size = i_size_read(inode);
+ }
+
+ last_clu = find_last_cluster(sb, p_dir);
+ clu.dir = last_clu + 1;
+ clu.size = 0;
+ clu.flags = p_dir->flags;
+
+ /* (1) allocate a cluster */
+ ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
+ if (ret < 1)
+ return -1;
+
+ if (clear_cluster(sb, clu.dir) != FFS_SUCCESS)
+ return -1;
+
+ /* (2) append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size);
+ p_dir->flags = 0x01;
+ p_fs->hint_uentry.clu.flags = 0x01;
+ }
+ if (clu.flags == 0x01)
+ if (FAT_write(sb, last_clu, clu.dir) < 0)
+ return -1;
+
+ if (p_fs->hint_uentry.entry == -1) {
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS);
+
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = 0;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ p_fs->hint_uentry.clu.size++;
+ p_dir->size++;
+
+ /* (3) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ if (p_dir->dir != p_fs->root_dir) {
+ size += p_fs->cluster_size;
+
+ ep = get_entry_in_dir(sb, &fid->dir,
+ fid->entry + 1, &sector);
+ if (!ep)
+ return -1;
+ p_fs->fs_func->set_entry_size(ep, size);
+ p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ buf_modify(sb, sector);
+
+ update_dir_checksum(sb, &(fid->dir),
+ fid->entry);
+ }
+ }
+
+ i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
+ EXFAT_I(inode)->mmu_private += p_fs->cluster_size;
+ EXFAT_I(inode)->fid.size += p_fs->cluster_size;
+ EXFAT_I(inode)->fid.flags = p_dir->flags;
+ inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9);
+ }
+
+ return dentry;
+}
+
+/* return values of fat_find_dir_entry()
+ * >= 0 : position of the directory entry with the given name
+ * -1 : (root dir, ".") it is the root dir itself
+ * -2 : entry with the name does not exist
+ */
+s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type)
+{
+ int i, dentry = 0, len;
+ s32 order = 0;
+ bool is_feasible_entry = true, has_ext_entry = false;
+ s32 dentries_per_clu;
+ u32 entry_type;
+ u16 entry_uniname[14], *uniname = NULL, unichar;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct dos_dentry_t *dos_ep;
+ struct ext_dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == p_fs->root_dir) {
+ if ((!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_CUR_DIR_NAME)) ||
+ (!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_PAR_DIR_NAME)))
+ return -1; // special case, root directory itself
+ }
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ if (is_feasible_entry && has_ext_entry)
+ return dentry;
+
+ dos_ep = (struct dos_dentry_t *)ep;
+ if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name))
+ return dentry;
+ }
+ is_feasible_entry = true;
+ has_ext_entry = false;
+ } else if (entry_type == TYPE_EXTEND) {
+ if (is_feasible_entry) {
+ ext_ep = (struct ext_dentry_t *)ep;
+ if (ext_ep->order > 0x40) {
+ order = (s32)(ext_ep->order - 0x40);
+ uniname = p_uniname->name + 13 * (order - 1);
+ } else {
+ order = (s32)ext_ep->order;
+ uniname -= 13;
+ }
+
+ len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
+
+ unichar = *(uniname + len);
+ *(uniname + len) = 0x0;
+
+ if (nls_uniname_cmp(sb, uniname, entry_uniname))
+ is_feasible_entry = false;
+
+ *(uniname + len) = unichar;
+ }
+ has_ext_entry = true;
+ } else if (entry_type == TYPE_UNUSED) {
+ return -2;
+ }
+ is_feasible_entry = true;
+ has_ext_entry = false;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -2;
+ }
+
+ return -2;
+}
+
+/* return values of exfat_find_dir_entry()
+ * >= 0 : position of the directory entry with the given name
+ * -1 : (root dir, ".") it is the root dir itself
+ * -2 : entry with the name does not exist
+ */
+s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 num_entries,
+ struct dos_name_t *p_dosname, u32 type)
+{
+ int i = 0, dentry = 0, num_ext_entries = 0, len, step;
+ s32 order = 0;
+ bool is_feasible_entry = false;
+ s32 dentries_per_clu, num_empty = 0;
+ u32 entry_type;
+ u16 entry_uniname[16], *uniname = NULL, unichar;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct file_dentry_t *file_ep;
+ struct strm_dentry_t *strm_ep;
+ struct name_dentry_t *name_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == p_fs->root_dir) {
+ if ((!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_CUR_DIR_NAME)) ||
+ (!nls_uniname_cmp(sb, p_uniname->name,
+ (u16 *)UNI_PAR_DIR_NAME)))
+ return -1; // special case, root directory itself
+ }
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ p_fs->hint_uentry.dir = p_dir->dir;
+ p_fs->hint_uentry.entry = -1;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ while (i < dentries_per_clu) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -2;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+ step = 1;
+
+ if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) {
+ is_feasible_entry = false;
+
+ if (p_fs->hint_uentry.entry == -1) {
+ num_empty++;
+
+ if (num_empty == 1) {
+ p_fs->hint_uentry.clu.dir = clu.dir;
+ p_fs->hint_uentry.clu.size = clu.size;
+ p_fs->hint_uentry.clu.flags = clu.flags;
+ }
+ if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED))
+ p_fs->hint_uentry.entry = dentry - (num_empty - 1);
+ }
+
+ if (entry_type == TYPE_UNUSED)
+ return -2;
+ } else {
+ num_empty = 0;
+
+ if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
+ file_ep = (struct file_dentry_t *)ep;
+ if ((type == TYPE_ALL) || (type == entry_type)) {
+ num_ext_entries = file_ep->num_ext;
+ is_feasible_entry = true;
+ } else {
+ is_feasible_entry = false;
+ step = file_ep->num_ext + 1;
+ }
+ } else if (entry_type == TYPE_STREAM) {
+ if (is_feasible_entry) {
+ strm_ep = (struct strm_dentry_t *)ep;
+ if (p_uniname->name_hash == GET16_A(strm_ep->name_hash) &&
+ p_uniname->name_len == strm_ep->name_len) {
+ order = 1;
+ } else {
+ is_feasible_entry = false;
+ step = num_ext_entries;
+ }
+ }
+ } else if (entry_type == TYPE_EXTEND) {
+ if (is_feasible_entry) {
+ name_ep = (struct name_dentry_t *)ep;
+
+ if ((++order) == 2)
+ uniname = p_uniname->name;
+ else
+ uniname += 15;
+
+ len = extract_uni_name_from_name_entry(name_ep,
+ entry_uniname, order);
+
+ unichar = *(uniname + len);
+ *(uniname + len) = 0x0;
+
+ if (nls_uniname_cmp(sb, uniname, entry_uniname)) {
+ is_feasible_entry = false;
+ step = num_ext_entries - order + 1;
+ } else if (order == num_ext_entries) {
+ p_fs->hint_uentry.dir = CLUSTER_32(~0);
+ p_fs->hint_uentry.entry = -1;
+ return dentry - (num_ext_entries);
+ }
+
+ *(uniname + len) = unichar;
+ }
+ } else {
+ is_feasible_entry = false;
+ }
+ }
+
+ i += step;
+ dentry += step;
+ }
+
+ i -= dentries_per_clu;
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -2;
+ }
+ }
+
+ return -2;
+}
+
+s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry)
+{
+ s32 count = 0;
+ u8 chksum;
+ struct dos_dentry_t *dos_ep = (struct dos_dentry_t *)p_entry;
+ struct ext_dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ chksum = calc_checksum_1byte((void *)dos_ep->name, DOS_NAME_LENGTH, 0);
+
+ for (entry--; entry >= 0; entry--) {
+ ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
+ entry, NULL);
+ if (!ext_ep)
+ return -1;
+
+ if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) ==
+ TYPE_EXTEND) && (ext_ep->checksum == chksum)) {
+ count++;
+ if (ext_ep->order > 0x40)
+ return count;
+ } else {
+ return count;
+ }
+ }
+
+ return count;
+}
+
+s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, struct dentry_t *p_entry)
+{
+ int i, count = 0;
+ u32 type;
+ struct file_dentry_t *file_ep = (struct file_dentry_t *)p_entry;
+ struct dentry_t *ext_ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) {
+ ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL);
+ if (!ext_ep)
+ return -1;
+
+ type = p_fs->fs_func->get_entry_type(ext_ep);
+ if ((type == TYPE_EXTEND) || (type == TYPE_STREAM))
+ count++;
+ else
+ return count;
+ }
+
+ return count;
+}
+
+s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
+ u32 type)
+{
+ int i, count = 0;
+ s32 dentries_per_clu;
+ u32 entry_type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ return -1;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if (entry_type == TYPE_UNUSED)
+ return count;
+ if (!(type & TYPE_CRITICAL_PRI) &&
+ !(type & TYPE_BENIGN_PRI))
+ continue;
+
+ if ((type == TYPE_ALL) || (type == entry_type))
+ count++;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return -1;
+ }
+ }
+
+ return count;
+}
+
+bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
+{
+ int i, count = 0;
+ s32 dentries_per_clu;
+ u32 type;
+ struct chain_t clu;
+ struct dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.size = p_dir->size;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = get_entry_in_dir(sb, &clu, i, NULL);
+ if (!ep)
+ break;
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED)
+ return true;
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ return false;
+
+ if (p_fs->vol_type == EXFAT)
+ return false;
+ if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2))
+ return false;
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ }
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * Name Conversion Functions
+ */
+
+/* input : dir, uni_name
+ * output : num_of_entry, dos_name(format : aaaaaa~1.bbb)
+ */
+s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, s32 *entries,
+ struct dos_name_t *p_dosname)
+{
+ s32 ret, num_entries;
+ bool lossy = false;
+ char **r;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
+ if (num_entries == 0)
+ return FFS_INVALIDPATH;
+
+ if (p_fs->vol_type != EXFAT) {
+ nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
+
+ if (lossy) {
+ ret = fat_generate_dos_name(sb, p_dir, p_dosname);
+ if (ret)
+ return ret;
+ } else {
+ for (r = reserved_names; *r; r++) {
+ if (!strncmp((void *)p_dosname->name, *r, 8))
+ return FFS_INVALIDPATH;
+ }
+
+ if (p_dosname->name_case != 0xFF)
+ num_entries = 1;
+ }
+
+ if (num_entries > 1)
+ p_dosname->name_case = 0x0;
+ }
+
+ *entries = num_entries;
+
+ return FFS_SUCCESS;
+}
+
+void get_uni_name_from_dos_entry(struct super_block *sb,
+ struct dos_dentry_t *ep,
+ struct uni_name_t *p_uniname, u8 mode)
+{
+ struct dos_name_t dos_name;
+
+ if (mode == 0x0)
+ dos_name.name_case = 0x0;
+ else
+ dos_name.name_case = ep->lcase;
+
+ memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
+ nls_dosname_to_uniname(sb, p_uniname, &dos_name);
+}
+
+void fat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname)
+{
+ int i;
+ struct ext_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ for (entry--, i = 1; entry >= 0; entry--, i++) {
+ ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
+ NULL);
+ if (!ep)
+ return;
+
+ if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) ==
+ TYPE_EXTEND) {
+ extract_uni_name_from_ext_entry(ep, uniname, i);
+ if (ep->order > 0x40)
+ return;
+ } else {
+ return;
+ }
+
+ uniname += 13;
+ }
+}
+
+void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+ struct chain_t *p_dir, s32 entry,
+ u16 *uniname)
+{
+ int i;
+ struct dentry_t *ep;
+ struct entry_set_cache_t *es;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ if (!es || es->num_entries < 3) {
+ if (es)
+ release_entry_set(es);
+ return;
+ }
+
+ ep += 2;
+
+ /*
+ * First entry : file entry
+ * Second entry : stream-extension entry
+ * Third entry : first file-name entry
+ * So, the index of first file-name dentry should start from 2.
+ */
+ for (i = 2; i < es->num_entries; i++, ep++) {
+ if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND)
+ extract_uni_name_from_name_entry((struct name_dentry_t *)
+ ep, uniname, i);
+ else
+ goto out;
+ uniname += 15;
+ }
+
+out:
+ release_entry_set(es);
+}
+
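+/*
+ * A FAT long-file-name (extended) entry carries 13 UTF-16 characters,
+ * split across three fields: 5 in unicode_0_4, 6 in unicode_5_10 and
+ * 2 in unicode_11_12. Extraction stops early at the first NUL character.
+ */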
+s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname,
+ s32 order)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 10; i += 2) {
+ *uniname = GET16(ep->unicode_0_4 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ if (order < 20) {
+ for (i = 0; i < 12; i += 2) {
+ *uniname = GET16_A(ep->unicode_5_10 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+ } else {
+ for (i = 0; i < 8; i += 2) {
+ *uniname = GET16_A(ep->unicode_5_10 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+ *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */
+ return len;
+ }
+
+ for (i = 0; i < 4; i += 2) {
+ *uniname = GET16_A(ep->unicode_11_12 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ *uniname = 0x0;
+ return len;
+}
+
+s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
+ s32 order)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 30; i += 2) {
+ *uniname = GET16_A(ep->unicode_0_14 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
+ }
+
+ *uniname = 0x0;
+ return len;
+}
+
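+/*
+ * Pick a numeric "~N" tail for a short name that collided after the
+ * lossy 8.3 conversion: scan the directory, mark every N (1..1023)
+ * already in use in a small bitmap, and take the lowest free value.
+ */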
+s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
+ struct dos_name_t *p_dosname)
+{
+ int i, j, count = 0;
+ bool count_begin = false;
+ s32 dentries_per_clu;
+ u32 type;
+ u8 bmap[128/* 1 ~ 1023 */];
+ struct chain_t clu;
+ struct dos_dentry_t *ep;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ memset(bmap, 0, sizeof(bmap));
+ exfat_bitmap_set(bmap, 0);
+
+ if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+ else
+ dentries_per_clu = p_fs->dentries_per_clu;
+
+ clu.dir = p_dir->dir;
+ clu.flags = p_dir->flags;
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ for (i = 0; i < dentries_per_clu; i++) {
+ ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu,
+ i, NULL);
+ if (!ep)
+ return FFS_MEDIAERR;
+
+ type = p_fs->fs_func->get_entry_type((struct dentry_t *)
+ ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ count = 0;
+ count_begin = false;
+
+ for (j = 0; j < 8; j++) {
+ if (ep->name[j] == ' ')
+ break;
+
+ if (ep->name[j] == '~') {
+ count_begin = true;
+ } else if (count_begin) {
+ if ((ep->name[j] >= '0') &&
+ (ep->name[j] <= '9')) {
+ count = count * 10 +
+ (ep->name[j] - '0');
+ } else {
+ count = 0;
+ count_begin = false;
+ }
+ }
+ }
+
+ if ((count > 0) && (count < 1024))
+ exfat_bitmap_set(bmap, count);
+ }
+
+ if (p_dir->dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ return FFS_MEDIAERR;
+ }
+
+ count = 0;
+ for (i = 0; i < 128; i++) {
+ if (bmap[i] != 0xFF) {
+ for (j = 0; j < 8; j++) {
+ if (exfat_bitmap_test(&bmap[i], j) == 0) {
+ count = (i << 3) + j;
+ break;
+ }
+ }
+ if (count != 0)
+ break;
+ }
+ }
+
+ if ((count == 0) || (count >= 1024))
+ return FFS_FILEEXIST;
+ fat_attach_count_to_dos_name(p_dosname->name, count);
+
+ /* Now dos_name has DOS~????.EXT */
+ return FFS_SUCCESS;
+}
+
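+/*
+ * Overwrite the tail of the 8-character base name with the "~N" suffix.
+ * For example, with count 3, "FILENAME" becomes "FILENA~3" and
+ * "AB      " becomes "AB~3    ".
+ */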
+void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
+{
+ int i, j, length;
+ char str_count[6];
+
+ snprintf(str_count, sizeof(str_count), "~%d", count);
+ length = strlen(str_count);
+
+ i = 0;
+ j = 0;
+ while (j <= (8 - length)) {
+ i = j;
+ if (dosname[j] == ' ')
+ break;
+ if (dosname[j] & 0x80)
+ j += 2;
+ else
+ j++;
+ }
+
+ for (j = 0; j < length; i++, j++)
+ dosname[i] = (u8)str_count[j];
+
+ if (i == 7)
+ dosname[7] = ' ';
+}
+
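+/*
+ * Each extended entry stores 13 characters, so e.g. a 16-character name
+ * needs (16 - 1) / 13 + 2 = 3 entries: one DOS entry plus two extended
+ * entries.
+ */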
+s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 dos name entry + extended entries */
+ return (len - 1) / 13 + 2;
+}
+
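+/*
+ * Each exFAT name entry stores 15 characters, so e.g. a 16-character
+ * name needs (16 - 1) / 15 + 3 = 4 entries: file entry, stream-extension
+ * entry and two name entries.
+ */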
+s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+{
+ s32 len;
+
+ len = p_uniname->name_len;
+ if (len == 0)
+ return 0;
+
+ /* 1 file entry + 1 stream entry + name entries */
+ return (len - 1) / 15 + 3;
+}
+
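+/*
+ * Rotate the running checksum right by one bit, then add the next byte.
+ * For example, chksum 0x03 with input byte 0x41 gives
+ * ror8(0x03) + 0x41 = 0x81 + 0x41 = 0xC2.
+ */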
+u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
+
+ return chksum;
+}
+
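+/*
+ * 16-bit rotate-right-and-add checksum. For CS_DIR_ENTRY the bytes at
+ * offsets 2 and 3 are skipped: that is where the SetChecksum field of
+ * the exFAT file directory entry lives, so the checksum excludes itself.
+ */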
+u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_DIR_ENTRY:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 2) || (i == 3))
+ continue;
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+ break;
+ default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 15) |
+ ((chksum & 0xFFFE) >> 1)) + (u16)*c;
+ }
+
+ return chksum;
+}
+
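+/*
+ * 32-bit rotate-right-and-add checksum. For CS_PBR_SECTOR the bytes at
+ * offsets 106, 107 and 112 are skipped; they hold VolumeFlags and
+ * PercentInUse in the exFAT boot sector, which change at runtime and are
+ * therefore excluded from the boot checksum.
+ */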
+u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ switch (type) {
+ case CS_PBR_SECTOR:
+ for (i = 0; i < len; i++, c++) {
+ if ((i == 106) || (i == 107) || (i == 112))
+ continue;
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+ break;
+ default:
+ for (i = 0; i < len; i++, c++)
+ chksum = (((chksum & 1) << 31) |
+ ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
+ }
+
+ return chksum;
+}
+
+/*
+ * Name Resolution Functions
+ */
+
+/* return values of resolve_path()
+ * > 0 : return the length of the path
+ * < 0 : return error
+ */
+s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname)
+{
+ bool lossy = false;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ if (strscpy(name_buf, path, sizeof(name_buf)) < 0)
+ return FFS_INVALIDPATH;
+
+ nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
+ if (lossy)
+ return FFS_INVALIDPATH;
+
+ fid->size = i_size_read(inode);
+
+ p_dir->dir = fid->start_clu;
+ p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
+ p_dir->flags = fid->flags;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * File Operation Functions
+ */
+static struct fs_func fat_fs_func = {
+ .alloc_cluster = fat_alloc_cluster,
+ .free_cluster = fat_free_cluster,
+ .count_used_clusters = fat_count_used_clusters,
+
+ .init_dir_entry = fat_init_dir_entry,
+ .init_ext_entry = fat_init_ext_entry,
+ .find_dir_entry = fat_find_dir_entry,
+ .delete_dir_entry = fat_delete_dir_entry,
+ .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry,
+ .count_ext_entries = fat_count_ext_entries,
+ .calc_num_entries = fat_calc_num_entries,
+
+ .get_entry_type = fat_get_entry_type,
+ .set_entry_type = fat_set_entry_type,
+ .get_entry_attr = fat_get_entry_attr,
+ .set_entry_attr = fat_set_entry_attr,
+ .get_entry_flag = fat_get_entry_flag,
+ .set_entry_flag = fat_set_entry_flag,
+ .get_entry_clu0 = fat_get_entry_clu0,
+ .set_entry_clu0 = fat_set_entry_clu0,
+ .get_entry_size = fat_get_entry_size,
+ .set_entry_size = fat_set_entry_size,
+ .get_entry_time = fat_get_entry_time,
+ .set_entry_time = fat_set_entry_time,
+};
+
+s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ s32 num_reserved, num_root_sectors;
+ struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
+ num_root_sectors = ((num_root_sectors - 1) >>
+ p_bd->sector_size_bits) + 1;
+
+ p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
+ p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->FAT2_start_sector +
+ p_fs->num_FAT_sectors;
+ p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors;
+
+ p_fs->num_sectors = GET16(p_bpb->num_sectors);
+ if (p_fs->num_sectors == 0)
+ p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
+
+ num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
+ p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
+ p_fs->sectors_per_clu_bits) + 2;
+ /* because the cluster index starts with 2 */
+
+ if (p_fs->num_clusters < FAT12_THRESHOLD)
+ p_fs->vol_type = FAT12;
+ else
+ p_fs->vol_type = FAT16;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = 0;
+ p_fs->dentries_in_root = GET16(p_bpb->num_root_entries);
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = VOL_CLEAN;
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = UINT_MAX;
+
+ p_fs->fs_func = &fat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ s32 num_reserved;
+ struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
+ p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->FAT2_start_sector +
+ p_fs->num_FAT_sectors;
+ p_fs->data_start_sector = p_fs->root_start_sector;
+
+ p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
+ num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
+
+ p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
+ p_fs->sectors_per_clu_bits) + 2;
+ /* because the cluster index starts with 2 */
+
+ p_fs->vol_type = FAT32;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = GET32(p_bpb->root_cluster);
+ p_fs->dentries_in_root = 0;
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = VOL_CLEAN;
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = UINT_MAX;
+
+ p_fs->fs_func = &fat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+static struct fs_func exfat_fs_func = {
+ .alloc_cluster = exfat_alloc_cluster,
+ .free_cluster = exfat_free_cluster,
+ .count_used_clusters = exfat_count_used_clusters,
+
+ .init_dir_entry = exfat_init_dir_entry,
+ .init_ext_entry = exfat_init_ext_entry,
+ .find_dir_entry = exfat_find_dir_entry,
+ .delete_dir_entry = exfat_delete_dir_entry,
+ .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry,
+ .count_ext_entries = exfat_count_ext_entries,
+ .calc_num_entries = exfat_calc_num_entries,
+
+ .get_entry_type = exfat_get_entry_type,
+ .set_entry_type = exfat_set_entry_type,
+ .get_entry_attr = exfat_get_entry_attr,
+ .set_entry_attr = exfat_set_entry_attr,
+ .get_entry_flag = exfat_get_entry_flag,
+ .set_entry_flag = exfat_set_entry_flag,
+ .get_entry_clu0 = exfat_get_entry_clu0,
+ .set_entry_clu0 = exfat_set_entry_clu0,
+ .get_entry_size = exfat_get_entry_size,
+ .set_entry_size = exfat_set_entry_size,
+ .get_entry_time = exfat_get_entry_time,
+ .set_entry_time = exfat_set_entry_time,
+};
+
+s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
+{
+ struct bpbex_t *p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ if (p_bpb->num_fats == 0)
+ return FFS_FORMATERR;
+
+ p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
+ p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
+ p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
+ p_bd->sector_size_bits;
+ p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
+
+ p_fs->num_FAT_sectors = GET32(p_bpb->fat_length);
+
+ p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset);
+ if (p_bpb->num_fats == 1)
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
+ else
+ p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
+ p_fs->num_FAT_sectors;
+
+ p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset);
+ p_fs->data_start_sector = p_fs->root_start_sector;
+
+ p_fs->num_sectors = GET64(p_bpb->vol_length);
+ p_fs->num_clusters = GET32(p_bpb->clu_count) + 2;
+ /* because the cluster index starts with 2 */
+
+ p_fs->vol_type = EXFAT;
+ p_fs->vol_id = GET32(p_bpb->vol_serial);
+
+ p_fs->root_dir = GET32(p_bpb->root_cluster);
+ p_fs->dentries_in_root = 0;
+ p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
+ DENTRY_SIZE_BITS);
+
+ p_fs->vol_flag = (u32)GET16(p_bpb->vol_flags);
+ p_fs->clu_srch_ptr = 2;
+ p_fs->used_clusters = UINT_MAX;
+
+ p_fs->fs_func = &exfat_fs_func;
+
+ return FFS_SUCCESS;
+}
+
+s32 create_dir(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, struct file_id_t *fid)
+{
+ s32 ret, dentry, num_entries;
+ u64 size;
+ struct chain_t clu;
+ struct dos_name_t dos_name, dot_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
+ &dos_name);
+ if (ret)
+ return ret;
+
+ /* find_empty_entry must be called before alloc_cluster */
+ dentry = find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0)
+ return FFS_FULL;
+
+ clu.dir = CLUSTER_32(~0);
+ clu.size = 0;
+ clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+ /* (1) allocate a cluster */
+ ret = fs_func->alloc_cluster(sb, 1, &clu);
+ if (ret < 0)
+ return FFS_MEDIAERR;
+ else if (ret == 0)
+ return FFS_FULL;
+
+ ret = clear_cluster(sb, clu.dir);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ if (p_fs->vol_type == EXFAT) {
+ size = p_fs->cluster_size;
+ } else {
+ size = 0;
+
+ /* initialize the . and .. entries
+ * Information for . points to itself
+ * Information for .. points to parent dir
+ */
+
+ dot_name.name_case = 0x0;
+ memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
+
+ ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir,
+ 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
+
+ if (p_dir->dir == p_fs->root_dir)
+ ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
+ CLUSTER_32(0), 0);
+ else
+ ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
+ p_dir->dir, 0);
+
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL,
+ &dot_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+ }
+
+ /* (2) update the directory entry */
+ /* make sub-dir entry in parent directory */
+ ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
+ size);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fid->dir.dir = p_dir->dir;
+ fid->dir.size = p_dir->size;
+ fid->dir.flags = p_dir->flags;
+ fid->entry = dentry;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->size = size;
+ fid->start_clu = clu.dir;
+
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ return FFS_SUCCESS;
+}
+
+s32 create_file(struct inode *inode, struct chain_t *p_dir,
+ struct uni_name_t *p_uniname, u8 mode, struct file_id_t *fid)
+{
+ s32 ret, dentry, num_entries;
+ struct dos_name_t dos_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries,
+ &dos_name);
+ if (ret)
+ return ret;
+
+ /* find_empty_entry must be called before alloc_cluster() */
+ dentry = find_empty_entry(inode, p_dir, num_entries);
+ if (dentry < 0)
+ return FFS_FULL;
+
+ /* (1) update the directory entry */
+ /* fill the dos name directory entry information of the created file.
+ * the first cluster is not determined yet. (0)
+ */
+ ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
+ CLUSTER_32(0), 0);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fid->dir.dir = p_dir->dir;
+ fid->dir.size = p_dir->size;
+ fid->dir.flags = p_dir->flags;
+ fid->entry = dentry;
+
+ fid->attr = ATTR_ARCHIVE | mode;
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+
+ fid->type = TYPE_FILE;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ return FFS_SUCCESS;
+}
+
+void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
+{
+ s32 num_entries;
+ sector_t sector;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ ep = get_entry_in_dir(sb, p_dir, entry, &sector);
+ if (!ep)
+ return;
+
+ buf_lock(sb, sector);
+
+ /* buf_lock() must be called before count_ext_entries() */
+ num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
+ if (num_entries < 0) {
+ buf_unlock(sb, sector);
+ return;
+ }
+ num_entries++;
+
+ buf_unlock(sb, sector);
+
+ /* (1) update the directory entry */
+ fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
+}
+
+s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
+ struct uni_name_t *p_uniname, struct file_id_t *fid)
+{
+ s32 ret, newentry = -1, num_old_entries, num_new_entries;
+ sector_t sector_old, sector_new;
+ struct dos_name_t dos_name;
+ struct dentry_t *epold, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
+ if (!epold)
+ return FFS_MEDIAERR;
+
+ buf_lock(sb, sector_old);
+
+ /* buf_lock() must be called before count_ext_entries() */
+ num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
+ epold);
+ if (num_old_entries < 0) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+ num_old_entries++;
+
+ ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
+ &num_new_entries, &dos_name);
+ if (ret) {
+ buf_unlock(sb, sector_old);
+ return ret;
+ }
+
+ if (num_old_entries < num_new_entries) {
+ newentry = find_empty_entry(inode, p_dir, num_new_entries);
+ if (newentry < 0) {
+ buf_unlock(sb, sector_old);
+ return FFS_FULL;
+ }
+
+ epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
+ if (!epnew) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
+ fs_func->set_entry_attr(epnew,
+ fs_func->get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_old);
+
+ if (p_fs->vol_type == EXFAT) {
+ epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
+ &sector_old);
+ buf_lock(sb, sector_old);
+ epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
+ &sector_new);
+
+ if (!epold || !epnew) {
+ buf_unlock(sb, sector_old);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_old);
+ }
+
+ ret = fs_func->init_ext_entry(sb, p_dir, newentry,
+ num_new_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
+ num_old_entries);
+ fid->entry = newentry;
+ } else {
+ if (fs_func->get_entry_type(epold) == TYPE_FILE) {
+ fs_func->set_entry_attr(epold,
+ fs_func->get_entry_attr(epold) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_old);
+ buf_unlock(sb, sector_old);
+
+ ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
+ num_new_entries, p_uniname,
+ &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
+ num_old_entries);
+ }
+
+ return FFS_SUCCESS;
+}
+
+s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
+ struct chain_t *p_newdir, struct uni_name_t *p_uniname,
+ struct file_id_t *fid)
+{
+ s32 ret, newentry, num_new_entries, num_old_entries;
+ sector_t sector_mov, sector_new;
+ struct chain_t clu;
+ struct dos_name_t dos_name;
+ struct dentry_t *epmov, *epnew;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
+ if (!epmov)
+ return FFS_MEDIAERR;
+
+ /* check if the source and target directories are the same */
+ if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
+ fs_func->get_entry_clu0(epmov) == p_newdir->dir)
+ return FFS_INVALIDPATH;
+
+ buf_lock(sb, sector_mov);
+
+ /* buf_lock() must be called before count_ext_entries() */
+ num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
+ epmov);
+ if (num_old_entries < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+ num_old_entries++;
+
+ ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
+ &num_new_entries, &dos_name);
+ if (ret) {
+ buf_unlock(sb, sector_mov);
+ return ret;
+ }
+
+ newentry = find_empty_entry(inode, p_newdir, num_new_entries);
+ if (newentry < 0) {
+ buf_unlock(sb, sector_mov);
+ return FFS_FULL;
+ }
+
+ epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
+ if (!epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ if (fs_func->get_entry_type(epnew) == TYPE_FILE) {
+ fs_func->set_entry_attr(epnew, fs_func->get_entry_attr(epnew) |
+ ATTR_ARCHIVE);
+ fid->attr |= ATTR_ARCHIVE;
+ }
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+
+ if (p_fs->vol_type == EXFAT) {
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ buf_unlock(sb, sector_mov);
+ return FFS_MEDIAERR;
+ }
+
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ buf_modify(sb, sector_new);
+ buf_unlock(sb, sector_mov);
+ } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
+ /* change ".." pointer to new parent dir */
+ clu.dir = fs_func->get_entry_clu0(epnew);
+ clu.flags = 0x01;
+
+ epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
+ if (!epnew)
+ return FFS_MEDIAERR;
+
+ if (p_newdir->dir == p_fs->root_dir)
+ fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
+ else
+ fs_func->set_entry_clu0(epnew, p_newdir->dir);
+ buf_modify(sb, sector_new);
+ }
+
+ ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
+ p_uniname, &dos_name);
+ if (ret != FFS_SUCCESS)
+ return ret;
+
+ fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
+
+ fid->dir.dir = p_newdir->dir;
+ fid->dir.size = p_newdir->size;
+ fid->dir.flags = p_newdir->flags;
+
+ fid->entry = newentry;
+
+ return FFS_SUCCESS;
+}
+
+/*
+ * Sector Read/Write Functions
+ */
+
+int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
+ bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, 1, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
+ bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu)\n",
+ __func__, (unsigned long long)sec);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, 1, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int multi_sector_read(struct super_block *sb, sector_t sec,
+ struct buffer_head **bh, s32 num_secs, bool read)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
+
+int multi_sector_write(struct super_block *sb, sector_t sec,
+ struct buffer_head *bh, s32 num_secs, bool sync)
+{
+ s32 ret = FFS_MEDIAERR;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
+ (p_fs->num_sectors > 0)) {
+ pr_err("[EXFAT] %s: out of range error! (sec = %llu, num_secs = %d)\n",
+ __func__, (unsigned long long)sec, num_secs);
+ fs_error(sb);
+ return ret;
+ }
+ if (!bh) {
+ pr_err("[EXFAT] %s: bh is NULL!\n", __func__);
+ fs_error(sb);
+ return ret;
+ }
+
+ if (!p_fs->dev_ejected) {
+ ret = bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != FFS_SUCCESS)
+ p_fs->dev_ejected = 1;
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
new file mode 100644
index 000000000000..03cb8290b5d2
--- /dev/null
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include "exfat.h"
+
+static u16 bad_dos_chars[] = {
+ /* + , ; = [ ] */
+ 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
+ 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
+ 0
+};
+
+static u16 bad_uni_chars[] = {
+ /* " * / : < > ? \ | */
+ 0x0022, 0x002A, 0x002F, 0x003A,
+ 0x003C, 0x003E, 0x003F, 0x005C, 0x007C,
+ 0
+};
+
+static int convert_ch_to_uni(struct nls_table *nls, u16 *uni, u8 *ch,
+ bool *lossy)
+{
+ int len;
+
+ *uni = 0x0;
+
+ if (ch[0] < 0x80) {
+ *uni = (u16)ch[0];
+ return 1;
+ }
+
+ len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ *uni = (u16)'_';
+ if (!strcmp(nls->charset, "utf8"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return len;
+}
+
+static int convert_uni_to_ch(struct nls_table *nls, u8 *ch, u16 uni,
+ bool *lossy)
+{
+ int len;
+
+ ch[0] = 0x0;
+
+ if (uni < 0x0080) {
+ ch[0] = (u8)uni;
+ return 1;
+ }
+
+ len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE);
+ if (len < 0) {
+ /* conversion failed */
+ pr_info("%s: fail to use nls\n", __func__);
+ if (lossy)
+ *lossy = true;
+ ch[0] = '_';
+ return 1;
+ }
+
+ return len;
+}
+
+u16 nls_upper(struct super_block *sb, u16 a)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ if (EXFAT_SB(sb)->options.casesensitive)
+ return a;
+ if (p_fs->vol_utbl && p_fs->vol_utbl[get_col_index(a)])
+ return p_fs->vol_utbl[get_col_index(a)][get_row_index(a)];
+ else
+ return a;
+}
+
+static u16 *nls_wstrchr(u16 *str, u16 wchar)
+{
+ while (*str) {
+ if (*(str++) == wchar)
+ return str;
+ }
+
+ return NULL;
+}
+
+int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
+{
+ return strncmp(a, b, DOS_NAME_LENGTH);
+}
+
+int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
+{
+ int i;
+
+ for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) {
+ if (nls_upper(sb, *a) != nls_upper(sb, *b))
+ return 1;
+ if (*a == 0x0)
+ return 0;
+ }
+ return 0;
+}
+
+void nls_uniname_to_dosname(struct super_block *sb,
+ struct dos_name_t *p_dosname,
+ struct uni_name_t *p_uniname, bool *p_lossy)
+{
+ int i, j, len;
+ bool lossy = false;
+ u8 buf[MAX_CHARSET_SIZE];
+ u8 lower = 0, upper = 0;
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ u16 *p, *last_period;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ for (i = 0; i < DOS_NAME_LENGTH; i++)
+ *(dosname + i) = ' ';
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
+ *(dosname) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
+ *(dosname) = '.';
+ *(dosname + 1) = '.';
+ p_dosname->name_case = 0x0;
+ if (p_lossy)
+ *p_lossy = false;
+ return;
+ }
+
+ /* search for the last embedded period */
+ last_period = NULL;
+ for (p = uniname; *p; p++) {
+ if (*p == (u16)'.')
+ last_period = p;
+ }
+
+ i = 0;
+ while (i < DOS_NAME_LENGTH) {
+ if (i == 8) {
+ if (!last_period)
+ break;
+
+ if (uniname <= last_period) {
+ if (uniname < last_period)
+ lossy = true;
+ uniname = last_period + 1;
+ }
+ }
+
+ if (*uniname == (u16)'\0') {
+ break;
+ } else if (*uniname == (u16)' ') {
+ lossy = true;
+ } else if (*uniname == (u16)'.') {
+ if (uniname < last_period)
+ lossy = true;
+ else
+ i = 8;
+ } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
+ lossy = true;
+ *(dosname + i) = '_';
+ i++;
+ } else {
+ len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
+
+ if (len > 1) {
+ if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
+ break;
+
+ if ((i < 8) && ((i + len) > 8)) {
+ i = 8;
+ continue;
+ }
+
+ lower = 0xFF;
+
+ for (j = 0; j < len; j++, i++)
+ *(dosname + i) = *(buf + j);
+ } else { /* len == 1 */
+ if ((*buf >= 'a') && (*buf <= 'z')) {
+ *(dosname + i) = *buf - ('a' - 'A');
+
+ if (i < 8)
+ lower |= 0x08;
+ else
+ lower |= 0x10;
+ } else if ((*buf >= 'A') && (*buf <= 'Z')) {
+ *(dosname + i) = *buf;
+
+ if (i < 8)
+ upper |= 0x08;
+ else
+ upper |= 0x10;
+ } else {
+ *(dosname + i) = *buf;
+ }
+ i++;
+ }
+ }
+
+ uniname++;
+ }
+
+ if (*dosname == 0xE5)
+ *dosname = 0x05;
+
+ if (*uniname != 0x0)
+ lossy = true;
+
+ if (upper & lower)
+ p_dosname->name_case = 0xFF;
+ else
+ p_dosname->name_case = lower;
+
+ if (p_lossy)
+ *p_lossy = lossy;
+}
+
+void nls_dosname_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname,
+ struct dos_name_t *p_dosname)
+{
+ int i = 0, j, n = 0;
+ u8 buf[DOS_NAME_LENGTH + 2];
+ u8 *dosname = p_dosname->name;
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
+
+ if (*dosname == 0x05) {
+ *buf = 0xE5;
+ i++;
+ n++;
+ }
+
+ for (; i < 8; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x08))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ if (*(dosname + 8) != ' ') {
+ *(buf + n) = '.';
+ n++;
+ }
+
+ for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
+ if (*(dosname + i) == ' ')
+ break;
+
+ if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
+ (p_dosname->name_case & 0x10))
+ *(buf + n) = *(dosname + i) + ('a' - 'A');
+ else
+ *(buf + n) = *(dosname + i);
+ }
+ *(buf + n) = '\0';
+
+ i = 0;
+ j = 0;
+ while (j < (MAX_NAME_LENGTH - 1)) {
+ if (*(buf + i) == '\0')
+ break;
+
+ i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
+
+ uniname++;
+ j++;
+ }
+
+ *uniname = (u16)'\0';
+}
+
+void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
+ struct uni_name_t *p_uniname)
+{
+ int i, j, len;
+ u8 buf[MAX_CHARSET_SIZE];
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ if (!nls) {
+ len = utf16s_to_utf8s(uniname, MAX_NAME_LENGTH,
+ UTF16_HOST_ENDIAN, p_cstring,
+ MAX_NAME_LENGTH);
+ p_cstring[len] = 0;
+ return;
+ }
+
+ i = 0;
+ while (i < (MAX_NAME_LENGTH - 1)) {
+ if (*uniname == (u16)'\0')
+ break;
+
+ len = convert_uni_to_ch(nls, buf, *uniname, NULL);
+
+ if (len > 1) {
+ for (j = 0; j < len; j++)
+ *p_cstring++ = (char)*(buf + j);
+ } else { /* len == 1 */
+ *p_cstring++ = (char)*buf;
+ }
+
+ uniname++;
+ i++;
+ }
+
+ *p_cstring = '\0';
+}
+
+void nls_cstring_to_uniname(struct super_block *sb,
+ struct uni_name_t *p_uniname, u8 *p_cstring,
+ bool *p_lossy)
+{
+ int i, j;
+ bool lossy = false;
+ u8 *end_of_name;
+ u8 upname[MAX_NAME_LENGTH * 2];
+ u16 *uniname = p_uniname->name;
+ struct nls_table *nls = EXFAT_SB(sb)->nls_io;
+
+ /* strip all trailing spaces */
+ end_of_name = p_cstring + strlen(p_cstring);
+
+ while (*(--end_of_name) == ' ') {
+ if (end_of_name < p_cstring)
+ break;
+ }
+ *(++end_of_name) = '\0';
+
+ if (strcmp(p_cstring, ".") && strcmp(p_cstring, "..")) {
+ /* strip all trailing periods */
+ while (*(--end_of_name) == '.') {
+ if (end_of_name < p_cstring)
+ break;
+ }
+ *(++end_of_name) = '\0';
+ }
+
+ if (*p_cstring == '\0')
+ lossy = true;
+
+ if (!nls) {
+ i = utf8s_to_utf16s(p_cstring, MAX_NAME_LENGTH,
+ UTF16_HOST_ENDIAN, uniname,
+ MAX_NAME_LENGTH);
+ for (j = 0; j < i; j++)
+ SET16_A(upname + j * 2, nls_upper(sb, uniname[j]));
+ uniname[i] = '\0';
+ } else {
+ i = 0;
+ j = 0;
+ while (j < (MAX_NAME_LENGTH - 1)) {
+ if (*(p_cstring + i) == '\0')
+ break;
+
+ i += convert_ch_to_uni(nls, uniname,
+ (u8 *)(p_cstring + i), &lossy);
+
+ if ((*uniname < 0x0020) ||
+ nls_wstrchr(bad_uni_chars, *uniname))
+ lossy = true;
+
+ SET16_A(upname + j * 2, nls_upper(sb, *uniname));
+
+ uniname++;
+ j++;
+ }
+
+ if (*(p_cstring + i) != '\0')
+ lossy = true;
+ *uniname = (u16)'\0';
+ }
+
+ p_uniname->name_len = j;
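+ /* The checksum of the up-cased name matches the NameHash field of the
+ * exFAT stream-extension entry, so directory lookups can reject
+ * non-matching entries without a full name compare.
+ */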
+ p_uniname->name_hash = calc_checksum_2byte(upname, j << 1, 0,
+ CS_DEFAULT);
+
+ if (p_lossy)
+ *p_lossy = lossy;
+}
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
new file mode 100644
index 000000000000..5f6caee819a6
--- /dev/null
+++ b/drivers/staging/exfat/exfat_super.c
@@ -0,0 +1,4049 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
+#include <linux/mount.h>
+#include <linux/vfs.h>
+#include <linux/aio.h>
+#include <linux/iversion.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/writeback.h>
+#include <linux/log2.h>
+#include <linux/hash.h>
+#include <linux/backing-dev.h>
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/mutex.h>
+#include <linux/swap.h>
+
+#define EXFAT_VERSION "1.3.0"
+
+#include "exfat.h"
+
+static struct kmem_cache *exfat_inode_cachep;
+
+static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE;
+static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET;
+
+#define INC_IVERSION(x) (inode_inc_iversion(x))
+#define GET_IVERSION(x) (inode_peek_iversion_raw(x))
+#define SET_IVERSION(x, y) (inode_set_iversion(x, y))
+
+static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos);
+static int exfat_sync_inode(struct inode *inode);
+static struct inode *exfat_build_inode(struct super_block *sb,
+ struct file_id_t *fid, loff_t i_pos);
+static int exfat_write_inode(struct inode *inode,
+ struct writeback_control *wbc);
+static void exfat_write_super(struct super_block *sb);
+
+#define UNIX_SECS_1980 315532800L
+#define UNIX_SECS_2108 4354819200L
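+
+/*
+ * The FAT year field is 7 bits counted from 1980, so representable dates
+ * run from 1980-01-01 through the end of 2107; the conversions below clamp
+ * times outside that window.
+ */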
+
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1970-01-01). */
+static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp)
+{
+ ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day,
+ tp->Hour, tp->Minute, tp->Second);
+
+ ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC;
+}
+
+/* Convert linear UNIX date to a FAT time/date pair. */
+static void exfat_time_unix2fat(struct timespec64 *ts, struct date_time_t *tp)
+{
+ time64_t second = ts->tv_sec;
+ struct tm tm;
+
+ time64_to_tm(second, 0, &tm);
+
+ if (second < UNIX_SECS_1980) {
+ tp->MilliSecond = 0;
+ tp->Second = 0;
+ tp->Minute = 0;
+ tp->Hour = 0;
+ tp->Day = 1;
+ tp->Month = 1;
+ tp->Year = 0;
+ return;
+ }
+
+ if (second >= UNIX_SECS_2108) {
+ tp->MilliSecond = 999;
+ tp->Second = 59;
+ tp->Minute = 59;
+ tp->Hour = 23;
+ tp->Day = 31;
+ tp->Month = 12;
+ tp->Year = 127;
+ return;
+ }
+
+ tp->MilliSecond = ts->tv_nsec / NSEC_PER_MSEC;
+ tp->Second = tm.tm_sec;
+ tp->Minute = tm.tm_min;
+ tp->Hour = tm.tm_hour;
+ tp->Day = tm.tm_mday;
+ tp->Month = tm.tm_mon + 1;
+ tp->Year = tm.tm_year + 1900 - 1980;
+}
+
+struct timestamp_t *tm_current(struct timestamp_t *tp)
+{
+ time64_t second = ktime_get_real_seconds();
+ struct tm tm;
+
+ time64_to_tm(second, 0, &tm);
+
+ if (second < UNIX_SECS_1980) {
+ tp->sec = 0;
+ tp->min = 0;
+ tp->hour = 0;
+ tp->day = 1;
+ tp->mon = 1;
+ tp->year = 0;
+ return tp;
+ }
+
+ if (second >= UNIX_SECS_2108) {
+ tp->sec = 59;
+ tp->min = 59;
+ tp->hour = 23;
+ tp->day = 31;
+ tp->mon = 12;
+ tp->year = 127;
+ return tp;
+ }
+
+ tp->sec = tm.tm_sec;
+ tp->min = tm.tm_min;
+ tp->hour = tm.tm_hour;
+ tp->day = tm.tm_mday;
+ tp->mon = tm.tm_mon + 1;
+ tp->year = tm.tm_year + 1900 - 1980;
+
+ return tp;
+}
+
+static void __lock_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ mutex_lock(&sbi->s_lock);
+}
+
+static void __unlock_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ mutex_unlock(&sbi->s_lock);
+}
+
+static int __is_sb_dirty(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ return sbi->s_dirt;
+}
+
+static void __set_sb_clean(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ sbi->s_dirt = 0;
+}
+
+static int __exfat_revalidate(struct dentry *dentry)
+{
+ return 0;
+}
+
+static int exfat_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (dentry->d_inode)
+ return 1;
+ return __exfat_revalidate(dentry);
+}
+
+static int exfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (dentry->d_inode)
+ return 1;
+
+ if (!flags)
+ return 0;
+
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ return 0;
+
+ return __exfat_revalidate(dentry);
+}
+
+static unsigned int __exfat_striptail_len(unsigned int len, const char *name)
+{
+ while (len && name[len - 1] == '.')
+ len--;
+ return len;
+}
+
+static unsigned int exfat_striptail_len(const struct qstr *qstr)
+{
+ return __exfat_striptail_len(qstr->len, qstr->name);
+}
+
+static int exfat_d_hash(const struct dentry *dentry, struct qstr *qstr)
+{
+ qstr->hash = full_name_hash(dentry, qstr->name,
+ exfat_striptail_len(qstr));
+ return 0;
+}
+
+static int exfat_d_hashi(const struct dentry *dentry, struct qstr *qstr)
+{
+ struct super_block *sb = dentry->d_sb;
+ const unsigned char *name;
+ unsigned int len;
+ unsigned long hash;
+
+ name = qstr->name;
+ len = exfat_striptail_len(qstr);
+
+ hash = init_name_hash(dentry);
+ while (len--)
+ hash = partial_name_hash(nls_upper(sb, *name++), hash);
+ qstr->hash = end_name_hash(hash);
+
+ return 0;
+}
+
+static int exfat_cmpi(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ struct nls_table *t = EXFAT_SB(dentry->d_sb)->nls_io;
+ unsigned int alen, blen;
+
+ alen = exfat_striptail_len(name);
+ blen = __exfat_striptail_len(len, str);
+ if (alen == blen) {
+ if (!t) {
+ if (strncasecmp(name->name, str, alen) == 0)
+ return 0;
+ } else {
+ if (nls_strnicmp(t, name->name, str, alen) == 0)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int exfat_cmp(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
+{
+ unsigned int alen, blen;
+
+ alen = exfat_striptail_len(name);
+ blen = __exfat_striptail_len(len, str);
+ if (alen == blen) {
+ if (strncmp(name->name, str, alen) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+static const struct dentry_operations exfat_ci_dentry_ops = {
+ .d_revalidate = exfat_revalidate_ci,
+ .d_hash = exfat_d_hashi,
+ .d_compare = exfat_cmpi,
+};
+
+static const struct dentry_operations exfat_dentry_ops = {
+ .d_revalidate = exfat_revalidate,
+ .d_hash = exfat_d_hash,
+ .d_compare = exfat_cmp,
+};
+
+static DEFINE_SEMAPHORE(z_sem);
+
+static inline void fs_sync(struct super_block *sb, bool do_sync)
+{
+ if (do_sync)
+ bdev_sync(sb);
+}
+
+/*
+ * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * If it's a directory and !sbi->options.rodir, ATTR_RO isn't a
+ * read-only bit; it's just used as a flag for applications.
+ */
+static inline int exfat_mode_can_hold_ro(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+
+ if ((~sbi->options.fs_fmask) & 0222)
+ return 1;
+ return 0;
+}
+
+/* Convert attribute bits and a mask to the UNIX mode. */
+static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, u32 attr,
+ mode_t mode)
+{
+ if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR))
+ mode &= ~0222;
+
+ if (attr & ATTR_SUBDIR)
+ return (mode & ~sbi->options.fs_dmask) | S_IFDIR;
+ else if (attr & ATTR_SYMLINK)
+ return (mode & ~sbi->options.fs_dmask) | S_IFLNK;
+ else
+ return (mode & ~sbi->options.fs_fmask) | S_IFREG;
+}
+
+/* Return the FAT attribute byte for this inode */
+static inline u32 exfat_make_attr(struct inode *inode)
+{
+ if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222))
+ return (EXFAT_I(inode)->fid.attr) | ATTR_READONLY;
+ else
+ return EXFAT_I(inode)->fid.attr;
+}
+
+static inline void exfat_save_attr(struct inode *inode, u32 attr)
+{
+ if (exfat_mode_can_hold_ro(inode))
+ EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK;
+ else
+ EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY);
+}
+
+static int ffsMountVol(struct super_block *sb)
+{
+ int i, ret;
+ struct pbr_sector_t *p_pbr;
+ struct buffer_head *tmp_bh = NULL;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ pr_info("[EXFAT] trying to mount...\n");
+
+ down(&z_sem);
+
+ buf_init(sb);
+
+ sema_init(&p_fs->v_sem, 1);
+ p_fs->dev_ejected = 0;
+
+ /* open the block device */
+ bdev_open(sb);
+
+ if (p_bd->sector_size < sb->s_blocksize) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ if (p_bd->sector_size > sb->s_blocksize)
+ sb_set_blocksize(sb, p_bd->sector_size);
+
+ /* read Sector 0 */
+ if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ p_fs->PBR_sector = 0;
+
+ p_pbr = (struct pbr_sector_t *)tmp_bh->b_data;
+
+ /* check the validity of PBR */
+ if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
+ brelse(tmp_bh);
+ bdev_close(sb);
+ ret = FFS_FORMATERR;
+ goto out;
+ }
+
+ /* fill fs_struct */
+ for (i = 0; i < 53; i++)
+ if (p_pbr->bpb[i])
+ break;
+
+ if (i < 53) {
+#ifdef CONFIG_EXFAT_DONT_MOUNT_VFAT
+ ret = -EINVAL;
+ printk(KERN_INFO "EXFAT: Attempted to mount VFAT filesystem\n");
+ goto out;
+#else
+ if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
+ ret = fat16_mount(sb, p_pbr);
+ else
+ ret = fat32_mount(sb, p_pbr);
+#endif
+ } else {
+ ret = exfat_mount(sb, p_pbr);
+ }
+
+ brelse(tmp_bh);
+
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ bdev_close(sb);
+ goto out;
+ }
+ ret = load_upcase_table(sb);
+ if (ret) {
+ free_alloc_bitmap(sb);
+ bdev_close(sb);
+ goto out;
+ }
+ }
+
+ if (p_fs->dev_ejected) {
+ if (p_fs->vol_type == EXFAT) {
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ }
+ bdev_close(sb);
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ pr_info("[EXFAT] mounted successfully\n");
+
+out:
+ up(&z_sem);
+
+ return ret;
+}
+
+static int ffsUmountVol(struct super_block *sb)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int err = FFS_SUCCESS;
+
+ pr_info("[EXFAT] trying to unmount...\n");
+
+ down(&z_sem);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ if (p_fs->vol_type == EXFAT) {
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ }
+
+ FAT_release_all(sb);
+ buf_release_all(sb);
+
+ /* close the block device */
+ bdev_close(sb);
+
+ if (p_fs->dev_ejected) {
+ pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
+ err = FFS_MEDIAERR;
+ }
+
+ buf_shutdown(sb);
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+ up(&z_sem);
+
+ pr_info("[EXFAT] unmounted successfully\n");
+
+ return err;
+}
+
+static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
+{
+ int err = FFS_SUCCESS;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of pointer parameters */
+ if (!info)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (p_fs->used_clusters == UINT_MAX)
+ p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
+
+ info->FatType = p_fs->vol_type;
+ info->ClusterSize = p_fs->cluster_size;
+ info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */
+ info->UsedClusters = p_fs->used_clusters;
+ info->FreeClusters = info->NumClusters - info->UsedClusters;
+
+ if (p_fs->dev_ejected)
+ err = FFS_MEDIAERR;
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return err;
+}
+
+static int ffsSyncVol(struct super_block *sb, bool do_sync)
+{
+ int err = FFS_SUCCESS;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* synchronize the file system */
+ fs_sync(sb, do_sync);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+
+ if (p_fs->dev_ejected)
+ err = FFS_MEDIAERR;
+
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return err;
+}
+
+/*----------------------------------------------------------------------*/
+/* File Operation Functions */
+/*----------------------------------------------------------------------*/
+
+static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
+{
+ int ret, dentry, num_entries;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct dos_name_t dos_name;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ pr_debug("%s entered\n", __func__);
+
+ /* check the validity of pointer parameters */
+ if (!fid || !path || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries,
+ &dos_name);
+ if (ret)
+ goto out;
+
+ /* search the file name for directories */
+ dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
+ &dos_name, TYPE_ALL);
+ if (dentry < -1) {
+ ret = FFS_NOTFOUND;
+ goto out;
+ }
+
+ fid->dir.dir = dir.dir;
+ fid->dir.size = dir.size;
+ fid->dir.flags = dir.flags;
+ fid->entry = dentry;
+
+ if (dentry == -1) {
+ fid->type = TYPE_DIR;
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+
+ fid->attr = ATTR_SUBDIR;
+ fid->flags = 0x01;
+ fid->size = 0;
+ fid->start_clu = p_fs->root_dir;
+ } else {
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &dir, dentry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep + 1;
+ } else {
+ ep = get_entry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ fid->type = p_fs->fs_func->get_entry_type(ep);
+ fid->rwoffset = 0;
+ fid->hint_last_off = -1;
+ fid->attr = p_fs->fs_func->get_entry_attr(ep);
+
+ fid->size = p_fs->fs_func->get_entry_size(ep2);
+ if ((fid->type == TYPE_FILE) && (fid->size == 0)) {
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUSTER_32(~0);
+ } else {
+ fid->flags = p_fs->fs_func->get_entry_flag(ep2);
+ fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
+ }
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
+ struct file_id_t *fid)
+{
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ int ret;
+
+ /* check the validity of pointer parameters */
+ if (!fid || !path || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* create a new file */
+ ret = create_file(inode, &dir, &uni_name, mode, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
+ u64 count, u64 *rcount)
+{
+ s32 offset, sec_offset, clu_offset;
+ u32 clu;
+ int ret = 0;
+ sector_t LogSector;
+ u64 oneblkread, read_bytes;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ /* check the validity of the given file id */
+ if (!fid)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if (!buffer)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count > (fid->size - fid->rwoffset))
+ count = fid->size - fid->rwoffset;
+
+ if (count == 0) {
+ if (rcount)
+ *rcount = 0;
+ ret = FFS_EOF;
+ goto out;
+ }
+
+ read_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ clu += clu_offset;
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu = fid->hint_last_clu;
+ }
+
+ while (clu_offset > 0) {
+ /* clu = FAT_read(sb, clu); */
+ if (FAT_read(sb, clu, &clu) == -1)
+ return FFS_MEDIAERR;
+
+ clu_offset--;
+ }
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >>
+ p_fs->cluster_size_bits);
+ fid->hint_last_clu = clu;
+
+ /* byte offset in cluster */
+ offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
+
+ /* sector offset in cluster */
+ sec_offset = offset >> p_bd->sector_size_bits;
+
+ /* byte offset in sector */
+ offset &= p_bd->sector_size_mask;
+
+ LogSector = START_SECTOR(clu) + sec_offset;
+
+ oneblkread = (u64)(p_bd->sector_size - offset);
+ if (oneblkread > count)
+ oneblkread = count;
+
+ if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)buffer + read_bytes,
+ (char *)tmp_bh->b_data, (s32)oneblkread);
+ } else {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)buffer + read_bytes,
+ (char *)tmp_bh->b_data + offset,
+ (s32)oneblkread);
+ }
+ count -= oneblkread;
+ read_bytes += oneblkread;
+ fid->rwoffset += oneblkread;
+ }
+ brelse(tmp_bh);
+
+/* How did this ever work and not leak a brelse()?? */
+err_out:
+ /* set the size of read bytes */
+ if (rcount)
+ *rcount = read_bytes;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
+ void *buffer, u64 count, u64 *wcount)
+{
+ bool modified = false;
+ s32 offset, sec_offset, clu_offset;
+ s32 num_clusters, num_alloc, num_alloced = (s32)~0;
+ int ret = 0;
+ u32 clu, last_clu;
+ sector_t LogSector, sector = 0;
+ u64 oneblkwrite, write_bytes;
+ struct chain_t new_clu;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct buffer_head *tmp_bh = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+
+ /* check the validity of the given file id */
+ if (!fid)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if (!buffer)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+ if (count == 0) {
+ if (wcount)
+ *wcount = 0;
+ ret = FFS_SUCCESS;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (fid->size == 0)
+ num_clusters = 0;
+ else
+ num_clusters = (s32)((fid->size - 1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ write_bytes = 0;
+
+ while (count > 0) {
+ clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ clu = last_clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ clu = CLUSTER_32(~0);
+ else
+ clu += clu_offset;
+ }
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu = fid->hint_last_clu;
+ }
+
+ while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
+ last_clu = clu;
+ /* clu = FAT_read(sb, clu); */
+ if (FAT_read(sb, clu, &clu) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+
+ if (clu == CLUSTER_32(~0)) {
+ num_alloc = (s32)((count - 1) >>
+ p_fs->cluster_size_bits) + 1;
+ new_clu.dir = (last_clu == CLUSTER_32(~0)) ?
+ CLUSTER_32(~0) : last_clu + 1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a chain of clusters */
+ num_alloced = p_fs->fs_func->alloc_cluster(sb,
+ num_alloc,
+ &new_clu);
+ if (num_alloced == 0)
+ break;
+ if (num_alloced < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ /* (2) append to the FAT chain */
+ if (last_clu == CLUSTER_32(~0)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ exfat_chain_cont_cluster(sb,
+ fid->start_clu,
+ num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01)
+ FAT_write(sb, last_clu, new_clu.dir);
+ }
+
+ num_clusters += num_alloced;
+ clu = new_clu.dir;
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >>
+ p_fs->cluster_size_bits);
+ fid->hint_last_clu = clu;
+
+ /* byte offset in cluster */
+ offset = (s32)(fid->rwoffset & (p_fs->cluster_size - 1));
+
+ /* sector offset in cluster */
+ sec_offset = offset >> p_bd->sector_size_bits;
+
+ /* byte offset in sector */
+ offset &= p_bd->sector_size_mask;
+
+ LogSector = START_SECTOR(clu) + sec_offset;
+
+ oneblkwrite = (u64)(p_bd->sector_size - offset);
+ if (oneblkwrite > count)
+ oneblkwrite = count;
+
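+		/* a full, aligned sector can be overwritten without reading it first */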
+ if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+ FFS_SUCCESS)
+ goto err_out;
+ memcpy((char *)tmp_bh->b_data,
+ (char *)buffer + write_bytes, (s32)oneblkwrite);
+ if (sector_write(sb, LogSector, tmp_bh, 0) !=
+ FFS_SUCCESS) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ } else {
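+			/*
+			 * Partial-sector write: read the old contents first unless
+			 * the write starts at offset 0 and reaches or extends EOF.
+			 */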
+ if ((offset > 0) ||
+ ((fid->rwoffset + oneblkwrite) < fid->size)) {
+ if (sector_read(sb, LogSector, &tmp_bh, 1) !=
+ FFS_SUCCESS)
+ goto err_out;
+ } else {
+ if (sector_read(sb, LogSector, &tmp_bh, 0) !=
+ FFS_SUCCESS)
+ goto err_out;
+ }
+
+ memcpy((char *)tmp_bh->b_data + offset,
+ (char *)buffer + write_bytes, (s32)oneblkwrite);
+ if (sector_write(sb, LogSector, tmp_bh, 0) !=
+ FFS_SUCCESS) {
+ brelse(tmp_bh);
+ goto err_out;
+ }
+ }
+
+ count -= oneblkwrite;
+ write_bytes += oneblkwrite;
+ fid->rwoffset += oneblkwrite;
+
+ fid->attr |= ATTR_ARCHIVE;
+
+ if (fid->size < fid->rwoffset) {
+ fid->size = fid->rwoffset;
+ modified = true;
+ }
+ }
+
+ brelse(tmp_bh);
+
+	/* (3) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ goto err_out;
+ ep2 = ep + 1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep)
+ goto err_out;
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+
+ if (modified) {
+ if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
+ p_fs->fs_func->set_entry_flag(ep2, fid->flags);
+
+ if (p_fs->fs_func->get_entry_size(ep2) != fid->size)
+ p_fs->fs_func->set_entry_size(ep2, fid->size);
+
+ if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
+ p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+err_out:
+ /* set the size of written bytes */
+ if (wcount)
+ *wcount = write_bytes;
+
+	if (num_alloced == 0)
+		ret = FFS_FULL;
+	else if (p_fs->dev_ejected)
+		ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
+{
+ s32 num_clusters;
+ u32 last_clu = CLUSTER_32(0);
+ int ret = 0;
+ sector_t sector = 0;
+ struct chain_t clu;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct entry_set_cache_t *es = NULL;
+
+ pr_debug("%s entered (inode %p size %llu)\n", __func__, inode,
+ new_size);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_FILE) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+
+	if (fid->size != old_size) {
+		pr_err("[EXFAT] truncate: cannot skip truncation due to size mismatch (old: %llu, fid: %llu).\n",
+		       old_size, fid->size);
+	}
+
+ if (old_size <= new_size) {
+ ret = FFS_SUCCESS;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ clu.dir = fid->start_clu;
+ clu.size = (s32)((old_size - 1) >> p_fs->cluster_size_bits) + 1;
+ clu.flags = fid->flags;
+
+ if (new_size > 0) {
+ num_clusters = (s32)((new_size - 1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ if (clu.flags == 0x03) {
+ clu.dir += num_clusters;
+ } else {
+ while (num_clusters > 0) {
+ last_clu = clu.dir;
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ num_clusters--;
+ }
+ }
+
+ clu.size -= num_clusters;
+ }
+
+ fid->size = new_size;
+ fid->attr |= ATTR_ARCHIVE;
+ if (new_size == 0) {
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+ fid->start_clu = CLUSTER_32(~0);
+ }
+
+ /* (1) update the directory entry */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep + 1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
+ p_fs->fs_func->set_entry_attr(ep, fid->attr);
+
+ p_fs->fs_func->set_entry_size(ep2, new_size);
+ if (new_size == 0) {
+ p_fs->fs_func->set_entry_flag(ep2, 0x01);
+ p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
+ }
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ /* (2) cut off from the FAT chain */
+ if (last_clu != CLUSTER_32(0)) {
+ if (fid->flags == 0x01)
+ FAT_write(sb, last_clu, CLUSTER_32(~0));
+ }
+
+ /* (3) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu, 0);
+
+ /* hint information */
+ fid->hint_last_off = -1;
+ if (fid->rwoffset > fid->size)
+ fid->rwoffset = fid->size;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ pr_debug("%s exited (%d)\n", __func__, ret);
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static void update_parent_info(struct file_id_t *fid,
+ struct inode *parent_inode)
+{
+ struct fs_info_t *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info);
+ struct file_id_t *parent_fid = &(EXFAT_I(parent_inode)->fid);
+
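+	/* refresh the cached parent-directory chain if the parent has changed */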
+ if (unlikely((parent_fid->flags != fid->dir.flags) ||
+ (parent_fid->size !=
+ (fid->dir.size << p_fs->cluster_size_bits)) ||
+ (parent_fid->start_clu != fid->dir.dir))) {
+ fid->dir.dir = parent_fid->start_clu;
+ fid->dir.flags = parent_fid->flags;
+ fid->dir.size = ((parent_fid->size + (p_fs->cluster_size - 1))
+ >> p_fs->cluster_size_bits);
+ }
+}
+
+static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
+ struct inode *new_parent_inode, struct dentry *new_dentry)
+{
+ s32 ret;
+ s32 dentry;
+ struct chain_t olddir, newdir;
+ struct chain_t *p_dir = NULL;
+ struct uni_name_t uni_name;
+ struct dentry_t *ep;
+ struct super_block *sb = old_parent_inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ u8 *new_path = (u8 *)new_dentry->d_name.name;
+ struct inode *new_inode = new_dentry->d_inode;
+ int num_entries;
+ struct file_id_t *new_fid = NULL;
+ s32 new_entry = 0;
+
+ /* check the validity of the given file id */
+ if (!fid)
+ return FFS_INVALIDFID;
+
+ /* check the validity of pointer parameters */
+ if (!new_path || (*new_path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ update_parent_info(fid, old_parent_inode);
+
+ olddir.dir = fid->dir.dir;
+ olddir.size = fid->dir.size;
+ olddir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ /* check if the old file is "." or ".." */
+ if (p_fs->vol_type != EXFAT) {
+ if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
+ ret = FFS_PERMISSIONERR;
+ goto out2;
+ }
+ }
+
+ ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out2;
+ }
+
+ if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ ret = FFS_PERMISSIONERR;
+ goto out2;
+ }
+
+ /* check whether new dir is existing directory and empty */
+ if (new_inode) {
+ u32 entry_type;
+
+ ret = FFS_MEDIAERR;
+ new_fid = &EXFAT_I(new_inode)->fid;
+
+ update_parent_info(new_fid, new_parent_inode);
+
+ p_dir = &(new_fid->dir);
+ new_entry = new_fid->entry;
+ ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep)
+ goto out;
+
+ entry_type = p_fs->fs_func->get_entry_type(ep);
+
+ if (entry_type == TYPE_DIR) {
+ struct chain_t new_clu;
+
+ new_clu.dir = new_fid->start_clu;
+ new_clu.size = (s32)((new_fid->size - 1) >>
+ p_fs->cluster_size_bits) + 1;
+ new_clu.flags = new_fid->flags;
+
+ if (!is_dir_empty(sb, &new_clu)) {
+ ret = FFS_FILEEXIST;
+ goto out;
+ }
+ }
+ }
+
+ /* check the validity of directory name in the given new pathname */
+ ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name);
+ if (ret)
+ goto out2;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ if (olddir.dir == newdir.dir)
+ ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name,
+ fid);
+ else
+ ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
+ &uni_name, fid);
+
+ if ((ret == FFS_SUCCESS) && new_inode) {
+ /* delete entries of new_dir */
+ ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
+ if (!ep)
+ goto out;
+
+ num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir,
+ new_entry, ep);
+ if (num_entries < 0)
+ goto out;
+ p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
+ num_entries + 1);
+ }
+out:
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out2:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
+{
+ s32 dentry;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir, clu_to_free;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of the given file id */
+ if (!fid)
+ return FFS_INVALIDFID;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ dir.dir = fid->dir.dir;
+ dir.size = fid->dir.size;
+ dir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ ep = get_entry_in_dir(sb, &dir, dentry, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
+ ret = FFS_PERMISSIONERR;
+ goto out;
+ }
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* (1) update the directory entry */
+ remove_file(inode, &dir, dentry);
+
+ clu_to_free.dir = fid->start_clu;
+ clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
+ clu_to_free.flags = fid->flags;
+
+ /* (2) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu_to_free, 0);
+
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+#if 0
+/* Not currently wired up */
+static int ffsSetAttr(struct inode *inode, u32 attr)
+{
+ u32 type;
+ int ret = FFS_SUCCESS;
+ sector_t sector = 0;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+ struct entry_set_cache_t *es = NULL;
+
+ if (fid->attr == attr) {
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+ return FFS_SUCCESS;
+ }
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+ if (p_fs->dev_ejected)
+ return FFS_MEDIAERR;
+ return FFS_SUCCESS;
+ }
+ }
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* get the directory entry of given file */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ type = p_fs->fs_func->get_entry_type(ep);
+
+ if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
+ ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ else
+ ret = FFS_ERROR;
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* set the file attribute */
+ fid->attr = attr;
+ p_fs->fs_func->set_entry_attr(ep, attr);
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+#endif
+
+static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
+{
+ sector_t sector = 0;
+ s32 count;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct entry_set_cache_t *es = NULL;
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+ pr_debug("%s entered\n", __func__);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+ info->Attr = ATTR_SUBDIR;
+ memset((char *)&info->CreateTimestamp, 0,
+ sizeof(struct date_time_t));
+ memset((char *)&info->ModifyTimestamp, 0,
+ sizeof(struct date_time_t));
+ memset((char *)&info->AccessTimestamp, 0,
+ sizeof(struct date_time_t));
+ strcpy(info->ShortName, ".");
+ strcpy(info->Name, ".");
+
+ dir.dir = p_fs->root_dir;
+ dir.flags = 0x01;
+
+ if (p_fs->root_dir == CLUSTER_32(0)) {
+ /* FAT16 root_dir */
+ info->Size = p_fs->dentries_in_root <<
+ DENTRY_SIZE_BITS;
+ } else {
+ info->Size = count_num_clusters(sb, &dir) <<
+ p_fs->cluster_size_bits;
+ }
+
+ count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+ if (count < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ info->NumSubdirs = count;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ /* get the directory entry of given file or directory */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep + 1;
+ } else {
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ buf_lock(sb, sector);
+ }
+
+ /* set FILE_INFO structure using the acquired struct dentry_t */
+ info->Attr = p_fs->fs_func->get_entry_attr(ep);
+
+ p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ info->CreateTimestamp.Year = tm.year;
+ info->CreateTimestamp.Month = tm.mon;
+ info->CreateTimestamp.Day = tm.day;
+ info->CreateTimestamp.Hour = tm.hour;
+ info->CreateTimestamp.Minute = tm.min;
+ info->CreateTimestamp.Second = tm.sec;
+ info->CreateTimestamp.MilliSecond = 0;
+
+ p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ info->ModifyTimestamp.Year = tm.year;
+ info->ModifyTimestamp.Month = tm.mon;
+ info->ModifyTimestamp.Day = tm.day;
+ info->ModifyTimestamp.Hour = tm.hour;
+ info->ModifyTimestamp.Minute = tm.min;
+ info->ModifyTimestamp.Second = tm.sec;
+ info->ModifyTimestamp.MilliSecond = 0;
+
+ memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t));
+
+ *(uni_name.name) = 0x0;
+	/* XXX: this is inefficient for exfat because the name is already
+	 * included in es; the API should be revised.
+	 */
+ p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+ uni_name.name);
+ if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
+ get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+ &uni_name, 0x1);
+ nls_uniname_to_cstring(sb, info->Name, &uni_name);
+
+ if (p_fs->vol_type == EXFAT) {
+ info->NumSubdirs = 2;
+ } else {
+ buf_unlock(sb, sector);
+ get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
+ &uni_name, 0x0);
+ nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
+ info->NumSubdirs = 0;
+ }
+
+ info->Size = p_fs->fs_func->get_entry_size(ep2);
+
+ if (p_fs->vol_type == EXFAT)
+ release_entry_set(es);
+
+ if (is_dir) {
+ dir.dir = fid->start_clu;
+ dir.flags = 0x01;
+
+ if (info->Size == 0)
+ info->Size = (u64)count_num_clusters(sb, &dir) <<
+ p_fs->cluster_size_bits;
+
+ count = count_dos_name_entries(sb, &dir, TYPE_DIR);
+ if (count < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ info->NumSubdirs += count;
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ pr_debug("%s exited successfully\n", __func__);
+ return ret;
+}
+
+static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
+{
+ sector_t sector = 0;
+ int ret = FFS_SUCCESS;
+ struct timestamp_t tm;
+ struct dentry_t *ep, *ep2;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ u8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0;
+
+	pr_debug("%s entered (inode %p info %p)\n", __func__, inode, info);
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (is_dir) {
+ if ((fid->dir.dir == p_fs->root_dir) &&
+ (fid->entry == -1)) {
+			if (p_fs->dev_ejected)
+				ret = FFS_MEDIAERR;
+			else
+				ret = FFS_SUCCESS;
+ goto out;
+ }
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* get the directory entry of given file or directory */
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep + 1;
+ } else {
+ /* for other than exfat */
+ ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ ep2 = ep;
+ }
+
+ p_fs->fs_func->set_entry_attr(ep, info->Attr);
+
+ /* set FILE_INFO structure using the acquired struct dentry_t */
+ tm.sec = info->CreateTimestamp.Second;
+ tm.min = info->CreateTimestamp.Minute;
+ tm.hour = info->CreateTimestamp.Hour;
+ tm.day = info->CreateTimestamp.Day;
+ tm.mon = info->CreateTimestamp.Month;
+ tm.year = info->CreateTimestamp.Year;
+ p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE);
+
+ tm.sec = info->ModifyTimestamp.Second;
+ tm.min = info->ModifyTimestamp.Minute;
+ tm.hour = info->ModifyTimestamp.Hour;
+ tm.day = info->ModifyTimestamp.Day;
+ tm.mon = info->ModifyTimestamp.Month;
+ tm.year = info->ModifyTimestamp.Year;
+ p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY);
+
+ p_fs->fs_func->set_entry_size(ep2, info->Size);
+
+ if (p_fs->vol_type != EXFAT) {
+ buf_modify(sb, sector);
+ } else {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ pr_debug("%s exited (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
+{
+ s32 num_clusters, num_alloced;
+ bool modified = false;
+ u32 last_clu;
+ int ret = FFS_SUCCESS;
+ sector_t sector = 0;
+ struct chain_t new_clu;
+ struct dentry_t *ep;
+ struct entry_set_cache_t *es = NULL;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ /* check the validity of pointer parameters */
+ if (!clu)
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
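+	/* record the byte offset of the requested cluster; used for the hint below */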
+ fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
+
+ if (EXFAT_I(inode)->mmu_private == 0)
+ num_clusters = 0;
+ else
+ num_clusters = (s32)((EXFAT_I(inode)->mmu_private - 1) >>
+ p_fs->cluster_size_bits) + 1;
+
+ *clu = last_clu = fid->start_clu;
+
+ if (fid->flags == 0x03) {
+ if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+ last_clu += clu_offset - 1;
+
+ if (clu_offset == num_clusters)
+ *clu = CLUSTER_32(~0);
+ else
+ *clu += clu_offset;
+ }
+ } else {
+ /* hint information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ *clu = fid->hint_last_clu;
+ }
+
+ while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
+ last_clu = *clu;
+ if (FAT_read(sb, *clu, clu) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+
+ if (*clu == CLUSTER_32(~0)) {
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ new_clu.dir = (last_clu == CLUSTER_32(~0)) ? CLUSTER_32(~0) :
+ last_clu + 1;
+ new_clu.size = 0;
+ new_clu.flags = fid->flags;
+
+ /* (1) allocate a cluster */
+ num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
+ if (num_alloced < 0) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ } else if (num_alloced == 0) {
+ ret = FFS_FULL;
+ goto out;
+ }
+
+ /* (2) append to the FAT chain */
+ if (last_clu == CLUSTER_32(~0)) {
+ if (new_clu.flags == 0x01)
+ fid->flags = 0x01;
+ fid->start_clu = new_clu.dir;
+ modified = true;
+ } else {
+ if (new_clu.flags != fid->flags) {
+ exfat_chain_cont_cluster(sb, fid->start_clu,
+ num_clusters);
+ fid->flags = 0x01;
+ modified = true;
+ }
+ if (new_clu.flags == 0x01)
+ FAT_write(sb, last_clu, new_clu.dir);
+ }
+
+ num_clusters += num_alloced;
+ *clu = new_clu.dir;
+
+ if (p_fs->vol_type == EXFAT) {
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ /* get stream entry */
+ ep++;
+ }
+
+ /* (3) update directory entry */
+ if (modified) {
+ if (p_fs->vol_type != EXFAT) {
+ ep = get_entry_in_dir(sb, &(fid->dir),
+ fid->entry, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+
+ if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
+ p_fs->fs_func->set_entry_flag(ep, fid->flags);
+
+ if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu)
+ p_fs->fs_func->set_entry_clu0(ep,
+ fid->start_clu);
+
+ if (p_fs->vol_type != EXFAT)
+ buf_modify(sb, sector);
+ }
+
+ if (p_fs->vol_type == EXFAT) {
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
+ }
+
+ /* add number of new blocks to inode */
+ inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
+ }
+
+ /* hint information */
+ fid->hint_last_off = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
+ fid->hint_last_clu = *clu;
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------*/
+/* Directory Operation Functions */
+/*----------------------------------------------------------------------*/
+
+static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
+{
+ int ret = FFS_SUCCESS;
+ struct chain_t dir;
+ struct uni_name_t uni_name;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ pr_debug("%s entered\n", __func__);
+
+ /* check the validity of pointer parameters */
+ if (!fid || !path || (*path == '\0'))
+ return FFS_ERROR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ /* check the validity of directory name in the given old pathname */
+ ret = resolve_path(inode, path, &dir, &uni_name);
+ if (ret)
+ goto out;
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ ret = create_dir(inode, &dir, &uni_name, fid);
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
+{
+ int i, dentry, clu_offset;
+ int ret = FFS_SUCCESS;
+ s32 dentries_per_clu, dentries_per_clu_bits = 0;
+ u32 type;
+ sector_t sector;
+ struct chain_t dir, clu;
+ struct uni_name_t uni_name;
+ struct timestamp_t tm;
+ struct dentry_t *ep;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct fs_func *fs_func = p_fs->fs_func;
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+
+ /* check the validity of pointer parameters */
+ if (!dir_entry)
+ return FFS_ERROR;
+
+ /* check if the given file ID is opened */
+ if (fid->type != TYPE_DIR)
+ return FFS_PERMISSIONERR;
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ if (fid->entry == -1) {
+ dir.dir = p_fs->root_dir;
+ dir.flags = 0x01;
+ } else {
+ dir.dir = fid->start_clu;
+ dir.size = (s32)(fid->size >> p_fs->cluster_size_bits);
+ dir.flags = fid->flags;
+ }
+
+ dentry = (s32)fid->rwoffset;
+
+ if (dir.dir == CLUSTER_32(0)) {
+ /* FAT16 root_dir */
+ dentries_per_clu = p_fs->dentries_in_root;
+
+ if (dentry == dentries_per_clu) {
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ clu.dir = dir.dir;
+ clu.size = dir.size;
+ clu.flags = dir.flags;
+ }
+ } else {
+ dentries_per_clu = p_fs->dentries_per_clu;
+ dentries_per_clu_bits = ilog2(dentries_per_clu);
+
+ clu_offset = dentry >> dentries_per_clu_bits;
+ clu.dir = dir.dir;
+ clu.size = dir.size;
+ clu.flags = dir.flags;
+
+ if (clu.flags == 0x03) {
+ clu.dir += clu_offset;
+ clu.size -= clu_offset;
+ } else {
+ /* hint_information */
+ if ((clu_offset > 0) && (fid->hint_last_off > 0) &&
+ (clu_offset >= fid->hint_last_off)) {
+ clu_offset -= fid->hint_last_off;
+ clu.dir = fid->hint_last_clu;
+ }
+
+ while (clu_offset > 0) {
+ /* clu.dir = FAT_read(sb, clu.dir); */
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ clu_offset--;
+ }
+ }
+ }
+
+ while (clu.dir != CLUSTER_32(~0)) {
+ if (p_fs->dev_ejected)
+ break;
+
+ if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */
+ i = dentry % dentries_per_clu;
+ else
+ i = dentry & (dentries_per_clu - 1);
+
+ for ( ; i < dentries_per_clu; i++, dentry++) {
+ ep = get_entry_in_dir(sb, &clu, i, &sector);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ type = fs_func->get_entry_type(ep);
+
+ if (type == TYPE_UNUSED)
+ break;
+
+ if ((type != TYPE_FILE) && (type != TYPE_DIR))
+ continue;
+
+ buf_lock(sb, sector);
+ dir_entry->Attr = fs_func->get_entry_attr(ep);
+
+ fs_func->get_entry_time(ep, &tm, TM_CREATE);
+ dir_entry->CreateTimestamp.Year = tm.year;
+ dir_entry->CreateTimestamp.Month = tm.mon;
+ dir_entry->CreateTimestamp.Day = tm.day;
+ dir_entry->CreateTimestamp.Hour = tm.hour;
+ dir_entry->CreateTimestamp.Minute = tm.min;
+ dir_entry->CreateTimestamp.Second = tm.sec;
+ dir_entry->CreateTimestamp.MilliSecond = 0;
+
+ fs_func->get_entry_time(ep, &tm, TM_MODIFY);
+ dir_entry->ModifyTimestamp.Year = tm.year;
+ dir_entry->ModifyTimestamp.Month = tm.mon;
+ dir_entry->ModifyTimestamp.Day = tm.day;
+ dir_entry->ModifyTimestamp.Hour = tm.hour;
+ dir_entry->ModifyTimestamp.Minute = tm.min;
+ dir_entry->ModifyTimestamp.Second = tm.sec;
+ dir_entry->ModifyTimestamp.MilliSecond = 0;
+
+ memset((char *)&dir_entry->AccessTimestamp, 0,
+ sizeof(struct date_time_t));
+
+ *(uni_name.name) = 0x0;
+ fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
+ uni_name.name);
+ if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
+ get_uni_name_from_dos_entry(sb,
+ (struct dos_dentry_t *)ep,
+ &uni_name, 0x1);
+ nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
+ buf_unlock(sb, sector);
+
+ if (p_fs->vol_type == EXFAT) {
+ ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
+ if (!ep) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ } else {
+ get_uni_name_from_dos_entry(sb,
+ (struct dos_dentry_t *)ep,
+ &uni_name, 0x0);
+ nls_uniname_to_cstring(sb, dir_entry->ShortName,
+ &uni_name);
+ }
+
+ dir_entry->Size = fs_func->get_entry_size(ep);
+
+			/* hint information (not kept for the FAT16 root_dir) */
+			if (dir.dir != CLUSTER_32(0)) {
+				fid->hint_last_off = dentry >>
+							dentries_per_clu_bits;
+				fid->hint_last_clu = clu.dir;
+			}
+
+ fid->rwoffset = (s64)(++dentry);
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+
+ if (dir.dir == CLUSTER_32(0))
+ break; /* FAT16 root_dir */
+
+ if (clu.flags == 0x03) {
+ if ((--clu.size) > 0)
+ clu.dir++;
+ else
+ clu.dir = CLUSTER_32(~0);
+ } else {
+ /* clu.dir = FAT_read(sb, clu.dir); */
+ if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = FFS_MEDIAERR;
+ goto out;
+ }
+ }
+ }
+
+ *(dir_entry->Name) = '\0';
+
+ fid->rwoffset = (s64)(++dentry);
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
+{
+ s32 dentry;
+ int ret = FFS_SUCCESS;
+ struct chain_t dir, clu_to_free;
+ struct super_block *sb = inode->i_sb;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ /* check the validity of the given file id */
+ if (!fid)
+ return FFS_INVALIDFID;
+
+ dir.dir = fid->dir.dir;
+ dir.size = fid->dir.size;
+ dir.flags = fid->dir.flags;
+
+ dentry = fid->entry;
+
+ /* check if the file is "." or ".." */
+ if (p_fs->vol_type != EXFAT) {
+ if ((dir.dir != p_fs->root_dir) && (dentry < 2))
+ return FFS_PERMISSIONERR;
+ }
+
+ /* acquire the lock for file system critical section */
+ down(&p_fs->v_sem);
+
+ clu_to_free.dir = fid->start_clu;
+ clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
+ clu_to_free.flags = fid->flags;
+
+ if (!is_dir_empty(sb, &clu_to_free)) {
+ ret = FFS_FILEEXIST;
+ goto out;
+ }
+
+ fs_set_vol_flags(sb, VOL_DIRTY);
+
+ /* (1) update the directory entry */
+ remove_file(inode, &dir, dentry);
+
+ /* (2) free the clusters */
+ p_fs->fs_func->free_cluster(sb, &clu_to_free, 1);
+
+ fid->size = 0;
+ fid->start_clu = CLUSTER_32(~0);
+ fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
+
+#ifdef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, false);
+ fs_set_vol_flags(sb, VOL_CLEAN);
+#endif
+
+ if (p_fs->dev_ejected)
+ ret = FFS_MEDIAERR;
+
+out:
+ /* release the lock for file system critical section */
+ up(&p_fs->v_sem);
+
+ return ret;
+}
+
+/*======================================================================*/
+/* Directory Entry Operations */
+/*======================================================================*/
+
+static int exfat_readdir(struct file *filp, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(filp);
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ struct dir_entry_t de;
+ unsigned long inum;
+ loff_t cpos;
+ int err = 0;
+
+ __lock_super(sb);
+
+ cpos = ctx->pos;
+ /* Fake . and .. for the root directory. */
+ if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) {
+ while (cpos < 2) {
+ if (inode->i_ino == EXFAT_ROOT_INO)
+ inum = EXFAT_ROOT_INO;
+ else if (cpos == 0)
+ inum = inode->i_ino;
+ else /* (cpos == 1) */
+ inum = parent_ino(filp->f_path.dentry);
+
+ if (!dir_emit_dots(filp, ctx))
+ goto out;
+ cpos++;
+ ctx->pos++;
+ }
+ if (cpos == 2)
+ cpos = 0;
+ }
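+	/* a valid directory position must be aligned to the dentry size */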
+ if (cpos & (DENTRY_SIZE - 1)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+get_new:
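+	/* convert the byte position into a directory-entry index for ffsReadDir() */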
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS;
+
+ err = ffsReadDir(inode, &de);
+ if (err) {
+		/* at least we tried to read a sector; move cpos to the
+		 * next sector position (it should be aligned)
+		 */
+ if (err == FFS_MEDIAERR) {
+ cpos += 1 << p_bd->sector_size_bits;
+ cpos &= ~((1 << p_bd->sector_size_bits) - 1);
+ }
+
+ err = -EIO;
+ goto end_of_dir;
+ }
+
+ cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS;
+
+ if (!de.Name[0])
+ goto end_of_dir;
+
+ if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
+ inum = inode->i_ino;
+ } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
+ inum = parent_ino(filp->f_path.dentry);
+ } else {
+ loff_t i_pos = ((loff_t)EXFAT_I(inode)->fid.start_clu << 32) |
+ ((EXFAT_I(inode)->fid.rwoffset - 1) & 0xffffffff);
+ struct inode *tmp = exfat_iget(sb, i_pos);
+
+ if (tmp) {
+ inum = tmp->i_ino;
+ iput(tmp);
+ } else {
+ inum = iunique(sb, EXFAT_ROOT_INO);
+ }
+ }
+
+ if (!dir_emit(ctx, de.Name, strlen(de.Name), inum,
+ (de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
+ goto out;
+
+ ctx->pos = cpos;
+ goto get_new;
+
+end_of_dir:
+ ctx->pos = cpos;
+out:
+ __unlock_super(sb);
+ return err;
+}
+
+static int exfat_ioctl_volume_id(struct inode *dir)
+{
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+
+ return p_fs->vol_id;
+}
+
+static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+	struct inode *inode = filp->f_path.dentry->d_inode;
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ unsigned int flags;
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+ switch (cmd) {
+ case EXFAT_IOCTL_GET_VOLUME_ID:
+ return exfat_ioctl_volume_id(inode);
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ case EXFAT_IOC_GET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ flags = sbi->debug_flags;
+ return put_user(flags, (int __user *)arg);
+ }
+ case EXFAT_IOC_SET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (int __user *)arg))
+ return -EFAULT;
+
+ __lock_super(sb);
+ sbi->debug_flags = flags;
+ __unlock_super(sb);
+
+ return 0;
+ }
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+ default:
+ return -ENOTTY; /* Inappropriate ioctl for device */
+ }
+}
+
+static const struct file_operations exfat_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate = exfat_readdir,
+ .unlocked_ioctl = exfat_generic_ioctl,
+ .fsync = generic_file_fsync,
+};
+
+static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool excl)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else if (err == FFS_NAMETOOLONG)
+ err = -ENAMETOOLONG;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void)exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
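+	/*
+	 * i_pos packs the parent directory's start cluster in the high 32 bits
+	 * and the dentry index within it in the low 32 bits.
+	 */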
+ i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /*
+ * timestamp is already written, so mark_inode_dirty() is unnecessary.
+ */
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_find(struct inode *dir, struct qstr *qname,
+ struct file_id_t *fid)
+{
+ int err;
+
+ if (qname->len == 0)
+ return -ENOENT;
+
+ err = ffsLookupFile(dir, (u8 *)qname->name, fid);
+ if (err)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int exfat_d_anon_disconn(struct dentry *dentry)
+{
+ return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct dentry *alias;
+ int err;
+ struct file_id_t fid;
+ loff_t i_pos;
+ u64 ret;
+ mode_t i_mode;
+
+ __lock_super(sb);
+ pr_debug("%s entered\n", __func__);
+ err = exfat_find(dir, &dentry->d_name, &fid);
+ if (err) {
+ if (err == -ENOENT) {
+ inode = NULL;
+ goto out;
+ }
+ goto error;
+ }
+
+ i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+
+ i_mode = inode->i_mode;
+ if (S_ISLNK(i_mode) && !EXFAT_I(inode)->target) {
+ EXFAT_I(inode)->target = kmalloc(i_size_read(inode) + 1,
+ GFP_KERNEL);
+ if (!EXFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto error;
+ }
+ ffsReadFile(dir, &fid, EXFAT_I(inode)->target,
+ i_size_read(inode), &ret);
+ *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0';
+ }
+
+ alias = d_find_alias(inode);
+ if (alias && !exfat_d_anon_disconn(alias)) {
+ BUG_ON(d_unhashed(alias));
+ if (!S_ISDIR(i_mode))
+ d_move(alias, dentry);
+ iput(inode);
+ __unlock_super(sb);
+ pr_debug("%s exited 1\n", __func__);
+ return alias;
+ }
+ dput(alias);
+out:
+ __unlock_super(sb);
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ dentry = d_splice_alias(inode, dentry);
+ if (dentry)
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ pr_debug("%s exited 2\n", __func__);
+ return dentry;
+
+error:
+ __unlock_super(sb);
+ pr_debug("%s exited 3\n", __func__);
+ return ERR_PTR(err);
+}
+
+static inline unsigned long exfat_hash(loff_t i_pos)
+{
+ return hash_32(i_pos, EXFAT_HASH_BITS);
+}
+
+static void exfat_attach(struct inode *inode, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+
+ spin_lock(&sbi->inode_hash_lock);
+ EXFAT_I(inode)->i_pos = i_pos;
+ hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+static void exfat_detach(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_del_init(&EXFAT_I(inode)->i_hash_fat);
+ EXFAT_I(inode)->i_pos = 0;
+ spin_unlock(&sbi->inode_hash_lock);
+}
+
+static int exfat_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
+ if (err) {
+ if (err == FFS_PERMISSIONERR)
+ err = -EPERM;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void)exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_detach(inode);
+ remove_inode_hash(inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_symlink(struct inode *dir, struct dentry *dentry,
+ const char *target)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+ u64 len = (u64)strlen(target);
+ u64 ret;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+
+ err = ffsWriteFile(dir, &fid, (char *)target, len, &ret);
+
+ if (err) {
+ ffsRemoveFile(dir, &fid);
+
+ if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void)exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+
+ i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL);
+ if (!EXFAT_I(inode)->target) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+ struct file_id_t fid;
+ loff_t i_pos;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid);
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else if (err == FFS_NAMETOOLONG)
+ err = -ENAMETOOLONG;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void)exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ inc_nlink(dir);
+
+ i_pos = ((loff_t)fid.dir.dir << 32) | (fid.entry & 0xffffffff);
+
+ inode = exfat_build_inode(sb, &fid, i_pos);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ INC_IVERSION(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ /* timestamp is already written, so mark_inode_dirty() is unneeded. */
+
+ dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
+ d_instantiate(dentry, inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dir->i_sb;
+ int err;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
+ if (err) {
+ if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -ENOTEMPTY;
+ else if (err == FFS_NOTFOUND)
+ err = -ENOENT;
+ else if (err == FFS_DIRBUSY)
+ err = -EBUSY;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(dir);
+ dir->i_mtime = dir->i_atime = current_time(dir);
+ if (IS_DIRSYNC(dir))
+ (void)exfat_sync_inode(dir);
+ else
+ mark_inode_dirty(dir);
+ drop_nlink(dir);
+
+ clear_nlink(inode);
+ inode->i_mtime = inode->i_atime = current_time(inode);
+ exfat_detach(inode);
+ remove_inode_hash(inode);
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct inode *old_inode, *new_inode;
+ struct super_block *sb = old_dir->i_sb;
+ loff_t i_pos;
+ int err;
+
+ if (flags)
+ return -EINVAL;
+
+ __lock_super(sb);
+
+ pr_debug("%s entered\n", __func__);
+
+ old_inode = old_dentry->d_inode;
+ new_inode = new_dentry->d_inode;
+
+ EXFAT_I(old_inode)->fid.size = i_size_read(old_inode);
+
+ err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
+ new_dentry);
+ if (err) {
+ if (err == FFS_PERMISSIONERR)
+ err = -EPERM;
+ else if (err == FFS_INVALIDPATH)
+ err = -EINVAL;
+ else if (err == FFS_FILEEXIST)
+ err = -EEXIST;
+ else if (err == FFS_NOTFOUND)
+ err = -ENOENT;
+ else if (err == FFS_FULL)
+ err = -ENOSPC;
+ else
+ err = -EIO;
+ goto out;
+ }
+ INC_IVERSION(new_dir);
+ new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
+ current_time(new_dir);
+ if (IS_DIRSYNC(new_dir))
+ (void)exfat_sync_inode(new_dir);
+ else
+ mark_inode_dirty(new_dir);
+
+ i_pos = ((loff_t)EXFAT_I(old_inode)->fid.dir.dir << 32) |
+ (EXFAT_I(old_inode)->fid.entry & 0xffffffff);
+
+ exfat_detach(old_inode);
+ exfat_attach(old_inode, i_pos);
+ if (IS_DIRSYNC(new_dir))
+ (void)exfat_sync_inode(old_inode);
+ else
+ mark_inode_dirty(old_inode);
+
+ if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) {
+ drop_nlink(old_dir);
+ if (!new_inode)
+ inc_nlink(new_dir);
+ }
+ INC_IVERSION(old_dir);
+ old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ if (IS_DIRSYNC(old_dir))
+ (void)exfat_sync_inode(old_dir);
+ else
+ mark_inode_dirty(old_dir);
+
+ if (new_inode) {
+ exfat_detach(new_inode);
+ drop_nlink(new_inode);
+ if (S_ISDIR(new_inode->i_mode))
+ drop_nlink(new_inode);
+ new_inode->i_ctime = current_time(new_inode);
+ }
+
+out:
+ __unlock_super(sb);
+ pr_debug("%s exited\n", __func__);
+ return err;
+}
+
+static int exfat_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ int err, err2;
+
+ err = generic_cont_expand_simple(inode, size);
+ if (err != 0)
+ return err;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
+
+ if (IS_SYNC(inode)) {
+ err = filemap_fdatawrite_range(mapping, start,
+ start + count - 1);
+ err2 = sync_mapping_buffers(mapping);
+ err = (err) ? (err) : (err2);
+ err2 = write_inode_now(inode, 1);
+ err = (err) ? (err) : (err2);
+ if (!err)
+ err = filemap_fdatawait_range(mapping, start,
+ start + count - 1);
+ }
+ return err;
+}
+
+static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
+{
+ mode_t allow_utime = sbi->options.allow_utime;
+
+ if (!uid_eq(current_fsuid(), inode->i_uid)) {
+ if (in_group_p(inode->i_gid))
+ allow_utime >>= 3;
+ if (allow_utime & MAY_WRITE)
+ return 1;
+ }
+
+ /* use a default check */
+ return 0;
+}
+
+static int exfat_sanitize_mode(const struct exfat_sb_info *sbi,
+ struct inode *inode, umode_t *mode_ptr)
+{
+ mode_t i_mode, mask, perm;
+
+ i_mode = inode->i_mode;
+
+ if (S_ISREG(i_mode) || S_ISLNK(i_mode))
+ mask = sbi->options.fs_fmask;
+ else
+ mask = sbi->options.fs_dmask;
+
+ perm = *mode_ptr & ~(S_IFMT | mask);
+
+ /* Of the r and x bits, all (subject to umask) must be present.*/
+ if ((perm & 0555) != (i_mode & 0555))
+ return -EPERM;
+
+ if (exfat_mode_can_hold_ro(inode)) {
+ /*
+ * Of the w bits, either all (subject to umask) or none must be
+ * present.
+ */
+ if ((perm & 0222) && ((perm & 0222) != (0222 & ~mask)))
+ return -EPERM;
+ } else {
+ /*
+ * If exfat_mode_can_hold_ro(inode) is false, can't change w
+ * bits.
+ */
+ if ((perm & 0222) != (0222 & ~mask))
+ return -EPERM;
+ }
+
+ *mode_ptr &= S_IFMT | perm;
+
+ return 0;
+}
+
+static void exfat_truncate(struct inode *inode, loff_t old_size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ int err;
+
+ __lock_super(sb);
+
+ /*
+ * This protects against truncating a file bigger than it was then
+ * trying to write into the hole.
+ */
+ if (EXFAT_I(inode)->mmu_private > i_size_read(inode))
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+
+ if (EXFAT_I(inode)->fid.start_clu == 0)
+ goto out;
+
+ err = ffsTruncateFile(inode, old_size, i_size_read(inode));
+ if (err)
+ goto out;
+
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ if (IS_DIRSYNC(inode))
+ (void)exfat_sync_inode(inode);
+ else
+ mark_inode_dirty(inode);
+
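+	/* i_blocks counts 512-byte sectors; round i_size up to a whole cluster */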
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) &
+ ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+out:
+ __unlock_super(sb);
+}
+
+static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ unsigned int ia_valid;
+ int error;
+ loff_t old_size;
+
+ pr_debug("%s entered\n", __func__);
+
+	if ((attr->ia_valid & ATTR_SIZE) &&
+	    (attr->ia_size > i_size_read(inode))) {
+ error = exfat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ return error;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+
+ ia_valid = attr->ia_valid;
+
+	if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
+	    exfat_allow_set_time(sbi, inode)) {
+ attr->ia_valid &= ~(ATTR_MTIME_SET |
+ ATTR_ATIME_SET |
+ ATTR_TIMES_SET);
+ }
+
+ error = setattr_prepare(dentry, attr);
+ attr->ia_valid = ia_valid;
+ if (error)
+ return error;
+
+ if (((attr->ia_valid & ATTR_UID) &&
+ (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
+ ((attr->ia_valid & ATTR_GID) &&
+ (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
+ ((attr->ia_valid & ATTR_MODE) &&
+ (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | 0777)))) {
+ return -EPERM;
+ }
+
+	/*
+	 * We don't return -EPERM here. Yes, it is strange, but this matches
+	 * the long-standing behavior.
+	 */
+ if (attr->ia_valid & ATTR_MODE) {
+ if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
+ attr->ia_valid &= ~ATTR_MODE;
+ }
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ old_size = i_size_read(inode);
+ down_write(&EXFAT_I(inode)->truncate_lock);
+ truncate_setsize(inode, attr->ia_size);
+ exfat_truncate(inode, old_size);
+ up_write(&EXFAT_I(inode)->truncate_lock);
+ }
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ pr_debug("%s exited\n", __func__);
+ return error;
+}
+
+static int exfat_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
+{
+ struct inode *inode = path->dentry->d_inode;
+
+ pr_debug("%s entered\n", __func__);
+
+ generic_fillattr(inode, stat);
+ stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size;
+
+ pr_debug("%s exited\n", __func__);
+ return 0;
+}
+
+static const struct inode_operations exfat_dir_inode_operations = {
+ .create = exfat_create,
+ .lookup = exfat_lookup,
+ .unlink = exfat_unlink,
+ .symlink = exfat_symlink,
+ .mkdir = exfat_mkdir,
+ .rmdir = exfat_rmdir,
+ .rename = exfat_rename,
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
+
+/*======================================================================*/
+/* File Operations */
+/*======================================================================*/
+static const char *exfat_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+	if (ei->target)
+		return (char *)ei->target;
+	return NULL;
+}
+
+static const struct inode_operations exfat_symlink_inode_operations = {
+ .get_link = exfat_get_link,
+};
+
+static int exfat_file_release(struct inode *inode, struct file *filp)
+{
+ struct super_block *sb = inode->i_sb;
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ ffsSyncVol(sb, false);
+ return 0;
+}
+
+static const struct file_operations exfat_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .mmap = generic_file_mmap,
+ .release = exfat_file_release,
+ .unlocked_ioctl = exfat_generic_ioctl,
+ .fsync = generic_file_fsync,
+ .splice_read = generic_file_splice_read,
+};
+
+static const struct inode_operations exfat_file_inode_operations = {
+ .setattr = exfat_setattr,
+ .getattr = exfat_getattr,
+};
+
+/*======================================================================*/
+/* Address Space Operations */
+/*======================================================================*/
+
+static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+ unsigned long *mapped_blocks, int *create)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct bd_info_t *p_bd = &(sbi->bd_info);
+ const unsigned long blocksize = sb->s_blocksize;
+ const unsigned char blocksize_bits = sb->s_blocksize_bits;
+ sector_t last_block;
+ int err, clu_offset, sec_offset;
+ unsigned int cluster;
+
+ *phys = 0;
+ *mapped_blocks = 0;
+
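+	/* the FAT12/16 root directory lives in a fixed sector area, not in clusters */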
+ if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) {
+ if (inode->i_ino == EXFAT_ROOT_INO) {
+ if (sector <
+ (p_fs->dentries_in_root >>
+ (p_bd->sector_size_bits - DENTRY_SIZE_BITS))) {
+ *phys = sector + p_fs->root_start_sector;
+ *mapped_blocks = 1;
+ }
+ return 0;
+ }
+ }
+
+ last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
+ if (sector >= last_block) {
+ if (*create == 0)
+ return 0;
+ } else {
+ *create = 0;
+ }
+
+ /* cluster offset */
+ clu_offset = sector >> p_fs->sectors_per_clu_bits;
+
+ /* sector offset in cluster */
+ sec_offset = sector & (p_fs->sectors_per_clu - 1);
+
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+
+ err = ffsMapCluster(inode, clu_offset, &cluster);
+
+ if (err) {
+ if (err == FFS_FULL)
+ return -ENOSPC;
+ else
+ return -EIO;
+ } else if (cluster != CLUSTER_32(~0)) {
+ *phys = START_SECTOR(cluster) + sec_offset;
+ *mapped_blocks = p_fs->sectors_per_clu - sec_offset;
+ }
+
+ return 0;
+}
+
+static int exfat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
+ int err;
+ unsigned long mapped_blocks;
+ sector_t phys;
+
+ __lock_super(sb);
+
+ err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create);
+ if (err) {
+ __unlock_super(sb);
+ return err;
+ }
+
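+	/* clamp the mapping to the contiguous run reported by exfat_bmap() */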
+ if (phys) {
+ max_blocks = min(mapped_blocks, max_blocks);
+ if (create) {
+ EXFAT_I(inode)->mmu_private += max_blocks <<
+ sb->s_blocksize_bits;
+ set_buffer_new(bh_result);
+ }
+ map_bh(bh_result, sb, phys);
+ }
+
+ bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+ __unlock_super(sb);
+
+ return 0;
+}
+
+static int exfat_readpage(struct file *file, struct page *page)
+{
+ return mpage_readpage(page, exfat_get_block);
+}
+
+static int exfat_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
+}
+
+static int exfat_writepage(struct page *page, struct writeback_control *wbc)
+{
+ return block_write_full_page(page, exfat_get_block, wbc);
+}
+
+static int exfat_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return mpage_writepages(mapping, wbc, exfat_get_block);
+}
+
+static void exfat_write_failed(struct address_space *mapping, loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > i_size_read(inode)) {
+ truncate_pagecache(inode, i_size_read(inode));
+ EXFAT_I(inode)->fid.size = i_size_read(inode);
+ exfat_truncate(inode, i_size_read(inode));
+ }
+}
+
+static int exfat_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int flags,
+ struct page **pagep, void **fsdata)
+{
+ int ret;
+
+ *pagep = NULL;
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ exfat_get_block,
+ &EXFAT_I(mapping->host)->mmu_private);
+
+ if (ret < 0)
+ exfat_write_failed(mapping, pos + len);
+ return ret;
+}
+
+static int exfat_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *pagep, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ int err;
+
+ err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
+
+ if (err < len)
+ exfat_write_failed(mapping, pos + len);
+
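+	/* on a successful write, update timestamps and set ATTR_ARCHIVE once */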
+ if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ fid->attr |= ATTR_ARCHIVE;
+ mark_inode_dirty(inode);
+ }
+ return err;
+}
+
+static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ ssize_t ret;
+ int rw;
+
+ rw = iov_iter_rw(iter);
+
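+	/*
+	 * Direct writes past the allocated area (mmu_private) are not handled
+	 * here; returning 0 makes the caller fall back to buffered I/O.
+	 */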
+ if (rw == WRITE) {
+ if (EXFAT_I(inode)->mmu_private < iov_iter_count(iter))
+ return 0;
+ }
+ ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
+
+ if ((ret < 0) && (rw & WRITE))
+ exfat_write_failed(mapping, iov_iter_count(iter));
+ return ret;
+}
+
+static sector_t _exfat_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t blocknr;
+
+ /* exfat_get_cluster() assumes the requested blocknr isn't truncated. */
+ down_read(&EXFAT_I(mapping->host)->truncate_lock);
+ blocknr = generic_block_bmap(mapping, block, exfat_get_block);
+ up_read(&EXFAT_I(mapping->host)->truncate_lock);
+
+ return blocknr;
+}
+
+static const struct address_space_operations exfat_aops = {
+ .readpage = exfat_readpage,
+ .readpages = exfat_readpages,
+ .writepage = exfat_writepage,
+ .writepages = exfat_writepages,
+ .write_begin = exfat_write_begin,
+ .write_end = exfat_write_end,
+ .direct_IO = exfat_direct_IO,
+ .bmap = _exfat_bmap
+};
+
+/*======================================================================*/
+/* Super Operations */
+/*======================================================================*/
+
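+/* Look up a cached inode by its on-disk directory entry position (i_pos) */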
+static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *info;
+ struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
+ struct inode *inode = NULL;
+
+ spin_lock(&sbi->inode_hash_lock);
+ hlist_for_each_entry(info, head, i_hash_fat) {
+ BUG_ON(info->vfs_inode.i_sb != sb);
+
+ if (i_pos != info->i_pos)
+ continue;
+ inode = igrab(&info->vfs_inode);
+ if (inode)
+ break;
+ }
+ spin_unlock(&sbi->inode_hash_lock);
+ return inode;
+}
+
+/* doesn't deal with root inode */
+static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct dir_entry_t info;
+
+ memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
+
+ ffsReadStat(inode, &info);
+
+ EXFAT_I(inode)->i_pos = 0;
+ EXFAT_I(inode)->target = NULL;
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ INC_IVERSION(inode);
+ inode->i_generation = get_seconds();
+
+ if (info.Attr & ATTR_SUBDIR) { /* directory */
+ inode->i_generation &= ~1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ set_nlink(inode, info.NumSubdirs);
+ } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */
+ inode->i_generation |= 1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_symlink_inode_operations;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ } else { /* regular file */
+ inode->i_generation |= 1;
+ inode->i_mode = exfat_make_mode(sbi, info.Attr, 0777);
+ inode->i_op = &exfat_file_inode_operations;
+ inode->i_fop = &exfat_file_operations;
+ inode->i_mapping->a_ops = &exfat_aops;
+ inode->i_mapping->nrpages = 0;
+
+ i_size_write(inode, info.Size);
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+ }
+ exfat_save_attr(inode, info.Attr);
+
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
+ & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+
+ exfat_time_fat2unix(&inode->i_mtime, &info.ModifyTimestamp);
+ exfat_time_fat2unix(&inode->i_ctime, &info.CreateTimestamp);
+ exfat_time_fat2unix(&inode->i_atime, &info.AccessTimestamp);
+
+ return 0;
+}
+
+static struct inode *exfat_build_inode(struct super_block *sb,
+ struct file_id_t *fid, loff_t i_pos)
+{
+ struct inode *inode;
+ int err;
+
+ inode = exfat_iget(sb, i_pos);
+ if (inode)
+ goto out;
+ inode = new_inode(sb);
+ if (!inode) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+ SET_IVERSION(inode, 1);
+ err = exfat_fill_inode(inode, fid);
+ if (err) {
+ iput(inode);
+ inode = ERR_PTR(err);
+ goto out;
+ }
+ exfat_attach(inode, i_pos);
+ insert_inode_hash(inode);
+out:
+ return inode;
+}
+
+static int exfat_sync_inode(struct inode *inode)
+{
+ return exfat_write_inode(inode, NULL);
+}
+
+static struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+ struct exfat_inode_info *ei;
+
+ ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS);
+ if (!ei)
+ return NULL;
+
+ init_rwsem(&ei->truncate_lock);
+
+ return &ei->vfs_inode;
+}
+
+static void exfat_destroy_inode(struct inode *inode)
+{
+ kfree(EXFAT_I(inode)->target);
+ EXFAT_I(inode)->target = NULL;
+
+ kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode));
+}
+
+static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ struct dir_entry_t info;
+
+ if (inode->i_ino == EXFAT_ROOT_INO)
+ return 0;
+
+ info.Attr = exfat_make_attr(inode);
+ info.Size = i_size_read(inode);
+
+ exfat_time_unix2fat(&inode->i_mtime, &info.ModifyTimestamp);
+ exfat_time_unix2fat(&inode->i_ctime, &info.CreateTimestamp);
+ exfat_time_unix2fat(&inode->i_atime, &info.AccessTimestamp);
+
+ ffsWriteStat(inode, &info);
+
+ return 0;
+}
+
+static void exfat_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+
+ if (!inode->i_nlink)
+ i_size_write(inode, 0);
+ invalidate_inode_buffers(inode);
+ clear_inode(inode);
+ exfat_detach(inode);
+
+ remove_inode_hash(inode);
+}
+
+static void exfat_free_super(struct exfat_sb_info *sbi)
+{
+ if (sbi->nls_disk)
+ unload_nls(sbi->nls_disk);
+ if (sbi->nls_io)
+ unload_nls(sbi->nls_io);
+ if (sbi->options.iocharset != exfat_default_iocharset)
+ kfree(sbi->options.iocharset);
+ /* s_lock was initialized in exfat_fill_super() */
+ mutex_destroy(&sbi->s_lock);
+ kfree(sbi);
+}
+
+static void exfat_put_super(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (__is_sb_dirty(sb))
+ exfat_write_super(sb);
+
+ ffsUmountVol(sb);
+
+ sb->s_fs_info = NULL;
+ exfat_free_super(sbi);
+}
+
+static void exfat_write_super(struct super_block *sb)
+{
+ __lock_super(sb);
+
+ __set_sb_clean(sb);
+
+ if (!sb_rdonly(sb))
+ ffsSyncVol(sb, true);
+
+ __unlock_super(sb);
+}
+
+static int exfat_sync_fs(struct super_block *sb, int wait)
+{
+ int err = 0;
+
+ if (__is_sb_dirty(sb)) {
+ __lock_super(sb);
+ __set_sb_clean(sb);
+ err = ffsSyncVol(sb, true);
+ __unlock_super(sb);
+ }
+
+ return err;
+}
+
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct vol_info_t info;
+
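+ /* Query the volume only when the cached used-cluster count is not known yet */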
+ if (p_fs->used_clusters == UINT_MAX) {
+ if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR)
+ return -EIO;
+
+ } else {
+ info.FatType = p_fs->vol_type;
+ info.ClusterSize = p_fs->cluster_size;
+ info.NumClusters = p_fs->num_clusters - 2;
+ info.UsedClusters = p_fs->used_clusters;
+ info.FreeClusters = info.NumClusters - info.UsedClusters;
+
+ if (p_fs->dev_ejected)
+ pr_info("[EXFAT] statfs on device that is ejected\n");
+ }
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = info.ClusterSize;
+ buf->f_blocks = info.NumClusters;
+ buf->f_bfree = info.FreeClusters;
+ buf->f_bavail = info.FreeClusters;
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ buf->f_namelen = 260;
+
+ return 0;
+}
+
+static int exfat_remount(struct super_block *sb, int *flags, char *data)
+{
+ *flags |= SB_NODIRATIME;
+ return 0;
+}
+
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+ if (__kuid_val(opts->fs_uid))
+ seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid));
+ if (__kgid_val(opts->fs_gid))
+ seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid));
+ seq_printf(m, ",fmask=%04o", opts->fs_fmask);
+ seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+ if (opts->allow_utime)
+ seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
+ if (sbi->nls_disk)
+ seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
+ if (sbi->nls_io)
+ seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
+ seq_printf(m, ",namecase=%u", opts->casesensitive);
+ if (opts->errors == EXFAT_ERRORS_CONT)
+ seq_puts(m, ",errors=continue");
+ else if (opts->errors == EXFAT_ERRORS_PANIC)
+ seq_puts(m, ",errors=panic");
+ else
+ seq_puts(m, ",errors=remount-ro");
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard)
+ seq_puts(m, ",discard");
+#endif
+ return 0;
+}
+
+static const struct super_operations exfat_sops = {
+ .alloc_inode = exfat_alloc_inode,
+ .destroy_inode = exfat_destroy_inode,
+ .write_inode = exfat_write_inode,
+ .evict_inode = exfat_evict_inode,
+ .put_super = exfat_put_super,
+ .sync_fs = exfat_sync_fs,
+ .statfs = exfat_statfs,
+ .remount_fs = exfat_remount,
+ .show_options = exfat_show_options,
+};
+
+/*======================================================================*/
+/* Export Operations */
+/*======================================================================*/
+
+static struct inode *exfat_nfs_get_inode(struct super_block *sb, u64 ino,
+ u32 generation)
+{
+ struct inode *inode = NULL;
+
+ if (ino < EXFAT_ROOT_INO)
+ return inode;
+ inode = ilookup(sb, ino);
+
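+ /* A generation mismatch means the file handle is stale (the inode number was reused) */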
+ if (inode && generation && (inode->i_generation != generation)) {
+ iput(inode);
+ inode = NULL;
+ }
+
+ return inode;
+}
+
+static struct dentry *exfat_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ exfat_nfs_get_inode);
+}
+
+static struct dentry *exfat_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ exfat_nfs_get_inode);
+}
+
+static const struct export_operations exfat_export_ops = {
+ .fh_to_dentry = exfat_fh_to_dentry,
+ .fh_to_parent = exfat_fh_to_parent,
+};
+
+/*======================================================================*/
+/* Super Block Read Operations */
+/*======================================================================*/
+
+enum {
+ Opt_uid,
+ Opt_gid,
+ Opt_umask,
+ Opt_dmask,
+ Opt_fmask,
+ Opt_allow_utime,
+ Opt_codepage,
+ Opt_charset,
+ Opt_namecase,
+ Opt_debug,
+ Opt_err_cont,
+ Opt_err_panic,
+ Opt_err_ro,
+ Opt_utf8_hack,
+ Opt_err,
+#ifdef CONFIG_EXFAT_DISCARD
+ Opt_discard,
+#endif /* CONFIG_EXFAT_DISCARD */
+};
+
+static const match_table_t exfat_tokens = {
+ {Opt_uid, "uid=%u"},
+ {Opt_gid, "gid=%u"},
+ {Opt_umask, "umask=%o"},
+ {Opt_dmask, "dmask=%o"},
+ {Opt_fmask, "fmask=%o"},
+ {Opt_allow_utime, "allow_utime=%o"},
+ {Opt_codepage, "codepage=%u"},
+ {Opt_charset, "iocharset=%s"},
+ {Opt_namecase, "namecase=%u"},
+ {Opt_debug, "debug"},
+ {Opt_err_cont, "errors=continue"},
+ {Opt_err_panic, "errors=panic"},
+ {Opt_err_ro, "errors=remount-ro"},
+ {Opt_utf8_hack, "utf8"},
+#ifdef CONFIG_EXFAT_DISCARD
+ {Opt_discard, "discard"},
+#endif /* CONFIG_EXFAT_DISCARD */
+ {Opt_err, NULL}
+};
+
+static int parse_options(char *options, int silent, int *debug,
+ struct exfat_mount_options *opts)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ char *iocharset;
+
+ opts->fs_uid = current_uid();
+ opts->fs_gid = current_gid();
+ opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->allow_utime = U16_MAX;
+ opts->codepage = exfat_default_codepage;
+ opts->iocharset = exfat_default_iocharset;
+ opts->casesensitive = 0;
+ opts->errors = EXFAT_ERRORS_RO;
+#ifdef CONFIG_EXFAT_DISCARD
+ opts->discard = 0;
+#endif
+ *debug = 0;
+
+ if (!options)
+ goto out;
+
+ while ((p = strsep(&options, ","))) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, exfat_tokens, args);
+ switch (token) {
+ case Opt_uid:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->fs_uid = KUIDT_INIT(option);
+ break;
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->fs_gid = KGIDT_INIT(option);
+ break;
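+ /* "umask" sets both fmask and dmask; fmask/dmask override only one of them */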
+ case Opt_umask:
+ case Opt_dmask:
+ case Opt_fmask:
+ if (match_octal(&args[0], &option))
+ return 0;
+ if (token != Opt_dmask)
+ opts->fs_fmask = option;
+ if (token != Opt_fmask)
+ opts->fs_dmask = option;
+ break;
+ case Opt_allow_utime:
+ if (match_octal(&args[0], &option))
+ return 0;
+ opts->allow_utime = option & 0022;
+ break;
+ case Opt_codepage:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->codepage = option;
+ break;
+ case Opt_charset:
+ if (opts->iocharset != exfat_default_iocharset)
+ kfree(opts->iocharset);
+ iocharset = match_strdup(&args[0]);
+ if (!iocharset)
+ return -ENOMEM;
+ opts->iocharset = iocharset;
+ break;
+ case Opt_namecase:
+ if (match_int(&args[0], &option))
+ return 0;
+ opts->casesensitive = option;
+ break;
+ case Opt_err_cont:
+ opts->errors = EXFAT_ERRORS_CONT;
+ break;
+ case Opt_err_panic:
+ opts->errors = EXFAT_ERRORS_PANIC;
+ break;
+ case Opt_err_ro:
+ opts->errors = EXFAT_ERRORS_RO;
+ break;
+ case Opt_debug:
+ *debug = 1;
+ break;
+#ifdef CONFIG_EXFAT_DISCARD
+ case Opt_discard:
+ opts->discard = 1;
+ break;
+#endif /* CONFIG_EXFAT_DISCARD */
+ case Opt_utf8_hack:
+ break;
+ default:
+ if (!silent)
+ pr_err("[EXFAT] Unrecognized mount option %s or missing value\n",
+ p);
+ return -EINVAL;
+ }
+ }
+
+out:
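+ /* Default: allow utime() for the classes (group/other) that dmask does not write-protect */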
+ if (opts->allow_utime == U16_MAX)
+ opts->allow_utime = ~opts->fs_dmask & 0022;
+
+ return 0;
+}
+
+static void exfat_hash_init(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int i;
+
+ spin_lock_init(&sbi->inode_hash_lock);
+ for (i = 0; i < EXFAT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
+}
+
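+/* Set up the root directory inode by hand; it has no regular directory entry of its own */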
+static int exfat_read_root(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct dir_entry_t info;
+
+ EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
+ EXFAT_I(inode)->fid.dir.flags = 0x01;
+ EXFAT_I(inode)->fid.entry = -1;
+ EXFAT_I(inode)->fid.start_clu = p_fs->root_dir;
+ EXFAT_I(inode)->fid.flags = 0x01;
+ EXFAT_I(inode)->fid.type = TYPE_DIR;
+ EXFAT_I(inode)->fid.rwoffset = 0;
+ EXFAT_I(inode)->fid.hint_last_off = -1;
+
+ EXFAT_I(inode)->target = NULL;
+
+ ffsReadStat(inode, &info);
+
+ inode->i_uid = sbi->options.fs_uid;
+ inode->i_gid = sbi->options.fs_gid;
+ INC_IVERSION(inode);
+ inode->i_generation = 0;
+ inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, 0777);
+ inode->i_op = &exfat_dir_inode_operations;
+ inode->i_fop = &exfat_dir_operations;
+
+ i_size_write(inode, info.Size);
+ inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1))
+ & ~((loff_t)p_fs->cluster_size - 1)) >> 9;
+ EXFAT_I(inode)->i_pos = ((loff_t)p_fs->root_dir << 32) | 0xffffffff;
+ EXFAT_I(inode)->mmu_private = i_size_read(inode);
+
+ exfat_save_attr(inode, ATTR_SUBDIR);
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ set_nlink(inode, info.NumSubdirs + 2);
+
+ return 0;
+}
+
+static void setup_dops(struct super_block *sb)
+{
+ if (EXFAT_SB(sb)->options.casesensitive == 0)
+ sb->s_d_op = &exfat_ci_dentry_ops;
+ else
+ sb->s_d_op = &exfat_dentry_ops;
+}
+
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode = NULL;
+ struct exfat_sb_info *sbi;
+ int debug, ret;
+ long error;
+ char buf[50];
+
+ /*
+ * GFP_KERNEL is ok here, because while we do hold the
+ * superblock lock, memory pressure can't call back into
+ * the filesystem, since we're only just about to mount
+ * it and have no inodes etc active!
+ */
+ sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+ mutex_init(&sbi->s_lock);
+ sb->s_fs_info = sbi;
+ sb->s_flags |= SB_NODIRATIME;
+ sb->s_magic = EXFAT_SUPER_MAGIC;
+ sb->s_op = &exfat_sops;
+ sb->s_export_op = &exfat_export_ops;
+
+ error = parse_options(data, silent, &debug, &sbi->options);
+ if (error)
+ goto out_fail;
+
+ setup_dops(sb);
+
+ error = -EIO;
+ sb_min_blocksize(sb, 512);
+ sb->s_maxbytes = 0x7fffffffffffffffLL; /* maximum file size */
+
+ ret = ffsMountVol(sb);
+ if (ret) {
+ if (!silent)
+ pr_err("[EXFAT] ffsMountVol failed\n");
+
+ goto out_fail;
+ }
+
+ /* set up enough so that it can read an inode */
+ exfat_hash_init(sb);
+
+ /*
+ * The low byte of the FAT's first entry must match the media
+ * field. In practice, however, too many devices write the
+ * wrong value, so that validity check has been removed.
+ *
+ * if (FAT_FIRST_ENT(sb, media) != first)
+ */
+
+ /* the codepage option only matters for non-exFAT (FAT/VFAT) volumes */
+ if (sbi->fs_info.vol_type != EXFAT) {
+ error = -EINVAL;
+ sprintf(buf, "cp%d", sbi->options.codepage);
+ sbi->nls_disk = load_nls(buf);
+ if (!sbi->nls_disk) {
+ pr_err("[EXFAT] Codepage %s not found\n", buf);
+ goto out_fail2;
+ }
+ }
+
+ sbi->nls_io = load_nls(sbi->options.iocharset);
+
+ error = -ENOMEM;
+ root_inode = new_inode(sb);
+ if (!root_inode)
+ goto out_fail2;
+ root_inode->i_ino = EXFAT_ROOT_INO;
+ SET_IVERSION(root_inode, 1);
+
+ error = exfat_read_root(root_inode);
+ if (error < 0)
+ goto out_fail2;
+ error = -ENOMEM;
+ exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos);
+ insert_inode_hash(root_inode);
+ sb->s_root = d_make_root(root_inode);
+ if (!sb->s_root) {
+ pr_err("[EXFAT] failed to allocate the root dentry\n");
+ root_inode = NULL; /* d_make_root() drops the inode on failure */
+ goto out_fail2;
+ }
+
+ return 0;
+
+out_fail2:
+ ffsUmountVol(sb);
+out_fail:
+ if (root_inode)
+ iput(root_inode);
+ sb->s_fs_info = NULL;
+ exfat_free_super(sbi);
+ return error;
+}
+
+static struct dentry *exfat_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super);
+}
+
+static void init_once(void *foo)
+{
+ struct exfat_inode_info *ei = (struct exfat_inode_info *)foo;
+
+ INIT_HLIST_NODE(&ei->i_hash_fat);
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int __init exfat_init_inodecache(void)
+{
+ exfat_inode_cachep = kmem_cache_create("exfat_inode_cache",
+ sizeof(struct exfat_inode_info),
+ 0,
+ (SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (!exfat_inode_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit exfat_destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(exfat_inode_cachep);
+}
+
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+static void exfat_debug_kill_sb(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct block_device *bdev = sb->s_bdev;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+
+ long flags;
+
+ if (sbi) {
+ flags = sbi->debug_flags;
+
+ if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) {
+ /*
+ * invalidate_bdev drops all device cache include
+ * dirty. We use this to simulate device removal.
+ */
+ down(&p_fs->v_sem);
+ FAT_release_all(sb);
+ buf_release_all(sb);
+ up(&p_fs->v_sem);
+
+ invalidate_bdev(bdev);
+ }
+ }
+
+ kill_block_super(sb);
+}
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+
+static struct file_system_type exfat_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "exfat",
+ .mount = exfat_fs_mount,
+#ifdef CONFIG_EXFAT_KERNEL_DEBUG
+ .kill_sb = exfat_debug_kill_sb,
+#else
+ .kill_sb = kill_block_super,
+#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static int __init init_exfat(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct dos_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct ext_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct file_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct strm_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct name_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct bmap_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct case_dentry_t) != DENTRY_SIZE);
+ BUILD_BUG_ON(sizeof(struct volm_dentry_t) != DENTRY_SIZE);
+
+ pr_info("exFAT: Version %s\n", EXFAT_VERSION);
+
+ err = exfat_init_inodecache();
+ if (err)
+ return err;
+
+ err = register_filesystem(&exfat_fs_type);
+ if (err) {
+ kmem_cache_destroy(exfat_inode_cachep);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit exit_exfat(void)
+{
+ unregister_filesystem(&exfat_fs_type);
+ exfat_destroy_inodecache();
+}
+
+module_init(init_exfat);
+module_exit(exit_exfat);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("exFAT Filesystem Driver");
+MODULE_ALIAS_FS("exfat");
diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c
new file mode 100644
index 000000000000..366082fb3dab
--- /dev/null
+++ b/drivers/staging/exfat/exfat_upcase.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/types.h>
+#include "exfat.h"
+
+const u8 uni_upcase[NUM_UPCASE << 1] = {
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00,
+ 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00,
+ 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00,
+ 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00,
+ 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
+ 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00,
+ 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00,
+ 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00,
+ 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00,
+ 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00,
+ 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00,
+ 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00,
+ 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00,
+ 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00,
+ 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00,
+ 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00,
+ 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
+ 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
+ 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
+ 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00,
+ 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00,
+ 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00,
+ 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00,
+ 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00,
+ 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00,
+ 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00,
+ 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00,
+ 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00,
+ 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00,
+ 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00,
+ 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00,
+ 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00,
+ 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00,
+ 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00,
+ 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00,
+ 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00,
+ 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00,
+ 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00,
+ 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00,
+ 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00,
+ 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00,
+ 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00,
+ 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00,
+ 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00,
+ 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
+ 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
+ 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
+ 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
+ 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00,
+ 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00,
+ 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00,
+ 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00,
+ 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00,
+ 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00,
+ 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00,
+ 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00,
+ 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01,
+ 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01,
+ 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01,
+ 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01,
+ 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01,
+ 0x14, 0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01,
+ 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01,
+ 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01,
+ 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01,
+ 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01,
+ 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01,
+ 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01,
+ 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01,
+ 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01,
+ 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01,
+ 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01,
+ 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01,
+ 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01,
+ 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01,
+ 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01,
+ 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01,
+ 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01,
+ 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01,
+ 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01,
+ 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01,
+ 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01,
+ 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01,
+ 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01,
+ 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01,
+ 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01,
+ 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01,
+ 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01,
+ 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01,
+ 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01,
+ 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01,
+ 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01,
+ 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01,
+ 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01,
+ 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01,
+ 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01,
+ 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01,
+ 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01,
+ 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01,
+ 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01,
+ 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01,
+ 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01,
+ 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01,
+ 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01,
+ 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01,
+ 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01,
+ 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01,
+ 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01,
+ 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01,
+ 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01,
+ 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01,
+ 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01,
+ 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01,
+ 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01,
+ 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01,
+ 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01,
+ 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01,
+ 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01,
+ 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01,
+ 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01,
+ 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02,
+ 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02,
+ 0x0C, 0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02,
+ 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02,
+ 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02,
+ 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02,
+ 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02,
+ 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02,
+ 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02,
+ 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02,
+ 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02,
+ 0x30, 0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02,
+ 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02,
+ 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02,
+ 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02,
+ 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02,
+ 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02,
+ 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02,
+ 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02,
+ 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01,
+ 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01,
+ 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01,
+ 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02,
+ 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01,
+ 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02,
+ 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C,
+ 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01,
+ 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02,
+ 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02,
+ 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02,
+ 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02,
+ 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01,
+ 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02,
+ 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01,
+ 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02,
+ 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02,
+ 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02,
+ 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02,
+ 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02,
+ 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02,
+ 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02,
+ 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02,
+ 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02,
+ 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02,
+ 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02,
+ 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02,
+ 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02,
+ 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02,
+ 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02,
+ 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02,
+ 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02,
+ 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02,
+ 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02,
+ 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02,
+ 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02,
+ 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02,
+ 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02,
+ 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02,
+ 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02,
+ 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02,
+ 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02,
+ 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02,
+ 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02,
+ 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03,
+ 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03,
+ 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03,
+ 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03,
+ 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03,
+ 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03,
+ 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03,
+ 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03,
+ 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03,
+ 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03,
+ 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03,
+ 0x2C, 0x03, 0x2D, 0x03, 0x2E, 0x03, 0x2F, 0x03,
+ 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03,
+ 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03,
+ 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03,
+ 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03,
+ 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03,
+ 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03,
+ 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03,
+ 0x4C, 0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03,
+ 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03,
+ 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03,
+ 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03,
+ 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03,
+ 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03,
+ 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03,
+ 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03,
+ 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03,
+ 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03,
+ 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03,
+ 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03,
+ 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03,
+ 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03,
+ 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03,
+ 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03,
+ 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03,
+ 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
+ 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
+ 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03,
+ 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
+ 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03,
+ 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03,
+ 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03,
+ 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03,
+ 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03,
+ 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03,
+ 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03,
+ 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03,
+ 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03,
+ 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03,
+ 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03,
+ 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03,
+ 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03,
+ 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03,
+ 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03,
+ 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03,
+ 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03,
+ 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03,
+ 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03,
+ 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03,
+ 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
+ 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
+ 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
+ 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
+ 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
+ 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04,
+ 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04,
+ 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04,
+ 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04,
+ 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04,
+ 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04,
+ 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04,
+ 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04,
+ 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04,
+ 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04,
+ 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04,
+ 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04,
+ 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04,
+ 0x68, 0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04,
+ 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04,
+ 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04,
+ 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04,
+ 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04,
+ 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04,
+ 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04,
+ 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04,
+ 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04,
+ 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04,
+ 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04,
+ 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04,
+ 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04,
+ 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04,
+ 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04,
+ 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04,
+ 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04,
+ 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04,
+ 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04,
+ 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04,
+ 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04,
+ 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04,
+ 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04,
+ 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04,
+ 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04,
+ 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04,
+ 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04,
+ 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04,
+ 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04,
+ 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04,
+ 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04,
+ 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04,
+ 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04,
+ 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04,
+ 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04,
+ 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04,
+ 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04,
+ 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04,
+ 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05,
+ 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05,
+ 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05,
+ 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05,
+ 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05,
+ 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05,
+ 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05,
+ 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05,
+ 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05,
+ 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05,
+ 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05,
+ 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05,
+ 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
+ 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
+ 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
+ 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
+ 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
+ 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05,
+ 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05,
+ 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05,
+ 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05,
+ 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05,
+ 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05,
+ 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05,
+ 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05,
+ 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05,
+ 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05,
+ 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05,
+ 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05,
+ 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF,
+ 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D,
+ 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D,
+ 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D,
+ 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D,
+ 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D,
+ 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D,
+ 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D,
+ 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D,
+ 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D,
+ 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D,
+ 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D,
+ 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D,
+ 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D,
+ 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D,
+ 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D,
+ 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D,
+ 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D,
+ 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D,
+ 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D,
+ 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D,
+ 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D,
+ 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D,
+ 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D,
+ 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D,
+ 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D,
+ 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D,
+ 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D,
+ 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D,
+ 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D,
+ 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D,
+ 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D,
+ 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D,
+ 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D,
+ 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E,
+ 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E,
+ 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E,
+ 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E,
+ 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E,
+ 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E,
+ 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E,
+ 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+ 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E,
+ 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E,
+ 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E,
+ 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E,
+ 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E,
+ 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E,
+ 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E,
+ 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E,
+ 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E,
+ 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E,
+ 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E,
+ 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E,
+ 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E,
+ 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E,
+ 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E,
+ 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E,
+ 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E,
+ 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E,
+ 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E,
+ 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E,
+ 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E,
+ 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E,
+ 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E,
+ 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E,
+ 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E,
+ 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E,
+ 0x88, 0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E,
+ 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E,
+ 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E,
+ 0x94, 0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E,
+ 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E,
+ 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E,
+ 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E,
+ 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E,
+ 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E,
+ 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E,
+ 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E,
+ 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E,
+ 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E,
+ 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E,
+ 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E,
+ 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E,
+ 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E,
+ 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E,
+ 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E,
+ 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E,
+ 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E,
+ 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E,
+ 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E,
+ 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E,
+ 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E,
+ 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E,
+ 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E,
+ 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E,
+ 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E,
+ 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E,
+ 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+ 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F,
+ 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F,
+ 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F,
+ 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F,
+ 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F,
+ 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+ 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F,
+ 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F,
+ 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+ 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F,
+ 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F,
+ 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F,
+ 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F,
+ 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F,
+ 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F,
+ 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F,
+ 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F,
+ 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F,
+ 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+ 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F,
+ 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F,
+ 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F,
+ 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+ 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F,
+ 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+ 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F,
+ 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F,
+ 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+ 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F,
+ 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F,
+ 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+ 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F,
+ 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F,
+ 0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F,
+ 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F,
+ 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F,
+ 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F,
+ 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F,
+ 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F,
+ 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F,
+ 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F,
+ 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F,
+ 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F,
+ 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F,
+ 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F,
+ 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F,
+ 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F,
+ 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F,
+ 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F,
+ 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F,
+ 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F,
+ 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F,
+ 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F,
+ 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20,
+ 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20,
+ 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20,
+ 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20,
+ 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20,
+ 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20,
+ 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20,
+ 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20,
+ 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20,
+ 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20,
+ 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20,
+ 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20,
+ 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20,
+ 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20,
+ 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20,
+ 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20,
+ 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20,
+ 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20,
+ 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20,
+ 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20,
+ 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20,
+ 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20,
+ 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20,
+ 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20,
+ 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20,
+ 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20,
+ 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20,
+ 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20,
+ 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20,
+ 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20,
+ 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20,
+ 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20,
+ 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20,
+ 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20,
+ 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20,
+ 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20,
+ 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20,
+ 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20,
+ 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20,
+ 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20,
+ 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20,
+ 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20,
+ 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20,
+ 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20,
+ 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20,
+ 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20,
+ 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20,
+ 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20,
+ 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20,
+ 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20,
+ 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20,
+ 0xCC, 0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20,
+ 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20,
+ 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20,
+ 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20,
+ 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20,
+ 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20,
+ 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20,
+ 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20,
+ 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20,
+ 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20,
+ 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20,
+ 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20,
+ 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20,
+ 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21,
+ 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21,
+ 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21,
+ 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21,
+ 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21,
+ 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21,
+ 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21,
+ 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21,
+ 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21,
+ 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21,
+ 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21,
+ 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21,
+ 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21,
+ 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21,
+ 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21,
+ 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21,
+ 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21,
+ 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21,
+ 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21,
+ 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21,
+ 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21,
+ 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21,
+ 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21,
+ 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21,
+ 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+ 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+ 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21,
+ 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21,
+ 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21,
+ 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21,
+ 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21,
+ 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24,
+ 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24,
+ 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24,
+ 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24,
+ 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24,
+ 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24,
+ 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24,
+ 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C,
+ 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C,
+ 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C,
+ 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C,
+ 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C,
+ 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C,
+ 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C,
+ 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C,
+ 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C,
+ 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C,
+ 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C,
+ 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C,
+ 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C,
+ 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C,
+ 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C,
+ 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C,
+ 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C,
+ 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C,
+ 0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C,
+ 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 0x2C, 0x7C, 0x2C,
+ 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C,
+ 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C,
+ 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C,
+ 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C,
+ 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C,
+ 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C,
+ 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C,
+ 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C,
+ 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C,
+ 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C,
+ 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C,
+ 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C,
+ 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C,
+ 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C,
+ 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C,
+ 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C,
+ 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C,
+ 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C,
+ 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C,
+ 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C,
+ 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C,
+ 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C,
+ 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C,
+ 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C,
+ 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C,
+ 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C,
+ 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C,
+ 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C,
+ 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C,
+ 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C,
+ 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C,
+ 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C,
+ 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10,
+ 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10,
+ 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10,
+ 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10,
+ 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10,
+ 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10,
+ 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10,
+ 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10,
+ 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10,
+ 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10,
+ 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF,
+ 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF,
+ 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF,
+ 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF,
+ 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF,
+ 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF,
+ 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF,
+ 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF,
+ 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF,
+ 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF,
+ 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF,
+ 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF,
+ 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF,
+ 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF,
+ 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF,
+ 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF,
+ 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF,
+ 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF,
+ 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF,
+ 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF,
+ 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF,
+ 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF,
+ 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF,
+ 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF,
+ 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF,
+ 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF,
+ 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF,
+ 0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF,
+ 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF,
+ 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF,
+ 0xB6, 0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF,
+ 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF,
+ 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF,
+ 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF,
+ 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF,
+ 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF,
+ 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF,
+ 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF,
+ 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF,
+ 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF,
+ 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF,
+ 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF,
+ 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF,
+ 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF,
+ 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF,
+ 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF,
+ 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF,
+ 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF
+};
diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
index b6c6d66e4eb1..e2c7646588f8 100644
--- a/drivers/staging/fbtft/fb_bd663474.c
+++ b/drivers/staging/fbtft/fb_bd663474.c
@@ -24,7 +24,7 @@
static int init_display(struct fbtft_par *par)
{
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
par->fbtftops.reset(par);
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index d47dcf31fffb..2fd7b87ea0ce 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -151,7 +151,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x0f, 0x0f, 0x1f, 0x0f, 0x0f, 0x0f, 0x1f, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x03, 0x03, 0x0f, 0x0f, 0x1f, 0x0f, 0x0f,
0x0f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x00, 0x00,
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 3427a858d17c..37eaf0862c5b 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -95,7 +95,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x7f, 0x7f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x0f,
};
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index d609a2b67db9..05648c3ffe47 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -77,7 +77,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
@@ -195,7 +195,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int gamma_adj(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
0x1f, 0x3f, 0x0f, 0x0f, 0x7f, 0x1f,
0x3F, 0x3F, 0x3F, 0x3F, 0x3F};
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index ea6e001288ce..f2e72d14431d 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -214,7 +214,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index b090e7ab6fdd..c9aa4cb43123 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -85,7 +85,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
bt &= 0x07;
@@ -208,7 +208,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
diff --git a/drivers/staging/fbtft/fb_pcd8544.c b/drivers/staging/fbtft/fb_pcd8544.c
index ad49973ad594..08f8a4bb8772 100644
--- a/drivers/staging/fbtft/fb_pcd8544.c
+++ b/drivers/staging/fbtft/fb_pcd8544.c
@@ -157,10 +157,10 @@ static struct fbtft_display display = {
.backlight = 1,
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pdc8544", &display);
+FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pcd8544", &display);
MODULE_ALIAS("spi:" DRVNAME);
-MODULE_ALIAS("spi:pdc8544");
+MODULE_ALIAS("spi:pcd8544");
MODULE_DESCRIPTION("FB driver for the PCD8544 LCD Controller");
MODULE_AUTHOR("Noralf Tronnes");
diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
index b3d0701880fe..8c7de3290343 100644
--- a/drivers/staging/fbtft/fb_s6d1121.c
+++ b/drivers/staging/fbtft/fb_s6d1121.c
@@ -29,7 +29,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
/* Initialization sequence from Lib_UTFT */
@@ -123,7 +123,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x1f, 0x1f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f,
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index bbf75f795234..7a3fe022cc69 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -28,7 +28,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
write_reg(par, 0x00, 0x0001);
@@ -129,7 +129,7 @@ static int set_var(struct fbtft_par *par)
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
- unsigned long mask[] = {
+ static const unsigned long mask[] = {
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
};
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 4cfe9f8535d0..37622c9462aa 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -81,7 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
va_start(args, len);
*buf = (u8)va_arg(args, unsigned int);
- if (!par->gpio.dc)
+ if (par->gpio.dc)
gpiod_set_value(par->gpio.dc, 0);
ret = par->fbtftops.write(par, par->buf, sizeof(u8));
if (ret < 0) {
@@ -104,7 +104,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
return;
}
}
- if (!par->gpio.dc)
+ if (par->gpio.dc)
gpiod_set_value(par->gpio.dc, 1);
va_end(args);
}
diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
index 564a38e34440..c77832ae5e5b 100644
--- a/drivers/staging/fbtft/fb_upd161704.c
+++ b/drivers/staging/fbtft/fb_upd161704.c
@@ -26,7 +26,7 @@ static int init_display(struct fbtft_par *par)
{
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
/* Initialization sequence from Lib_UTFT */
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 2ea814d0dca5..63c65dd67b17 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -135,7 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
- if (!par->gpio.dc)
+ if (par->gpio.dc)
gpiod_set_value(par->gpio.dc, 1);
/* non buffered write */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 7cbc1bdd2d8a..cf5700a2ea66 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
struct gpio_desc **gpiop)
{
struct device *dev = par->info->device;
- struct device_node *node = dev->of_node;
int ret = 0;
- if (of_find_property(node, name, NULL)) {
- *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index,
- GPIOD_OUT_HIGH);
- if (IS_ERR(*gpiop)) {
- ret = PTR_ERR(*gpiop);
- dev_err(dev,
- "Failed to request %s GPIO:%d\n", name, ret);
- return ret;
- }
- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
- __func__, name);
+ *gpiop = devm_gpiod_get_index_optional(dev, name, index,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(*gpiop)) {
+ ret = PTR_ERR(*gpiop);
+ dev_err(dev,
+ "Failed to request %s GPIO: %d\n", name, ret);
+ return ret;
}
+ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+ __func__, name);
return ret;
}
@@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
if (!par->info->device->of_node)
return -EINVAL;
- ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
+ ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
+ ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
+ ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
+ ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
+ ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
+ ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
if (ret)
return ret;
for (i = 0; i < 16; i++) {
- ret = fbtft_request_one_gpio(par, "db-gpios", i,
+ ret = fbtft_request_one_gpio(par, "db", i,
&par->gpio.db[i]);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "led-gpios", i,
+ ret = fbtft_request_one_gpio(par, "led", i,
&par->gpio.led[i]);
if (ret)
return ret;
- ret = fbtft_request_one_gpio(par, "aux-gpios", i,
+ ret = fbtft_request_one_gpio(par, "aux", i,
&par->gpio.aux[i]);
if (ret)
return ret;
@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par)
if (!par->gpio.reset)
return;
fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
- gpiod_set_value_cansleep(par->gpio.reset, 0);
- usleep_range(20, 40);
gpiod_set_value_cansleep(par->gpio.reset, 1);
+ usleep_range(20, 40);
+ gpiod_set_value_cansleep(par->gpio.reset, 0);
msleep(120);
}
@@ -921,7 +918,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
return -EINVAL;
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
while (p) {
@@ -1012,7 +1009,7 @@ int fbtft_init_display(struct fbtft_par *par)
}
par->fbtftops.reset(par);
- if (!par->gpio.cs)
+ if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
i = 0;
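
The fbtft changes above follow the usual gpiod idiom: request the line with the *_optional variant so a missing property yields NULL rather than an error, and only drive the descriptor when it actually exists. A minimal sketch of that pattern, under the assumption of hypothetical names (example_probe, assert_cs) rather than the driver's own symbols:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Sketch only: "cs" as con_id and both function names are assumptions. */
static struct gpio_desc *cs_gpiod;

static int example_probe(struct device *dev)
{
	/* returns NULL (not an error) when the cs-gpios property is absent */
	cs_gpiod = devm_gpiod_get_optional(dev, "cs", GPIOD_OUT_HIGH);
	if (IS_ERR(cs_gpiod))
		return PTR_ERR(cs_gpiod);	/* real failure, e.g. -EPROBE_DEFER */
	return 0;
}

static void assert_cs(bool active)
{
	if (cs_gpiod)	/* only touch the line if it was actually wired up */
		gpiod_set_value(cs_gpiod, active ? 0 : 1);	/* 0 == activate, as above */
}
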
diff --git a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
new file mode 100644
index 000000000000..b1f9474f36d5
--- /dev/null
+++ b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
@@ -0,0 +1,71 @@
+* Arcx Anybus-S controller
+
+This chip communicates with the SoC over a parallel bus. It is
+expected that its Device Tree node is specified as the child of a node
+corresponding to the parallel bus used for communication.
+
+Required properties:
+--------------------
+
+ - compatible : The following chip-specific string:
+ "arcx,anybus-controller"
+
+ - reg : three areas:
+ index 0: bus memory area where the cpld registers are located.
+ index 1: bus memory area of the first host's dual-port ram.
+ index 2: bus memory area of the second host's dual-port ram.
+
+ - reset-gpios : the GPIO pin connected to the reset line of the controller.
+
+ - interrupts : two interrupts:
+ index 0: interrupt connected to the first host
+ index 1: interrupt connected to the second host
+ Generic interrupt client node bindings are described in
+ interrupt-controller/interrupts.txt
+
+Optional: use of subnodes
+-------------------------
+
+The card connected to a host may need additional properties. These can be
+specified in subnodes to the controller node.
+
+The subnodes are identified by the standard 'reg' property. Exactly which
+information can be specified depends on the bindings for the subnode's
+function driver.
+
+Required controller node properties when using subnodes:
+- #address-cells: should be one.
+- #size-cells: should be zero.
+
+Required subnode properties:
+- reg: Must contain the host index of the card this subnode describes:
+ <0> for the first host on the controller
+ <1> for the second host on the controller
+ Note that only a single card can be plugged into a host, so the host
+ index uniquely describes the card location.
+
+Example of usage:
+-----------------
+
+This example places the bridge on top of the i.MX WEIM parallel bus, see:
+Documentation/devicetree/bindings/bus/imx-weim.txt
+
+&weim {
+ controller@0,0 {
+ compatible = "arcx,anybus-controller";
+ reg = <0 0 0x100>, <0 0x400000 0x800>, <1 0x400000 0x800>;
+ reset-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>, <5 IRQ_TYPE_LEVEL_LOW>;
+ /* fsl,weim-cs-timing is an i.MX WEIM bus-specific property */
+ fsl,weim-cs-timing = <0x024400b1 0x00001010 0x20081100
+ 0x00000000 0xa0000240 0x00000000>;
+ /* optional subnode for a card plugged into the first host */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ card@0 {
+ reg = <0>;
+ /* card specific properties go here */
+ };
+ };
+};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/TODO b/drivers/staging/fsl-dpaa2/ethsw/TODO
index 24b5e95a96f8..4d46857b0b2b 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/TODO
+++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
@@ -1,7 +1,6 @@
* Add I/O capabilities on switch port netdevices. This will allow control
traffic to reach the CPU.
* Add ACL to redirect control traffic to CPU.
-* Add support for displaying learned FDB entries
* Add support for multiple FDBs and switch port partitioning
* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
need to be kept in sync with binary interface changes in MC
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
index 14b974defa3a..5e1339daa7c7 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
@@ -10,7 +10,7 @@
/* DPSW Version */
#define DPSW_VER_MAJOR 8
-#define DPSW_VER_MINOR 0
+#define DPSW_VER_MINOR 1
#define DPSW_CMD_BASE_VERSION 1
#define DPSW_CMD_ID_OFFSET 4
@@ -67,6 +67,7 @@
#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
+#define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A)
/* Macros for accessing command fields smaller than 1byte */
#define DPSW_MASK(field) \
@@ -351,6 +352,18 @@ struct dpsw_cmd_fdb_set_learning_mode {
u8 mode;
};
+struct dpsw_cmd_fdb_dump {
+ __le16 fdb_id;
+ __le16 pad0;
+ __le32 pad1;
+ __le64 iova_addr;
+ __le32 iova_size;
+};
+
+struct dpsw_rsp_fdb_dump {
+ __le16 num_entries;
+};
+
struct dpsw_rsp_get_api_version {
__le16 version_major;
__le16 version_minor;
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
index cabed77b445d..56b0fa789a67 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
@@ -981,6 +981,57 @@ int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
}
/**
+ * dpsw_fdb_dump() - Dump the contents of the FDB table into memory.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @iova_addr: Data will be stored here as an array of struct fdb_dump_entry
+ * @iova_size: Memory size allocated at iova_addr
+ * @num_entries: Number of entries written at iova_addr
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ *
+ * The memory allocated at iova_addr must be zero-initialized before
+ * command execution. If the FDB table does not fit into memory, MC will
+ * stop once the memory is filled up.
+ * The struct fdb_dump_entry array must be parsed until the end of the
+ * memory area or until an entry with mac_addr set to zero is found.
+ */
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ u64 iova_addr,
+ u32 iova_size,
+ u16 *num_entries)
+{
+ struct dpsw_cmd_fdb_dump *cmd_params;
+ struct dpsw_rsp_fdb_dump *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_DUMP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_dump *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->iova_addr = cpu_to_le64(iova_addr);
+ cmd_params->iova_size = cpu_to_le32(iova_size);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_dump *)cmd.params;
+ *num_entries = le16_to_cpu(rsp_params->num_entries);
+
+ return 0;
+}
+
+/**
* dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
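
The kernel-doc above describes a specific calling convention for dpsw_fdb_dump(): zero-fill a buffer, map it for the MC firmware, issue the command, then walk the returned fdb_dump_entry array until a zeroed MAC address or the end of the buffer. A condensed sketch of such a caller, mirroring the steps port_fdb_dump() takes later in this patch; dev, mc_io, token and max_entries are assumed to come from the caller's context (needs <linux/dma-mapping.h>, <linux/etherdevice.h>, <linux/slab.h> and dpsw.h):

static int example_fdb_dump(struct device *dev, struct fsl_mc_io *mc_io,
			    u16 token, u16 max_entries)
{
	u32 size = max_entries * sizeof(struct fdb_dump_entry);
	struct fdb_dump_entry *entries;
	dma_addr_t iova;
	u16 num_entries;
	u8 *mem;
	int i, err;

	mem = kzalloc(size, GFP_KERNEL);	/* buffer must start zeroed */
	if (!mem)
		return -ENOMEM;

	iova = dma_map_single(dev, mem, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, iova)) {
		kfree(mem);
		return -ENOMEM;
	}

	err = dpsw_fdb_dump(mc_io, 0, token, 0 /* fdb_id */,
			    iova, size, &num_entries);
	dma_unmap_single(dev, iova, size, DMA_FROM_DEVICE);

	entries = (struct fdb_dump_entry *)mem;
	for (i = 0; !err && i < num_entries; i++) {
		/* a zeroed MAC address marks the end of valid entries */
		if (is_zero_ether_addr(entries[i].mac_addr))
			break;
		/* consume entries[i] here */
	}

	kfree(mem);
	return err;
}
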
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
index 25635259ce44..25b45850925c 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -75,37 +75,6 @@ enum dpsw_component_type {
DPSW_COMPONENT_TYPE_S_VLAN
};
-/**
- * struct dpsw_cfg - DPSW configuration
- * @num_ifs: Number of external and internal interfaces
- * @adv: Advanced parameters; default is all zeros;
- * use this structure to change default settings
- * @adv.options: Enable/Disable DPSW features (bitmap)
- * @adv.max_vlans: Maximum Number of VLAN's; 0 - indicates default 16
- * @adv.max_meters_per_if: Number of meters per interface
- * @adv.max_fdbs: Maximum Number of FDB's; 0 - indicates default 16
- * @adv.max_fdb_entries: Number of FDB entries for default FDB table;
- * 0 - indicates default 1024 entries.
- * @adv.fdb_aging_time: Default FDB aging time for default FDB table;
- * 0 - indicates default 300 seconds
- * @adv.max_fdb_mc_groups: Number of multicast groups in each FDB table;
- * 0 - indicates default 32
- * @adv.component_type: Indicates the component type of this bridge
- */
-struct dpsw_cfg {
- u16 num_ifs;
- struct {
- u64 options;
- u16 max_vlans;
- u8 max_meters_per_if;
- u8 max_fdbs;
- u16 max_fdb_entries;
- u16 fdb_aging_time;
- u16 max_fdb_mc_groups;
- enum dpsw_component_type component_type;
- } adv;
-};
-
int dpsw_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
@@ -496,6 +465,31 @@ int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
u16 fdb_id,
const struct dpsw_fdb_unicast_cfg *cfg);
+#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
+#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
+
+/**
+ * struct fdb_dump_entry - fdb snapshot entry
+ * @mac_addr: MAC address
+ * @type: bit0 - DYNAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
+ * @if_info: unicast - egress interface, multicast - number of egress interfaces
+ * @if_mask: multicast - egress interface mask
+ */
+struct fdb_dump_entry {
+ u8 mac_addr[6];
+ u8 type;
+ u8 if_info;
+ u8 if_mask[8];
+};
+
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ u64 iova_addr,
+ u32 iova_size,
+ u16 *num_entries);
+
/**
* struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
* @type: Select static or dynamic entry
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
index 926a0c053e18..4f0bff86e43e 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
@@ -65,7 +65,7 @@ ethsw_get_link_ksettings(struct net_device *netdev,
port_priv->idx,
&state);
if (err) {
- netdev_err(netdev, "ERROR %d getting link state", err);
+ netdev_err(netdev, "ERROR %d getting link state\n", err);
goto out;
}
@@ -88,18 +88,21 @@ ethsw_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *link_ksettings)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpsw_link_cfg cfg = {0};
- int err = 0;
-
- netdev_dbg(netdev, "Setting link parameters...");
-
- /* Due to a temporary MC limitation, the DPSW port must be down
- * in order to be able to change link settings. Taking steps to let
- * the user know that.
- */
- if (netif_running(netdev)) {
- netdev_info(netdev, "Sorry, interface must be brought down first.\n");
- return -EACCES;
+ bool if_running;
+ int err = 0, ret;
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+ if (if_running) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
}
cfg.rate = link_ksettings->base.speed;
@@ -116,12 +119,16 @@ ethsw_set_link_ksettings(struct net_device *netdev,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx,
&cfg);
- if (err)
- /* ethtool will be loud enough if we return an error; no point
- * in putting our own error message on the console by default
- */
- netdev_dbg(netdev, "ERROR %d setting link cfg", err);
+ if (if_running) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
return err;
}
@@ -156,9 +163,6 @@ static void ethsw_ethtool_get_stats(struct net_device *netdev,
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int i, err;
- memset(data, 0,
- sizeof(u64) * ETHSW_NUM_COUNTERS);
-
for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index f73edaf6ce87..14a9eebf687e 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -22,7 +22,7 @@ static struct workqueue_struct *ethsw_owq;
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
-#define DPSW_MIN_VER_MINOR 0
+#define DPSW_MIN_VER_MINOR 1
#define DEFAULT_VLAN_ID 1
@@ -34,11 +34,6 @@ static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
.fdb_id = 0,
};
- if (ethsw->vlans[vid]) {
- dev_err(ethsw->dev, "VLAN already configured\n");
- return -EEXIST;
- }
-
err = dpsw_vlan_add(ethsw->mc_io, 0,
ethsw->dpsw_handle, vid, &vcfg);
if (err) {
@@ -149,12 +144,12 @@ static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
return 0;
}
-static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
+static int ethsw_set_learning(struct ethsw_core *ethsw, bool enable)
{
enum dpsw_fdb_learning_mode learn_mode;
int err;
- if (flag)
+ if (enable)
learn_mode = DPSW_FDB_LEARNING_MODE_HW;
else
learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
@@ -165,24 +160,24 @@ static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
return err;
}
- ethsw->learning = !!flag;
+ ethsw->learning = enable;
return 0;
}
-static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
+static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
{
int err;
err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
- port_priv->idx, flag);
+ port_priv->idx, enable);
if (err) {
netdev_err(port_priv->netdev,
"dpsw_if_set_flooding err %d\n", err);
return err;
}
- port_priv->flood = !!flag;
+ port_priv->flood = enable;
return 0;
}
@@ -316,6 +311,31 @@ static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
return err;
}
+static int port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ if (is_unicast_ether_addr(addr))
+ return ethsw_port_fdb_add_uc(netdev_priv(dev),
+ addr);
+ else
+ return ethsw_port_fdb_add_mc(netdev_priv(dev),
+ addr);
+}
+
+static int port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ if (is_unicast_ether_addr(addr))
+ return ethsw_port_fdb_del_uc(netdev_priv(dev),
+ addr);
+ else
+ return ethsw_port_fdb_del_mc(netdev_priv(dev),
+ addr);
+}
+
static void port_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -516,17 +536,165 @@ static int swdev_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->idx);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct ethsw_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int ethsw_fdb_do_dump(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
+{
+ int is_dynamic = entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
+{
+ int idx = port_priv->idx;
+ int valid;
+
+ if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ valid = entry->if_info == port_priv->idx;
+ else
+ valid = entry->if_mask[idx / 8] & BIT(idx % 8);
+
+ return valid;
+}
+
+static int port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct device *dev = net_dev->dev.parent;
+ struct fdb_dump_entry *fdb_entries;
+ struct fdb_dump_entry fdb_entry;
+ struct ethsw_dump_ctx dump = {
+ .dev = net_dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ dma_addr_t fdb_dump_iova;
+ u16 num_fdb_entries;
+ u32 fdb_dump_size;
+ int err = 0, i;
+ u8 *dma_mem;
+
+ fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
+ dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, fdb_dump_iova)) {
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
+ if (err) {
+ netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
+ goto err_dump;
+ }
+
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+
+ fdb_entries = (struct fdb_dump_entry *)dma_mem;
+ for (i = 0; i < num_fdb_entries; i++) {
+ fdb_entry = fdb_entries[i];
+
+ if (!port_fdb_valid_entry(&fdb_entry, port_priv))
+ continue;
+
+ err = ethsw_fdb_do_dump(&fdb_entry, &dump);
+ if (err)
+ goto end;
+ }
+
+end:
+ *idx = dump.idx;
+
+ kfree(dma_mem);
+
+ return 0;
+
+err_dump:
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+err_map:
+ kfree(dma_mem);
+ return err;
+}
+
static const struct net_device_ops ethsw_port_ops = {
.ndo_open = port_open,
.ndo_stop = port_stop,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = port_get_stats,
.ndo_change_mtu = port_change_mtu,
.ndo_has_offload_stats = port_has_offload_stats,
.ndo_get_offload_stats = port_get_offload_stats,
+ .ndo_fdb_add = port_fdb_add,
+ .ndo_fdb_del = port_fdb_del,
+ .ndo_fdb_dump = port_fdb_dump,
.ndo_start_xmit = port_dropframe,
.ndo_get_port_parent_id = swdev_get_port_parent_id,
+ .ndo_get_phys_port_name = port_get_phys_name,
};
static void ethsw_links_state_update(struct ethsw_core *ethsw)
@@ -549,12 +717,12 @@ static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
if (err) {
- dev_err(dev, "Can't get irq status (err %d)", err);
+ dev_err(dev, "Can't get irq status (err %d)\n", err);
err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
if (err)
- dev_err(dev, "Can't clear irq status (err %d)", err);
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
goto out;
}
@@ -599,21 +767,21 @@ static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
if (err) {
- dev_err(dev, "devm_request_threaded_irq(): %d", err);
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
goto free_irq;
}
err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, mask);
if (err) {
- dev_err(dev, "dpsw_set_irq_mask(): %d", err);
+ dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
goto free_devm_irq;
}
err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, 1);
if (err) {
- dev_err(dev, "dpsw_set_irq_enable(): %d", err);
+ dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
goto free_devm_irq;
}
@@ -673,11 +841,12 @@ static int port_attr_br_flags_set(struct net_device *netdev,
return 0;
/* Learning is enabled per switch */
- err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
+ err = ethsw_set_learning(port_priv->ethsw_data,
+ !!(flags & BR_LEARNING));
if (err)
goto exit;
- err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
+ err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
exit:
return err;
@@ -950,8 +1119,7 @@ static int port_bridge_join(struct net_device *netdev,
if (ethsw->ports[i]->bridge_dev &&
(ethsw->ports[i]->bridge_dev != upper_dev)) {
netdev_err(netdev,
- "Another switch port is connected to %s\n",
- ethsw->ports[i]->bridge_dev->name);
+ "Only one bridge supported per DPSW object!\n");
return -EINVAL;
}
@@ -1023,18 +1191,30 @@ static void ethsw_switchdev_event_work(struct work_struct *work)
container_of(work, struct ethsw_switchdev_event_work, work);
struct net_device *dev = switchdev_work->dev;
struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
rtnl_lock();
fdb_info = &switchdev_work->fdb_info;
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
if (is_unicast_ether_addr(fdb_info->addr))
- ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
+ err = ethsw_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
else
- ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
+ err = ethsw_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
+ if (err)
+ break;
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
if (is_unicast_ether_addr(fdb_info->addr))
ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
else
@@ -1177,48 +1357,6 @@ err_switchdev_nb:
return err;
}
-static int ethsw_open(struct ethsw_core *ethsw)
-{
- struct ethsw_port_priv *port_priv = NULL;
- int i, err;
-
- err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err) {
- dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
- return err;
- }
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- port_priv = ethsw->ports[i];
- err = dev_open(port_priv->netdev, NULL);
- if (err) {
- netdev_err(port_priv->netdev, "dev_open err %d\n", err);
- return err;
- }
- }
-
- return 0;
-}
-
-static int ethsw_stop(struct ethsw_core *ethsw)
-{
- struct ethsw_port_priv *port_priv = NULL;
- int i, err;
-
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- port_priv = ethsw->ports[i];
- dev_close(port_priv->netdev);
- }
-
- err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
- if (err) {
- dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
- return err;
- }
-
- return 0;
-}
-
static int ethsw_init(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
@@ -1320,7 +1458,6 @@ err_close:
static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
- const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpsw_vlan_if_cfg vcfg;
@@ -1346,12 +1483,10 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
DEFAULT_VLAN_ID, &vcfg);
- if (err) {
+ if (err)
netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
- return err;
- }
- return ethsw_port_fdb_add_mc(port_priv, def_mcast);
+ return err;
}
static void ethsw_unregister_notifier(struct device *dev)
@@ -1403,9 +1538,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
destroy_workqueue(ethsw_owq);
- rtnl_lock();
- ethsw_stop(ethsw);
- rtnl_unlock();
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
port_priv = ethsw->ports[i];
@@ -1455,16 +1588,24 @@ static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
port_netdev->min_mtu = ETH_MIN_MTU;
port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+ err = ethsw_port_init(port_priv, port_idx);
+ if (err)
+ goto err_port_probe;
+
err = register_netdev(port_netdev);
if (err < 0) {
dev_err(dev, "register_netdev error %d\n", err);
- free_netdev(port_netdev);
- return err;
+ goto err_port_probe;
}
ethsw->ports[port_idx] = port_priv;
- return ethsw_port_init(port_priv, port_idx);
+ return 0;
+
+err_port_probe:
+ free_netdev(port_netdev);
+
+ return err;
}
static int ethsw_probe(struct fsl_mc_device *sw_dev)
@@ -1482,7 +1623,8 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
ethsw->dev = dev;
dev_set_drvdata(dev, ethsw);
- err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
+ err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &ethsw->mc_io);
if (err) {
if (err == -ENXIO)
err = -EPROBE_DEFER;
@@ -1514,12 +1656,11 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
goto err_free_ports;
}
- /* Switch starts up enabled */
- rtnl_lock();
- err = ethsw_open(ethsw);
- rtnl_unlock();
- if (err)
+ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
goto err_free_ports;
+ }
/* Setup IRQs */
err = ethsw_setup_irqs(sw_dev);
@@ -1530,9 +1671,7 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
return 0;
err_stop:
- rtnl_lock();
- ethsw_stop(ethsw);
- rtnl_unlock();
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
err_free_ports:
/* Cleanup registered ports only */
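
For clarity on the dump format consumed above: a unicast fdb_dump_entry carries its single egress interface in if_info, while a multicast entry carries a port bitmap in if_mask, which is how port_fdb_valid_entry() decides whether an entry belongs to a given port. A small stand-alone helper showing the same decoding (the helper name is hypothetical, not part of the driver):

#include <linux/bits.h>

/* Hypothetical helper: does this dumped FDB entry involve switch port @idx? */
static bool fdb_entry_matches_port(const struct fdb_dump_entry *entry, int idx)
{
	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		return entry->if_info == idx;			/* single egress port */

	return entry->if_mask[idx / 8] & BIT(idx % 8);		/* multicast port bitmap */
}
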
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index c48783680a05..3ea8a0ad8c10 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -23,9 +23,13 @@
/* Number of IRQs supported */
#define DPSW_IRQ_NUM 2
+/* Port is member of VLAN */
#define ETHSW_VLAN_MEMBER 1
+/* VLAN to be treated as untagged on egress */
#define ETHSW_VLAN_UNTAGGED 2
+/* Untagged frames will be assigned to this VLAN */
#define ETHSW_VLAN_PVID 4
+/* VLAN configured on the switch */
#define ETHSW_VLAN_GLOBAL 8
/* Maximum Frame Length supported by HW (currently 10k) */
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 2be45ee9d061..46199c8ca441 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -509,6 +509,8 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
struct gasket_dev *gasket_dev;
struct gasket_sysfs_attribute *gasket_attr;
enum sysfs_attribute_type type;
+ struct gasket_page_table *gpt;
+ uint val;
gasket_dev = gasket_sysfs_get_device_data(device);
if (!gasket_dev) {
@@ -524,29 +526,25 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
}
type = (enum sysfs_attribute_type)gasket_attr->data.attr_type;
+ gpt = gasket_dev->page_table[0];
switch (type) {
case ATTR_KERNEL_HIB_PAGE_TABLE_SIZE:
- ret = scnprintf(buf, PAGE_SIZE, "%u\n",
- gasket_page_table_num_entries(
- gasket_dev->page_table[0]));
+ val = gasket_page_table_num_entries(gpt);
break;
case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
- ret = scnprintf(buf, PAGE_SIZE, "%u\n",
- gasket_page_table_num_entries(
- gasket_dev->page_table[0]));
+ val = gasket_page_table_num_simple_entries(gpt);
break;
case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
- ret = scnprintf(buf, PAGE_SIZE, "%u\n",
- gasket_page_table_num_active_pages(
- gasket_dev->page_table[0]));
+ val = gasket_page_table_num_active_pages(gpt);
break;
default:
dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
attr->attr.name);
ret = 0;
- break;
+ goto exit;
}
-
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n", val);
+exit:
gasket_sysfs_put_attr(device, gasket_attr);
gasket_sysfs_put_device_data(device, gasket_dev);
return ret;
@@ -659,7 +657,7 @@ static void apex_pci_remove(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
}
-static struct gasket_driver_desc apex_desc = {
+static const struct gasket_driver_desc apex_desc = {
.name = "apex",
.driver_version = APEX_DRIVER_VERSION,
.major = 120,
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index 7ecfba4f2b06..240f9bb10b71 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -39,8 +39,7 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
}
/* Read the size of the page table. */
-static int gasket_read_page_table_size(
- struct gasket_dev *gasket_dev,
+static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
@@ -66,8 +65,7 @@ static int gasket_read_page_table_size(
}
/* Read the size of the simple page table. */
-static int gasket_read_simple_page_table_size(
- struct gasket_dev *gasket_dev,
+static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
@@ -93,8 +91,7 @@ static int gasket_read_simple_page_table_size(
}
/* Set the boundary between the simple and extended page tables. */
-static int gasket_partition_page_table(
- struct gasket_dev *gasket_dev,
+static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret;
@@ -185,8 +182,7 @@ static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
* Reserve structures for coherent allocation, and allocate or free the
* corresponding memory.
*/
-static int gasket_config_coherent_allocator(
- struct gasket_dev *gasket_dev,
+static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
struct gasket_coherent_alloc_config_ioctl __user *argp)
{
int ret;
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index 24a738238f9f..0c65a0121dde 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -302,10 +302,8 @@ static int goldfish_audio_probe(struct platform_device *pdev)
return -ENOMEM;
data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
+ if (data->irq < 0)
return -ENODEV;
- }
data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
COMBINED_BUFFER_SIZE,
&buf_addr, GFP_KERNEL);
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
index 806e75b7f405..3d2c6f88a138 100644
--- a/drivers/staging/greybus/Documentation/firmware/authenticate.c
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -2,54 +2,8 @@
/*
* Sample code to test CAP protocol
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
index 31d9c23e2eeb..765d69faa9cc 100644
--- a/drivers/staging/greybus/Documentation/firmware/firmware.c
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -2,54 +2,8 @@
/*
* Sample code to test firmware-management protocol
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index 4894c3514955..d4777f5a8b90 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -1,33 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-menuconfig GREYBUS
- tristate "Greybus support"
- depends on SYSFS
- ---help---
- This option enables the Greybus driver core. Greybus is an
- hardware protocol that was designed to provide Unipro with a
- sane application layer. It was originally designed for the
- ARA project, a module phone system, but has shown up in other
- phones, and can be tunneled over other busses in order to
- control hardware devices.
-
- Say Y here to enable support for these types of drivers.
-
- To compile this code as a module, chose M here: the module
- will be called greybus.ko
-
if GREYBUS
-config GREYBUS_ES2
- tristate "Greybus ES3 USB host controller"
- depends on USB
- ---help---
- Select this option if you have a Toshiba ES3 USB device that
- acts as a Greybus "host controller". This device is a bridge
- from a USB device to a Unipro network.
-
- To compile this code as a module, chose M here: the module
- will be called gb-es2.ko
-
config GREYBUS_AUDIO
tristate "Greybus Audio Class driver"
depends on SOUND
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
index 2551ed16b742..627e44f2a983 100644
--- a/drivers/staging/greybus/Makefile
+++ b/drivers/staging/greybus/Makefile
@@ -1,29 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-# Greybus core
-greybus-y := core.o \
- debugfs.o \
- hd.o \
- manifest.o \
- module.o \
- interface.o \
- bundle.o \
- connection.o \
- control.o \
- svc.o \
- svc_watchdog.o \
- operation.o
-
-obj-$(CONFIG_GREYBUS) += greybus.o
-
# needed for trace events
ccflags-y += -I$(src)
-
-# Greybus Host controller drivers
-gb-es2-y := es2.o
-
-obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
-
# Greybus class drivers
gb-bootrom-y := bootrom.o
gb-camera-y := camera.o
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 6eb842040c22..eebf0deb39f5 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -19,8 +19,8 @@
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/time.h>
+#include <linux/greybus.h>
#include "arche_platform.h"
-#include "greybus.h"
#if IS_ENABLED(CONFIG_USB_HSIC_USB3613)
#include <linux/usb/usb3613.h>
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
deleted file mode 100644
index 3dab6375909c..000000000000
--- a/drivers/staging/greybus/arpc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __ARPC_H
-#define __ARPC_H
-
-/* APBridgeA RPC (ARPC) */
-
-enum arpc_result {
- ARPC_SUCCESS = 0x00,
- ARPC_NO_MEMORY = 0x01,
- ARPC_INVALID = 0x02,
- ARPC_TIMEOUT = 0x03,
- ARPC_UNKNOWN_ERROR = 0xff,
-};
-
-struct arpc_request_message {
- __le16 id; /* RPC unique id */
- __le16 size; /* Size in bytes of header + payload */
- __u8 type; /* RPC type */
- __u8 data[0]; /* ARPC data */
-} __packed;
-
-struct arpc_response_message {
- __le16 id; /* RPC unique id */
- __u8 result; /* Result of RPC */
-} __packed;
-
-/* ARPC requests */
-#define ARPC_TYPE_CPORT_CONNECTED 0x01
-#define ARPC_TYPE_CPORT_QUIESCE 0x02
-#define ARPC_TYPE_CPORT_CLEAR 0x03
-#define ARPC_TYPE_CPORT_FLUSH 0x04
-#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
-
-struct arpc_cport_connected_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_quiesce_req {
- __le16 cport_id;
- __le16 peer_space;
- __le16 timeout;
-} __packed;
-
-struct arpc_cport_clear_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_flush_req {
- __le16 cport_id;
-} __packed;
-
-struct arpc_cport_shutdown_req {
- __le16 cport_id;
- __le16 timeout;
- __u8 phase;
-} __packed;
-
-#endif /* __ARPC_H */
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
index 7ebb1bde5cb7..26117e390deb 100644
--- a/drivers/staging/greybus/audio_apbridgea.c
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -5,8 +5,7 @@
* Copyright 2015-2016 Google Inc.
*/
-#include "greybus.h"
-#include "greybus_protocols.h"
+#include <linux/greybus.h>
#include "audio_apbridgea.h"
#include "audio_codec.h"
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index 330fc7a397eb..3f1f4dd2c61a 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -1,30 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/**
+/*
* Copyright (c) 2015-2016 Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This is a special protocol for configuring communication over the
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index 9ba09ea9c2fc..cb5d271da1a5 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -8,12 +8,10 @@
#ifndef __LINUX_GBAUDIO_CODEC_H
#define __LINUX_GBAUDIO_CODEC_H
+#include <linux/greybus.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "greybus.h"
-#include "greybus_protocols.h"
-
#define NAME_SIZE 32
#define MAX_DAIS 2 /* APB1, APB2 */
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
index 8894f1c87d48..9d8994fdb41a 100644
--- a/drivers/staging/greybus/audio_gb.c
+++ b/drivers/staging/greybus/audio_gb.c
@@ -5,9 +5,7 @@
* Copyright 2015-2016 Google Inc.
*/
-#include "greybus.h"
-#include "greybus_protocols.h"
-#include "operation.h"
+#include <linux/greybus.h>
#include "audio_codec.h"
/* TODO: Split into separate calls */
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index c2a4af4c1d06..9b19ea9d3fa1 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
void gb_audio_manager_remove_all(void)
{
struct gb_audio_manager_module *module, *next;
- int is_empty = 1;
+ int is_empty;
down_write(&modules_rwsem);
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index a5d7c53df987..297e69f011c7 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -6,8 +6,7 @@
* Copyright 2016 Linaro Ltd.
*/
-#include "greybus.h"
-
+#include <linux/greybus.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 402e6360834f..a8efb86de140 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -10,8 +10,8 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "firmware.h"
/* Timeout, in jiffies, within which the next request must be received */
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
deleted file mode 100644
index 8734d2055657..000000000000
--- a/drivers/staging/greybus/bundle.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus bundles
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __BUNDLE_H
-#define __BUNDLE_H
-
-#include <linux/list.h>
-
-#define BUNDLE_ID_NONE U8_MAX
-
-/* Greybus "public" definitions" */
-struct gb_bundle {
- struct device dev;
- struct gb_interface *intf;
-
- u8 id;
- u8 class;
- u8 class_major;
- u8 class_minor;
-
- size_t num_cports;
- struct greybus_descriptor_cport *cport_desc;
-
- struct list_head connections;
- u8 *state;
-
- struct list_head links; /* interface->bundles */
-};
-#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
-
-/* Greybus "private" definitions */
-struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
- u8 class);
-int gb_bundle_add(struct gb_bundle *bundle);
-void gb_bundle_destroy(struct gb_bundle *bundle);
-
-/* Bundle Runtime PM wrappers */
-#ifdef CONFIG_PM
-static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
-{
- int retval;
-
- retval = pm_runtime_get_sync(&bundle->dev);
- if (retval < 0) {
- dev_err(&bundle->dev,
- "pm_runtime_get_sync failed: %d\n", retval);
- pm_runtime_put_noidle(&bundle->dev);
- return retval;
- }
-
- return 0;
-}
-
-static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
-{
- int retval;
-
- pm_runtime_mark_last_busy(&bundle->dev);
- retval = pm_runtime_put_autosuspend(&bundle->dev);
-
- return retval;
-}
-
-static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
-{
- pm_runtime_get_noresume(&bundle->dev);
-}
-
-static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
-{
- pm_runtime_put_noidle(&bundle->dev);
-}
-
-#else
-static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
-{ return 0; }
-static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
-{ return 0; }
-
-static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
-static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
-#endif
-
-#endif /* __BUNDLE_H */
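The bundle runtime PM wrappers deleted above now come from <linux/greybus.h>. For reference, a minimal sketch (not part of this patch) of how a bundle driver typically brackets hardware access with them; the function name my_bundle_do_work is invented for illustration:

/*
 * Illustrative sketch only: resume the bundle before issuing operations,
 * then mark it busy and let it autosuspend again.
 */
#include <linux/greybus.h>

static int my_bundle_do_work(struct gb_bundle *bundle)
{
	int ret;

	/* Runtime-resume the bundle (and its parents) before talking to it. */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	/* ... issue Greybus operations on the bundle's connections ... */

	/* Mark it busy and allow it to autosuspend again. */
	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}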
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 615c8e7fd51e..b570e13394ac 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -14,9 +14,9 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <linux/greybus.h>
#include "gb-camera.h"
-#include "greybus.h"
#include "greybus_protocols.h"
enum gb_camera_debugs_buffer_id {
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
deleted file mode 100644
index 5ca3befc0636..000000000000
--- a/drivers/staging/greybus/connection.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus connections
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __CONNECTION_H
-#define __CONNECTION_H
-
-#include <linux/list.h>
-#include <linux/kfifo.h>
-
-#define GB_CONNECTION_FLAG_CSD BIT(0)
-#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
-#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
-#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
-#define GB_CONNECTION_FLAG_CONTROL BIT(4)
-#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
-
-#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
-
-enum gb_connection_state {
- GB_CONNECTION_STATE_DISABLED = 0,
- GB_CONNECTION_STATE_ENABLED_TX = 1,
- GB_CONNECTION_STATE_ENABLED = 2,
- GB_CONNECTION_STATE_DISCONNECTING = 3,
-};
-
-struct gb_operation;
-
-typedef int (*gb_request_handler_t)(struct gb_operation *);
-
-struct gb_connection {
- struct gb_host_device *hd;
- struct gb_interface *intf;
- struct gb_bundle *bundle;
- struct kref kref;
- u16 hd_cport_id;
- u16 intf_cport_id;
-
- struct list_head hd_links;
- struct list_head bundle_links;
-
- gb_request_handler_t handler;
- unsigned long flags;
-
- struct mutex mutex;
- spinlock_t lock;
- enum gb_connection_state state;
- struct list_head operations;
-
- char name[16];
- struct workqueue_struct *wq;
-
- atomic_t op_cycle;
-
- void *private;
-
- bool mode_switch;
-};
-
-struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
- u16 hd_cport_id, gb_request_handler_t handler);
-struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
-struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
- u16 cport_id, gb_request_handler_t handler);
-struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
- u16 cport_id, gb_request_handler_t handler,
- unsigned long flags);
-struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
- u16 cport_id, unsigned long flags);
-void gb_connection_destroy(struct gb_connection *connection);
-
-static inline bool gb_connection_is_static(struct gb_connection *connection)
-{
- return !connection->intf;
-}
-
-int gb_connection_enable(struct gb_connection *connection);
-int gb_connection_enable_tx(struct gb_connection *connection);
-void gb_connection_disable_rx(struct gb_connection *connection);
-void gb_connection_disable(struct gb_connection *connection);
-void gb_connection_disable_forced(struct gb_connection *connection);
-
-void gb_connection_mode_switch_prepare(struct gb_connection *connection);
-void gb_connection_mode_switch_complete(struct gb_connection *connection);
-
-void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
- u8 *data, size_t length);
-
-void gb_connection_latency_tag_enable(struct gb_connection *connection);
-void gb_connection_latency_tag_disable(struct gb_connection *connection);
-
-static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
-{
- return !(connection->flags & GB_CONNECTION_FLAG_CSD);
-}
-
-static inline bool
-gb_connection_flow_control_disabled(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
-}
-
-static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
-}
-
-static inline bool gb_connection_is_control(struct gb_connection *connection)
-{
- return connection->flags & GB_CONNECTION_FLAG_CONTROL;
-}
-
-static inline void *gb_connection_get_data(struct gb_connection *connection)
-{
- return connection->private;
-}
-
-static inline void gb_connection_set_data(struct gb_connection *connection,
- void *data)
-{
- connection->private = data;
-}
-
-#endif /* __CONNECTION_H */
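connection.h likewise moves to <linux/greybus.h>. A minimal sketch (again, not part of this patch) of the usual connection lifecycle in a bundle driver's probe path, assuming a single CPort descriptor; my_request_handler and my_probe are placeholder names:

/*
 * Illustrative sketch only: create a connection on the bundle's first CPort,
 * install a handler for incoming requests, enable it, and stash it as
 * driver data. Error handling is kept to the minimum needed for the shape.
 */
#include <linux/err.h>
#include <linux/greybus.h>

static int my_request_handler(struct gb_operation *op)
{
	/* Handle unsolicited requests arriving on this connection. */
	return 0;
}

static int my_probe(struct gb_bundle *bundle,
		    const struct greybus_bundle_id *id)
{
	struct gb_connection *connection;
	int ret;

	connection = gb_connection_create(bundle,
					  le16_to_cpu(bundle->cport_desc[0].id),
					  my_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	ret = gb_connection_enable(connection);
	if (ret) {
		gb_connection_destroy(connection);
		return ret;
	}

	greybus_set_drvdata(bundle, connection);

	return 0;
}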
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
deleted file mode 100644
index 3a29ec05f631..000000000000
--- a/drivers/staging/greybus/control.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus CPort control protocol
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __CONTROL_H
-#define __CONTROL_H
-
-struct gb_control {
- struct device dev;
- struct gb_interface *intf;
-
- struct gb_connection *connection;
-
- u8 protocol_major;
- u8 protocol_minor;
-
- bool has_bundle_activate;
- bool has_bundle_version;
-
- char *vendor_string;
- char *product_string;
-};
-#define to_gb_control(d) container_of(d, struct gb_control, dev)
-
-struct gb_control *gb_control_create(struct gb_interface *intf);
-int gb_control_enable(struct gb_control *control);
-void gb_control_disable(struct gb_control *control);
-int gb_control_suspend(struct gb_control *control);
-int gb_control_resume(struct gb_control *control);
-int gb_control_add(struct gb_control *control);
-void gb_control_del(struct gb_control *control);
-struct gb_control *gb_control_get(struct gb_control *control);
-void gb_control_put(struct gb_control *control);
-
-int gb_control_get_bundle_versions(struct gb_control *control);
-int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
-int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
-int gb_control_disconnecting_operation(struct gb_control *control,
- u16 cport_id);
-int gb_control_mode_switch_operation(struct gb_control *control);
-void gb_control_mode_switch_prepare(struct gb_control *control);
-void gb_control_mode_switch_complete(struct gb_control *control);
-int gb_control_get_manifest_size_operation(struct gb_interface *intf);
-int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
- size_t size);
-int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
-int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
-int gb_control_interface_suspend_prepare(struct gb_control *control);
-int gb_control_interface_deactivate_prepare(struct gb_control *control);
-int gb_control_interface_hibernate_abort(struct gb_control *control);
-#endif /* __CONTROL_H */
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
index 946221307ef6..5d2564462ffc 100644
--- a/drivers/staging/greybus/firmware.h
+++ b/drivers/staging/greybus/firmware.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Firmware Management Header
*
@@ -9,7 +9,7 @@
#ifndef __FIRMWARE_H
#define __FIRMWARE_H
-#include "greybus.h"
+#include <linux/greybus.h>
#define FW_NAME_PREFIX "gmp_"
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
index 388866d92f5b..57bebf24636b 100644
--- a/drivers/staging/greybus/fw-core.c
+++ b/drivers/staging/greybus/fw-core.c
@@ -8,8 +8,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/firmware.h>
+#include <linux/greybus.h>
#include "firmware.h"
-#include "greybus.h"
#include "spilib.h"
struct gb_fw_core {
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index d3b7cccbc10d..543692c567f9 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -10,8 +10,8 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
#include "firmware.h"
-#include "greybus.h"
/* Estimated minimum buffer size, actual size can be smaller than this */
#define MIN_FETCH_SIZE 512
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 71aec14f8181..687c6405c65b 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -13,10 +13,10 @@
#include <linux/idr.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
+#include <linux/greybus.h>
#include "firmware.h"
#include "greybus_firmware.h"
-#include "greybus.h"
#define FW_MGMT_TIMEOUT_MS 1000
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index ee293e461fc3..5fc469101fc1 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Camera protocol driver.
*
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 6cb85c3d3572..9fc5c47be9bd 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -13,8 +13,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#define GB_GBPHY_AUTOSUSPEND_MS 3000
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
index 99463489d7d6..087928a586fb 100644
--- a/drivers/staging/greybus/gbphy.h
+++ b/drivers/staging/greybus/gbphy.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus Bridged-Phy Bus driver
*
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 3151004d26fb..1ff34abd5692 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -13,8 +13,8 @@
#include <linux/irqdomain.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_gpio_line {
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
deleted file mode 100644
index d03ddb7c9df0..000000000000
--- a/drivers/staging/greybus/greybus.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus driver and device API
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#ifndef __LINUX_GREYBUS_H
-#define __LINUX_GREYBUS_H
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/idr.h>
-
-#include "greybus_id.h"
-#include "greybus_manifest.h"
-#include "greybus_protocols.h"
-#include "manifest.h"
-#include "hd.h"
-#include "svc.h"
-#include "control.h"
-#include "module.h"
-#include "interface.h"
-#include "bundle.h"
-#include "connection.h"
-#include "operation.h"
-
-/* Matches up with the Greybus Protocol specification document */
-#define GREYBUS_VERSION_MAJOR 0x00
-#define GREYBUS_VERSION_MINOR 0x01
-
-#define GREYBUS_ID_MATCH_DEVICE \
- (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
-
-#define GREYBUS_DEVICE(v, p) \
- .match_flags = GREYBUS_ID_MATCH_DEVICE, \
- .vendor = (v), \
- .product = (p),
-
-#define GREYBUS_DEVICE_CLASS(c) \
- .match_flags = GREYBUS_ID_MATCH_CLASS, \
- .class = (c),
-
-/* Maximum number of CPorts */
-#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
-#define CPORT_ID_BAD U16_MAX
-
-struct greybus_driver {
- const char *name;
-
- int (*probe)(struct gb_bundle *bundle,
- const struct greybus_bundle_id *id);
- void (*disconnect)(struct gb_bundle *bundle);
-
- const struct greybus_bundle_id *id_table;
-
- struct device_driver driver;
-};
-#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
-
-static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
-{
- dev_set_drvdata(&bundle->dev, data);
-}
-
-static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
-{
- return dev_get_drvdata(&bundle->dev);
-}
-
-/* Don't call these directly, use the module_greybus_driver() macro instead */
-int greybus_register_driver(struct greybus_driver *driver,
- struct module *module, const char *mod_name);
-void greybus_deregister_driver(struct greybus_driver *driver);
-
-/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
-#define greybus_register(driver) \
- greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
-#define greybus_deregister(driver) \
- greybus_deregister_driver(driver)
-
-/**
- * module_greybus_driver() - Helper macro for registering a Greybus driver
- * @__greybus_driver: greybus_driver structure
- *
- * Helper macro for Greybus drivers to set up proper module init / exit
- * functions. Replaces module_init() and module_exit() and keeps people from
- * printing pointless things to the kernel log when their driver is loaded.
- */
-#define module_greybus_driver(__greybus_driver) \
- module_driver(__greybus_driver, greybus_register, greybus_deregister)
-
-int greybus_disabled(void);
-
-void gb_debugfs_init(void);
-void gb_debugfs_cleanup(void);
-struct dentry *gb_debugfs_get(void);
-
-extern struct bus_type greybus_bus_type;
-
-extern struct device_type greybus_hd_type;
-extern struct device_type greybus_module_type;
-extern struct device_type greybus_interface_type;
-extern struct device_type greybus_control_type;
-extern struct device_type greybus_bundle_type;
-extern struct device_type greybus_svc_type;
-
-static inline int is_gb_host_device(const struct device *dev)
-{
- return dev->type == &greybus_hd_type;
-}
-
-static inline int is_gb_module(const struct device *dev)
-{
- return dev->type == &greybus_module_type;
-}
-
-static inline int is_gb_interface(const struct device *dev)
-{
- return dev->type == &greybus_interface_type;
-}
-
-static inline int is_gb_control(const struct device *dev)
-{
- return dev->type == &greybus_control_type;
-}
-
-static inline int is_gb_bundle(const struct device *dev)
-{
- return dev->type == &greybus_bundle_type;
-}
-
-static inline int is_gb_svc(const struct device *dev)
-{
- return dev->type == &greybus_svc_type;
-}
-
-static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
-{
- return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
-}
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_GREYBUS_H */
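With greybus.h now provided as <linux/greybus.h>, drivers keep registering exactly as the module_greybus_driver() comment above describes. A self-contained sketch with placeholder names and a placeholder class value (not part of this patch):

/*
 * Illustrative sketch only: a skeleton bundle driver that matches on a
 * class and registers via module_greybus_driver().
 */
#include <linux/greybus.h>
#include <linux/module.h>

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },	/* placeholder class */
	{ }
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");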
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 03ea9615b217..7edc7295b7ab 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -1,55 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Greybus Component Authentication User Header
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_AUTHENTICATION_USER_H
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index b58281a63ba4..f68fd5e25321 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -1,55 +1,9 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* Greybus Firmware Management User Header
*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 Google Inc. All rights reserved.
* Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Google Inc. All rights reserved.
- * Copyright(c) 2016 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __GREYBUS_FIRMWARE_USER_H
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
deleted file mode 100644
index f4c8440093e4..000000000000
--- a/drivers/staging/greybus/greybus_id.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* FIXME
- * move this to include/linux/mod_devicetable.h when merging
- */
-
-#ifndef __LINUX_GREYBUS_ID_H
-#define __LINUX_GREYBUS_ID_H
-
-#include <linux/types.h>
-#include <linux/mod_devicetable.h>
-
-
-struct greybus_bundle_id {
- __u16 match_flags;
- __u32 vendor;
- __u32 product;
- __u8 class;
-
- kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
-};
-
-/* Used to match the greybus_bundle_id */
-#define GREYBUS_ID_MATCH_VENDOR BIT(0)
-#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
-#define GREYBUS_ID_MATCH_CLASS BIT(2)
-
-#endif /* __LINUX_GREYBUS_ID_H */
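The match flags above are what GREYBUS_DEVICE() and GREYBUS_DEVICE_CLASS() set in an id table. A short sketch of a table combining a vendor/product match with a class match; the 0x1234/0x5678 values are placeholders:

/* Illustrative sketch only; vendor, product and class values are made up. */
static const struct greybus_bundle_id example_match_table[] = {
	{ GREYBUS_DEVICE(0x1234, 0x5678) },		/* placeholder vendor/product */
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};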
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
deleted file mode 100644
index 2cec5cf7a846..000000000000
--- a/drivers/staging/greybus/greybus_manifest.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus manifest definition
- *
- * See "Greybus Application Protocol" document (version 0.1) for
- * details on these values and structures.
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 and BSD licenses.
- */
-
-#ifndef __GREYBUS_MANIFEST_H
-#define __GREYBUS_MANIFEST_H
-
-enum greybus_descriptor_type {
- GREYBUS_TYPE_INVALID = 0x00,
- GREYBUS_TYPE_INTERFACE = 0x01,
- GREYBUS_TYPE_STRING = 0x02,
- GREYBUS_TYPE_BUNDLE = 0x03,
- GREYBUS_TYPE_CPORT = 0x04,
-};
-
-enum greybus_protocol {
- GREYBUS_PROTOCOL_CONTROL = 0x00,
- /* 0x01 is unused */
- GREYBUS_PROTOCOL_GPIO = 0x02,
- GREYBUS_PROTOCOL_I2C = 0x03,
- GREYBUS_PROTOCOL_UART = 0x04,
- GREYBUS_PROTOCOL_HID = 0x05,
- GREYBUS_PROTOCOL_USB = 0x06,
- GREYBUS_PROTOCOL_SDIO = 0x07,
- GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
- GREYBUS_PROTOCOL_PWM = 0x09,
- /* 0x0a is unused */
- GREYBUS_PROTOCOL_SPI = 0x0b,
- GREYBUS_PROTOCOL_DISPLAY = 0x0c,
- GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
- GREYBUS_PROTOCOL_SENSOR = 0x0e,
- GREYBUS_PROTOCOL_LIGHTS = 0x0f,
- GREYBUS_PROTOCOL_VIBRATOR = 0x10,
- GREYBUS_PROTOCOL_LOOPBACK = 0x11,
- GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
- GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
- GREYBUS_PROTOCOL_SVC = 0x14,
- GREYBUS_PROTOCOL_BOOTROM = 0x15,
- GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
- GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
- GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
- GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
- GREYBUS_PROTOCOL_LOG = 0x1a,
- /* ... */
- GREYBUS_PROTOCOL_RAW = 0xfe,
- GREYBUS_PROTOCOL_VENDOR = 0xff,
-};
-
-enum greybus_class_type {
- GREYBUS_CLASS_CONTROL = 0x00,
- /* 0x01 is unused */
- /* 0x02 is unused */
- /* 0x03 is unused */
- /* 0x04 is unused */
- GREYBUS_CLASS_HID = 0x05,
- /* 0x06 is unused */
- /* 0x07 is unused */
- GREYBUS_CLASS_POWER_SUPPLY = 0x08,
- /* 0x09 is unused */
- GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
- /* 0x0b is unused */
- GREYBUS_CLASS_DISPLAY = 0x0c,
- GREYBUS_CLASS_CAMERA = 0x0d,
- GREYBUS_CLASS_SENSOR = 0x0e,
- GREYBUS_CLASS_LIGHTS = 0x0f,
- GREYBUS_CLASS_VIBRATOR = 0x10,
- GREYBUS_CLASS_LOOPBACK = 0x11,
- GREYBUS_CLASS_AUDIO = 0x12,
- /* 0x13 is unused */
- /* 0x14 is unused */
- GREYBUS_CLASS_BOOTROM = 0x15,
- GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
- GREYBUS_CLASS_LOG = 0x17,
- /* ... */
- GREYBUS_CLASS_RAW = 0xfe,
- GREYBUS_CLASS_VENDOR = 0xff,
-};
-
-enum {
- GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
-};
-
-/*
- * The string in a string descriptor is not NUL-terminated. The
- * size of the descriptor will be rounded up to a multiple of 4
- * bytes, by padding the string with 0x00 bytes if necessary.
- */
-struct greybus_descriptor_string {
- __u8 length;
- __u8 id;
- __u8 string[0];
-} __packed;
-
-/*
- * An interface descriptor describes information about an interface as a whole,
- * *not* the functions within it.
- */
-struct greybus_descriptor_interface {
- __u8 vendor_stringid;
- __u8 product_stringid;
- __u8 features;
- __u8 pad;
-} __packed;
-
-/*
- * A bundle descriptor defines an identification number and a class for
- * each bundle.
- *
- * @id: Uniquely identifies a bundle within an interface; its sole purpose is
- * to allow CPort descriptors to specify which bundle they are associated with.
- * The first bundle has id 0, the second has id 1, and so on.
- *
- * The largest CPort id associated with a bundle (defined by a
- * CPort descriptor in the manifest) is used to determine how to
- * encode the device id and module number in UniPro packets
- * that use the bundle.
- *
- * @class: Used by the kernel to identify the functionality provided by the
- * bundle; it is matched against a driver's functionality when probing a
- * Greybus driver. It should contain one of the values defined in
- * 'enum greybus_class_type'.
- *
- */
-struct greybus_descriptor_bundle {
- __u8 id; /* interface-relative id (0..) */
- __u8 class;
- __u8 pad[2];
-} __packed;
-
-/*
- * A CPort descriptor indicates the id of the bundle within the
- * module it's associated with, along with the CPort id used to
- * address the CPort. The protocol id defines the format of messages
- * exchanged using the CPort.
- */
-struct greybus_descriptor_cport {
- __le16 id;
- __u8 bundle;
- __u8 protocol_id; /* enum greybus_protocol */
-} __packed;
-
-struct greybus_descriptor_header {
- __le16 size;
- __u8 type; /* enum greybus_descriptor_type */
- __u8 pad;
-} __packed;
-
-struct greybus_descriptor {
- struct greybus_descriptor_header header;
- union {
- struct greybus_descriptor_string string;
- struct greybus_descriptor_interface interface;
- struct greybus_descriptor_bundle bundle;
- struct greybus_descriptor_cport cport;
- };
-} __packed;
-
-struct greybus_manifest_header {
- __le16 size;
- __u8 version_major;
- __u8 version_minor;
-} __packed;
-
-struct greybus_manifest {
- struct greybus_manifest_header header;
- struct greybus_descriptor descriptors[0];
-} __packed;
-
-#endif /* __GREYBUS_MANIFEST_H */
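The comments above describe how descriptors nest inside a manifest. A hand-laid-out sketch (illustrative only; real manifests are generated by the manifest tools and also carry interface and string descriptors) showing how the size fields are computed:

/*
 * Illustrative sketch only: one bundle descriptor plus one CPort descriptor
 * behind a manifest header, with every size field covering its own header.
 */
#include <linux/greybus.h>

struct example_manifest {
	struct greybus_manifest_header		header;
	struct greybus_descriptor_header	bundle_hdr;
	struct greybus_descriptor_bundle	bundle;
	struct greybus_descriptor_header	cport_hdr;
	struct greybus_descriptor_cport		cport;
} __packed;

static const struct example_manifest example = {
	/* Overall size covers the manifest header plus every descriptor. */
	.header = {
		.size		= cpu_to_le16(sizeof(struct example_manifest)),
		.version_major	= GREYBUS_VERSION_MAJOR,
		.version_minor	= GREYBUS_VERSION_MINOR,
	},
	/* Each descriptor's size includes its own descriptor header. */
	.bundle_hdr = {
		.size	= cpu_to_le16(sizeof(struct greybus_descriptor_header) +
				      sizeof(struct greybus_descriptor_bundle)),
		.type	= GREYBUS_TYPE_BUNDLE,
	},
	.bundle = {
		.id	= 0,				/* first bundle */
		.class	= GREYBUS_CLASS_VIBRATOR,	/* placeholder class */
	},
	.cport_hdr = {
		.size	= cpu_to_le16(sizeof(struct greybus_descriptor_header) +
				      sizeof(struct greybus_descriptor_cport)),
		.type	= GREYBUS_TYPE_CPORT,
	},
	.cport = {
		.id		= cpu_to_le16(1),
		.bundle		= 0,			/* belongs to bundle 0 */
		.protocol_id	= GREYBUS_PROTOCOL_VIBRATOR,
	},
};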
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
deleted file mode 100644
index ddc73f10eb22..000000000000
--- a/drivers/staging/greybus/greybus_protocols.h
+++ /dev/null
@@ -1,2222 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
- * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
- * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
- * its contributors may be used to endorse or promote products
- * derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
- * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __GREYBUS_PROTOCOLS_H
-#define __GREYBUS_PROTOCOLS_H
-
-/* Fixed IDs for control/svc protocols */
-
-/* SVC switch-port device ids */
-#define GB_SVC_DEVICE_ID_SVC 0
-#define GB_SVC_DEVICE_ID_AP 1
-#define GB_SVC_DEVICE_ID_MIN 2
-#define GB_SVC_DEVICE_ID_MAX 31
-
-#define GB_SVC_CPORT_ID 0
-#define GB_CONTROL_BUNDLE_ID 0
-#define GB_CONTROL_CPORT_ID 0
-
-
-/*
- * All operation messages (both requests and responses) begin with
- * a header that encodes the size of the message (header included).
- * This header also contains a unique identifier, that associates a
- * response message with its operation. The header contains an
- * operation type field, whose interpretation is dependent on what
- * type of protocol is used over the connection. The high bit
- * (0x80) of the operation type field is used to indicate whether
- * the message is a request (clear) or a response (set).
- *
- * Response messages include an additional result byte, which
- * communicates the result of the corresponding request. A zero
- * result value means the operation completed successfully. Any
- * other value indicates an error; in this case, the payload of the
- * response message (if any) is ignored. The result byte must be
- * zero in the header for a request message.
- *
- * The wire format for all numeric fields in the header is little
- * endian. Any operation-specific data begins immediately after the
- * header.
- */
-struct gb_operation_msg_hdr {
- __le16 size; /* Size in bytes of header + payload */
- __le16 operation_id; /* Operation unique id */
- __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
- __u8 result; /* Result of request (in responses only) */
- __u8 pad[2]; /* must be zero (ignore when read) */
-} __packed;
-
-
-/* Generic request types */
-#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
-#define GB_REQUEST_TYPE_INVALID 0x7f
-
-struct gb_cport_shutdown_request {
- __u8 phase;
-} __packed;
-
-
-/* Control Protocol */
-
-/* Greybus control request types */
-#define GB_CONTROL_TYPE_VERSION 0x01
-#define GB_CONTROL_TYPE_PROBE_AP 0x02
-#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
-#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
-#define GB_CONTROL_TYPE_CONNECTED 0x05
-#define GB_CONTROL_TYPE_DISCONNECTED 0x06
-#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
-#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
-#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
-/* Unused 0x0a */
-#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
-#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
-#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
-#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
-#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
-#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
-#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
-#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
-#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
-#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
-#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
-
-struct gb_control_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_control_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_control_bundle_version_request {
- __u8 bundle_id;
-} __packed;
-
-struct gb_control_bundle_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* Control protocol manifest get size request has no payload */
-struct gb_control_get_manifest_size_response {
- __le16 size;
-} __packed;
-
-/* Control protocol manifest get request has no payload */
-struct gb_control_get_manifest_response {
- __u8 data[0];
-} __packed;
-
-/* Control protocol [dis]connected request */
-struct gb_control_connected_request {
- __le16 cport_id;
-} __packed;
-
-struct gb_control_disconnecting_request {
- __le16 cport_id;
-} __packed;
-/* disconnecting response has no payload */
-
-struct gb_control_disconnected_request {
- __le16 cport_id;
-} __packed;
-/* Control protocol [dis]connected response has no payload */
-
-/*
- * All Bundle power management operations use the same request and response
- * layout and status codes.
- */
-
-#define GB_CONTROL_BUNDLE_PM_OK 0x00
-#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
-#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
-#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
-#define GB_CONTROL_BUNDLE_PM_NA 0x04
-
-struct gb_control_bundle_pm_request {
- __u8 bundle_id;
-} __packed;
-
-struct gb_control_bundle_pm_response {
- __u8 status;
-} __packed;
-
-/*
- * Interface Suspend Prepare and Deactivate Prepare operations use the same
- * response layout and error codes. Define a single response structure and reuse
- * it. Both operations have no payload.
- */
-
-#define GB_CONTROL_INTF_PM_OK 0x00
-#define GB_CONTROL_INTF_PM_BUSY 0x01
-#define GB_CONTROL_INTF_PM_NA 0x02
-
-struct gb_control_intf_pm_response {
- __u8 status;
-} __packed;
-
-/* APBridge protocol */
-
-/* request APB1 log */
-#define GB_APB_REQUEST_LOG 0x02
-
-/* request to map a cport to bulk in and bulk out endpoints */
-#define GB_APB_REQUEST_EP_MAPPING 0x03
-
-/* request to get the number of cports available */
-#define GB_APB_REQUEST_CPORT_COUNT 0x04
-
-/* request to reset a cport state */
-#define GB_APB_REQUEST_RESET_CPORT 0x05
-
-/* request to time the latency of messages on a given cport */
-#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
-#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
-
-/* request to control the CSI transmitter */
-#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
-
-/* request to control audio streaming */
-#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
-
-/* TimeSync requests */
-#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
-#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
-#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
-#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
-
-/* requests to set Greybus CPort flags */
-#define GB_APB_REQUEST_CPORT_FLAGS 0x11
-
-/* ARPC request */
-#define GB_APB_REQUEST_ARPC_RUN 0x12
-
-struct gb_apb_request_cport_flags {
- __le32 flags;
-#define GB_APB_CPORT_FLAG_CONTROL 0x01
-#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
-} __packed;
-
-
-/* Firmware Download Protocol */
-
-/* Request Types */
-#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
-#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
-#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
-
-#define GB_FIRMWARE_TAG_MAX_SIZE 10
-
-/* firmware download find firmware request/response */
-struct gb_fw_download_find_firmware_request {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-
-struct gb_fw_download_find_firmware_response {
- __u8 firmware_id;
- __le32 size;
-} __packed;
-
-/* firmware download fetch firmware request/response */
-struct gb_fw_download_fetch_firmware_request {
- __u8 firmware_id;
- __le32 offset;
- __le32 size;
-} __packed;
-
-struct gb_fw_download_fetch_firmware_response {
- __u8 data[0];
-} __packed;
-
-/* firmware download release firmware request */
-struct gb_fw_download_release_firmware_request {
- __u8 firmware_id;
-} __packed;
-/* firmware download release firmware response has no payload */
-
-
-/* Firmware Management Protocol */
-
-/* Request Types */
-#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
-#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
-#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
-#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
-#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
-#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
-
-#define GB_FW_LOAD_METHOD_UNIPRO 0x01
-#define GB_FW_LOAD_METHOD_INTERNAL 0x02
-
-#define GB_FW_LOAD_STATUS_FAILED 0x00
-#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
-#define GB_FW_LOAD_STATUS_VALIDATED 0x02
-#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
-
-#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
-#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
-#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
-#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
-#define GB_FW_BACKEND_FW_STATUS_INT 0x05
-#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
-#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
-
-#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
-#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
-#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
-#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
-#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
-
-/* firmware management interface firmware version request has no payload */
-struct gb_fw_mgmt_interface_fw_version_response {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
- __le16 major;
- __le16 minor;
-} __packed;
-
-/* firmware management load and validate firmware request/response */
-struct gb_fw_mgmt_load_and_validate_fw_request {
- __u8 request_id;
- __u8 load_method;
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-/* firmware management load and validate firmware response has no payload */
-
-/* firmware management loaded firmware request */
-struct gb_fw_mgmt_loaded_fw_request {
- __u8 request_id;
- __u8 status;
- __le16 major;
- __le16 minor;
-} __packed;
-/* firmware management loaded firmware response has no payload */
-
-/* firmware management backend firmware version request/response */
-struct gb_fw_mgmt_backend_fw_version_request {
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-
-struct gb_fw_mgmt_backend_fw_version_response {
- __le16 major;
- __le16 minor;
- __u8 status;
-} __packed;
-
-/* firmware management backend firmware update request */
-struct gb_fw_mgmt_backend_fw_update_request {
- __u8 request_id;
- __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
-} __packed;
-/* firmware management backend firmware update response has no payload */
-
-/* firmware management backend firmware updated request */
-struct gb_fw_mgmt_backend_fw_updated_request {
- __u8 request_id;
- __u8 status;
-} __packed;
-/* firmware management backend firmware updated response has no payload */
-
-
-/* Component Authentication Protocol (CAP) */
-
-/* Request Types */
-#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
-#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
-#define GB_CAP_TYPE_AUTHENTICATE 0x03
-
-/* CAP get endpoint uid request has no payload */
-struct gb_cap_get_endpoint_uid_response {
- __u8 uid[8];
-} __packed;
-
-/* CAP get endpoint ims certificate request/response */
-struct gb_cap_get_ims_certificate_request {
- __le32 certificate_class;
- __le32 certificate_id;
-} __packed;
-
-struct gb_cap_get_ims_certificate_response {
- __u8 result_code;
- __u8 certificate[0];
-} __packed;
-
-/* CAP authenticate request/response */
-struct gb_cap_authenticate_request {
- __le32 auth_type;
- __u8 uid[8];
- __u8 challenge[32];
-} __packed;
-
-struct gb_cap_authenticate_response {
- __u8 result_code;
- __u8 response[64];
- __u8 signature[0];
-} __packed;
-
-
-/* Bootrom Protocol */
-
-/* Version of the Greybus bootrom protocol we support */
-#define GB_BOOTROM_VERSION_MAJOR 0x00
-#define GB_BOOTROM_VERSION_MINOR 0x01
-
-/* Greybus bootrom request types */
-#define GB_BOOTROM_TYPE_VERSION 0x01
-#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
-#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
-#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
-#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */
-#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */
-
-/* Greybus bootrom boot stages */
-#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
-#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
-#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
-
-/* Greybus bootrom ready to boot status */
-#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
-#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
-#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
-
-/* Max bootrom data fetch size in bytes */
-#define GB_BOOTROM_FETCH_MAX 2000
-
-struct gb_bootrom_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_bootrom_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* Bootrom protocol firmware size request/response */
-struct gb_bootrom_firmware_size_request {
- __u8 stage;
-} __packed;
-
-struct gb_bootrom_firmware_size_response {
- __le32 size;
-} __packed;
-
-/* Bootrom protocol get firmware request/response */
-struct gb_bootrom_get_firmware_request {
- __le32 offset;
- __le32 size;
-} __packed;
-
-struct gb_bootrom_get_firmware_response {
- __u8 data[0];
-} __packed;
-
-/* Bootrom protocol Ready to boot request */
-struct gb_bootrom_ready_to_boot_request {
- __u8 status;
-} __packed;
-/* Bootrom protocol Ready to boot response has no payload */
-
-/* Bootrom protocol get VID/PID request has no payload */
-struct gb_bootrom_get_vid_pid_response {
- __le32 vendor_id;
- __le32 product_id;
-} __packed;
-
-
-/* Power Supply */
-
-/* Greybus power supply request types */
-#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
-#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
-#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
-#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
-#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
-#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
-
-/* Greybus power supply battery technologies types */
-#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
-#define GB_POWER_SUPPLY_TECH_LION 0x0002
-#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
-#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
-#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
-#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
-
-/* Greybus power supply types */
-#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
-#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
-#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
-#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
-#define GB_POWER_SUPPLY_USB_TYPE 0x0004
-#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
-#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
-#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
-
-/* Greybus power supply health values */
-#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
-#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
-#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
-#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
-#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
-#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
-#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
-#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
-
-/* Greybus power supply status values */
-#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
-#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
-#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
-#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
-
-/* Greybus power supply capacity level values */
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
-#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
-
-/* Greybus power supply scope values */
-#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
-#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
-#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
-
-struct gb_power_supply_get_supplies_response {
- __u8 supplies_count;
-} __packed;
-
-struct gb_power_supply_get_description_request {
- __u8 psy_id;
-} __packed;
-
-struct gb_power_supply_get_description_response {
- __u8 manufacturer[32];
- __u8 model[32];
- __u8 serial_number[32];
- __le16 type;
- __u8 properties_count;
-} __packed;
-
-struct gb_power_supply_props_desc {
- __u8 property;
-#define GB_POWER_SUPPLY_PROP_STATUS 0x00
-#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
-#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
-#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
-#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
-#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
-#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
-#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
-#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
-#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
-#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
-#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
-#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
-#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
-#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
-#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
-#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
-#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
-#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
-#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
-#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
-#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
-#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
-#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
-#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
-#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
-#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
-#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
-#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
-#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
-#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
-#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
-#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
-#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
-#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
-#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
-#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
-#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
-#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
-#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
-#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
-#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
-#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
-#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
-#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
-#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
-#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
-#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
-#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
-#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
- __u8 is_writeable;
-} __packed;
-
-struct gb_power_supply_get_property_descriptors_request {
- __u8 psy_id;
-} __packed;
-
-struct gb_power_supply_get_property_descriptors_response {
- __u8 properties_count;
- struct gb_power_supply_props_desc props[];
-} __packed;
-
-struct gb_power_supply_get_property_request {
- __u8 psy_id;
- __u8 property;
-} __packed;
-
-struct gb_power_supply_get_property_response {
- __le32 prop_val;
-};
-
-struct gb_power_supply_set_property_request {
- __u8 psy_id;
- __u8 property;
- __le32 prop_val;
-} __packed;
-
-struct gb_power_supply_event_request {
- __u8 psy_id;
- __u8 event;
-#define GB_POWER_SUPPLY_UPDATE 0x01
-} __packed;
-
-
-/* HID */
-
-/* Greybus HID operation types */
-#define GB_HID_TYPE_GET_DESC 0x02
-#define GB_HID_TYPE_GET_REPORT_DESC 0x03
-#define GB_HID_TYPE_PWR_ON 0x04
-#define GB_HID_TYPE_PWR_OFF 0x05
-#define GB_HID_TYPE_GET_REPORT 0x06
-#define GB_HID_TYPE_SET_REPORT 0x07
-#define GB_HID_TYPE_IRQ_EVENT 0x08
-
-/* Report type */
-#define GB_HID_INPUT_REPORT 0
-#define GB_HID_OUTPUT_REPORT 1
-#define GB_HID_FEATURE_REPORT 2
-
-/* Different request/response structures */
-/* HID get descriptor response */
-struct gb_hid_desc_response {
- __u8 bLength;
- __le16 wReportDescLength;
- __le16 bcdHID;
- __le16 wProductID;
- __le16 wVendorID;
- __u8 bCountryCode;
-} __packed;
-
-/* HID get report request/response */
-struct gb_hid_get_report_request {
- __u8 report_type;
- __u8 report_id;
-} __packed;
-
-/* HID set report request */
-struct gb_hid_set_report_request {
- __u8 report_type;
- __u8 report_id;
- __u8 report[0];
-} __packed;
-
-/* HID input report request, via interrupt pipe */
-struct gb_hid_input_report_request {
- __u8 report[0];
-} __packed;
-
-
-/* I2C */
-
-/* Greybus i2c request types */
-#define GB_I2C_TYPE_FUNCTIONALITY 0x02
-#define GB_I2C_TYPE_TRANSFER 0x05
-
-/* functionality request has no payload */
-struct gb_i2c_functionality_response {
- __le32 functionality;
-} __packed;
-
-/*
- * Outgoing data immediately follows the op count and ops array.
- * The data for each write (master -> slave) op in the array is sent
- * in order, with no (e.g. pad) bytes separating them.
- *
- * Short reads cause the entire transfer request to fail, so the response
- * payload consists only of the bytes read, and the number of bytes is
- * exactly what was specified in the corresponding op. Like
- * outgoing data, the incoming data is in order and contiguous.
- */
-struct gb_i2c_transfer_op {
- __le16 addr;
- __le16 flags;
- __le16 size;
-} __packed;
-
-struct gb_i2c_transfer_request {
- __le16 op_count;
- struct gb_i2c_transfer_op ops[0]; /* op_count of these */
-} __packed;
-struct gb_i2c_transfer_response {
- __u8 data[0]; /* inbound data */
-} __packed;
-
-
-/* GPIO */
-
-/* Greybus GPIO request types */
-#define GB_GPIO_TYPE_LINE_COUNT 0x02
-#define GB_GPIO_TYPE_ACTIVATE 0x03
-#define GB_GPIO_TYPE_DEACTIVATE 0x04
-#define GB_GPIO_TYPE_GET_DIRECTION 0x05
-#define GB_GPIO_TYPE_DIRECTION_IN 0x06
-#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
-#define GB_GPIO_TYPE_GET_VALUE 0x08
-#define GB_GPIO_TYPE_SET_VALUE 0x09
-#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
-#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
-#define GB_GPIO_TYPE_IRQ_MASK 0x0c
-#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
-#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
-
-#define GB_GPIO_IRQ_TYPE_NONE 0x00
-#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
-#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
-#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
-#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
-#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
-
-/* line count request has no payload */
-struct gb_gpio_line_count_response {
- __u8 count;
-} __packed;
-
-struct gb_gpio_activate_request {
- __u8 which;
-} __packed;
-/* activate response has no payload */
-
-struct gb_gpio_deactivate_request {
- __u8 which;
-} __packed;
-/* deactivate response has no payload */
-
-struct gb_gpio_get_direction_request {
- __u8 which;
-} __packed;
-struct gb_gpio_get_direction_response {
- __u8 direction;
-} __packed;
-
-struct gb_gpio_direction_in_request {
- __u8 which;
-} __packed;
-/* direction in response has no payload */
-
-struct gb_gpio_direction_out_request {
- __u8 which;
- __u8 value;
-} __packed;
-/* direction out response has no payload */
-
-struct gb_gpio_get_value_request {
- __u8 which;
-} __packed;
-struct gb_gpio_get_value_response {
- __u8 value;
-} __packed;
-
-struct gb_gpio_set_value_request {
- __u8 which;
- __u8 value;
-} __packed;
-/* set value response has no payload */
-
-struct gb_gpio_set_debounce_request {
- __u8 which;
- __le16 usec;
-} __packed;
-/* debounce response has no payload */
-
-struct gb_gpio_irq_type_request {
- __u8 which;
- __u8 type;
-} __packed;
-/* irq type response has no payload */
-
-struct gb_gpio_irq_mask_request {
- __u8 which;
-} __packed;
-/* irq mask response has no payload */
-
-struct gb_gpio_irq_unmask_request {
- __u8 which;
-} __packed;
-/* irq unmask response has no payload */
-
-/* irq event requests originate on another module and are handled on the AP */
-struct gb_gpio_irq_event_request {
- __u8 which;
-} __packed;
-/* irq event has no response */
-
-
-/* PWM */
-
-/* Greybus PWM operation types */
-#define GB_PWM_TYPE_PWM_COUNT 0x02
-#define GB_PWM_TYPE_ACTIVATE 0x03
-#define GB_PWM_TYPE_DEACTIVATE 0x04
-#define GB_PWM_TYPE_CONFIG 0x05
-#define GB_PWM_TYPE_POLARITY 0x06
-#define GB_PWM_TYPE_ENABLE 0x07
-#define GB_PWM_TYPE_DISABLE 0x08
-
-/* pwm count request has no payload */
-struct gb_pwm_count_response {
- __u8 count;
-} __packed;
-
-struct gb_pwm_activate_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_deactivate_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_config_request {
- __u8 which;
- __le32 duty;
- __le32 period;
-} __packed;
-
-struct gb_pwm_polarity_request {
- __u8 which;
- __u8 polarity;
-} __packed;
-
-struct gb_pwm_enable_request {
- __u8 which;
-} __packed;
-
-struct gb_pwm_disable_request {
- __u8 which;
-} __packed;
-
-/* SPI */
-
-/* Should match up with modes in linux/spi/spi.h */
-#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
-#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
-#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */
-#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA)
-#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0)
-#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA)
-#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
-#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
-#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
-#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
-
-/* Should match up with flags in linux/spi/spi.h */
-#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
-#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
-
-/* Greybus spi operation types */
-#define GB_SPI_TYPE_MASTER_CONFIG 0x02
-#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
-#define GB_SPI_TYPE_TRANSFER 0x04
-
-/* mode request has no payload */
-struct gb_spi_master_config_response {
- __le32 bits_per_word_mask;
- __le32 min_speed_hz;
- __le32 max_speed_hz;
- __le16 mode;
- __le16 flags;
- __u8 num_chipselect;
-} __packed;
-
-struct gb_spi_device_config_request {
- __u8 chip_select;
-} __packed;
-
-struct gb_spi_device_config_response {
- __le16 mode;
- __u8 bits_per_word;
- __le32 max_speed_hz;
- __u8 device_type;
-#define GB_SPI_SPI_DEV 0x00
-#define GB_SPI_SPI_NOR 0x01
-#define GB_SPI_SPI_MODALIAS 0x02
- __u8 name[32];
-} __packed;
-
-/**
- * struct gb_spi_transfer - a read/write buffer pair
- * @speed_hz: Select a speed other than the device default for this transfer. If
- * 0 the default (from @spi_device) is used.
- * @len: size of rx and tx buffers (in bytes)
- * @delay_usecs: microseconds to delay after this transfer before (optionally)
- * changing the chipselect status, then starting the next transfer or
- * completing this spi_message.
- * @cs_change: affects chipselect after this transfer completes
- * @bits_per_word: select a bits_per_word other than the device default for this
- * transfer. If 0 the default (from @spi_device) is used.
- */
-struct gb_spi_transfer {
- __le32 speed_hz;
- __le32 len;
- __le16 delay_usecs;
- __u8 cs_change;
- __u8 bits_per_word;
- __u8 xfer_flags;
-#define GB_SPI_XFER_READ 0x01
-#define GB_SPI_XFER_WRITE 0x02
-#define GB_SPI_XFER_INPROGRESS 0x04
-} __packed;
-
-struct gb_spi_transfer_request {
- __u8 chip_select; /* of the spi device */
- __u8 mode; /* of the spi device */
- __le16 count;
- struct gb_spi_transfer transfers[0]; /* count of these */
-} __packed;
-
-struct gb_spi_transfer_response {
- __u8 data[0]; /* inbound data */
-} __packed;
-
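/*
 * A minimal sketch, assuming kzalloc() and the cpu_to_le*() helpers from
 * the usual kernel headers: how an AP-side driver might size and fill a
 * single-transfer gb_spi_transfer_request. The helper name is a
 * hypothetical stand-in; speed_hz and bits_per_word are left at zero so
 * the device defaults apply, as described above.
 */
static struct gb_spi_transfer_request *
example_build_spi_write(u8 chip_select, u8 mode, u32 len, size_t *req_size)
{
	struct gb_spi_transfer_request *req;

	/*
	 * One descriptor follows the header; for a write, the outbound data
	 * is assumed to follow the descriptors in the same message.
	 */
	*req_size = sizeof(*req) + sizeof(struct gb_spi_transfer) + len;
	req = kzalloc(*req_size, GFP_KERNEL);
	if (!req)
		return NULL;

	req->chip_select = chip_select;
	req->mode = mode;
	req->count = cpu_to_le16(1);
	req->transfers[0].len = cpu_to_le32(len);
	req->transfers[0].xfer_flags = GB_SPI_XFER_WRITE;

	return req;
}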
-/* Version of the Greybus SVC protocol we support */
-#define GB_SVC_VERSION_MAJOR 0x00
-#define GB_SVC_VERSION_MINOR 0x01
-
-/* Greybus SVC request types */
-#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
-#define GB_SVC_TYPE_SVC_HELLO 0x02
-#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
-#define GB_SVC_TYPE_INTF_RESET 0x06
-#define GB_SVC_TYPE_CONN_CREATE 0x07
-#define GB_SVC_TYPE_CONN_DESTROY 0x08
-#define GB_SVC_TYPE_DME_PEER_GET 0x09
-#define GB_SVC_TYPE_DME_PEER_SET 0x0a
-#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
-#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
-#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
-#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
-#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
-#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
-#define GB_SVC_TYPE_INTF_EJECT 0x11
-#define GB_SVC_TYPE_PING 0x13
-#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
-#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
-#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
-#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
-#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
-#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
-#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
-#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
-#define GB_SVC_TYPE_MODULE_REMOVED 0x20
-#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
-#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
-#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
-#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
-#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
-#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
-#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
-#define GB_SVC_TYPE_INTF_RESUME 0x28
-#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
-#define GB_SVC_TYPE_INTF_OOPS 0x2a
-
-/* Greybus SVC protocol status values */
-#define GB_SVC_OP_SUCCESS 0x00
-#define GB_SVC_OP_UNKNOWN_ERROR 0x01
-#define GB_SVC_INTF_NOT_DETECTED 0x02
-#define GB_SVC_INTF_NO_UPRO_LINK 0x03
-#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
-#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
-#define GB_SVC_INTF_NO_V_SYS 0x06
-#define GB_SVC_INTF_V_CHG 0x07
-#define GB_SVC_INTF_WAKE_BUSY 0x08
-#define GB_SVC_INTF_NO_REFCLK 0x09
-#define GB_SVC_INTF_RELEASING 0x0a
-#define GB_SVC_INTF_NO_ORDER 0x0b
-#define GB_SVC_INTF_MBOX_SET 0x0c
-#define GB_SVC_INTF_BAD_MBOX 0x0d
-#define GB_SVC_INTF_OP_TIMEOUT 0x0e
-#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
-
-struct gb_svc_version_request {
- __u8 major;
- __u8 minor;
-} __packed;
-
-struct gb_svc_version_response {
- __u8 major;
- __u8 minor;
-} __packed;
-
-/* SVC protocol hello request */
-struct gb_svc_hello_request {
- __le16 endo_id;
- __u8 interface_id;
-} __packed;
-/* hello response has no payload */
-
-struct gb_svc_intf_device_id_request {
- __u8 intf_id;
- __u8 device_id;
-} __packed;
-/* device id response has no payload */
-
-struct gb_svc_intf_reset_request {
- __u8 intf_id;
-} __packed;
-/* interface reset response has no payload */
-
-struct gb_svc_intf_eject_request {
- __u8 intf_id;
-} __packed;
-/* interface eject response has no payload */
-
-struct gb_svc_conn_create_request {
- __u8 intf1_id;
- __le16 cport1_id;
- __u8 intf2_id;
- __le16 cport2_id;
- __u8 tc;
- __u8 flags;
-} __packed;
-/* connection create response has no payload */
-
-struct gb_svc_conn_destroy_request {
- __u8 intf1_id;
- __le16 cport1_id;
- __u8 intf2_id;
- __le16 cport2_id;
-} __packed;
-/* connection destroy response has no payload */
-
-struct gb_svc_dme_peer_get_request {
- __u8 intf_id;
- __le16 attr;
- __le16 selector;
-} __packed;
-
-struct gb_svc_dme_peer_get_response {
- __le16 result_code;
- __le32 attr_value;
-} __packed;
-
-struct gb_svc_dme_peer_set_request {
- __u8 intf_id;
- __le16 attr;
- __le16 selector;
- __le32 value;
-} __packed;
-
-struct gb_svc_dme_peer_set_response {
- __le16 result_code;
-} __packed;
-
-/* Greybus init-status values, currently retrieved using DME peer gets. */
-#define GB_INIT_SPI_BOOT_STARTED 0x02
-#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
-#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
-#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
-#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
-#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
-
-struct gb_svc_route_create_request {
- __u8 intf1_id;
- __u8 dev1_id;
- __u8 intf2_id;
- __u8 dev2_id;
-} __packed;
-/* route create response has no payload */
-
-struct gb_svc_route_destroy_request {
- __u8 intf1_id;
- __u8 intf2_id;
-} __packed;
-/* route destroy response has no payload */
-
-/* used for svc_intf_vsys_{enable,disable} */
-struct gb_svc_intf_vsys_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_vsys_response {
- __u8 result_code;
-#define GB_SVC_INTF_VSYS_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_VSYS_FAIL 0x02
-} __packed;
-
-/* used for svc_intf_refclk_{enable,disable} */
-struct gb_svc_intf_refclk_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_refclk_response {
- __u8 result_code;
-#define GB_SVC_INTF_REFCLK_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_REFCLK_FAIL 0x02
-} __packed;
-
-/* used for svc_intf_unipro_{enable,disable} */
-struct gb_svc_intf_unipro_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_unipro_response {
- __u8 result_code;
-#define GB_SVC_INTF_UNIPRO_OK 0x00
- /* 0x01 is reserved */
-#define GB_SVC_INTF_UNIPRO_FAIL 0x02
-#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
-} __packed;
-
-#define GB_SVC_UNIPRO_FAST_MODE 0x01
-#define GB_SVC_UNIPRO_SLOW_MODE 0x02
-#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
-#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
-#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
-#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
-#define GB_SVC_UNIPRO_OFF_MODE 0x12
-
-#define GB_SVC_SMALL_AMPLITUDE 0x01
-#define GB_SVC_LARGE_AMPLITUDE 0x02
-
-#define GB_SVC_NO_DE_EMPHASIS 0x00
-#define GB_SVC_SMALL_DE_EMPHASIS 0x01
-#define GB_SVC_LARGE_DE_EMPHASIS 0x02
-
-#define GB_SVC_PWRM_RXTERMINATION 0x01
-#define GB_SVC_PWRM_TXTERMINATION 0x02
-#define GB_SVC_PWRM_LINE_RESET 0x04
-#define GB_SVC_PWRM_SCRAMBLING 0x20
-
-#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
-
-#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
-#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
-
-#define GB_SVC_SETPWRM_PWR_OK 0x00
-#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
-#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
-#define GB_SVC_SETPWRM_PWR_BUSY 0x03
-#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
-#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
-
-struct gb_svc_l2_timer_cfg {
- __le16 tsb_fc0_protection_timeout;
- __le16 tsb_tc0_replay_timeout;
- __le16 tsb_afc0_req_timeout;
- __le16 tsb_fc1_protection_timeout;
- __le16 tsb_tc1_replay_timeout;
- __le16 tsb_afc1_req_timeout;
- __le16 reserved_for_tc2[3];
- __le16 reserved_for_tc3[3];
-} __packed;
-
-struct gb_svc_intf_set_pwrm_request {
- __u8 intf_id;
- __u8 hs_series;
- __u8 tx_mode;
- __u8 tx_gear;
- __u8 tx_nlanes;
- __u8 tx_amplitude;
- __u8 tx_hs_equalizer;
- __u8 rx_mode;
- __u8 rx_gear;
- __u8 rx_nlanes;
- __u8 flags;
- __le32 quirks;
- struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
-} __packed;
-
-struct gb_svc_intf_set_pwrm_response {
- __u8 result_code;
-} __packed;
-
-struct gb_svc_key_event_request {
- __le16 key_code;
-#define GB_KEYCODE_ARA 0x00
-
- __u8 key_event;
-#define GB_SVC_KEY_RELEASED 0x00
-#define GB_SVC_KEY_PRESSED 0x01
-} __packed;
-
-#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
-
-struct gb_svc_pwrmon_rail_count_get_response {
- __u8 rail_count;
-} __packed;
-
-#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32
-
-struct gb_svc_pwrmon_rail_names_get_response {
- __u8 status;
- __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
-} __packed;
-
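/*
 * A minimal sketch, assuming memcpy() from <linux/string.h>: the rail
 * names response carries rail_count fixed-width entries back to back,
 * so the i-th name starts at resp->name[i]. The copy is bounded and
 * terminated defensively in case a name fills its whole slot; the
 * helper name is hypothetical.
 */
static inline void
example_copy_rail_name(const struct gb_svc_pwrmon_rail_names_get_response *resp,
		       unsigned int i,
		       char buf[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE + 1])
{
	memcpy(buf, resp->name[i], GB_SVC_PWRMON_RAIL_NAME_BUFSIZE);
	buf[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE] = '\0';
}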
-#define GB_SVC_PWRMON_TYPE_CURR 0x01
-#define GB_SVC_PWRMON_TYPE_VOL 0x02
-#define GB_SVC_PWRMON_TYPE_PWR 0x03
-
-#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
-#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
-#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
-#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
-
-struct gb_svc_pwrmon_sample_get_request {
- __u8 rail_id;
- __u8 measurement_type;
-} __packed;
-
-struct gb_svc_pwrmon_sample_get_response {
- __u8 result;
- __le32 measurement;
-} __packed;
-
-struct gb_svc_pwrmon_intf_sample_get_request {
- __u8 intf_id;
- __u8 measurement_type;
-} __packed;
-
-struct gb_svc_pwrmon_intf_sample_get_response {
- __u8 result;
- __le32 measurement;
-} __packed;
-
-#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
-
-struct gb_svc_module_inserted_request {
- __u8 primary_intf_id;
- __u8 intf_count;
- __le16 flags;
-} __packed;
-/* module_inserted response has no payload */
-
-struct gb_svc_module_removed_request {
- __u8 primary_intf_id;
-} __packed;
-/* module_removed response has no payload */
-
-struct gb_svc_intf_activate_request {
- __u8 intf_id;
-} __packed;
-
-#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
-#define GB_SVC_INTF_TYPE_DUMMY 0x01
-#define GB_SVC_INTF_TYPE_UNIPRO 0x02
-#define GB_SVC_INTF_TYPE_GREYBUS 0x03
-
-struct gb_svc_intf_activate_response {
- __u8 status;
- __u8 intf_type;
-} __packed;
-
-struct gb_svc_intf_resume_request {
- __u8 intf_id;
-} __packed;
-
-struct gb_svc_intf_resume_response {
- __u8 status;
-} __packed;
-
-#define GB_SVC_INTF_MAILBOX_NONE 0x00
-#define GB_SVC_INTF_MAILBOX_AP 0x01
-#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
-
-struct gb_svc_intf_mailbox_event_request {
- __u8 intf_id;
- __le16 result_code;
- __le32 mailbox;
-} __packed;
-/* intf_mailbox_event response has no payload */
-
-struct gb_svc_intf_oops_request {
- __u8 intf_id;
- __u8 reason;
-} __packed;
-/* intf_oops response has no payload */
-
-
-/* RAW */
-
-/* Greybus raw request types */
-#define GB_RAW_TYPE_SEND 0x02
-
-struct gb_raw_send_request {
- __le32 len;
- __u8 data[0];
-} __packed;
-
-
-/* UART */
-
-/* Greybus UART operation types */
-#define GB_UART_TYPE_SEND_DATA 0x02
-#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
-#define GB_UART_TYPE_SET_LINE_CODING 0x04
-#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
-#define GB_UART_TYPE_SEND_BREAK 0x06
-#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
-#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
-#define GB_UART_TYPE_FLUSH_FIFOS 0x09
-
-/* Represents data from AP -> Module */
-struct gb_uart_send_data_request {
- __le16 size;
- __u8 data[0];
-} __packed;
-
-/* recv-data-request flags */
-#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
-#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
-#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
-#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
-
-/* Represents data from Module -> AP */
-struct gb_uart_recv_data_request {
- __le16 size;
- __u8 flags;
- __u8 data[0];
-} __packed;
-
-struct gb_uart_receive_credits_request {
- __le16 count;
-} __packed;
-
-struct gb_uart_set_line_coding_request {
- __le32 rate;
- __u8 format;
-#define GB_SERIAL_1_STOP_BITS 0
-#define GB_SERIAL_1_5_STOP_BITS 1
-#define GB_SERIAL_2_STOP_BITS 2
-
- __u8 parity;
-#define GB_SERIAL_NO_PARITY 0
-#define GB_SERIAL_ODD_PARITY 1
-#define GB_SERIAL_EVEN_PARITY 2
-#define GB_SERIAL_MARK_PARITY 3
-#define GB_SERIAL_SPACE_PARITY 4
-
- __u8 data_bits;
-
- __u8 flow_control;
-#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
-} __packed;
-
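/*
 * A minimal sketch: encoding a 115200 8N1 configuration with automatic
 * RTS/CTS flow control into a gb_uart_set_line_coding_request, to make
 * the field meanings above concrete. The helper name is hypothetical.
 */
static inline void example_fill_line_coding(struct gb_uart_set_line_coding_request *req)
{
	req->rate = cpu_to_le32(115200);
	req->format = GB_SERIAL_1_STOP_BITS;
	req->parity = GB_SERIAL_NO_PARITY;
	req->data_bits = 8;
	req->flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
}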
-/* output control lines */
-#define GB_UART_CTRL_DTR 0x01
-#define GB_UART_CTRL_RTS 0x02
-
-struct gb_uart_set_control_line_state_request {
- __u8 control;
-} __packed;
-
-struct gb_uart_set_break_request {
- __u8 state;
-} __packed;
-
-/* input control lines and line errors */
-#define GB_UART_CTRL_DCD 0x01
-#define GB_UART_CTRL_DSR 0x02
-#define GB_UART_CTRL_RI 0x04
-
-struct gb_uart_serial_state_request {
- __u8 control;
-} __packed;
-
-struct gb_uart_serial_flush_request {
- __u8 flags;
-#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
-#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
-} __packed;
-
-/* Loopback */
-
-/* Greybus loopback request types */
-#define GB_LOOPBACK_TYPE_PING 0x02
-#define GB_LOOPBACK_TYPE_TRANSFER 0x03
-#define GB_LOOPBACK_TYPE_SINK 0x04
-
-/*
- * Loopback request/response header format should be identical
- * to simplify bandwidth and data movement analysis.
- */
-struct gb_loopback_transfer_request {
- __le32 len;
- __le32 reserved0;
- __le32 reserved1;
- __u8 data[0];
-} __packed;
-
-struct gb_loopback_transfer_response {
- __le32 len;
- __le32 reserved0;
- __le32 reserved1;
- __u8 data[0];
-} __packed;
-
-/* SDIO */
-/* Greybus SDIO operation types */
-#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
-#define GB_SDIO_TYPE_SET_IOS 0x03
-#define GB_SDIO_TYPE_COMMAND 0x04
-#define GB_SDIO_TYPE_TRANSFER 0x05
-#define GB_SDIO_TYPE_EVENT 0x06
-
-/* get caps response: request has no payload */
-struct gb_sdio_get_caps_response {
- __le32 caps;
-#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
-#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
-#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
-#define GB_SDIO_CAP_MMC_HS 0x00000008
-#define GB_SDIO_CAP_SD_HS 0x00000010
-#define GB_SDIO_CAP_ERASE 0x00000020
-#define GB_SDIO_CAP_1_2V_DDR 0x00000040
-#define GB_SDIO_CAP_1_8V_DDR 0x00000080
-#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
-#define GB_SDIO_CAP_UHS_SDR12 0x00000200
-#define GB_SDIO_CAP_UHS_SDR25 0x00000400
-#define GB_SDIO_CAP_UHS_SDR50 0x00000800
-#define GB_SDIO_CAP_UHS_SDR104 0x00001000
-#define GB_SDIO_CAP_UHS_DDR50 0x00002000
-#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
-#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
-#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
-#define GB_SDIO_CAP_HS200_1_2V 0x00020000
-#define GB_SDIO_CAP_HS200_1_8V 0x00040000
-#define GB_SDIO_CAP_HS400_1_2V 0x00080000
-#define GB_SDIO_CAP_HS400_1_8V 0x00100000
-
- /* see possible values below at vdd */
- __le32 ocr;
- __le32 f_min;
- __le32 f_max;
- __le16 max_blk_count;
- __le16 max_blk_size;
-} __packed;
-
-/* set ios request: response has no payload */
-struct gb_sdio_set_ios_request {
- __le32 clock;
- __le32 vdd;
-#define GB_SDIO_VDD_165_195 0x00000001
-#define GB_SDIO_VDD_20_21 0x00000002
-#define GB_SDIO_VDD_21_22 0x00000004
-#define GB_SDIO_VDD_22_23 0x00000008
-#define GB_SDIO_VDD_23_24 0x00000010
-#define GB_SDIO_VDD_24_25 0x00000020
-#define GB_SDIO_VDD_25_26 0x00000040
-#define GB_SDIO_VDD_26_27 0x00000080
-#define GB_SDIO_VDD_27_28 0x00000100
-#define GB_SDIO_VDD_28_29 0x00000200
-#define GB_SDIO_VDD_29_30 0x00000400
-#define GB_SDIO_VDD_30_31 0x00000800
-#define GB_SDIO_VDD_31_32 0x00001000
-#define GB_SDIO_VDD_32_33 0x00002000
-#define GB_SDIO_VDD_33_34 0x00004000
-#define GB_SDIO_VDD_34_35 0x00008000
-#define GB_SDIO_VDD_35_36 0x00010000
-
- __u8 bus_mode;
-#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
-#define GB_SDIO_BUSMODE_PUSHPULL 0x01
-
- __u8 power_mode;
-#define GB_SDIO_POWER_OFF 0x00
-#define GB_SDIO_POWER_UP 0x01
-#define GB_SDIO_POWER_ON 0x02
-#define GB_SDIO_POWER_UNDEFINED 0x03
-
- __u8 bus_width;
-#define GB_SDIO_BUS_WIDTH_1 0x00
-#define GB_SDIO_BUS_WIDTH_4 0x02
-#define GB_SDIO_BUS_WIDTH_8 0x03
-
- __u8 timing;
-#define GB_SDIO_TIMING_LEGACY 0x00
-#define GB_SDIO_TIMING_MMC_HS 0x01
-#define GB_SDIO_TIMING_SD_HS 0x02
-#define GB_SDIO_TIMING_UHS_SDR12 0x03
-#define GB_SDIO_TIMING_UHS_SDR25 0x04
-#define GB_SDIO_TIMING_UHS_SDR50 0x05
-#define GB_SDIO_TIMING_UHS_SDR104 0x06
-#define GB_SDIO_TIMING_UHS_DDR50 0x07
-#define GB_SDIO_TIMING_MMC_DDR52 0x08
-#define GB_SDIO_TIMING_MMC_HS200 0x09
-#define GB_SDIO_TIMING_MMC_HS400 0x0A
-
- __u8 signal_voltage;
-#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
-#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
-#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
-
- __u8 drv_type;
-#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
-#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
-#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
-#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
-} __packed;
-
-/* command request */
-struct gb_sdio_command_request {
- __u8 cmd;
- __u8 cmd_flags;
-#define GB_SDIO_RSP_NONE 0x00
-#define GB_SDIO_RSP_PRESENT 0x01
-#define GB_SDIO_RSP_136 0x02
-#define GB_SDIO_RSP_CRC 0x04
-#define GB_SDIO_RSP_BUSY 0x08
-#define GB_SDIO_RSP_OPCODE 0x10
-
- __u8 cmd_type;
-#define GB_SDIO_CMD_AC 0x00
-#define GB_SDIO_CMD_ADTC 0x01
-#define GB_SDIO_CMD_BC 0x02
-#define GB_SDIO_CMD_BCR 0x03
-
- __le32 cmd_arg;
- __le16 data_blocks;
- __le16 data_blksz;
-} __packed;
-
-struct gb_sdio_command_response {
- __le32 resp[4];
-} __packed;
-
-/* transfer request */
-struct gb_sdio_transfer_request {
- __u8 data_flags;
-#define GB_SDIO_DATA_WRITE 0x01
-#define GB_SDIO_DATA_READ 0x02
-#define GB_SDIO_DATA_STREAM 0x04
-
- __le16 data_blocks;
- __le16 data_blksz;
- __u8 data[0];
-} __packed;
-
-struct gb_sdio_transfer_response {
- __le16 data_blocks;
- __le16 data_blksz;
- __u8 data[0];
-} __packed;
-
-/* event request: generated by module and is defined as unidirectional */
-struct gb_sdio_event_request {
- __u8 event;
-#define GB_SDIO_CARD_INSERTED 0x01
-#define GB_SDIO_CARD_REMOVED 0x02
-#define GB_SDIO_WP 0x04
-} __packed;
-
-/* Camera */
-
-/* Greybus Camera request types */
-#define GB_CAMERA_TYPE_CAPABILITIES 0x02
-#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
-#define GB_CAMERA_TYPE_CAPTURE 0x04
-#define GB_CAMERA_TYPE_FLUSH 0x05
-#define GB_CAMERA_TYPE_METADATA 0x06
-
-#define GB_CAMERA_MAX_STREAMS 4
-#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
-
-/* Greybus Camera Configure Streams request payload */
-struct gb_camera_stream_config_request {
- __le16 width;
- __le16 height;
- __le16 format;
- __le16 padding;
-} __packed;
-
-struct gb_camera_configure_streams_request {
- __u8 num_streams;
- __u8 flags;
-#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
- __le16 padding;
- struct gb_camera_stream_config_request config[0];
-} __packed;
-
-/* Greybus Camera Configure Streams response payload */
-struct gb_camera_stream_config_response {
- __le16 width;
- __le16 height;
- __le16 format;
- __u8 virtual_channel;
- __u8 data_type[2];
- __le16 max_pkt_size;
- __u8 padding;
- __le32 max_size;
-} __packed;
-
-struct gb_camera_configure_streams_response {
- __u8 num_streams;
-#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
- __u8 flags;
- __u8 padding[2];
- __le32 data_rate;
- struct gb_camera_stream_config_response config[0];
-};
-
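/*
 * A minimal sketch: filling a configure-streams request for a single
 * stream. Setting GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY asks the module
 * to validate the configuration without applying it; on return, the
 * GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED flag indicates the module changed
 * the requested parameters. The request must be allocated with room for
 * one config entry; the helper name is hypothetical.
 */
static inline void
example_fill_one_stream(struct gb_camera_configure_streams_request *req,
			u16 width, u16 height, u16 format)
{
	req->num_streams = 1;
	req->flags = GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
	req->padding = 0;
	req->config[0].width = cpu_to_le16(width);
	req->config[0].height = cpu_to_le16(height);
	req->config[0].format = cpu_to_le16(format);
	req->config[0].padding = 0;
}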
-/* Greybus Camera Capture request payload - response has no payload */
-struct gb_camera_capture_request {
- __le32 request_id;
- __u8 streams;
- __u8 padding;
- __le16 num_frames;
- __u8 settings[0];
-} __packed;
-
-/* Greybus Camera Flush response payload - request has no payload */
-struct gb_camera_flush_response {
- __le32 request_id;
-} __packed;
-
-/* Greybus Camera Metadata request payload - operation has no response */
-struct gb_camera_metadata_request {
- __le32 request_id;
- __le16 frame_number;
- __u8 stream;
- __u8 padding;
- __u8 metadata[0];
-} __packed;
-
-/* Lights */
-
-/* Greybus Lights request types */
-#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
-#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
-#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
-#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
-#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
-#define GB_LIGHTS_TYPE_SET_BLINK 0x07
-#define GB_LIGHTS_TYPE_SET_COLOR 0x08
-#define GB_LIGHTS_TYPE_SET_FADE 0x09
-#define GB_LIGHTS_TYPE_EVENT 0x0A
-#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
-#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
-#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
-#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
-
-/* Greybus Light modes */
-
-/*
- * if you add any specific mode below, update also the
- * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
- */
-#define GB_CHANNEL_MODE_NONE 0x00000000
-#define GB_CHANNEL_MODE_BATTERY 0x00000001
-#define GB_CHANNEL_MODE_POWER 0x00000002
-#define GB_CHANNEL_MODE_WIRELESS 0x00000004
-#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
-#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
-#define GB_CHANNEL_MODE_BUTTONS 0x00000020
-#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
-#define GB_CHANNEL_MODE_ATTENTION 0x00000080
-#define GB_CHANNEL_MODE_FLASH 0x00000100
-#define GB_CHANNEL_MODE_TORCH 0x00000200
-#define GB_CHANNEL_MODE_INDICATOR 0x00000400
-
-/* Lights Mode valid bit values */
-#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
-#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
-
-/* Greybus Light Channels Flags */
-#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
-#define GB_LIGHT_CHANNEL_FADER 0x00000002
-#define GB_LIGHT_CHANNEL_BLINK 0x00000004
-
-/* get count of lights in module */
-struct gb_lights_get_lights_response {
- __u8 lights_count;
-} __packed;
-
-/* light config request payload */
-struct gb_lights_get_light_config_request {
- __u8 id;
-} __packed;
-
-/* light config response payload */
-struct gb_lights_get_light_config_response {
- __u8 channel_count;
- __u8 name[32];
-} __packed;
-
-/* channel config request payload */
-struct gb_lights_get_channel_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel flash config request payload */
-struct gb_lights_get_channel_flash_config_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* channel config response payload */
-struct gb_lights_get_channel_config_response {
- __u8 max_brightness;
- __le32 flags;
- __le32 color;
- __u8 color_name[32];
- __le32 mode;
- __u8 mode_name[32];
-} __packed;
-
-/* channel flash config response payload */
-struct gb_lights_get_channel_flash_config_response {
- __le32 intensity_min_uA;
- __le32 intensity_max_uA;
- __le32 intensity_step_uA;
- __le32 timeout_min_us;
- __le32 timeout_max_us;
- __le32 timeout_step_us;
-} __packed;
-
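/*
 * A minimal sketch, assuming the kernel's rounddown() macro: a caller
 * would normally clamp a requested flash current to the advertised
 * [min, max] range and snap it to the step grid before sending a
 * SET_FLASH_INTENSITY request. The helper name is hypothetical.
 */
static inline u32
example_align_flash_intensity(const struct gb_lights_get_channel_flash_config_response *conf,
			      u32 requested_uA)
{
	u32 min = le32_to_cpu(conf->intensity_min_uA);
	u32 max = le32_to_cpu(conf->intensity_max_uA);
	u32 step = le32_to_cpu(conf->intensity_step_uA);

	if (requested_uA < min)
		return min;
	if (requested_uA > max)
		requested_uA = max;
	if (step)
		requested_uA = min + rounddown(requested_uA - min, step);

	return requested_uA;
}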
-/* blink request payload: response has no payload */
-struct gb_lights_blink_request {
- __u8 light_id;
- __u8 channel_id;
- __le16 time_on_ms;
- __le16 time_off_ms;
-} __packed;
-
-/* set brightness request payload: response has no payload */
-struct gb_lights_set_brightness_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 brightness;
-} __packed;
-
-/* set color request payload: response has no payload */
-struct gb_lights_set_color_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 color;
-} __packed;
-
-/* set fade request payload: response has no payload */
-struct gb_lights_set_fade_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 fade_in;
- __u8 fade_out;
-} __packed;
-
-/* event request: generated by module */
-struct gb_lights_event_request {
- __u8 light_id;
- __u8 event;
-#define GB_LIGHTS_LIGHT_CONFIG 0x01
-} __packed;
-
-/* set flash intensity request payload: response has no payload */
-struct gb_lights_set_flash_intensity_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 intensity_uA;
-} __packed;
-
-/* set flash strobe state request payload: response has no payload */
-struct gb_lights_set_flash_strobe_request {
- __u8 light_id;
- __u8 channel_id;
- __u8 state;
-} __packed;
-
-/* set flash timeout request payload: response has no payload */
-struct gb_lights_set_flash_timeout_request {
- __u8 light_id;
- __u8 channel_id;
- __le32 timeout_us;
-} __packed;
-
-/* get flash fault request payload */
-struct gb_lights_get_flash_fault_request {
- __u8 light_id;
- __u8 channel_id;
-} __packed;
-
-/* get flash fault response payload */
-struct gb_lights_get_flash_fault_response {
- __le32 fault;
-#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
-#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
-#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
-#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
-#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
-#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
-#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
-#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
-#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
-} __packed;
-
-/* Audio */
-
-#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
-#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
-#define GB_AUDIO_TYPE_GET_CONTROL 0x04
-#define GB_AUDIO_TYPE_SET_CONTROL 0x05
-#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
-#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
-#define GB_AUDIO_TYPE_GET_PCM 0x08
-#define GB_AUDIO_TYPE_SET_PCM 0x09
-#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
- /* 0x0b unused */
-#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
-#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
-#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
- /* 0x0f unused */
-#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
-#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
-#define GB_AUDIO_TYPE_JACK_EVENT 0x12
-#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
-#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
-#define GB_AUDIO_TYPE_SEND_DATA 0x15
-
-/* Module must be able to buffer 10ms of audio data, minimum */
-#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
-
-#define GB_AUDIO_PCM_NAME_MAX 32
-#define AUDIO_DAI_NAME_MAX 32
-#define AUDIO_CONTROL_NAME_MAX 32
-#define AUDIO_CTL_ELEM_NAME_MAX 44
-#define AUDIO_ENUM_NAME_MAX 64
-#define AUDIO_WIDGET_NAME_MAX 32
-
-/* See SNDRV_PCM_FMTBIT_* in Linux source */
-#define GB_AUDIO_PCM_FMT_S8 BIT(0)
-#define GB_AUDIO_PCM_FMT_U8 BIT(1)
-#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
-#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
-#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
-#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
-#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
-#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
-#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
-#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
-#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
-#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
-#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
-#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
-
-/* See SNDRV_PCM_RATE_* in Linux source */
-#define GB_AUDIO_PCM_RATE_5512 BIT(0)
-#define GB_AUDIO_PCM_RATE_8000 BIT(1)
-#define GB_AUDIO_PCM_RATE_11025 BIT(2)
-#define GB_AUDIO_PCM_RATE_16000 BIT(3)
-#define GB_AUDIO_PCM_RATE_22050 BIT(4)
-#define GB_AUDIO_PCM_RATE_32000 BIT(5)
-#define GB_AUDIO_PCM_RATE_44100 BIT(6)
-#define GB_AUDIO_PCM_RATE_48000 BIT(7)
-#define GB_AUDIO_PCM_RATE_64000 BIT(8)
-#define GB_AUDIO_PCM_RATE_88200 BIT(9)
-#define GB_AUDIO_PCM_RATE_96000 BIT(10)
-#define GB_AUDIO_PCM_RATE_176400 BIT(11)
-#define GB_AUDIO_PCM_RATE_192000 BIT(12)
-
-#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
-#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
-
-#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
-#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
-
-/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
-#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
-#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
-#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
-#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
-
-/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
-#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
-#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
-#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
-#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
-#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
-#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
-#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
-
-/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
-#define GB_AUDIO_ACCESS_READ BIT(0)
-#define GB_AUDIO_ACCESS_WRITE BIT(1)
-#define GB_AUDIO_ACCESS_VOLATILE BIT(2)
-#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3)
-#define GB_AUDIO_ACCESS_TLV_READ BIT(4)
-#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5)
-#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6)
-#define GB_AUDIO_ACCESS_INACTIVE BIT(7)
-#define GB_AUDIO_ACCESS_LOCK BIT(8)
-#define GB_AUDIO_ACCESS_OWNER BIT(9)
-
-/* enum snd_soc_dapm_type */
-#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0
-#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1
-#define GB_AUDIO_WIDGET_TYPE_MUX 0x2
-#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3
-#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4
-#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5
-#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6
-#define GB_AUDIO_WIDGET_TYPE_PGA 0x7
-#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8
-#define GB_AUDIO_WIDGET_TYPE_ADC 0x9
-#define GB_AUDIO_WIDGET_TYPE_DAC 0xa
-#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb
-#define GB_AUDIO_WIDGET_TYPE_MIC 0xc
-#define GB_AUDIO_WIDGET_TYPE_HP 0xd
-#define GB_AUDIO_WIDGET_TYPE_SPK 0xe
-#define GB_AUDIO_WIDGET_TYPE_LINE 0xf
-#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10
-#define GB_AUDIO_WIDGET_TYPE_VMID 0x11
-#define GB_AUDIO_WIDGET_TYPE_PRE 0x12
-#define GB_AUDIO_WIDGET_TYPE_POST 0x13
-#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14
-#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15
-#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16
-#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17
-#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18
-#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19
-#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a
-#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b
-#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c
-
-#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01
-#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02
-
-#define GB_AUDIO_JACK_EVENT_INSERTION 0x1
-#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2
-
-#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1
-#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2
-
-#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1
-#define GB_AUDIO_STREAMING_EVENT_HALT 0x2
-#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3
-#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4
-#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5
-#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6
-#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7
-#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8
-#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9
-
-#define GB_AUDIO_INVALID_INDEX 0xff
-
-/* enum snd_jack_types */
-#define GB_AUDIO_JACK_HEADPHONE 0x0000001
-#define GB_AUDIO_JACK_MICROPHONE 0x0000002
-#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \
- GB_AUDIO_JACK_MICROPHONE)
-#define GB_AUDIO_JACK_LINEOUT 0x0000004
-#define GB_AUDIO_JACK_MECHANICAL 0x0000008
-#define GB_AUDIO_JACK_VIDEOOUT 0x0000010
-#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \
- GB_AUDIO_JACK_VIDEOOUT)
-#define GB_AUDIO_JACK_LINEIN 0x0000020
-#define GB_AUDIO_JACK_OC_HPHL 0x0000040
-#define GB_AUDIO_JACK_OC_HPHR 0x0000080
-#define GB_AUDIO_JACK_MICROPHONE2 0x0000200
-#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \
- GB_AUDIO_JACK_MICROPHONE | \
- GB_AUDIO_JACK_MICROPHONE2)
-/* Kept separate from switches to facilitate implementation */
-#define GB_AUDIO_JACK_BTN_0 0x4000000
-#define GB_AUDIO_JACK_BTN_1 0x2000000
-#define GB_AUDIO_JACK_BTN_2 0x1000000
-#define GB_AUDIO_JACK_BTN_3 0x0800000
-
-struct gb_audio_pcm {
- __u8 stream_name[GB_AUDIO_PCM_NAME_MAX];
- __le32 formats; /* GB_AUDIO_PCM_FMT_* */
- __le32 rates; /* GB_AUDIO_PCM_RATE_* */
- __u8 chan_min;
- __u8 chan_max;
- __u8 sig_bits; /* number of bits of content */
-} __packed;
-
-struct gb_audio_dai {
- __u8 name[AUDIO_DAI_NAME_MAX];
- __le16 data_cport;
- struct gb_audio_pcm capture;
- struct gb_audio_pcm playback;
-} __packed;
-
-struct gb_audio_integer {
- __le32 min;
- __le32 max;
- __le32 step;
-} __packed;
-
-struct gb_audio_integer64 {
- __le64 min;
- __le64 max;
- __le64 step;
-} __packed;
-
-struct gb_audio_enumerated {
- __le32 items;
- __le16 names_length;
- __u8 names[0];
-} __packed;
-
-struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
- __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */
- __le16 dimen[4];
- union {
- struct gb_audio_integer integer;
- struct gb_audio_integer64 integer64;
- struct gb_audio_enumerated enumerated;
- } value;
-} __packed;
-
-struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
- __le64 timestamp; /* XXX needed? */
- union {
- __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */
- __le64 integer64_value[2];
- __le32 enumerated_item[2];
- } value;
-} __packed;
-
-struct gb_audio_control {
- __u8 name[AUDIO_CONTROL_NAME_MAX];
- __u8 id; /* 0-63 */
- __u8 iface; /* GB_AUDIO_IFACE_* */
- __le16 data_cport;
- __le32 access; /* GB_AUDIO_ACCESS_* */
- __u8 count; /* count of same elements */
- __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */
- struct gb_audio_ctl_elem_info info;
-} __packed;
-
-struct gb_audio_widget {
- __u8 name[AUDIO_WIDGET_NAME_MAX];
- __u8 sname[AUDIO_WIDGET_NAME_MAX];
- __u8 id;
- __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
- __u8 state; /* GB_AUDIO_WIDGET_STATE_* */
- __u8 ncontrols;
- struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
-} __packed;
-
-struct gb_audio_route {
- __u8 source_id; /* widget id */
- __u8 destination_id; /* widget id */
- __u8 control_id; /* 0-63 */
- __u8 index; /* Selection within the control */
-} __packed;
-
-struct gb_audio_topology {
- __u8 num_dais;
- __u8 num_controls;
- __u8 num_widgets;
- __u8 num_routes;
- __le32 size_dais;
- __le32 size_controls;
- __le32 size_widgets;
- __le32 size_routes;
- __le32 jack_type;
- /*
- * struct gb_audio_dai dai[num_dais];
- * struct gb_audio_control controls[num_controls];
- * struct gb_audio_widget widgets[num_widgets];
- * struct gb_audio_route routes[num_routes];
- */
- __u8 data[0];
-} __packed;
-
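/*
 * A minimal sketch: the data[] blob concatenates the four variable-length
 * regions in the order listed in the comment above, so each region can be
 * located from the size_* fields. Entries inside the controls and widgets
 * regions are themselves variable-length, so walking within a region still
 * needs per-entry sizes. Pointer and helper names are hypothetical.
 */
static inline void
example_locate_topology_regions(struct gb_audio_topology *top,
				struct gb_audio_dai **dais,
				struct gb_audio_control **controls,
				struct gb_audio_widget **widgets,
				struct gb_audio_route **routes)
{
	__u8 *p = top->data;

	*dais = (struct gb_audio_dai *)p;
	p += le32_to_cpu(top->size_dais);
	*controls = (struct gb_audio_control *)p;
	p += le32_to_cpu(top->size_controls);
	*widgets = (struct gb_audio_widget *)p;
	p += le32_to_cpu(top->size_widgets);
	*routes = (struct gb_audio_route *)p;
}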
-struct gb_audio_get_topology_size_response {
- __le16 size;
-} __packed;
-
-struct gb_audio_get_topology_response {
- struct gb_audio_topology topology;
-} __packed;
-
-struct gb_audio_get_control_request {
- __u8 control_id;
- __u8 index;
-} __packed;
-
-struct gb_audio_get_control_response {
- struct gb_audio_ctl_elem_value value;
-} __packed;
-
-struct gb_audio_set_control_request {
- __u8 control_id;
- __u8 index;
- struct gb_audio_ctl_elem_value value;
-} __packed;
-
-struct gb_audio_enable_widget_request {
- __u8 widget_id;
-} __packed;
-
-struct gb_audio_disable_widget_request {
- __u8 widget_id;
-} __packed;
-
-struct gb_audio_get_pcm_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_get_pcm_response {
- __le32 format;
- __le32 rate;
- __u8 channels;
- __u8 sig_bits;
-} __packed;
-
-struct gb_audio_set_pcm_request {
- __le16 data_cport;
- __le32 format;
- __le32 rate;
- __u8 channels;
- __u8 sig_bits;
-} __packed;
-
-struct gb_audio_set_tx_data_size_request {
- __le16 data_cport;
- __le16 size;
-} __packed;
-
-struct gb_audio_activate_tx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_deactivate_tx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_set_rx_data_size_request {
- __le16 data_cport;
- __le16 size;
-} __packed;
-
-struct gb_audio_activate_rx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_deactivate_rx_request {
- __le16 data_cport;
-} __packed;
-
-struct gb_audio_jack_event_request {
- __u8 widget_id;
- __u8 jack_attribute;
- __u8 event;
-} __packed;
-
-struct gb_audio_button_event_request {
- __u8 widget_id;
- __u8 button_id;
- __u8 event;
-} __packed;
-
-struct gb_audio_streaming_event_request {
- __le16 data_cport;
- __u8 event;
-} __packed;
-
-struct gb_audio_send_data_request {
- __le64 timestamp;
- __u8 data[0];
-} __packed;
-
-
-/* Log */
-
-/* operations */
-#define GB_LOG_TYPE_SEND_LOG 0x02
-
-/* length */
-#define GB_LOG_MAX_LEN 1024
-
-struct gb_log_send_log_request {
- __le16 len;
- __u8 msg[0];
-} __packed;
-
-#endif /* __GREYBUS_PROTOCOLS_H */
-
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
deleted file mode 100644
index 6cf024a20a58..000000000000
--- a/drivers/staging/greybus/hd.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Host Device
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#ifndef __HD_H
-#define __HD_H
-
-struct gb_host_device;
-struct gb_message;
-
-struct gb_hd_driver {
- size_t hd_priv_size;
-
- int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
- unsigned long flags);
- void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
- unsigned long flags);
- int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
- int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
- u8 phase, unsigned int timeout);
- int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
- size_t peer_space, unsigned int timeout);
- int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
-
- int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
- struct gb_message *message, gfp_t gfp_mask);
- void (*message_cancel)(struct gb_message *message);
- int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
- int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
- int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
- bool async);
-};
-
-struct gb_host_device {
- struct device dev;
- int bus_id;
- const struct gb_hd_driver *driver;
-
- struct list_head modules;
- struct list_head connections;
- struct ida cport_id_map;
-
- /* Number of CPorts supported by the UniPro IP */
- size_t num_cports;
-
- /* Host device buffer constraints */
- size_t buffer_size_max;
-
- struct gb_svc *svc;
- /* Private data for the host driver */
- unsigned long hd_priv[0] __aligned(sizeof(s64));
-};
-#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
-
-int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
-void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
-int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
- unsigned long flags);
-void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
-
-struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
- struct device *parent,
- size_t buffer_size_max,
- size_t num_cports);
-int gb_hd_add(struct gb_host_device *hd);
-void gb_hd_del(struct gb_host_device *hd);
-void gb_hd_shutdown(struct gb_host_device *hd);
-void gb_hd_put(struct gb_host_device *hd);
-int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
- bool in_irq);
-
-int gb_hd_init(void);
-void gb_hd_exit(void);
-
-#endif /* __HD_H */
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 8ab810bf5716..04bfd9110502 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -12,8 +12,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
/* Greybus HID device's structure */
struct gb_hid {
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index 7bb85a75d3b1..ab06fc3b9e7e 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_i2c_device {
@@ -31,7 +31,14 @@ static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
return gb_i2c_functionality; /* All bits the same for now */
}
-static int gb_i2c_functionality_operation(struct gb_i2c_device *gb_i2c_dev)
+/*
+ * Do initial setup of the i2c device. This includes verifying we
+ * can support it (based on the protocol version it advertises).
+ * If that's OK, we get and cache its functionality bits.
+ *
+ * Note: gb_i2c_dev->connection is assumed to be valid.
+ */
+static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
{
struct gb_i2c_functionality_response response;
u32 functionality;
@@ -235,19 +242,6 @@ static const struct i2c_algorithm gb_i2c_algorithm = {
.functionality = gb_i2c_functionality,
};
-/*
- * Do initial setup of the i2c device. This includes verifying we
- * can support it (based on the protocol version it advertises).
- * If that's OK, we get and cached its functionality bits.
- *
- * Note: gb_i2c_dev->connection is assumed to have been valid.
- */
-static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
-{
- /* Assume the functionality never changes, just get it once */
- return gb_i2c_functionality_operation(gb_i2c_dev);
-}
-
static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
deleted file mode 100644
index 1c00c5bb3ec9..000000000000
--- a/drivers/staging/greybus/interface.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Interface Block code
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __INTERFACE_H
-#define __INTERFACE_H
-
-enum gb_interface_type {
- GB_INTERFACE_TYPE_INVALID = 0,
- GB_INTERFACE_TYPE_UNKNOWN,
- GB_INTERFACE_TYPE_DUMMY,
- GB_INTERFACE_TYPE_UNIPRO,
- GB_INTERFACE_TYPE_GREYBUS,
-};
-
-#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
-#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
-#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
-#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
-#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
-#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
-#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
-
-struct gb_interface {
- struct device dev;
- struct gb_control *control;
-
- struct list_head bundles;
- struct list_head module_node;
- struct list_head manifest_descs;
- u8 interface_id; /* Physical location within the Endo */
- u8 device_id;
- u8 features; /* Feature flags set in the manifest */
-
- enum gb_interface_type type;
-
- u32 ddbl1_manufacturer_id;
- u32 ddbl1_product_id;
- u32 vendor_id;
- u32 product_id;
- u64 serial_number;
-
- struct gb_host_device *hd;
- struct gb_module *module;
-
- unsigned long quirks;
-
- struct mutex mutex;
-
- bool disconnected;
-
- bool ejected;
- bool removed;
- bool active;
- bool enabled;
- bool mode_switch;
- bool dme_read;
-
- struct work_struct mode_switch_work;
- struct completion mode_switch_completion;
-};
-#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
-
-struct gb_interface *gb_interface_create(struct gb_module *module,
- u8 interface_id);
-int gb_interface_activate(struct gb_interface *intf);
-void gb_interface_deactivate(struct gb_interface *intf);
-int gb_interface_enable(struct gb_interface *intf);
-void gb_interface_disable(struct gb_interface *intf);
-int gb_interface_add(struct gb_interface *intf);
-void gb_interface_del(struct gb_interface *intf);
-void gb_interface_put(struct gb_interface *intf);
-void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
- u32 mailbox);
-
-int gb_interface_request_mode_switch(struct gb_interface *intf);
-
-#endif /* __INTERFACE_H */
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 010ae1e9c7fb..d6ba25f21d80 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -11,11 +11,9 @@
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
#include <media/v4l2-flash-led-class.h>
-#include "greybus.h"
-#include "greybus_protocols.h"
-
#define NAMES_MAX 32
struct gb_channel {
@@ -1098,21 +1096,21 @@ static void gb_lights_channel_release(struct gb_channel *channel)
static void gb_lights_light_release(struct gb_light *light)
{
int i;
- int count;
light->ready = false;
- count = light->channels_count;
-
if (light->has_flash)
gb_lights_light_v4l2_unregister(light);
+ light->has_flash = false;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < light->channels_count; i++)
gb_lights_channel_release(&light->channels[i]);
- light->channels_count--;
- }
+ light->channels_count = 0;
+
kfree(light->channels);
+ light->channels = NULL;
kfree(light->name);
+ light->name = NULL;
}
static void gb_lights_release(struct gb_lights *glights)
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 15a88574dbb0..971f36dccac6 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -9,8 +9,7 @@
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_log {
struct gb_connection *connection;
@@ -31,14 +30,14 @@ static int gb_log_request_handler(struct gb_operation *op)
/* Verify size of payload */
if (op->request->payload_size < sizeof(*receive)) {
dev_err(dev, "log request too small (%zu < %zu)\n",
- op->request->payload_size, sizeof(*receive));
+ op->request->payload_size, sizeof(*receive));
return -EINVAL;
}
receive = op->request->payload;
len = le16_to_cpu(receive->len);
if (len != (op->request->payload_size - sizeof(*receive))) {
dev_err(dev, "log request wrong size %d vs %zu\n", len,
- (op->request->payload_size - sizeof(*receive)));
+ (op->request->payload_size - sizeof(*receive)));
return -EINVAL;
}
if (len == 0) {
@@ -83,7 +82,7 @@ static int gb_log_probe(struct gb_bundle *bundle,
return -ENOMEM;
connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
- gb_log_request_handler);
+ gb_log_request_handler);
if (IS_ERR(connection)) {
retval = PTR_ERR(connection);
goto error_free;
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 48d85ebe404a..583d9708a191 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -25,12 +25,9 @@
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
-
+#include <linux/greybus.h>
#include <asm/div64.h>
-#include "greybus.h"
-#include "connection.h"
-
#define NSEC_PER_DAY 86400000000000ULL
struct gb_loopback_stats {
@@ -882,7 +879,7 @@ static int gb_loopback_fn(void *data)
gb->type = 0;
gb->send_count = 0;
sysfs_notify(&gb->dev->kobj, NULL,
- "iteration_count");
+ "iteration_count");
dev_dbg(&bundle->dev, "load test complete\n");
} else {
dev_dbg(&bundle->dev,
@@ -1054,7 +1051,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
/* Allocate kfifo */
if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
- GFP_KERNEL)) {
+ GFP_KERNEL)) {
retval = -ENOMEM;
goto out_conn;
}
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
deleted file mode 100644
index f3c95a255631..000000000000
--- a/drivers/staging/greybus/manifest.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus manifest parsing
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __MANIFEST_H
-#define __MANIFEST_H
-
-struct gb_interface;
-bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
-
-#endif /* __MANIFEST_H */
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
deleted file mode 100644
index b1ebcc6636db..000000000000
--- a/drivers/staging/greybus/module.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Module code
- *
- * Copyright 2016 Google Inc.
- * Copyright 2016 Linaro Ltd.
- */
-
-#ifndef __MODULE_H
-#define __MODULE_H
-
-struct gb_module {
- struct device dev;
- struct gb_host_device *hd;
-
- struct list_head hd_node;
-
- u8 module_id;
- size_t num_interfaces;
-
- bool disconnected;
-
- struct gb_interface *interfaces[0];
-};
-#define to_gb_module(d) container_of(d, struct gb_module, dev)
-
-struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
- size_t num_interfaces);
-int gb_module_add(struct gb_module *module);
-void gb_module_del(struct gb_module *module);
-void gb_module_put(struct gb_module *module);
-
-#endif /* __MODULE_H */
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
deleted file mode 100644
index 40b7b02fff88..000000000000
--- a/drivers/staging/greybus/operation.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus operations
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#ifndef __OPERATION_H
-#define __OPERATION_H
-
-#include <linux/completion.h>
-
-struct gb_operation;
-
-/* The default amount of time a request is given to complete */
-#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
-
-/*
- * The top bit of the type in an operation message header indicates
- * whether the message is a request (bit clear) or response (bit set)
- */
-#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
-
-enum gb_operation_result {
- GB_OP_SUCCESS = 0x00,
- GB_OP_INTERRUPTED = 0x01,
- GB_OP_TIMEOUT = 0x02,
- GB_OP_NO_MEMORY = 0x03,
- GB_OP_PROTOCOL_BAD = 0x04,
- GB_OP_OVERFLOW = 0x05,
- GB_OP_INVALID = 0x06,
- GB_OP_RETRY = 0x07,
- GB_OP_NONEXISTENT = 0x08,
- GB_OP_UNKNOWN_ERROR = 0xfe,
- GB_OP_MALFUNCTION = 0xff,
-};
-
-#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
-#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
-
-/*
- * Protocol code should only examine the payload and payload_size fields, and
- * host-controller drivers may use the hcpriv field. All other fields are
- * intended to be private to the operations core code.
- */
-struct gb_message {
- struct gb_operation *operation;
- struct gb_operation_msg_hdr *header;
-
- void *payload;
- size_t payload_size;
-
- void *buffer;
-
- void *hcpriv;
-};
-
-#define GB_OPERATION_FLAG_INCOMING BIT(0)
-#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
-#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
-#define GB_OPERATION_FLAG_CORE BIT(3)
-
-#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
- GB_OPERATION_FLAG_UNIDIRECTIONAL)
-
-/*
- * A Greybus operation is a remote procedure call performed over a
- * connection between two UniPro interfaces.
- *
- * Every operation consists of a request message sent to the other
- * end of the connection coupled with a reply message returned to
- * the sender. Every operation has a type, whose interpretation is
- * dependent on the protocol associated with the connection.
- *
- * Only four things in an operation structure are intended to be
- * directly usable by protocol handlers: the operation's connection
- * pointer; the operation type; the request message payload (and
- * size); and the response message payload (and size). Note that a
- * message with a 0-byte payload has a null message payload pointer.
- *
- * In addition, every operation has a result, which is an errno
- * value. Protocol handlers access the operation result using
- * gb_operation_result().
- */
-typedef void (*gb_operation_callback)(struct gb_operation *);
-struct gb_operation {
- struct gb_connection *connection;
- struct gb_message *request;
- struct gb_message *response;
-
- unsigned long flags;
- u8 type;
- u16 id;
- int errno; /* Operation result */
-
- struct work_struct work;
- gb_operation_callback callback;
- struct completion completion;
- struct timer_list timer;
-
- struct kref kref;
- atomic_t waiters;
-
- int active;
- struct list_head links; /* connection->operations */
-
- void *private;
-};
-
-static inline bool
-gb_operation_is_incoming(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_INCOMING;
-}
-
-static inline bool
-gb_operation_is_unidirectional(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
-}
-
-static inline bool
-gb_operation_short_response_allowed(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
-}
-
-static inline bool gb_operation_is_core(struct gb_operation *operation)
-{
- return operation->flags & GB_OPERATION_FLAG_CORE;
-}
-
-void gb_connection_recv(struct gb_connection *connection,
- void *data, size_t size);
-
-int gb_operation_result(struct gb_operation *operation);
-
-size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
-struct gb_operation *
-gb_operation_create_flags(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, unsigned long flags,
- gfp_t gfp);
-
-static inline struct gb_operation *
-gb_operation_create(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, gfp_t gfp)
-{
- return gb_operation_create_flags(connection, type, request_size,
- response_size, 0, gfp);
-}
-
-struct gb_operation *
-gb_operation_create_core(struct gb_connection *connection,
- u8 type, size_t request_size,
- size_t response_size, unsigned long flags,
- gfp_t gfp);
-
-void gb_operation_get(struct gb_operation *operation);
-void gb_operation_put(struct gb_operation *operation);
-
-bool gb_operation_response_alloc(struct gb_operation *operation,
- size_t response_size, gfp_t gfp);
-
-int gb_operation_request_send(struct gb_operation *operation,
- gb_operation_callback callback,
- unsigned int timeout,
- gfp_t gfp);
-int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
- unsigned int timeout);
-static inline int
-gb_operation_request_send_sync(struct gb_operation *operation)
-{
- return gb_operation_request_send_sync_timeout(operation,
- GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-void gb_operation_cancel(struct gb_operation *operation, int errno);
-void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
-
-void greybus_message_sent(struct gb_host_device *hd,
- struct gb_message *message, int status);
-
-int gb_operation_sync_timeout(struct gb_connection *connection, int type,
- void *request, int request_size,
- void *response, int response_size,
- unsigned int timeout);
-int gb_operation_unidirectional_timeout(struct gb_connection *connection,
- int type, void *request, int request_size,
- unsigned int timeout);
-
-static inline int gb_operation_sync(struct gb_connection *connection, int type,
- void *request, int request_size,
- void *response, int response_size)
-{
- return gb_operation_sync_timeout(connection, type,
- request, request_size, response, response_size,
- GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-static inline int gb_operation_unidirectional(struct gb_connection *connection,
- int type, void *request, int request_size)
-{
- return gb_operation_unidirectional_timeout(connection, type,
- request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
-}
-
-static inline void *gb_operation_get_data(struct gb_operation *operation)
-{
- return operation->private;
-}
-
-static inline void gb_operation_set_data(struct gb_operation *operation,
- void *data)
-{
- operation->private = data;
-}
-
-int gb_operation_init(void);
-void gb_operation_exit(void);
-
-#endif /* !__OPERATION_H */
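/*
 * A minimal sketch of how a protocol driver typically uses the
 * synchronous helper declared above: issue a request with no payload and
 * read back a one-byte response. The operation type value and response
 * layout here are hypothetical stand-ins, not a real protocol definition.
 */
static inline int example_get_count(struct gb_connection *connection, u8 *count)
{
	struct {
		__u8 count;
	} __packed response;
	int ret;

	ret = gb_operation_sync(connection, 0x02 /* hypothetical type */,
				NULL, 0, &response, sizeof(response));
	if (ret)
		return ret;

	*count = response.count;

	return 0;
}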
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 34b40a409ea3..ec96f28887f9 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -10,8 +10,7 @@
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
#define PROP_MAX 32
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index 4a6d394b6c44..891a6a672378 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_pwm_chip {
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 838acbe84ca0..64a17dfe3b6e 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -13,8 +13,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/uaccess.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_raw {
struct gb_connection *connection;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index a097a8916b3b..68c5718be827 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -12,8 +12,8 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
struct gb_sdio_host {
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
index 47d896992b35..68e8d272db6d 100644
--- a/drivers/staging/greybus/spi.c
+++ b/drivers/staging/greybus/spi.c
@@ -7,8 +7,8 @@
*/
#include <linux/module.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#include "spilib.h"
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index 2e07c6b41334..fc27c52de74a 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -10,9 +10,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/greybus.h>
#include <linux/spi/spi.h>
-#include "greybus.h"
#include "spilib.h"
struct gb_spilib {
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
index 043d4d32c3ee..9d416839e3be 100644
--- a/drivers/staging/greybus/spilib.h
+++ b/drivers/staging/greybus/spilib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Greybus SPI library header
*
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
deleted file mode 100644
index ad01783bac9c..000000000000
--- a/drivers/staging/greybus/svc.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus SVC code
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __SVC_H
-#define __SVC_H
-
-#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
-#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
-#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
-
-enum gb_svc_state {
- GB_SVC_STATE_RESET,
- GB_SVC_STATE_PROTOCOL_VERSION,
- GB_SVC_STATE_SVC_HELLO,
-};
-
-enum gb_svc_watchdog_bite {
- GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
- GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
-};
-
-struct gb_svc_watchdog;
-
-struct svc_debugfs_pwrmon_rail {
- u8 id;
- struct gb_svc *svc;
-};
-
-struct gb_svc {
- struct device dev;
-
- struct gb_host_device *hd;
- struct gb_connection *connection;
- enum gb_svc_state state;
- struct ida device_id_map;
- struct workqueue_struct *wq;
-
- u16 endo_id;
- u8 ap_intf_id;
-
- u8 protocol_major;
- u8 protocol_minor;
-
- struct gb_svc_watchdog *watchdog;
- enum gb_svc_watchdog_bite action;
-
- struct dentry *debugfs_dentry;
- struct svc_debugfs_pwrmon_rail *pwrmon_rails;
-};
-#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
-
-struct gb_svc *gb_svc_create(struct gb_host_device *hd);
-int gb_svc_add(struct gb_svc *svc);
-void gb_svc_del(struct gb_svc *svc);
-void gb_svc_put(struct gb_svc *svc);
-
-int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
- u8 measurement_type, u32 *value);
-int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
-int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
- u8 intf2_id, u8 dev2_id);
-void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
-int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
- u8 intf2_id, u16 cport2_id, u8 cport_flags);
-void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
- u8 intf2_id, u16 cport2_id);
-int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
-int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
-int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
-int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
-
-int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
- u32 *value);
-int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
- u32 value);
-int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
- u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
- u8 tx_amplitude, u8 tx_hs_equalizer,
- u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
- u8 flags, u32 quirks,
- struct gb_svc_l2_timer_cfg *local,
- struct gb_svc_l2_timer_cfg *remote);
-int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
-int gb_svc_ping(struct gb_svc *svc);
-int gb_svc_watchdog_create(struct gb_svc *svc);
-void gb_svc_watchdog_destroy(struct gb_svc *svc);
-bool gb_svc_watchdog_enabled(struct gb_svc *svc);
-int gb_svc_watchdog_enable(struct gb_svc *svc);
-int gb_svc_watchdog_disable(struct gb_svc *svc);
-
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
-#endif /* __SVC_H */
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index cebc1d90a180..ba6f905f26fa 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -4,8 +4,6 @@
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Provided under the three clause BSD license found in the LICENSE file.
*/
#include <errno.h>
#include <fcntl.h>
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index b3bffe91ae99..55c51143bb09 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -28,8 +28,8 @@
#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
#define GB_NUM_MINORS 16 /* 16 is more than enough */
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
index 1c246c73a085..8e9d9d59a357 100644
--- a/drivers/staging/greybus/usb.c
+++ b/drivers/staging/greybus/usb.c
@@ -10,8 +10,8 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/greybus.h>
-#include "greybus.h"
#include "gbphy.h"
/* Greybus USB request types */
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 3e5dedeacd5c..0e2b188e5ca3 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -13,8 +13,7 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/pm_runtime.h>
-
-#include "greybus.h"
+#include <linux/greybus.h>
struct gb_vibrator_device {
struct gb_connection *connection;
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 62f4b3b1b457..82099db4bf0c 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -309,15 +309,12 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct adis *st = iio_priv(indio_dev);
- int bits = 10;
- s16 val16;
u8 addr;
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
- val16 = val & ((1 << bits) - 1);
addr = adis16240_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
+ return adis_write_reg_16(st, addr, val & GENMASK(9, 0));
}
return -EINVAL;
}
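A side note on the replacement above: GENMASK(9, 0) from <linux/bits.h> expands to the same ten-bit mask that the removed "(1 << bits) - 1" expression produced, so the value written to the register is unchanged; only the local variables become unnecessary. The sketch below is a minimal userspace illustration of that equivalence, using a simplified stand-in for the kernel macro (the real definition lives in include/linux/bits.h).

/* Userspace sketch: GENMASK(9, 0) equals the old "(1 << 10) - 1" mask.
 * The GENMASK below is a simplified stand-in for the kernel macro.
 */
#include <assert.h>
#include <stdio.h>

#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
        int bits = 10;
        int val = 0x7fff;                        /* arbitrary test value */
        unsigned long old_mask = (1UL << bits) - 1;

        assert(GENMASK(9, 0) == old_mask);       /* both are 0x3ff */
        assert((val & GENMASK(9, 0)) == (val & old_mask));
        printf("mask = 0x%lx\n", GENMASK(9, 0));
        return 0;
}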
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index b6d12fe7c12a..e6b660489165 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -25,8 +25,6 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/adc/ad_sigma_delta.h>
-#include "ad7192.h"
-
/* Registers */
#define AD7192_REG_COMM 0 /* Communications Register (WO, 8-bit) */
#define AD7192_REG_STAT 0 /* Status Register (RO, 8-bit) */
@@ -145,6 +143,10 @@
#define AD7192_EXT_FREQ_MHZ_MAX 5120000
#define AD7192_INT_FREQ_MHZ 4915200
+#define AD7192_NO_SYNC_FILTER 1
+#define AD7192_SYNC3_FILTER 3
+#define AD7192_SYNC4_FILTER 4
+
/* NOTE:
* The AD7190/2/5 features a dual use data out ready DOUT/RDY output.
* In order to avoid contentions on the SPI bus, it's therefore necessary
@@ -252,7 +254,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
{
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
- bool rej60_en, sinc3_en, refin2_en, chop_en;
+ bool rej60_en, refin2_en;
bool buf_en, bipolar, burnout_curr_en;
unsigned long long scale_uv;
int i, ret, id;
@@ -284,24 +286,12 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
if (rej60_en)
st->mode |= AD7192_MODE_REJ60;
- sinc3_en = of_property_read_bool(np, "adi,sinc3-filter-enable");
- if (sinc3_en)
- st->mode |= AD7192_MODE_SINC3;
-
refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable");
if (refin2_en && st->devid != ID_AD7195)
st->conf |= AD7192_CONF_REFSEL;
- chop_en = of_property_read_bool(np, "adi,chop-enable");
- if (chop_en) {
- st->conf |= AD7192_CONF_CHOP;
- if (sinc3_en)
- st->f_order = 3; /* SINC 3rd order */
- else
- st->f_order = 4; /* SINC 4th order */
- } else {
- st->f_order = 1;
- }
+ st->conf &= ~AD7192_CONF_CHOP;
+ st->f_order = AD7192_NO_SYNC_FILTER;
buf_en = of_property_read_bool(np, "adi,buffer-enable");
if (buf_en)
@@ -313,7 +303,7 @@ static int ad7192_setup(struct ad7192_state *st, struct device_node *np)
burnout_curr_en = of_property_read_bool(np,
"adi,burnout-currents-enable");
- if (burnout_curr_en && buf_en && !chop_en) {
+ if (burnout_curr_en && buf_en) {
st->conf |= AD7192_CONF_BURN;
} else if (burnout_curr_en) {
dev_warn(&st->sd.spi->dev,
@@ -411,6 +401,49 @@ static ssize_t ad7192_set(struct device *dev,
return ret ? ret : len;
}
+static void ad7192_get_available_filter_freq(struct ad7192_state *st,
+ int *freq)
+{
+ unsigned int fadc;
+
+ /* Formulas for filter at page 25 of the datasheet */
+ fadc = DIV_ROUND_CLOSEST(st->fclk,
+ AD7192_SYNC4_FILTER * AD7192_MODE_RATE(st->mode));
+ freq[0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
+
+ fadc = DIV_ROUND_CLOSEST(st->fclk,
+ AD7192_SYNC3_FILTER * AD7192_MODE_RATE(st->mode));
+ freq[1] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
+
+ fadc = DIV_ROUND_CLOSEST(st->fclk, AD7192_MODE_RATE(st->mode));
+ freq[2] = DIV_ROUND_CLOSEST(fadc * 230, 1024);
+ freq[3] = DIV_ROUND_CLOSEST(fadc * 272, 1024);
+}
+
+static ssize_t ad7192_show_filter_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+ unsigned int freq_avail[4], i;
+ size_t len = 0;
+
+ ad7192_get_available_filter_freq(st, freq_avail);
+
+ for (i = 0; i < ARRAY_SIZE(freq_avail); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%d.%d ", freq_avail[i] / 1000,
+ freq_avail[i] % 1000);
+
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(filter_low_pass_3db_frequency_available,
+ 0444, ad7192_show_filter_avail, NULL, 0);
+
static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
ad7192_show_bridge_switch, ad7192_set,
AD7192_REG_GPOCON);
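To make the four candidate frequencies above concrete, the standalone sketch below mirrors ad7192_get_available_filter_freq() with two illustrative assumptions that are not taken from this patch: the internal 4.9152 MHz master clock as fclk and an output-rate divider (AD7192_MODE_RATE) of 96. With those inputs it prints the list in the same "val.val2" style used by the new filter_low_pass_3db_frequency_available attribute.

/* Standalone sketch mirroring ad7192_get_available_filter_freq() above.
 * Assumed inputs (not part of this patch): fclk = 4915200 Hz (internal
 * clock) and an AD7192_MODE_RATE divider of 96.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned int fclk = 4915200, rate = 96, fadc, freq[4], i;

        fadc = DIV_ROUND_CLOSEST(fclk, 4 * rate);       /* sinc4 + chop */
        freq[0] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
        fadc = DIV_ROUND_CLOSEST(fclk, 3 * rate);       /* sinc3 + chop */
        freq[1] = DIV_ROUND_CLOSEST(fadc * 240, 1024);
        fadc = DIV_ROUND_CLOSEST(fclk, rate);           /* chop disabled */
        freq[2] = DIV_ROUND_CLOSEST(fadc * 230, 1024);  /* sinc4 */
        freq[3] = DIV_ROUND_CLOSEST(fadc * 272, 1024);  /* sinc3 */

        for (i = 0; i < 4; i++)
                printf("%u.%u ", freq[i] / 1000, freq[i] % 1000);
        printf("\n");
        return 0;
}

With these assumed inputs the sketch prints "3.0 4.0 11.500 13.600"; like the driver's scnprintf loop, the %u.%u format does not zero-pad the fractional part.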
@@ -420,6 +453,7 @@ static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
AD7192_REG_MODE);
static struct attribute *ad7192_attributes[] = {
+ &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
&iio_dev_attr_ac_excitation_en.dev_attr.attr,
NULL
@@ -430,6 +464,7 @@ static const struct attribute_group ad7192_attribute_group = {
};
static struct attribute *ad7195_attributes[] = {
+ &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
&iio_dev_attr_bridge_switch_en.dev_attr.attr,
NULL
};
@@ -443,6 +478,75 @@ static unsigned int ad7192_get_temp_scale(bool unipolar)
return unipolar ? 2815 * 2 : 2815;
}
+static int ad7192_set_3db_filter_freq(struct ad7192_state *st,
+ int val, int val2)
+{
+ int freq_avail[4], i, ret, freq;
+ unsigned int diff_new, diff_old;
+ int idx = 0;
+
+ diff_old = U32_MAX;
+ freq = val * 1000 + val2;
+
+ ad7192_get_available_filter_freq(st, freq_avail);
+
+ for (i = 0; i < ARRAY_SIZE(freq_avail); i++) {
+ diff_new = abs(freq - freq_avail[i]);
+ if (diff_new < diff_old) {
+ diff_old = diff_new;
+ idx = i;
+ }
+ }
+
+ switch (idx) {
+ case 0:
+ st->f_order = AD7192_SYNC4_FILTER;
+ st->mode &= ~AD7192_MODE_SINC3;
+
+ st->conf |= AD7192_CONF_CHOP;
+ break;
+ case 1:
+ st->f_order = AD7192_SYNC3_FILTER;
+ st->mode |= AD7192_MODE_SINC3;
+
+ st->conf |= AD7192_CONF_CHOP;
+ break;
+ case 2:
+ st->f_order = AD7192_NO_SYNC_FILTER;
+ st->mode &= ~AD7192_MODE_SINC3;
+
+ st->conf &= ~AD7192_CONF_CHOP;
+ break;
+ case 3:
+ st->f_order = AD7192_NO_SYNC_FILTER;
+ st->mode |= AD7192_MODE_SINC3;
+
+ st->conf &= ~AD7192_CONF_CHOP;
+ break;
+ }
+
+ ret = ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ if (ret < 0)
+ return ret;
+
+ return ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
+}
+
+static int ad7192_get_3db_filter_freq(struct ad7192_state *st)
+{
+ unsigned int fadc;
+
+ fadc = DIV_ROUND_CLOSEST(st->fclk,
+ st->f_order * AD7192_MODE_RATE(st->mode));
+
+ if (st->conf & AD7192_CONF_CHOP)
+ return DIV_ROUND_CLOSEST(fadc * 240, 1024);
+ if (st->mode & AD7192_MODE_SINC3)
+ return DIV_ROUND_CLOSEST(fadc * 272, 1024);
+ else
+ return DIV_ROUND_CLOSEST(fadc * 230, 1024);
+}
+
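For reference, the switch in ad7192_set_3db_filter_freq() above maps each of the four advertised cut-off frequencies onto one filter configuration. The snippet below restates that mapping as a lookup table; it is an equivalent restructuring for readability only, not code from this patch.

/* Equivalent restructuring (illustration only, not part of this patch):
 * index into the available-frequency list -> filter configuration.
 */
struct ad7192_filter_cfg {
        unsigned int f_order;   /* written to st->f_order */
        bool sinc3;             /* AD7192_MODE_SINC3 set in st->mode */
        bool chop;              /* AD7192_CONF_CHOP set in st->conf */
};

static const struct ad7192_filter_cfg ad7192_filter_cfgs[] = {
        { 4, false, true  },    /* idx 0: sinc4 + chop   -> freq[0] */
        { 3, true,  true  },    /* idx 1: sinc3 + chop   -> freq[1] */
        { 1, false, false },    /* idx 2: sinc4, no chop -> freq[2] */
        { 1, true,  false },    /* idx 3: sinc3, no chop -> freq[3] */
};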
static int ad7192_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -483,6 +587,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
*val = st->fclk /
(st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *val = ad7192_get_3db_filter_freq(st);
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
@@ -537,6 +645,9 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
st->mode |= AD7192_MODE_RATE(div);
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
break;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = ad7192_set_3db_filter_freq(st, val, val2 / 1000);
+ break;
default:
ret = -EINVAL;
}
@@ -555,6 +666,8 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_SAMP_FREQ:
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -655,6 +768,8 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
for (i = 0; i < indio_dev->num_channels; i++) {
*chan = channels[i];
+ chan->info_mask_shared_by_all |=
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY);
if (chan->type != IIO_TEMP)
chan->info_mask_shared_by_type_available |=
BIT(IIO_CHAN_INFO_SCALE);
@@ -666,16 +781,10 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
static int ad7192_probe(struct spi_device *spi)
{
- const struct ad7192_platform_data *pdata = dev_get_platdata(&spi->dev);
struct ad7192_state *st;
struct iio_dev *indio_dev;
int ret, voltage_uv = 0;
- if (!pdata) {
- dev_err(&spi->dev, "no platform data?\n");
- return -ENODEV;
- }
-
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ?\n");
return -ENODEV;
@@ -713,12 +822,10 @@ static int ad7192_probe(struct spi_device *spi)
voltage_uv = regulator_get_voltage(st->avdd);
- if (pdata->vref_mv)
- st->int_vref_mv = pdata->vref_mv;
- else if (voltage_uv)
+ if (voltage_uv)
st->int_vref_mv = voltage_uv / 1000;
else
- dev_warn(&spi->dev, "reference voltage undefined\n");
+ dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
spi_set_drvdata(spi, indio_dev);
st->devid = spi_get_device_id(spi)->driver_data;
@@ -809,11 +916,23 @@ static const struct spi_device_id ad7192_id[] = {
{"ad7195", ID_AD7195},
{}
};
+
MODULE_DEVICE_TABLE(spi, ad7192_id);
+static const struct of_device_id ad7192_of_match[] = {
+ { .compatible = "adi,ad7190" },
+ { .compatible = "adi,ad7192" },
+ { .compatible = "adi,ad7193" },
+ { .compatible = "adi,ad7195" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ad7192_of_match);
+
static struct spi_driver ad7192_driver = {
.driver = {
.name = "ad7192",
+ .of_match_table = ad7192_of_match,
},
.probe = ad7192_probe,
.remove = ad7192_remove,
diff --git a/drivers/staging/iio/adc/ad7192.h b/drivers/staging/iio/adc/ad7192.h
deleted file mode 100644
index f3669e1df084..000000000000
--- a/drivers/staging/iio/adc/ad7192.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * AD7190 AD7192 AD7195 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- */
-#ifndef IIO_ADC_AD7192_H_
-#define IIO_ADC_AD7192_H_
-
-/*
- * TODO: struct ad7192_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7192_platform_data - platform/board specific information
- * @vref_mv: the external reference voltage in millivolt
- * @clock_source_sel: [0..3]
- * 0 External 4.92 MHz clock connected from MCLK1 to MCLK2
- * 1 External Clock applied to MCLK2
- * 2 Internal 4.92 MHz Clock not available at the MCLK2 pin
- * 3 Internal 4.92 MHz Clock available at the MCLK2 pin
- * @ext_clk_Hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @refin2_en: REFIN1/REFIN2 Reference Select (AD7190/2 only)
- * @rej60_en: 50/60Hz notch filter enable
- * @sinc3_en: SINC3 filter enable (default SINC4)
- * @chop_en: CHOP mode enable
- * @buf_en: buffered input mode enable
- * @unipolar_en: unipolar mode enable
- * @burnout_curr_en: constant current generators on AIN(+|-) enable
- */
-
-struct ad7192_platform_data {
- u16 vref_mv;
-};
-
-#endif /* IIO_ADC_AD7192_H_ */
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 0c1bd108c386..4b25a3a314ed 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -671,7 +671,7 @@ static int ad2s1210_probe(struct spi_device *spi)
indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels);
indio_dev->name = spi_get_device_id(spi)->name;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
@@ -683,15 +683,6 @@ static int ad2s1210_probe(struct spi_device *spi)
return 0;
}
-static int ad2s1210_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-
static const struct of_device_id ad2s1210_of_match[] = {
{ .compatible = "adi,ad2s1210", },
{ }
@@ -710,7 +701,6 @@ static struct spi_driver ad2s1210_driver = {
.of_match_table = of_match_ptr(ad2s1210_of_match),
},
.probe = ad2s1210_probe,
- .remove = ad2s1210_remove,
.id_table = ad2s1210_id,
};
module_spi_driver(ad2s1210_driver);
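The change above relies on devm_iio_device_register() tying the IIO unregistration to the SPI device's lifetime, which is why the explicit .remove callback could be dropped. Below is a minimal sketch of that pattern for a hypothetical driver; the names, the state struct, and the empty iio_info are invented for illustration and are not the ad2s1210 code itself.

/* Minimal sketch of the devm-managed IIO registration pattern.
 * "example_adc" is a hypothetical driver name used only here.
 */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>

struct example_adc_state {
        struct spi_device *spi;
};

static const struct iio_info example_adc_info = { };

static int example_adc_probe(struct spi_device *spi)
{
        struct iio_dev *indio_dev;
        struct example_adc_state *st;

        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
        if (!indio_dev)
                return -ENOMEM;

        st = iio_priv(indio_dev);
        st->spi = spi;

        indio_dev->name = "example-adc";
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &example_adc_info;
        /* channels and real info callbacks would be filled in here */

        /* Unregistered automatically when the SPI device goes away,
         * so no .remove callback is required.
         */
        return devm_iio_device_register(&spi->dev, indio_dev);
}

static struct spi_driver example_adc_driver = {
        .driver = { .name = "example-adc" },
        .probe = example_adc_probe,
};
module_spi_driver(example_adc_driver);
MODULE_LICENSE("GPL");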
diff --git a/drivers/staging/isdn/hysdn/Kconfig b/drivers/staging/isdn/hysdn/Kconfig
index 1971ef850c9a..4c8a9283b9dd 100644
--- a/drivers/staging/isdn/hysdn/Kconfig
+++ b/drivers/staging/isdn/hysdn/Kconfig
@@ -5,7 +5,7 @@ config HYSDN
help
Say Y here if you have one of Hypercope's active PCI ISDN cards
Champ, Ergo and Metro. You will then get a module called hysdn.
- Please read the file <file:Documentation/isdn/README.hysdn> for more
+ Please read the file <file:Documentation/isdn/hysdn.rst> for more
information.
config HYSDN_CAPI
diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c
index bea37ae30ebb..dcb9ef7a2651 100644
--- a/drivers/staging/isdn/hysdn/hysdn_net.c
+++ b/drivers/staging/isdn/hysdn/hysdn_net.c
@@ -286,7 +286,7 @@ hysdn_net_create(hysdn_card *card)
if (card->debug_flags & LOG_NET_INIT)
hysdn_addlog(card, "network device created");
- return (0); /* and return success */
+ return 0; /* and return success */
} /* hysdn_net_create */
/***************************************************************************/
diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c
index 73079213ec94..48afd9f5316e 100644
--- a/drivers/staging/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/staging/isdn/hysdn/hysdn_procconf.c
@@ -382,7 +382,7 @@ hysdn_procconf_init(void)
}
printk(KERN_NOTICE "HYSDN: procfs initialised\n");
- return (0);
+ return 0;
} /* hysdn_procconf_init */
/*************************************************************************************/
diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
index c124a836db27..738122afc2ae 100644
--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c
+++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c
@@ -53,15 +53,15 @@ struct core_table_entry {
static
void parse_core_table_entry_v0(struct core_table_entry *cte, const u64 read_val)
{
- cte->type = ((read_val & 0xFFF0000000000000) >> 52);
- cte->offset = ((read_val & 0x00000000FFFF0000) >> 16) * 4096;
- cte->length = ((read_val & 0x0000FFFF00000000) >> 32) * 8;
- cte->s2c_dma_present = ((read_val & 0x0008000000000000) >> 51);
- cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000) >> 48);
- cte->c2s_dma_present = ((read_val & 0x0000000000008000) >> 15);
- cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000) >> 12);
- cte->irq_count = ((read_val & 0x0000000000000C00) >> 10);
- cte->irq_base_num = ((read_val & 0x00000000000003F8) >> 3);
+ cte->type = ((read_val & 0xFFF0000000000000UL) >> 52);
+ cte->offset = ((read_val & 0x00000000FFFF0000UL) >> 16) * 4096;
+ cte->length = ((read_val & 0x0000FFFF00000000UL) >> 32) * 8;
+ cte->s2c_dma_present = ((read_val & 0x0008000000000000UL) >> 51);
+ cte->s2c_dma_channel_num = ((read_val & 0x0007000000000000UL) >> 48);
+ cte->c2s_dma_present = ((read_val & 0x0000000000008000UL) >> 15);
+ cte->c2s_dma_channel_num = ((read_val & 0x0000000000007000UL) >> 12);
+ cte->irq_count = ((read_val & 0x0000000000000C00UL) >> 10);
+ cte->irq_base_num = ((read_val & 0x00000000000003F8UL) >> 3);
}
static
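The masks and shifts above unpack one 64-bit core-table word into several fields, with the offset stored in 4096-byte units and the length in 8-byte units. The standalone sketch below decodes an invented sample value with the same masks, purely to make the field layout easier to read; the sample value and the ULL suffixes are illustration choices, not from this patch.

/* Userspace sketch decoding a made-up core table entry with the same
 * field layout as parse_core_table_entry_v0() above. The sample value
 * is invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t v = 0x0123000500030008ULL;      /* invented test pattern */

        unsigned int type  = (v & 0xFFF0000000000000ULL) >> 52;
        uint64_t offset    = ((v & 0x00000000FFFF0000ULL) >> 16) * 4096;
        uint64_t length    = ((v & 0x0000FFFF00000000ULL) >> 32) * 8;
        unsigned int irqs  = (v & 0x0000000000000C00ULL) >> 10;
        unsigned int irq0  = (v & 0x00000000000003F8ULL) >> 3;

        printf("type=%u offset=0x%llx length=%llu irq_count=%u irq_base=%u\n",
               type, (unsigned long long)offset, (unsigned long long)length,
               irqs, irq0);
        return 0;
}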
diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
index cb05cca687e1..0a23727d0dc3 100644
--- a/drivers/staging/kpc2000/kpc2000/core.c
+++ b/drivers/staging/kpc2000/kpc2000/core.c
@@ -205,7 +205,7 @@ static void wait_and_read_ssid(struct kp2000_device *pcard)
u64 read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
unsigned long timeout;
- if (read_val & 0x8000000000000000) {
+ if (read_val & 0x8000000000000000UL) {
pcard->ssid = read_val;
return;
}
@@ -213,7 +213,7 @@ static void wait_and_read_ssid(struct kp2000_device *pcard)
timeout = jiffies + (HZ * 2);
do {
read_val = readq(pcard->sysinfo_regs_base + REG_FPGA_SSID);
- if (read_val & 0x8000000000000000) {
+ if (read_val & 0x8000000000000000UL) {
pcard->ssid = read_val;
return;
}
@@ -241,16 +241,16 @@ static int read_system_regs(struct kp2000_device *pcard)
}
read_val = readq(pcard->sysinfo_regs_base + REG_CARD_ID_AND_BUILD);
- pcard->card_id = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->build_version = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->card_id = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->build_version = (read_val & 0x00000000FFFFFFFFUL) >> 0;
read_val = readq(pcard->sysinfo_regs_base + REG_DATE_AND_TIME_STAMPS);
- pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->build_timestamp = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->build_datestamp = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->build_timestamp = (read_val & 0x00000000FFFFFFFFUL) >> 0;
read_val = readq(pcard->sysinfo_regs_base + REG_CORE_TABLE_OFFSET);
- pcard->core_table_length = (read_val & 0xFFFFFFFF00000000) >> 32;
- pcard->core_table_offset = (read_val & 0x00000000FFFFFFFF) >> 0;
+ pcard->core_table_length = (read_val & 0xFFFFFFFF00000000UL) >> 32;
+ pcard->core_table_offset = (read_val & 0x00000000FFFFFFFFUL) >> 0;
wait_and_read_ssid(pcard);
@@ -401,7 +401,7 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
goto err_release_dma;
// Disable all "user" interrupts because they're not used yet.
- writeq(0xFFFFFFFFFFFFFFFF,
+ writeq(0xFFFFFFFFFFFFFFFFUL,
pcard->sysinfo_regs_base + REG_INTERRUPT_MASK);
// let the card master PCIe
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index b108da4ac633..bc02534d8dc3 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -123,9 +123,9 @@ struct i2c_device {
// FIXME!
#undef inb_p
-#define inb_p(a) readq((void *)a)
+#define inb_p(a) readq((void __iomem *)a)
#undef outb_p
-#define outb_p(d, a) writeq(d, (void *)a)
+#define outb_p(d, a) writeq(d, (void __iomem *)a)
/* Make sure the SMBus host is ready to start transmitting.
* Return 0 if it is, -EBUSY if it is not.
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 35ac1d7070b3..3be33c450cab 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -412,8 +412,7 @@ kp_spi_cleanup(struct spi_device *spidev)
{
struct kp_spi_controller_state *cs = spidev->controller_state;
- if (cs)
- kfree(cs);
+ kfree(cs);
}
/******************
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 48ca88bc6b0b..cb52bd9a6d2f 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -146,15 +146,15 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
card_addr += desc->DescByteCount;
dma_addr = sg_dma_address(sg) + (p * 0x80000);
- desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0;
- desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;
+ desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFFUL) >> 0;
+ desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000UL) >> 32;
user_ctl = acd->priv->user_ctl;
if (i == acd->mapped_entry_count-1 && p == pcnt-1) {
user_ctl = acd->priv->user_ctl_last;
}
- desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0;
- desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
+ desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFFUL) >> 0;
+ desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000UL) >> 32;
if (i == acd->mapped_entry_count-1 && p == pcnt-1)
desc->acd = acd;
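The LS/MS pairs above split a 64-bit DMA address and user-control word into the two 32-bit halves the descriptor expects. The kernel also provides lower_32_bits()/upper_32_bits() helpers in <linux/kernel.h> that express the same split; whether to use them here is a style choice this patch does not make. The userspace sketch below shows the idea with local equivalents and an invented address.

/* Userspace sketch of the 64-bit split; lower_32()/upper_32() mimic the
 * kernel's lower_32_bits()/upper_32_bits() helpers. The address below is
 * invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t dma_addr = 0x0000001234567000ULL;   /* invented example */

        printf("LS=0x%08x MS=0x%08x\n", lower_32(dma_addr), upper_32(dma_addr));
        return 0;
}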
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 534d85d6c5e3..642adc4c24d2 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -22,10 +22,6 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/allegro-dvt/Kconfig"
-source "drivers/staging/media/bcm2048/Kconfig"
-
-source "drivers/staging/media/davinci_vpfe/Kconfig"
-
source "drivers/staging/media/hantro/Kconfig"
source "drivers/staging/media/imx/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index c486298194da..2f1711a8aeed 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
-obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
-obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
index f050c7347fd5..6f0cd0784786 100644
--- a/drivers/staging/media/allegro-dvt/allegro-core.c
+++ b/drivers/staging/media/allegro-dvt/allegro-core.c
@@ -2947,10 +2947,8 @@ static int allegro_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(&pdev->dev, irq,
allegro_hardirq,
allegro_irq_thread,
diff --git a/drivers/staging/media/bcm2048/Kconfig b/drivers/staging/media/bcm2048/Kconfig
deleted file mode 100644
index ab2d50cac140..000000000000
--- a/drivers/staging/media/bcm2048/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Multimedia Video device configuration
-#
-
-config I2C_BCM2048
- tristate "Broadcom BCM2048 FM Radio Receiver support"
- depends on I2C && VIDEO_V4L2 && RADIO_ADAPTERS
- help
- Say Y here if you want support to BCM2048 FM Radio Receiver.
- This device driver supports only i2c bus.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-bcm2048.
diff --git a/drivers/staging/media/bcm2048/Makefile b/drivers/staging/media/bcm2048/Makefile
deleted file mode 100644
index f42056848dc6..000000000000
--- a/drivers/staging/media/bcm2048/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_I2C_BCM2048) += radio-bcm2048.o
diff --git a/drivers/staging/media/bcm2048/TODO b/drivers/staging/media/bcm2048/TODO
deleted file mode 100644
index 6bee2a2dad68..000000000000
--- a/drivers/staging/media/bcm2048/TODO
+++ /dev/null
@@ -1,24 +0,0 @@
-TODO:
-
-From the initial code review:
-
-The main thing you need to do is to implement all the controls using the
-control framework (see Documentation/media/kapi/v4l2-controls.rst).
-Most drivers are by now converted to the control framework, so you will
-find many examples of how to do this in drivers/media/radio.
-
-The sysfs stuff should be replaced by controls as well. A lot of the RDS
-support is now available as controls (although there may well be some
-missing features, but that is easy enough to add). Since the RDS data is
-actually read() from the device I am not sure whether the RDS
-properties/controls should be there at all.
-
-Correct Coding Style, as this driver also violates several Style
-rules, and do evil tricks, like returning from a function inside a
-macro.
-
-Finally this driver should probably be split up into two parts: one
-v4l2_subdev-based core driver and one platform driver. See e.g.
-radio-si4713/si4713-i2c.c as a good example. But I would wait with that
-until the rest of the driver is cleaned up. Then I have a better idea of
-whether this is necessary or not.
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
deleted file mode 100644
index 2c60a1fb6350..000000000000
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ /dev/null
@@ -1,2689 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/staging/media/radio-bcm2048.c
- *
- * Driver for I2C Broadcom BCM2048 FM Radio Receiver:
- *
- * Copyright (C) Nokia Corporation
- * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- *
- * Copyright (C) Nils Faerber <nils.faerber@kernelconcepts.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-/*
- * History:
- * Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- * Version 0.0.1
- * - Initial implementation
- * 2010-02-21 Nils Faerber <nils.faerber@kernelconcepts.de>
- * Version 0.0.2
- * - Add support for interrupt driven rds data reading
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/version.h>
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include "radio-bcm2048.h"
-
-/* driver definitions */
-#define BCM2048_DRIVER_AUTHOR "Eero Nurkkala <ext-eero.nurkkala@nokia.com>"
-#define BCM2048_DRIVER_NAME BCM2048_NAME
-#define BCM2048_DRIVER_CARD "Broadcom bcm2048 FM Radio Receiver"
-#define BCM2048_DRIVER_DESC "I2C driver for BCM2048 FM Radio Receiver"
-
-/* I2C Control Registers */
-#define BCM2048_I2C_FM_RDS_SYSTEM 0x00
-#define BCM2048_I2C_FM_CTRL 0x01
-#define BCM2048_I2C_RDS_CTRL0 0x02
-#define BCM2048_I2C_RDS_CTRL1 0x03
-#define BCM2048_I2C_FM_AUDIO_PAUSE 0x04
-#define BCM2048_I2C_FM_AUDIO_CTRL0 0x05
-#define BCM2048_I2C_FM_AUDIO_CTRL1 0x06
-#define BCM2048_I2C_FM_SEARCH_CTRL0 0x07
-#define BCM2048_I2C_FM_SEARCH_CTRL1 0x08
-#define BCM2048_I2C_FM_SEARCH_TUNE_MODE 0x09
-#define BCM2048_I2C_FM_FREQ0 0x0a
-#define BCM2048_I2C_FM_FREQ1 0x0b
-#define BCM2048_I2C_FM_AF_FREQ0 0x0c
-#define BCM2048_I2C_FM_AF_FREQ1 0x0d
-#define BCM2048_I2C_FM_CARRIER 0x0e
-#define BCM2048_I2C_FM_RSSI 0x0f
-#define BCM2048_I2C_FM_RDS_MASK0 0x10
-#define BCM2048_I2C_FM_RDS_MASK1 0x11
-#define BCM2048_I2C_FM_RDS_FLAG0 0x12
-#define BCM2048_I2C_FM_RDS_FLAG1 0x13
-#define BCM2048_I2C_RDS_WLINE 0x14
-#define BCM2048_I2C_RDS_BLKB_MATCH0 0x16
-#define BCM2048_I2C_RDS_BLKB_MATCH1 0x17
-#define BCM2048_I2C_RDS_BLKB_MASK0 0x18
-#define BCM2048_I2C_RDS_BLKB_MASK1 0x19
-#define BCM2048_I2C_RDS_PI_MATCH0 0x1a
-#define BCM2048_I2C_RDS_PI_MATCH1 0x1b
-#define BCM2048_I2C_RDS_PI_MASK0 0x1c
-#define BCM2048_I2C_RDS_PI_MASK1 0x1d
-#define BCM2048_I2C_SPARE1 0x20
-#define BCM2048_I2C_SPARE2 0x21
-#define BCM2048_I2C_FM_RDS_REV 0x28
-#define BCM2048_I2C_SLAVE_CONFIGURATION 0x29
-#define BCM2048_I2C_RDS_DATA 0x80
-#define BCM2048_I2C_FM_BEST_TUNE_MODE 0x90
-
-/* BCM2048_I2C_FM_RDS_SYSTEM */
-#define BCM2048_FM_ON 0x01
-#define BCM2048_RDS_ON 0x02
-
-/* BCM2048_I2C_FM_CTRL */
-#define BCM2048_BAND_SELECT 0x01
-#define BCM2048_STEREO_MONO_AUTO_SELECT 0x02
-#define BCM2048_STEREO_MONO_MANUAL_SELECT 0x04
-#define BCM2048_STEREO_MONO_BLEND_SWITCH 0x08
-#define BCM2048_HI_LO_INJECTION 0x10
-
-/* BCM2048_I2C_RDS_CTRL0 */
-#define BCM2048_RBDS_RDS_SELECT 0x01
-#define BCM2048_FLUSH_FIFO 0x02
-
-/* BCM2048_I2C_FM_AUDIO_PAUSE */
-#define BCM2048_AUDIO_PAUSE_RSSI_TRESH 0x0f
-#define BCM2048_AUDIO_PAUSE_DURATION 0xf0
-
-/* BCM2048_I2C_FM_AUDIO_CTRL0 */
-#define BCM2048_RF_MUTE 0x01
-#define BCM2048_MANUAL_MUTE 0x02
-#define BCM2048_DAC_OUTPUT_LEFT 0x04
-#define BCM2048_DAC_OUTPUT_RIGHT 0x08
-#define BCM2048_AUDIO_ROUTE_DAC 0x10
-#define BCM2048_AUDIO_ROUTE_I2S 0x20
-#define BCM2048_DE_EMPHASIS_SELECT 0x40
-#define BCM2048_AUDIO_BANDWIDTH_SELECT 0x80
-
-/* BCM2048_I2C_FM_SEARCH_CTRL0 */
-#define BCM2048_SEARCH_RSSI_THRESHOLD 0x7f
-#define BCM2048_SEARCH_DIRECTION 0x80
-
-/* BCM2048_I2C_FM_SEARCH_TUNE_MODE */
-#define BCM2048_FM_AUTO_SEARCH 0x03
-
-/* BCM2048_I2C_FM_RSSI */
-#define BCM2048_RSSI_VALUE 0xff
-
-/* BCM2048_I2C_FM_RDS_MASK0 */
-/* BCM2048_I2C_FM_RDS_MASK1 */
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
-#define BCM2048_FM_FLAG_RSSI_LOW 0x04
-#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
-#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
-#define BCM2048_FLAG_STEREO_DETECTED 0x20
-#define BCM2048_FLAG_STEREO_ACTIVE 0x40
-
-/* BCM2048_I2C_RDS_DATA */
-#define BCM2048_SLAVE_ADDRESS 0x3f
-#define BCM2048_SLAVE_ENABLE 0x80
-
-/* BCM2048_I2C_FM_BEST_TUNE_MODE */
-#define BCM2048_BEST_TUNE_MODE 0x80
-
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED 0x01
-#define BCM2048_FM_FLAG_SEARCH_TUNE_FAIL 0x02
-#define BCM2048_FM_FLAG_RSSI_LOW 0x04
-#define BCM2048_FM_FLAG_CARRIER_ERROR_HIGH 0x08
-#define BCM2048_FM_FLAG_AUDIO_PAUSE_INDICATION 0x10
-#define BCM2048_FLAG_STEREO_DETECTED 0x20
-#define BCM2048_FLAG_STEREO_ACTIVE 0x40
-
-#define BCM2048_RDS_FLAG_FIFO_WLINE 0x02
-#define BCM2048_RDS_FLAG_B_BLOCK_MATCH 0x08
-#define BCM2048_RDS_FLAG_SYNC_LOST 0x10
-#define BCM2048_RDS_FLAG_PI_MATCH 0x20
-
-#define BCM2048_RDS_MARK_END_BYTE0 0x7C
-#define BCM2048_RDS_MARK_END_BYTEN 0xFF
-
-#define BCM2048_FM_FLAGS_ALL (FM_FLAG_SEARCH_TUNE_FINISHED | \
- FM_FLAG_SEARCH_TUNE_FAIL | \
- FM_FLAG_RSSI_LOW | \
- FM_FLAG_CARRIER_ERROR_HIGH | \
- FM_FLAG_AUDIO_PAUSE_INDICATION | \
- FLAG_STEREO_DETECTED | FLAG_STEREO_ACTIVE)
-
-#define BCM2048_RDS_FLAGS_ALL (RDS_FLAG_FIFO_WLINE | \
- RDS_FLAG_B_BLOCK_MATCH | \
- RDS_FLAG_SYNC_LOST | RDS_FLAG_PI_MATCH)
-
-#define BCM2048_DEFAULT_TIMEOUT 1500
-#define BCM2048_AUTO_SEARCH_TIMEOUT 3000
-
-#define BCM2048_FREQDEV_UNIT 10000
-#define BCM2048_FREQV4L2_MULTI 625
-#define dev_to_v4l2(f) (((f) * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
-#define v4l2_to_dev(f) (((f) * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
-
-#define msb(x) ((u8)((u16)(x) >> 8))
-#define lsb(x) ((u8)((u16)(x) & 0x00FF))
-#define compose_u16(msb, lsb) (((u16)(msb) << 8) | (lsb))
-
-#define BCM2048_DEFAULT_POWERING_DELAY 20
-#define BCM2048_DEFAULT_REGION 0x02
-#define BCM2048_DEFAULT_MUTE 0x01
-#define BCM2048_DEFAULT_RSSI_THRESHOLD 0x64
-#define BCM2048_DEFAULT_RDS_WLINE 0x7E
-
-#define BCM2048_FM_SEARCH_INACTIVE 0x00
-#define BCM2048_FM_PRE_SET_MODE 0x01
-#define BCM2048_FM_AUTO_SEARCH_MODE 0x02
-#define BCM2048_FM_AF_JUMP_MODE 0x03
-
-#define BCM2048_FREQUENCY_BASE 64000
-
-#define BCM2048_POWER_ON 0x01
-#define BCM2048_POWER_OFF 0x00
-
-#define BCM2048_ITEM_ENABLED 0x01
-#define BCM2048_SEARCH_DIRECTION_UP 0x01
-
-#define BCM2048_DE_EMPHASIS_75us 75
-#define BCM2048_DE_EMPHASIS_50us 50
-
-#define BCM2048_SCAN_FAIL 0x00
-#define BCM2048_SCAN_OK 0x01
-
-#define BCM2048_FREQ_ERROR_FLOOR -20
-#define BCM2048_FREQ_ERROR_ROOF 20
-
-/* -60 dB is reported as full signal strength */
-#define BCM2048_RSSI_LEVEL_BASE -60
-#define BCM2048_RSSI_LEVEL_ROOF -100
-#define BCM2048_RSSI_LEVEL_ROOF_NEG 100
-#define BCM2048_SIGNAL_MULTIPLIER (0xFFFF / \
- (BCM2048_RSSI_LEVEL_ROOF_NEG + \
- BCM2048_RSSI_LEVEL_BASE))
-
-#define BCM2048_RDS_FIFO_DUPLE_SIZE 0x03
-#define BCM2048_RDS_CRC_MASK 0x0F
-#define BCM2048_RDS_CRC_NONE 0x00
-#define BCM2048_RDS_CRC_MAX_2BITS 0x04
-#define BCM2048_RDS_CRC_LEAST_2BITS 0x08
-#define BCM2048_RDS_CRC_UNRECOVARABLE 0x0C
-
-#define BCM2048_RDS_BLOCK_MASK 0xF0
-#define BCM2048_RDS_BLOCK_A 0x00
-#define BCM2048_RDS_BLOCK_B 0x10
-#define BCM2048_RDS_BLOCK_C 0x20
-#define BCM2048_RDS_BLOCK_D 0x30
-#define BCM2048_RDS_BLOCK_C_SCORED 0x40
-#define BCM2048_RDS_BLOCK_E 0x60
-
-#define BCM2048_RDS_RT 0x20
-#define BCM2048_RDS_PS 0x00
-
-#define BCM2048_RDS_GROUP_AB_MASK 0x08
-#define BCM2048_RDS_GROUP_A 0x00
-#define BCM2048_RDS_GROUP_B 0x08
-
-#define BCM2048_RDS_RT_AB_MASK 0x10
-#define BCM2048_RDS_RT_A 0x00
-#define BCM2048_RDS_RT_B 0x10
-#define BCM2048_RDS_RT_INDEX 0x0F
-
-#define BCM2048_RDS_PS_INDEX 0x03
-
-struct rds_info {
- u16 rds_pi;
-#define BCM2048_MAX_RDS_RT (64 + 1)
- u8 rds_rt[BCM2048_MAX_RDS_RT];
- u8 rds_rt_group_b;
- u8 rds_rt_ab;
-#define BCM2048_MAX_RDS_PS (8 + 1)
- u8 rds_ps[BCM2048_MAX_RDS_PS];
- u8 rds_ps_group;
- u8 rds_ps_group_cnt;
-#define BCM2048_MAX_RDS_RADIO_TEXT 255
- u8 radio_text[BCM2048_MAX_RDS_RADIO_TEXT + 3];
- u8 text_len;
-};
-
-struct region_info {
- u32 bottom_frequency;
- u32 top_frequency;
- u8 deemphasis;
- u8 channel_spacing;
- u8 region;
-};
-
-struct bcm2048_device {
- struct i2c_client *client;
- struct video_device videodev;
- struct work_struct work;
- struct completion compl;
- struct mutex mutex;
- struct bcm2048_platform_data *platform_data;
- struct rds_info rds_info;
- struct region_info region_info;
- u16 frequency;
- u8 cache_fm_rds_system;
- u8 cache_fm_ctrl;
- u8 cache_fm_audio_ctrl0;
- u8 cache_fm_search_ctrl0;
- u8 power_state;
- u8 rds_state;
- u8 fifo_size;
- u8 scan_state;
- u8 mute_state;
-
- /* for rds data device read */
- wait_queue_head_t read_queue;
- unsigned int users;
- unsigned char rds_data_available;
- unsigned int rd_index;
-};
-
-static int radio_nr = -1; /* radio device minor (-1 ==> auto assign) */
-module_param(radio_nr, int, 0000);
-MODULE_PARM_DESC(radio_nr,
- "Minor number for radio device (-1 ==> auto assign)");
-
-static const struct region_info region_configs[] = {
- /* USA */
- {
- .channel_spacing = 20,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 75,
- .region = 0,
- },
- /* Australia */
- {
- .channel_spacing = 20,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 50,
- .region = 1,
- },
- /* Europe */
- {
- .channel_spacing = 10,
- .bottom_frequency = 87500,
- .top_frequency = 108000,
- .deemphasis = 50,
- .region = 2,
- },
- /* Japan */
- {
- .channel_spacing = 10,
- .bottom_frequency = 76000,
- .top_frequency = 90000,
- .deemphasis = 50,
- .region = 3,
- },
-};
-
-/*
- * I2C Interface read / write
- */
-static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg,
- unsigned int value)
-{
- struct i2c_client *client = bdev->client;
- u8 data[2];
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- data[0] = reg & 0xff;
- data[1] = value & 0xff;
-
- if (i2c_master_send(client, data, 2) == 2)
- return 0;
-
- dev_err(&bdev->client->dev, "BCM I2C error!\n");
- dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n");
- return -EIO;
-}
-
-static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg,
- u8 *value)
-{
- struct i2c_client *client = bdev->client;
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- value[0] = i2c_smbus_read_byte_data(client, reg & 0xff);
-
- return 0;
-}
-
-static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg,
- u8 *value, u8 duples)
-{
- struct i2c_client *client = bdev->client;
- struct i2c_adapter *adap = client->adapter;
- struct i2c_msg msg[2];
- u8 buf;
-
- if (!bdev->power_state) {
- dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n");
- return -EIO;
- }
-
- buf = reg & 0xff;
-
- msg[0].addr = client->addr;
- msg[0].flags = client->flags & I2C_M_TEN;
- msg[0].len = 1;
- msg[0].buf = &buf;
-
- msg[1].addr = client->addr;
- msg[1].flags = client->flags & I2C_M_TEN;
- msg[1].flags |= I2C_M_RD;
- msg[1].len = duples;
- msg[1].buf = value;
-
- return i2c_transfer(adap, msg, 2);
-}
-
-/*
- * BCM2048 - I2C register programming helpers
- */
-static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power)
-{
- int err = 0;
-
- mutex_lock(&bdev->mutex);
-
- if (power) {
- bdev->power_state = BCM2048_POWER_ON;
- bdev->cache_fm_rds_system |= BCM2048_FM_ON;
- } else {
- bdev->cache_fm_rds_system &= ~BCM2048_FM_ON;
- }
-
- /*
- * Warning! FM cannot be turned off because then
- * the I2C communications get ruined!
- * Comment off the "if (power)" when the chip works!
- */
- if (power)
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
- bdev->cache_fm_rds_system);
- msleep(BCM2048_DEFAULT_POWERING_DELAY);
-
- if (!power)
- bdev->power_state = BCM2048_POWER_OFF;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_power_state(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_FM_ON))
- return BCM2048_POWER_ON;
-
- return err;
-}
-
-static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
-{
- int err;
- u8 flags;
-
- bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON;
-
- if (rds_on) {
- bdev->cache_fm_rds_system |= BCM2048_RDS_ON;
- bdev->rds_state = BCM2048_RDS_ON;
- flags = BCM2048_RDS_FLAG_FIFO_WLINE;
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- } else {
- flags = 0;
- bdev->rds_state = 0;
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
- }
- if (err)
- return err;
-
- return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
- bdev->cache_fm_rds_system);
-}
-
-static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value);
-
- if (!err && (value & BCM2048_RDS_ON))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_set_rds_no_lock(bdev, rds_on);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_get_rds_no_lock(bdev);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi(struct bcm2048_device *bdev)
-{
- return bdev->rds_info.rds_pi;
-}
-
-static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev,
- u8 enabled)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT;
-
- if (enabled)
- bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev,
- u8 hi_lo)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION;
-
- if (hi_lo)
- bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_HI_LO_INJECTION))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency)
-{
- int err;
-
- if (frequency < bdev->region_info.bottom_frequency ||
- frequency > bdev->region_info.top_frequency)
- return -EDOM;
-
- frequency -= BCM2048_FREQUENCY_BASE;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1,
- msb(frequency));
-
- if (!err)
- bdev->frequency = frequency;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (err)
- return err;
-
- err = compose_u16(msb, lsb);
- err += BCM2048_FREQUENCY_BASE;
-
- return err;
-}
-
-static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev,
- u32 frequency)
-{
- int err;
-
- if (frequency < bdev->region_info.bottom_frequency ||
- frequency > bdev->region_info.top_frequency)
- return -EDOM;
-
- frequency -= BCM2048_FREQUENCY_BASE;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0,
- lsb(frequency));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1,
- msb(frequency));
- if (!err)
- bdev->frequency = frequency;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (err)
- return err;
-
- err = compose_u16(msb, lsb);
- err += BCM2048_FREQUENCY_BASE;
-
- return err;
-}
-
-static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d)
-{
- int err;
- u8 deemphasis;
-
- if (d == BCM2048_DE_EMPHASIS_75us)
- deemphasis = BCM2048_DE_EMPHASIS_SELECT;
- else
- deemphasis = 0;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT;
- bdev->cache_fm_audio_ctrl0 |= deemphasis;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- if (!err)
- bdev->region_info.deemphasis = d;
-
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- if (value & BCM2048_DE_EMPHASIS_SELECT)
- return BCM2048_DE_EMPHASIS_75us;
-
- return BCM2048_DE_EMPHASIS_50us;
- }
-
- return err;
-}
-
-static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
-{
- int err;
- u32 new_frequency = 0;
-
- if (region >= ARRAY_SIZE(region_configs))
- return -EINVAL;
-
- mutex_lock(&bdev->mutex);
- bdev->region_info = region_configs[region];
-
- if (region_configs[region].bottom_frequency < 87500)
- bdev->cache_fm_ctrl |= BCM2048_BAND_SELECT;
- else
- bdev->cache_fm_ctrl &= ~BCM2048_BAND_SELECT;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL,
- bdev->cache_fm_ctrl);
- if (err) {
- mutex_unlock(&bdev->mutex);
- goto done;
- }
- mutex_unlock(&bdev->mutex);
-
- if (bdev->frequency < region_configs[region].bottom_frequency ||
- bdev->frequency > region_configs[region].top_frequency)
- new_frequency = region_configs[region].bottom_frequency;
-
- if (new_frequency > 0) {
- err = bcm2048_set_fm_frequency(bdev, new_frequency);
-
- if (err)
- goto done;
- }
-
- err = bcm2048_set_fm_deemphasis(bdev,
- region_configs[region].deemphasis);
-
-done:
- return err;
-}
-
-static int bcm2048_get_region(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
- err = bdev->region_info.region;
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
-
- if (mute)
- bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE |
- BCM2048_MANUAL_MUTE);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- if (!err)
- bdev->mute_state = mute;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_mute(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- if (bdev->power_state) {
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- &value);
- if (!err)
- err = value & (BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE);
- } else {
- err = bdev->mute_state;
- }
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- route &= (BCM2048_AUDIO_ROUTE_DAC | BCM2048_AUDIO_ROUTE_I2S);
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC |
- BCM2048_AUDIO_ROUTE_I2S);
- bdev->cache_fm_audio_ctrl0 |= route;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_audio_route(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & (BCM2048_AUDIO_ROUTE_DAC |
- BCM2048_AUDIO_ROUTE_I2S);
-
- return err;
-}
-
-static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
- bdev->cache_fm_audio_ctrl0 |= channels;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0,
- bdev->cache_fm_audio_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_dac_output(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & (BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
-
- return err;
-}
-
-static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev,
- u8 threshold)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- threshold &= BCM2048_SEARCH_RSSI_THRESHOLD;
- bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD;
- bdev->cache_fm_search_ctrl0 |= threshold;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
- bdev->cache_fm_search_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & BCM2048_SEARCH_RSSI_THRESHOLD;
-
- return err;
-}
-
-static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev,
- u8 direction)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION;
-
- if (direction)
- bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION;
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0,
- bdev->cache_fm_search_ctrl0);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_SEARCH_DIRECTION))
- return BCM2048_SEARCH_DIRECTION_UP;
-
- return err;
-}
-
-static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
- u8 mode)
-{
- int err, timeout, restart_rds = 0;
- u8 value, flags;
-
- value = mode & BCM2048_FM_AUTO_SEARCH;
-
- flags = BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
- BCM2048_FM_FLAG_SEARCH_TUNE_FAIL;
-
- mutex_lock(&bdev->mutex);
-
- /*
- * If RDS is enabled, and frequency is changed, RDS quits working.
- * Thus, always restart RDS if it's enabled. Moreover, RDS must
- * not be enabled while changing the frequency because it can
- * provide a race to the mutex from the workqueue handler if RDS
- * IRQ occurs while waiting for frequency changed IRQ.
- */
- if (bcm2048_get_rds_no_lock(bdev)) {
- err = bcm2048_set_rds_no_lock(bdev, 0);
- if (err)
- goto unlock;
- restart_rds = 1;
- }
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags);
-
- if (err)
- goto unlock;
-
- bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value);
-
- if (mode != BCM2048_FM_AUTO_SEARCH_MODE)
- timeout = BCM2048_DEFAULT_TIMEOUT;
- else
- timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
-
- if (!wait_for_completion_timeout(&bdev->compl,
- msecs_to_jiffies(timeout)))
- dev_err(&bdev->client->dev, "IRQ timeout.\n");
-
- if (value)
- if (!bdev->scan_state)
- err = -EIO;
-
-unlock:
- if (restart_rds)
- err |= bcm2048_set_rds_no_lock(bdev, 1);
-
- mutex_unlock(&bdev->mutex);
-
- return err;
-}
-
-static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE,
- &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value & BCM2048_FM_AUTO_SEARCH;
-
- return err;
-}
-
-static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_BLKB_MASK0,
- lsb(mask));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_RDS_BLKB_MASK1,
- msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_BLKB_MASK0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_RDS_BLKB_MASK1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev,
- u16 match)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_BLKB_MATCH0,
- lsb(match));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_RDS_BLKB_MATCH1,
- msb(match));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_BLKB_MATCH0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_RDS_BLKB_MATCH1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_PI_MASK0, lsb(mask));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_RDS_PI_MASK1, msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_PI_MASK0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_RDS_PI_MASK1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_PI_MATCH0,
- lsb(match));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_RDS_PI_MATCH1,
- msb(match));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev)
-{
- int err;
- u8 lsb = 0, msb = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_PI_MATCH0, &lsb);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_RDS_PI_MATCH1, &msb);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(msb, lsb);
-
- return err;
-}
-
-static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, lsb(mask));
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, msb(mask));
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev)
-{
- int err;
- u8 value0 = 0, value1 = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(value1, value0);
-
- return err;
-}
-
-static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev)
-{
- int err;
- u8 value0 = 0, value1 = 0;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0);
- err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return compose_u16(value1, value0);
-
- return err;
-}
-
-static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev)
-{
- return bdev->region_info.bottom_frequency;
-}
-
-static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev)
-{
- return bdev->region_info.top_frequency;
-}
-
-static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode)
-{
- int err;
- u8 value = 0;
-
- mutex_lock(&bdev->mutex);
-
- /* Perform read as the manual indicates */
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- &value);
- value &= ~BCM2048_BEST_TUNE_MODE;
-
- if (mode)
- value |= BCM2048_BEST_TUNE_MODE;
- err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- value);
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE,
- &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err && (value & BCM2048_BEST_TUNE_MODE))
- return BCM2048_ITEM_ENABLED;
-
- return err;
-}
-
-static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev)
-{
- int err = 0;
- s8 value;
-
- mutex_lock(&bdev->mutex);
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value);
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value;
-
- return err;
-}
-
-static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev)
-{
- int err;
- s8 value;
-
- mutex_lock(&bdev->mutex);
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value);
- mutex_unlock(&bdev->mutex);
-
- if (!err)
- return value;
-
- return err;
-}
-
-static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline);
-
- if (!err)
- bdev->fifo_size = wline;
-
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_wline(struct bcm2048_device *bdev)
-{
- int err;
- u8 value;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- bdev->fifo_size = value;
- return value;
- }
-
- return err;
-}
-
-static int bcm2048_checkrev(struct bcm2048_device *bdev)
-{
- int err;
- u8 version;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version);
-
- mutex_unlock(&bdev->mutex);
-
- if (!err) {
- dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n",
- version);
- return version;
- }
-
- return err;
-}
-
-static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, j = 0, ce = 0, cr = 0;
- char data_buffer[BCM2048_MAX_RDS_RT + 1];
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
- if (bdev->rds_info.rds_rt[i]) {
- ce = i;
- /* Skip the carriage return */
- if (bdev->rds_info.rds_rt[i] != 0x0d) {
- data_buffer[j++] = bdev->rds_info.rds_rt[i];
- } else {
- cr = i;
- break;
- }
- }
- }
-
- if (j <= BCM2048_MAX_RDS_RT)
- data_buffer[j] = 0;
-
- for (i = 0; i < BCM2048_MAX_RDS_RT; i++) {
- if (!bdev->rds_info.rds_rt[i]) {
- if (cr && (i < cr)) {
- err = -EBUSY;
- goto unlock;
- }
- if (i < ce) {
- if (cr && (i >= cr))
- break;
- err = -EBUSY;
- goto unlock;
- }
- }
- }
-
- memcpy(data, data_buffer, sizeof(data_buffer));
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, j = 0;
- char data_buffer[BCM2048_MAX_RDS_PS + 1];
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- for (i = 0; i < BCM2048_MAX_RDS_PS; i++) {
- if (bdev->rds_info.rds_ps[i]) {
- data_buffer[j++] = bdev->rds_info.rds_ps[i];
- } else {
- if (i < (BCM2048_MAX_RDS_PS - 1)) {
- err = -EBUSY;
- goto unlock;
- }
- }
- }
-
- if (j <= BCM2048_MAX_RDS_PS)
- data_buffer[j] = 0;
-
- memcpy(data, data_buffer, sizeof(data_buffer));
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev)
-{
- int i, cnt = 0;
- u16 pi;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
- /* Block A match, only data without crc errors taken */
- if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) {
- pi = (bdev->rds_info.radio_text[i + 1] << 8) +
- bdev->rds_info.radio_text[i + 2];
-
- if (!bdev->rds_info.rds_pi) {
- bdev->rds_info.rds_pi = pi;
- return;
- }
- if (pi != bdev->rds_info.rds_pi) {
- cnt++;
- if (cnt > 3) {
- bdev->rds_info.rds_pi = pi;
- cnt = 0;
- }
- } else {
- cnt = 0;
- }
- }
- }
-}
-
-static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i)
-{
- return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK;
-}
-
-static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i,
- int index, int crc)
-{
- /* Good data will overwrite poor data */
- if (crc) {
- if (!bdev->rds_info.rds_rt[index])
- bdev->rds_info.rds_rt[index] =
- bdev->rds_info.radio_text[i + 1];
- if (!bdev->rds_info.rds_rt[index + 1])
- bdev->rds_info.rds_rt[index + 1] =
- bdev->rds_info.radio_text[i + 2];
- } else {
- bdev->rds_info.rds_rt[index] =
- bdev->rds_info.radio_text[i + 1];
- bdev->rds_info.rds_rt[index + 1] =
- bdev->rds_info.radio_text[i + 2];
- }
-}
-
-static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i)
-{
- int crc, rt_id, rt_group_b, rt_ab, index = 0;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return -EIO;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_B) {
- rt_id = bdev->rds_info.radio_text[i + 1] &
- BCM2048_RDS_BLOCK_MASK;
- rt_group_b = bdev->rds_info.radio_text[i + 1] &
- BCM2048_RDS_GROUP_AB_MASK;
- rt_ab = bdev->rds_info.radio_text[i + 2] &
- BCM2048_RDS_RT_AB_MASK;
-
- if (rt_group_b != bdev->rds_info.rds_rt_group_b) {
- memset(bdev->rds_info.rds_rt, 0,
- sizeof(bdev->rds_info.rds_rt));
- bdev->rds_info.rds_rt_group_b = rt_group_b;
- }
-
- if (rt_id == BCM2048_RDS_RT) {
- /* A to B (or vice versa) means: clear screen */
- if (rt_ab != bdev->rds_info.rds_rt_ab) {
- memset(bdev->rds_info.rds_rt, 0,
- sizeof(bdev->rds_info.rds_rt));
- bdev->rds_info.rds_rt_ab = rt_ab;
- }
-
- index = bdev->rds_info.radio_text[i + 2] &
- BCM2048_RDS_RT_INDEX;
-
- if (bdev->rds_info.rds_rt_group_b)
- index <<= 1;
- else
- index <<= 2;
-
- return index;
- }
- }
-
- return -EIO;
-}
-
-static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return 0;
-
- if ((index + 2) >= BCM2048_MAX_RDS_RT) {
- dev_err(&bdev->client->dev,
- "Incorrect index = %d\n", index);
- return 0;
- }
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_C) {
- if (bdev->rds_info.rds_rt_group_b)
- return 1;
- bcm2048_parse_rds_rt_block(bdev, i, index, crc);
- return 1;
- }
-
- return 0;
-}
-
-static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return;
-
- if ((index + 4) >= BCM2048_MAX_RDS_RT) {
- dev_err(&bdev->client->dev,
- "Incorrect index = %d\n", index);
- return;
- }
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_D)
- bcm2048_parse_rds_rt_block(bdev, i, index + 2, crc);
-}
-
-static void bcm2048_parse_rds_rt(struct bcm2048_device *bdev)
-{
- int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
- if (match_b) {
- match_b = 0;
- index = bcm2048_parse_rt_match_b(bdev, i);
- if (index >= 0 && index <= (BCM2048_MAX_RDS_RT - 5))
- match_c = 1;
- continue;
- } else if (match_c) {
- match_c = 0;
- if (bcm2048_parse_rt_match_c(bdev, i, index))
- match_d = 1;
- continue;
- } else if (match_d) {
- match_d = 0;
- bcm2048_parse_rt_match_d(bdev, i, index);
- continue;
- }
-
- /* Skip erroneous blocks due to messed up A block altogether */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_A) {
- crc = bcm2048_rds_block_crc(bdev, i);
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- continue;
- /* Synchronize to a good RDS PI */
- if (((bdev->rds_info.radio_text[i + 1] << 8) +
- bdev->rds_info.radio_text[i + 2]) ==
- bdev->rds_info.rds_pi)
- match_b = 1;
- }
- }
-}
-
-static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i,
- int index, int crc)
-{
- /* Good data will overwrite poor data */
- if (crc) {
- if (!bdev->rds_info.rds_ps[index])
- bdev->rds_info.rds_ps[index] =
- bdev->rds_info.radio_text[i + 1];
- if (!bdev->rds_info.rds_ps[index + 1])
- bdev->rds_info.rds_ps[index + 1] =
- bdev->rds_info.radio_text[i + 2];
- } else {
- bdev->rds_info.rds_ps[index] =
- bdev->rds_info.radio_text[i + 1];
- bdev->rds_info.rds_ps[index + 1] =
- bdev->rds_info.radio_text[i + 2];
- }
-}
-
-static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return 0;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_C)
- return 1;
-
- return 0;
-}
-
-static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i,
- int index)
-{
- int crc;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return;
-
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_D)
- bcm2048_parse_rds_ps_block(bdev, i, index, crc);
-}
-
-static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i)
-{
- int crc, index, ps_id, ps_group;
-
- crc = bcm2048_rds_block_crc(bdev, i);
-
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- return -EIO;
-
- /* Block B Radio PS match */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_B) {
- ps_id = bdev->rds_info.radio_text[i + 1] &
- BCM2048_RDS_BLOCK_MASK;
- ps_group = bdev->rds_info.radio_text[i + 1] &
- BCM2048_RDS_GROUP_AB_MASK;
-
- /*
- * Poor RSSI will lead to RDS data corruption,
- * so use 3 identical sequential values before accepting major changes
- */
- if (ps_group != bdev->rds_info.rds_ps_group) {
- if (crc == BCM2048_RDS_CRC_NONE) {
- bdev->rds_info.rds_ps_group_cnt++;
- if (bdev->rds_info.rds_ps_group_cnt > 2) {
- bdev->rds_info.rds_ps_group = ps_group;
- bdev->rds_info.rds_ps_group_cnt = 0;
- dev_err(&bdev->client->dev,
- "RDS PS Group change!\n");
- } else {
- return -EIO;
- }
- } else {
- bdev->rds_info.rds_ps_group_cnt = 0;
- }
- }
-
- if (ps_id == BCM2048_RDS_PS) {
- index = bdev->rds_info.radio_text[i + 2] &
- BCM2048_RDS_PS_INDEX;
- index <<= 1;
- return index;
- }
- }
-
- return -EIO;
-}
-
-static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev)
-{
- int i, index = 0, crc, match_b = 0, match_c = 0, match_d = 0;
-
- for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) {
- if (match_b) {
- match_b = 0;
- index = bcm2048_parse_ps_match_b(bdev, i);
- if (index >= 0 && index < (BCM2048_MAX_RDS_PS - 1))
- match_c = 1;
- continue;
- } else if (match_c) {
- match_c = 0;
- if (bcm2048_parse_ps_match_c(bdev, i, index))
- match_d = 1;
- continue;
- } else if (match_d) {
- match_d = 0;
- bcm2048_parse_ps_match_d(bdev, i, index);
- continue;
- }
-
- /* Skip erroneous blocks due to messed up A block altogether */
- if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
- BCM2048_RDS_BLOCK_A) {
- crc = bcm2048_rds_block_crc(bdev, i);
- if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
- continue;
- /* Synchronize to a good RDS PI */
- if (((bdev->rds_info.radio_text[i + 1] << 8) +
- bdev->rds_info.radio_text[i + 2]) ==
- bdev->rds_info.rds_pi)
- match_b = 1;
- }
- }
-}
-
-static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev)
-{
- int err;
-
- mutex_lock(&bdev->mutex);
-
- err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA,
- bdev->rds_info.radio_text, bdev->fifo_size);
- if (err != 2) {
- dev_err(&bdev->client->dev, "RDS Read problem\n");
- mutex_unlock(&bdev->mutex);
- return;
- }
-
- bdev->rds_info.text_len = bdev->fifo_size;
-
- bcm2048_parse_rds_pi(bdev);
- bcm2048_parse_rds_rt(bdev);
- bcm2048_parse_rds_ps(bdev);
-
- mutex_unlock(&bdev->mutex);
-
- wake_up_interruptible(&bdev->read_queue);
-}
-
-static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data)
-{
- int err = 0, i, p = 0;
- char *data_buffer;
-
- mutex_lock(&bdev->mutex);
-
- if (!bdev->rds_info.text_len) {
- err = -EINVAL;
- goto unlock;
- }
-
- data_buffer = kcalloc(BCM2048_MAX_RDS_RADIO_TEXT, 5, GFP_KERNEL);
- if (!data_buffer) {
- err = -ENOMEM;
- goto unlock;
- }
-
- for (i = 0; i < bdev->rds_info.text_len; i++) {
- p += sprintf(data_buffer + p, "%x ",
- bdev->rds_info.radio_text[i]);
- }
-
- memcpy(data, data_buffer, p);
- kfree(data_buffer);
-
-unlock:
- mutex_unlock(&bdev->mutex);
- return err;
-}
-
-/*
- * BCM2048 default initialization sequence
- */
-static int bcm2048_init(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC);
- if (err < 0)
- goto exit;
-
- err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT |
- BCM2048_DAC_OUTPUT_RIGHT);
-
-exit:
- return err;
-}
-
-/*
- * BCM2048 default deinitialization sequence
- */
-static int bcm2048_deinit(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_audio_route(bdev, 0);
- if (err < 0)
- return err;
-
- err = bcm2048_set_dac_output(bdev, 0);
- if (err < 0)
- return err;
-
- return bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-}
-
-/*
- * BCM2048 probe sequence
- */
-static int bcm2048_probe(struct bcm2048_device *bdev)
-{
- int err;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_checkrev(bdev);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_fm_search_rssi_threshold(bdev,
- BCM2048_DEFAULT_RSSI_THRESHOLD);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_get_rds_wline(bdev);
- if (err < BCM2048_DEFAULT_RDS_WLINE)
- err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE);
- if (err < 0)
- goto unlock;
-
- err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-
- init_waitqueue_head(&bdev->read_queue);
- bdev->rds_data_available = 0;
- bdev->rd_index = 0;
- bdev->users = 0;
-
-unlock:
- return err;
-}
-
-/*
- * BCM2048 workqueue handler
- */
-static void bcm2048_work(struct work_struct *work)
-{
- struct bcm2048_device *bdev;
- u8 flag_lsb = 0, flag_msb = 0, flags;
-
- bdev = container_of(work, struct bcm2048_device, work);
- bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb);
- bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb);
-
- if (flag_lsb & (BCM2048_FM_FLAG_SEARCH_TUNE_FINISHED |
- BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)) {
- if (flag_lsb & BCM2048_FM_FLAG_SEARCH_TUNE_FAIL)
- bdev->scan_state = BCM2048_SCAN_FAIL;
- else
- bdev->scan_state = BCM2048_SCAN_OK;
-
- complete(&bdev->compl);
- }
-
- if (flag_msb & BCM2048_RDS_FLAG_FIFO_WLINE) {
- bcm2048_rds_fifo_receive(bdev);
- if (bdev->rds_state) {
- flags = BCM2048_RDS_FLAG_FIFO_WLINE;
- bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1,
- flags);
- }
- bdev->rds_data_available = 1;
- bdev->rd_index = 0; /* new data, new start */
- }
-}
-
-/*
- * BCM2048 interrupt handler
- */
-static irqreturn_t bcm2048_handler(int irq, void *dev)
-{
- struct bcm2048_device *bdev = dev;
-
- dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n");
- if (bdev->power_state)
- schedule_work(&bdev->work);
-
- return IRQ_HANDLED;
-}
-
-/*
- * BCM2048 sysfs interface definitions
- */
-#define property_write(prop, type, mask, check) \
-static ssize_t bcm2048_##prop##_write(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, \
- size_t count) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- type value; \
- int err; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- if (sscanf(buf, mask, &value) != 1) \
- return -EINVAL; \
- \
- if (check) \
- return -EDOM; \
- \
- err = bcm2048_set_##prop(bdev, value); \
- \
- return err < 0 ? err : count; \
-}
-
-#define property_read(prop, mask) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- int value; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- value = bcm2048_get_##prop(bdev); \
- \
- if (value >= 0) \
- value = sprintf(buf, mask "\n", value); \
- \
- return value; \
-}
-
-#define property_signed_read(prop, size, mask) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- size value; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- value = bcm2048_get_##prop(bdev); \
- \
- return sprintf(buf, mask "\n", value); \
-}
-
-#define DEFINE_SYSFS_PROPERTY(prop, prop_type, mask, check) \
-property_write(prop, prop_type, mask, check) \
-property_read(prop, mask) \
-
-#define property_str_read(prop, size) \
-static ssize_t bcm2048_##prop##_read(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct bcm2048_device *bdev = dev_get_drvdata(dev); \
- int count; \
- u8 *out; \
- \
- if (!bdev) \
- return -ENODEV; \
- \
- out = kzalloc((size) + 1, GFP_KERNEL); \
- if (!out) \
- return -ENOMEM; \
- \
- bcm2048_get_##prop(bdev, out); \
- count = sprintf(buf, "%s\n", out); \
- \
- kfree(out); \
- \
- return count; \
-}
-
-DEFINE_SYSFS_PROPERTY(power_state, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(mute, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(audio_route, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(dac_output, unsigned int, "%u", 0)
-
-DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned int, "%u", value > 3)
-
-DEFINE_SYSFS_PROPERTY(rds, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_wline, unsigned int, "%u", 0)
-property_read(rds_pi, "%x")
-property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
-property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
-
-property_read(fm_rds_flags, "%u")
-property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT * 5)
-
-property_read(region_bottom_frequency, "%u")
-property_read(region_top_frequency, "%u")
-property_signed_read(fm_carrier_error, int, "%d")
-property_signed_read(fm_rssi, int, "%d")
-DEFINE_SYSFS_PROPERTY(region, unsigned int, "%u", 0)
-
-static struct device_attribute attrs[] = {
- __ATTR(power_state, 0644, bcm2048_power_state_read,
- bcm2048_power_state_write),
- __ATTR(mute, 0644, bcm2048_mute_read,
- bcm2048_mute_write),
- __ATTR(audio_route, 0644, bcm2048_audio_route_read,
- bcm2048_audio_route_write),
- __ATTR(dac_output, 0644, bcm2048_dac_output_read,
- bcm2048_dac_output_write),
- __ATTR(fm_hi_lo_injection, 0644,
- bcm2048_fm_hi_lo_injection_read,
- bcm2048_fm_hi_lo_injection_write),
- __ATTR(fm_frequency, 0644, bcm2048_fm_frequency_read,
- bcm2048_fm_frequency_write),
- __ATTR(fm_af_frequency, 0644,
- bcm2048_fm_af_frequency_read,
- bcm2048_fm_af_frequency_write),
- __ATTR(fm_deemphasis, 0644, bcm2048_fm_deemphasis_read,
- bcm2048_fm_deemphasis_write),
- __ATTR(fm_rds_mask, 0644, bcm2048_fm_rds_mask_read,
- bcm2048_fm_rds_mask_write),
- __ATTR(fm_best_tune_mode, 0644,
- bcm2048_fm_best_tune_mode_read,
- bcm2048_fm_best_tune_mode_write),
- __ATTR(fm_search_rssi_threshold, 0644,
- bcm2048_fm_search_rssi_threshold_read,
- bcm2048_fm_search_rssi_threshold_write),
- __ATTR(fm_search_mode_direction, 0644,
- bcm2048_fm_search_mode_direction_read,
- bcm2048_fm_search_mode_direction_write),
- __ATTR(fm_search_tune_mode, 0644,
- bcm2048_fm_search_tune_mode_read,
- bcm2048_fm_search_tune_mode_write),
- __ATTR(rds, 0644, bcm2048_rds_read,
- bcm2048_rds_write),
- __ATTR(rds_b_block_mask, 0644,
- bcm2048_rds_b_block_mask_read,
- bcm2048_rds_b_block_mask_write),
- __ATTR(rds_b_block_match, 0644,
- bcm2048_rds_b_block_match_read,
- bcm2048_rds_b_block_match_write),
- __ATTR(rds_pi_mask, 0644, bcm2048_rds_pi_mask_read,
- bcm2048_rds_pi_mask_write),
- __ATTR(rds_pi_match, 0644, bcm2048_rds_pi_match_read,
- bcm2048_rds_pi_match_write),
- __ATTR(rds_wline, 0644, bcm2048_rds_wline_read,
- bcm2048_rds_wline_write),
- __ATTR(rds_pi, 0444, bcm2048_rds_pi_read, NULL),
- __ATTR(rds_rt, 0444, bcm2048_rds_rt_read, NULL),
- __ATTR(rds_ps, 0444, bcm2048_rds_ps_read, NULL),
- __ATTR(fm_rds_flags, 0444, bcm2048_fm_rds_flags_read, NULL),
- __ATTR(region_bottom_frequency, 0444,
- bcm2048_region_bottom_frequency_read, NULL),
- __ATTR(region_top_frequency, 0444,
- bcm2048_region_top_frequency_read, NULL),
- __ATTR(fm_carrier_error, 0444,
- bcm2048_fm_carrier_error_read, NULL),
- __ATTR(fm_rssi, 0444,
- bcm2048_fm_rssi_read, NULL),
- __ATTR(region, 0644, bcm2048_region_read,
- bcm2048_region_write),
- __ATTR(rds_data, 0444, bcm2048_rds_data_read, NULL),
-};
-
-static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
- int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- device_remove_file(&bdev->client->dev, &attrs[i]);
-
- return 0;
-}
-
-static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++) {
- if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) {
- dev_err(&bdev->client->dev,
- "could not register sysfs entry\n");
- err = -EBUSY;
- bcm2048_sysfs_unregister_properties(bdev, i);
- break;
- }
- }
-
- return err;
-}
-
-static int bcm2048_fops_open(struct file *file)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
-
- bdev->users++;
- bdev->rd_index = 0;
- bdev->rds_data_available = 0;
-
- return 0;
-}
-
-static int bcm2048_fops_release(struct file *file)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
-
- bdev->users--;
-
- return 0;
-}
-
-static __poll_t bcm2048_fops_poll(struct file *file,
- struct poll_table_struct *pts)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
- __poll_t retval = 0;
-
- poll_wait(file, &bdev->read_queue, pts);
-
- if (bdev->rds_data_available)
- retval = EPOLLIN | EPOLLRDNORM;
-
- return retval;
-}
-
-static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct bcm2048_device *bdev = video_drvdata(file);
- int i;
- int retval = 0;
-
- /* we return at least 3 bytes, one block */
- count = (count / 3) * 3; /* only multiples of 3 */
- if (count < 3)
- return -ENOBUFS;
-
- while (!bdev->rds_data_available) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EWOULDBLOCK;
- goto done;
- }
- /* interruptible_sleep_on(&bdev->read_queue); */
- if (wait_event_interruptible(bdev->read_queue,
- bdev->rds_data_available) < 0) {
- retval = -EINTR;
- goto done;
- }
- }
-
- mutex_lock(&bdev->mutex);
- /* copy data to userspace */
- i = bdev->fifo_size - bdev->rd_index;
- if (count > i)
- count = (i / 3) * 3;
-
- i = 0;
- while (i < count) {
- unsigned char tmpbuf[3];
-
- tmpbuf[i] = bdev->rds_info.radio_text[bdev->rd_index + i + 2];
- tmpbuf[i + 1] =
- bdev->rds_info.radio_text[bdev->rd_index + i + 1];
- tmpbuf[i + 2] =
- (bdev->rds_info.radio_text[bdev->rd_index + i] &
- 0xf0) >> 4;
- if ((bdev->rds_info.radio_text[bdev->rd_index + i] &
- BCM2048_RDS_CRC_MASK) == BCM2048_RDS_CRC_UNRECOVARABLE)
- tmpbuf[i + 2] |= 0x80;
- if (copy_to_user(buf + i, tmpbuf, 3)) {
- retval = -EFAULT;
- break;
- }
- i += 3;
- }
-
- bdev->rd_index += i;
- if (bdev->rd_index >= bdev->fifo_size)
- bdev->rds_data_available = 0;
-
- mutex_unlock(&bdev->mutex);
- if (retval == 0)
- retval = i;
-
-done:
- return retval;
-}
-
-/*
- * bcm2048_fops - file operations interface
- */
-static const struct v4l2_file_operations bcm2048_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
- /* for RDS read support */
- .open = bcm2048_fops_open,
- .release = bcm2048_fops_release,
- .read = bcm2048_fops_read,
- .poll = bcm2048_fops_poll
-};
-
-/*
- * Video4Linux Interface
- */
-static struct v4l2_queryctrl bcm2048_v4l2_queryctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_BALANCE,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_BASS,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_TREBLE,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
- {
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_AUDIO_LOUDNESS,
- .flags = V4L2_CTRL_FLAG_DISABLED,
- },
-};
-
-static int bcm2048_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
-
- strscpy(capability->driver, BCM2048_DRIVER_NAME,
- sizeof(capability->driver));
- strscpy(capability->card, BCM2048_DRIVER_CARD,
- sizeof(capability->card));
- snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr);
- return 0;
-}
-
-static int bcm2048_vidioc_g_input(struct file *filp, void *priv,
- unsigned int *i)
-{
- *i = 0;
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_input(struct file *filp, void *priv,
- unsigned int i)
-{
- if (i)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bcm2048_v4l2_queryctrl); i++) {
- if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) {
- *qc = bcm2048_v4l2_queryctrl[i];
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int bcm2048_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
-
- if (!bdev)
- return -ENODEV;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- err = bcm2048_get_mute(bdev);
- if (err >= 0)
- ctrl->value = err;
- break;
- }
-
- return err;
-}
-
-static int bcm2048_vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
-
- if (!bdev)
- return -ENODEV;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value) {
- if (bdev->power_state) {
- err = bcm2048_set_mute(bdev, ctrl->value);
- err |= bcm2048_deinit(bdev);
- }
- } else {
- if (!bdev->power_state) {
- err = bcm2048_init(bdev);
- err |= bcm2048_set_mute(bdev, ctrl->value);
- }
- }
- break;
- }
-
- return err;
-}
-
-static int bcm2048_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- if (audio->index > 1)
- return -EINVAL;
-
- strscpy(audio->name, "Radio", sizeof(audio->name));
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- s8 f_error;
- s8 rssi;
-
- if (!bdev)
- return -ENODEV;
-
- if (tuner->index > 0)
- return -EINVAL;
-
- strscpy(tuner->name, "FM Receiver", sizeof(tuner->name));
- tuner->type = V4L2_TUNER_RADIO;
- tuner->rangelow =
- dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev));
- tuner->rangehigh =
- dev_to_v4l2(bcm2048_get_region_top_frequency(bdev));
- tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
- tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
- tuner->audmode = V4L2_TUNER_MODE_STEREO;
- tuner->afc = 0;
- if (bdev->power_state) {
- /*
- * Report frequencies with high carrier errors to have zero
- * signal level
- */
- f_error = bcm2048_get_fm_carrier_error(bdev);
- if (f_error < BCM2048_FREQ_ERROR_FLOOR ||
- f_error > BCM2048_FREQ_ERROR_ROOF) {
- tuner->signal = 0;
- } else {
- /*
- * RSSI level -60 dB is defined to report full
- * signal strength
- */
- rssi = bcm2048_get_fm_rssi(bdev);
- if (rssi >= BCM2048_RSSI_LEVEL_BASE) {
- tuner->signal = 0xFFFF;
- } else if (rssi > BCM2048_RSSI_LEVEL_ROOF) {
- tuner->signal = (rssi +
- BCM2048_RSSI_LEVEL_ROOF_NEG)
- * BCM2048_SIGNAL_MULTIPLIER;
- } else {
- tuner->signal = 0;
- }
- }
- } else {
- tuner->signal = 0;
- }
-
- return 0;
-}
-
-static int bcm2048_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
-
- if (!bdev)
- return -ENODEV;
-
- if (tuner->index > 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int bcm2048_vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err = 0;
- int f;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- freq->type = V4L2_TUNER_RADIO;
- f = bcm2048_get_fm_frequency(bdev);
-
- if (f < 0)
- err = f;
- else
- freq->frequency = dev_to_v4l2(f);
-
- return err;
-}
-
-static int bcm2048_vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err;
-
- if (freq->type != V4L2_TUNER_RADIO)
- return -EINVAL;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency));
- err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE);
-
- return err;
-}
-
-static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file));
- int err;
-
- if (!bdev->power_state)
- return -ENODEV;
-
- if ((seek->tuner != 0) || (seek->type != V4L2_TUNER_RADIO))
- return -EINVAL;
-
- err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward);
- err |= bcm2048_set_fm_search_tune_mode(bdev,
- BCM2048_FM_AUTO_SEARCH_MODE);
-
- return err;
-}
-
-static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
- .vidioc_querycap = bcm2048_vidioc_querycap,
- .vidioc_g_input = bcm2048_vidioc_g_input,
- .vidioc_s_input = bcm2048_vidioc_s_input,
- .vidioc_queryctrl = bcm2048_vidioc_queryctrl,
- .vidioc_g_ctrl = bcm2048_vidioc_g_ctrl,
- .vidioc_s_ctrl = bcm2048_vidioc_s_ctrl,
- .vidioc_g_audio = bcm2048_vidioc_g_audio,
- .vidioc_s_audio = bcm2048_vidioc_s_audio,
- .vidioc_g_tuner = bcm2048_vidioc_g_tuner,
- .vidioc_s_tuner = bcm2048_vidioc_s_tuner,
- .vidioc_g_frequency = bcm2048_vidioc_g_frequency,
- .vidioc_s_frequency = bcm2048_vidioc_s_frequency,
- .vidioc_s_hw_freq_seek = bcm2048_vidioc_s_hw_freq_seek,
-};
-
-/*
- * bcm2048_viddev_template - video device interface
- */
-static const struct video_device bcm2048_viddev_template = {
- .fops = &bcm2048_fops,
- .name = BCM2048_DRIVER_NAME,
- .release = video_device_release_empty,
- .ioctl_ops = &bcm2048_ioctl_ops,
- .device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
- V4L2_CAP_HW_FREQ_SEEK,
-};
-
-/*
- * I2C driver interface
- */
-static int bcm2048_i2c_driver_probe(struct i2c_client *client)
-{
- struct bcm2048_device *bdev;
- int err;
-
- bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
- if (!bdev) {
- err = -ENOMEM;
- goto exit;
- }
-
- bdev->client = client;
- i2c_set_clientdata(client, bdev);
- mutex_init(&bdev->mutex);
- init_completion(&bdev->compl);
- INIT_WORK(&bdev->work, bcm2048_work);
-
- if (client->irq) {
- err = request_irq(client->irq,
- bcm2048_handler, IRQF_TRIGGER_FALLING,
- client->name, bdev);
- if (err < 0) {
- dev_err(&client->dev, "Could not request IRQ\n");
- goto free_bdev;
- }
- dev_dbg(&client->dev, "IRQ requested.\n");
- } else {
- dev_dbg(&client->dev, "IRQ not configured. Using timeouts.\n");
- }
-
- bdev->videodev = bcm2048_viddev_template;
- video_set_drvdata(&bdev->videodev, bdev);
- if (video_register_device(&bdev->videodev, VFL_TYPE_RADIO, radio_nr)) {
- dev_dbg(&client->dev, "Could not register video device.\n");
- err = -EIO;
- goto free_irq;
- }
-
- err = bcm2048_sysfs_register_properties(bdev);
- if (err < 0) {
- dev_dbg(&client->dev, "Could not register sysfs interface.\n");
- goto free_registration;
- }
-
- err = bcm2048_probe(bdev);
- if (err < 0) {
- dev_dbg(&client->dev, "Failed to probe device information.\n");
- goto free_sysfs;
- }
-
- return 0;
-
-free_sysfs:
- bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
-free_registration:
- video_unregister_device(&bdev->videodev);
-free_irq:
- if (client->irq)
- free_irq(client->irq, bdev);
-free_bdev:
- i2c_set_clientdata(client, NULL);
- kfree(bdev);
-exit:
- return err;
-}
-
-static int bcm2048_i2c_driver_remove(struct i2c_client *client)
-{
- struct bcm2048_device *bdev = i2c_get_clientdata(client);
-
- if (!client->adapter)
- return -ENODEV;
-
- if (bdev) {
- bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs));
- video_unregister_device(&bdev->videodev);
-
- if (bdev->power_state)
- bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
-
- if (client->irq > 0)
- free_irq(client->irq, bdev);
-
- cancel_work_sync(&bdev->work);
-
- kfree(bdev);
- }
-
- return 0;
-}
-
-/*
- * bcm2048_i2c_driver - i2c driver interface
- */
-static const struct i2c_device_id bcm2048_id[] = {
- { "bcm2048", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, bcm2048_id);
-
-static struct i2c_driver bcm2048_i2c_driver = {
- .driver = {
- .name = BCM2048_DRIVER_NAME,
- },
- .probe_new = bcm2048_i2c_driver_probe,
- .remove = bcm2048_i2c_driver_remove,
- .id_table = bcm2048_id,
-};
-
-module_i2c_driver(bcm2048_i2c_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(BCM2048_DRIVER_AUTHOR);
-MODULE_DESCRIPTION(BCM2048_DRIVER_DESC);
-MODULE_VERSION("0.0.2");
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
deleted file mode 100644
index 22887a075257..000000000000
--- a/drivers/staging/media/bcm2048/radio-bcm2048.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * drivers/staging/media/radio-bcm2048.h
- *
- * Property and command definitions for bcm2048 radio receiver chip.
- *
- * Copyright (C) Nokia Corporation
- * Contact: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef BCM2048_H
-#define BCM2048_H
-
-#define BCM2048_NAME "bcm2048"
-#define BCM2048_I2C_ADDR 0x22
-
-#endif /* ifndef BCM2048_H */
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
deleted file mode 100644
index 94bf6746c03f..000000000000
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config VIDEO_DM365_VPFE
- tristate "DM365 VPFE Media Controller Capture Driver"
- depends on VIDEO_V4L2
- depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
- depends on VIDEO_V4L2_SUBDEV_API
- depends on VIDEO_DAVINCI_VPBE_DISPLAY
- select VIDEOBUF2_DMA_CONTIG
- help
- Support for DM365 VPFE based Media Controller Capture driver.
-
- To compile this driver as a module, choose M here: the
- module will be called vpfe-mc-capture.
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
deleted file mode 100644
index 0ae8c5014f74..000000000000
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o
-
-davinci-vfpe-objs := \
- dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
- dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
-
-# Allow building it with COMPILE_TEST on other archs
-ifndef CONFIG_ARCH_DAVINCI
-ccflags-y += -I $(srctree)/arch/arm/mach-davinci/include/
-endif
diff --git a/drivers/staging/media/davinci_vpfe/TODO b/drivers/staging/media/davinci_vpfe/TODO
deleted file mode 100644
index cc8bd9306f2a..000000000000
--- a/drivers/staging/media/davinci_vpfe/TODO
+++ /dev/null
@@ -1,38 +0,0 @@
-TODO (general):
-==================================
-
-- User space interface refinement
- - Controls should be used when possible rather than private ioctl
- - No enums should be used
- - Use of MC and V4L2 subdev APIs when applicable
- - Single interface header might suffice
- - Current interface forces to configure everything at once
-- Get rid of the dm365_ipipe_hw.[ch] layer
-- Active external sub-devices defined by link configuration; no strcmp
- needed
-- More generic platform data (i2c adapters)
-- The driver should have no knowledge of possible external subdevs; see
- struct vpfe_subdev_id
-- Some of the hardware control should be refactored
-- Check proper serialisation (through mutexes and spinlocks)
-- Names that are visible in kernel global namespace should have a common
- prefix (or a few)
-- While replacing the older driver in the media folder, provide a compatibility
-  layer and compatibility tests that guarantee (using libv4l's LD_PRELOAD
-  approach) there is no regression for users of the older driver.
-- make it independent of arch-specific APIs (mach/mux.h).
-
-Building of uImage and Applications:
-==================================
-
-As of now, since the interface will undergo a few changes, all the include
-files are present in staging itself. To build for DM365, follow the steps below:
-
-- copy vpfe.h from drivers/staging/media/davinci_vpfe/ to
- include/media/davinci/ folder for building the uImage.
-- copy davinci_vpfe_user.h from drivers/staging/media/davinci_vpfe/ to
-  include/uapi/linux/davinci_vpfe.h, and add an entry in Kbuild (required
-  for building the application).
-- copy dm365_ipipeif_user.h from drivers/staging/media/davinci_vpfe/ to
-  include/uapi/linux/dm365_ipipeif.h, and add an entry in Kbuild (required
-  for building the application).
diff --git a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt b/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
deleted file mode 100644
index a1e91778aa9b..000000000000
--- a/drivers/staging/media/davinci_vpfe/davinci-vpfe-mc.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-Davinci Video processing Front End (VPFE) driver
-
-Copyright (C) 2012 Texas Instruments Inc
-
-Contacts: Manjunath Hadli <manjunath.hadli@ti.com>
- Prabhakar Lad <prabhakar.lad@ti.com>
-
-
-Introduction
-============
-
-This file documents the Texas Instruments Davinci Video processing Front End
-(VPFE) driver located under drivers/media/platform/davinci. The original driver
-exists for Davinci VPFE, which is now being converted to the Media Controller
-framework.
-
-Currently the driver has been successfully used on the following
-versions of Davinci:
-
- DM365/DM368
-
-The driver implements V4L2, Media controller and v4l2_subdev interfaces. Sensor,
-lens and flash drivers using the v4l2_subdev interface in the kernel are
-supported.
-
-
-Split to subdevs
-================
-
-The Davinci VPFE is split into V4L2 subdevs, each of the blocks inside the VPFE
-having one subdev to represent it. Each of the subdevs provides a V4L2 subdev
-interface to userspace.
-
- DAVINCI ISIF
- DAVINCI IPIPEIF
- DAVINCI IPIPE
- DAVINCI CROP RESIZER
- DAVINCI RESIZER A
- DAVINCI RESIZER B
-
-Each possible link in the VPFE is modelled by a link in the Media controller
-interface. For an example program see [1].
-
-
-ISIF, IPIPE, and RESIZER block IOCTLs
-======================================
-
-The Davinci Video processing Front End (VPFE) driver supports standard V4L2
-IOCTLs and controls where possible and practical. Many of the functions provided
-by the VPFE, however, do not fall under the standard IOCTLs.
-
-In general, there is a private ioctl for configuring each of the blocks
-containing hardware-dependent functions.
-
-The following private IOCTLs are supported:
-
- VIDIOC_VPFE_ISIF_[S/G]_RAW_PARAMS
- VIDIOC_VPFE_IPIPE_[S/G]_CONFIG
- VIDIOC_VPFE_RSZ_[S/G]_CONFIG
-
-The parameter structures used by these ioctls are described in
-include/uapi/linux/davinci_vpfe.h.
-
-The VIDIOC_VPFE_ISIF_S_RAW_PARAMS, VIDIOC_VPFE_IPIPE_S_CONFIG and
-VIDIOC_VPFE_RSZ_S_CONFIG are used to configure, enable and disable functions in
-the isif, ipipe and resizer blocks respectively. Each of these IOCTLs controls
-several functions in its block. The VIDIOC_VPFE_ISIF_S_RAW_PARAMS IOCTL
-accepts a pointer to struct vpfe_isif_raw_config as its argument,
-VIDIOC_VPFE_IPIPE_S_CONFIG accepts a pointer to struct vpfe_ipipe_config, and
-VIDIOC_VPFE_RSZ_S_CONFIG accepts a pointer to struct vpfe_rsz_config.
-Similarly, VIDIOC_VPFE_ISIF_G_RAW_PARAMS, VIDIOC_VPFE_IPIPE_G_CONFIG
-and VIDIOC_VPFE_RSZ_G_CONFIG are used to get the current configuration set in
-the isif, ipipe and resizer blocks respectively.
-
-The detailed functions of the VPFE itself related to a given VPFE block are
-described in the Technical Reference Manuals (TRMs) --- see the end of the
-document for those.
-
-
-IPIPEIF block IOCTLs
-======================================
-
-The following private IOCTLs are supported:
-
- VIDIOC_VPFE_IPIPEIF_[S/G]_CONFIG
-
-The parameter structures used by these ioctls are described in
-include/uapi/linux/dm365_ipipeif.h
-
-The VIDIOC_VPFE_IPIPEIF_S_CONFIG is used to configure the ipipeif
-hardware block. The VIDIOC_VPFE_IPIPEIF_S_CONFIG and
-VIDIOC_VPFE_IPIPEIF_G_CONFIG IOCTLs accept a pointer to struct ipipeif_params
-as their argument.
-
-
-VPFE Operating Modes
-==========================================
-
-a: Continuous Modes
-------------------------
-
-1: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> SDRAM
-
-2: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
- |
- <--------------------<----------------<---------------------<---|
- |
- V
- DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
-
-3: tvp514x/tvp7002/mt9p031---> DAVINCI ISIF---> DAVINCI IPIPEIF--->|
- |
- <--------------------<----------------<---------------------<---|
- |
- V
- DAVINCI IPIPE---> DAVINCI CROP RESIZER--->DAVINCI RESIZER [A/B]---> SDRAM
-
-b: Single Shot Modes
-------------------------
-
-1: SDRAM---> DAVINCI IPIPEIF---> DAVINCI IPIPE---> DAVINCI CROP RESIZER--->|
- |
- <----------------<----------------<------------------<---------------<--|
- |
- V
-DAVINCI RESIZER [A/B]---> SDRAM
-
-2: SDRAM---> DAVINCI IPIPEIF---> DAVINCI CROP RESIZER--->|
- |
- <----------------<----------------<---------------<---|
- |
- V
-DAVINCI RESIZER [A/B]---> SDRAM
-
-
-Technical reference manuals (TRMs) and other documentation
-==========================================================
-
-Davinci DM365 TRM:
-<URL:http://www.ti.com/lit/ds/sprs457e/sprs457e.pdf>
-Referenced MARCH 2009-REVISED JUNE 2011
-
-Davinci DM368 TRM:
-<URL:http://www.ti.com/lit/ds/sprs668c/sprs668c.pdf>
-Referenced APRIL 2010-REVISED JUNE 2011
-
-Davinci Video Processing Front End (VPFE) DM36x
-<URL:http://www.ti.com/lit/ug/sprufg8c/sprufg8c.pdf>
-
-
-References
-==========
-
-[1] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
deleted file mode 100644
index 8d772029c91d..000000000000
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ /dev/null
@@ -1,1287 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_USER_H
-#define _DAVINCI_VPFE_USER_H
-
-#include <linux/types.h>
-#include <linux/videodev2.h>
-
-/*
- * Private IOCTL
- *
- * VIDIOC_VPFE_ISIF_S_RAW_PARAMS: Set raw params in isif
- * VIDIOC_VPFE_ISIF_G_RAW_PARAMS: Get raw params from isif
- * VIDIOC_VPFE_PRV_S_CONFIG: Set ipipe engine configuration
- * VIDIOC_VPFE_PRV_G_CONFIG: Get ipipe engine configuration
- * VIDIOC_VPFE_RSZ_S_CONFIG: Set resizer engine configuration
- * VIDIOC_VPFE_RSZ_G_CONFIG: Get resizer engine configuration
- */
-
-#define VIDIOC_VPFE_ISIF_S_RAW_PARAMS \
- _IOW('V', BASE_VIDIOC_PRIVATE + 1, struct vpfe_isif_raw_config)
-#define VIDIOC_VPFE_ISIF_G_RAW_PARAMS \
- _IOR('V', BASE_VIDIOC_PRIVATE + 2, struct vpfe_isif_raw_config)
-#define VIDIOC_VPFE_IPIPE_S_CONFIG \
- _IOWR('P', BASE_VIDIOC_PRIVATE + 3, struct vpfe_ipipe_config)
-#define VIDIOC_VPFE_IPIPE_G_CONFIG \
- _IOWR('P', BASE_VIDIOC_PRIVATE + 4, struct vpfe_ipipe_config)
-#define VIDIOC_VPFE_RSZ_S_CONFIG \
- _IOWR('R', BASE_VIDIOC_PRIVATE + 5, struct vpfe_rsz_config)
-#define VIDIOC_VPFE_RSZ_G_CONFIG \
- _IOWR('R', BASE_VIDIOC_PRIVATE + 6, struct vpfe_rsz_config)
-
-/*
- * Private Control's for ISIF
- */
-#define VPFE_ISIF_CID_CRGAIN (V4L2_CID_USER_BASE | 0xa001)
-#define VPFE_ISIF_CID_CGRGAIN (V4L2_CID_USER_BASE | 0xa002)
-#define VPFE_ISIF_CID_CGBGAIN (V4L2_CID_USER_BASE | 0xa003)
-#define VPFE_ISIF_CID_CBGAIN (V4L2_CID_USER_BASE | 0xa004)
-#define VPFE_ISIF_CID_GAIN_OFFSET (V4L2_CID_USER_BASE | 0xa005)
-
-/*
- * Private Control's for ISIF and IPIPEIF
- */
-#define VPFE_CID_DPCM_PREDICTOR (V4L2_CID_USER_BASE | 0xa006)
-
-/************************************************************************
- * Vertical Defect Correction parameters
- ***********************************************************************/
-
-/**
- * vertical defect correction methods
- */
-enum vpfe_isif_vdfc_corr_mode {
- /* Defect level subtraction. Just fed through if saturating */
- VPFE_ISIF_VDFC_NORMAL,
- /**
- * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
- * if data saturating
- */
- VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT,
- /* Horizontal interpolation (((i-2)+(i+2))/2) */
- VPFE_ISIF_VDFC_HORZ_INTERPOL
-};
-
-/**
- * Max Size of the Vertical Defect Correction table
- */
-#define VPFE_ISIF_VDFC_TABLE_SIZE 8
-
-/**
- * Values used for shifting up the vdfc defect level
- */
-enum vpfe_isif_vdfc_shift {
- /* No Shift */
- VPFE_ISIF_VDFC_NO_SHIFT,
- /* Shift by 1 bit */
- VPFE_ISIF_VDFC_SHIFT_1,
- /* Shift by 2 bit */
- VPFE_ISIF_VDFC_SHIFT_2,
- /* Shift by 3 bit */
- VPFE_ISIF_VDFC_SHIFT_3,
- /* Shift by 4 bit */
- VPFE_ISIF_VDFC_SHIFT_4
-};
-
-/**
- * Defect Correction (DFC) table entry
- */
-struct vpfe_isif_vdfc_entry {
- /* vertical position of defect */
- unsigned short pos_vert;
- /* horizontal position of defect */
- unsigned short pos_horz;
- /**
- * Defect level of Vertical line defect position. This is subtracted
- * from the data at the defect position
- */
- unsigned char level_at_pos;
- /**
- * Defect level of the pixels upper than the vertical line defect.
- * This is subtracted from the data
- */
- unsigned char level_up_pixels;
- /**
- * Defect level of the pixels lower than the vertical line defect.
- * This is subtracted from the data
- */
- unsigned char level_low_pixels;
-};
-
-/**
- * Structure for Defect Correction (DFC) parameter
- */
-struct vpfe_isif_dfc {
- /* enable vertical defect correction */
- unsigned char en;
- /* Correction methods */
- enum vpfe_isif_vdfc_corr_mode corr_mode;
- /**
- * 0 - whole line corrected, 1 - not
- * pixels upper than the defect
- */
- unsigned char corr_whole_line;
- /**
- * defect level shift value. level_at_pos, level_upper_pos,
- * and level_lower_pos can be shifted up by this value
- */
- enum vpfe_isif_vdfc_shift def_level_shift;
- /* defect saturation level */
- unsigned short def_sat_level;
- /* number of vertical defects. Max is VPFE_ISIF_VDFC_TABLE_SIZE */
- short num_vdefects;
- /* VDFC table ptr */
- struct vpfe_isif_vdfc_entry table[VPFE_ISIF_VDFC_TABLE_SIZE];
-};
-
-/************************************************************************
- * Digital/Black clamp or DC Subtract parameters
- ************************************************************************/
-/**
- * Horizontal Black Clamp modes
- */
-enum vpfe_isif_horz_bc_mode {
- /**
- * Horizontal clamp disabled. Only vertical clamp
- * value is subtracted
- */
- VPFE_ISIF_HORZ_BC_DISABLE,
- /**
- * Horizontal clamp value is calculated and subtracted
- * from image data along with vertical clamp value
- */
- VPFE_ISIF_HORZ_BC_CLAMP_CALC_ENABLED,
- /**
- * Horizontal clamp value calculated from previous image
- * is subtracted from image data along with vertical clamp
- * value. How the horizontal clamp value for the first image
- * is calculated in this case ???
- */
- VPFE_ISIF_HORZ_BC_CLAMP_NOT_UPDATED
-};
-
-/**
- * Base window selection for Horizontal Black Clamp calculations
- */
-enum vpfe_isif_horz_bc_base_win_sel {
- /* Select Most left window for bc calculation */
- VPFE_ISIF_SEL_MOST_LEFT_WIN,
-
- /* Select Most right window for bc calculation */
- VPFE_ISIF_SEL_MOST_RIGHT_WIN,
-};
-
-/* Size of window in horizontal direction for horizontal bc */
-enum vpfe_isif_horz_bc_sz_h {
- VPFE_ISIF_HORZ_BC_SZ_H_2PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_4PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_8PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_16PIXELS
-};
-
-/* Size of window in vertical direction for vertical bc */
-enum vpfe_isif_horz_bc_sz_v {
- VPFE_ISIF_HORZ_BC_SZ_H_32PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_64PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_128PIXELS,
- VPFE_ISIF_HORZ_BC_SZ_H_256PIXELS
-};
-
-/**
- * Structure for Horizontal Black Clamp config params
- */
-struct vpfe_isif_horz_bclamp {
- /* horizontal clamp mode */
- enum vpfe_isif_horz_bc_mode mode;
- /**
- * pixel value limit enable.
- * 0 - limit disabled
- * 1 - pixel value limited to 1023
- */
- unsigned char clamp_pix_limit;
- /**
- * Select most left or right window for clamp val
- * calculation
- */
- enum vpfe_isif_horz_bc_base_win_sel base_win_sel_calc;
- /* Window count per color for calculation. range 1-32 */
- unsigned char win_count_calc;
- /* Window start position - horizontal for calculation. 0 - 8191 */
- unsigned short win_start_h_calc;
- /* Window start position - vertical for calculation 0 - 8191 */
- unsigned short win_start_v_calc;
- /* Width of the sample window in pixels for calculation */
- enum vpfe_isif_horz_bc_sz_h win_h_sz_calc;
- /* Height of the sample window in pixels for calculation */
- enum vpfe_isif_horz_bc_sz_v win_v_sz_calc;
-};
-
-/**
- * Black Clamp vertical reset values
- */
-enum vpfe_isif_vert_bc_reset_val_sel {
- /* Reset value used is the clamp value calculated */
- VPFE_ISIF_VERT_BC_USE_HORZ_CLAMP_VAL,
- /* Reset value used is reset_clamp_val configured */
- VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL,
- /* No update, previous image value is used */
- VPFE_ISIF_VERT_BC_NO_UPDATE
-};
-
-enum vpfe_isif_vert_bc_sz_h {
- VPFE_ISIF_VERT_BC_SZ_H_2PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_4PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_8PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_16PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_32PIXELS,
- VPFE_ISIF_VERT_BC_SZ_H_64PIXELS
-};
-
-/**
- * Structure for Vertical Black Clamp configuration params
- */
-struct vpfe_isif_vert_bclamp {
- /* Reset value selection for vertical clamp calculation */
- enum vpfe_isif_vert_bc_reset_val_sel reset_val_sel;
- /* U12 value if reset_sel = ISIF_BC_VERT_USE_CONFIG_CLAMP_VAL */
- unsigned short reset_clamp_val;
- /**
- * U8Q8. Line average coefficient used in vertical clamp
- * calculation
- */
- unsigned char line_ave_coef;
- /* Width in pixels of the optical black region used for calculation. */
- enum vpfe_isif_vert_bc_sz_h ob_h_sz_calc;
- /* Height of the optical black region for calculation */
- unsigned short ob_v_sz_calc;
- /* Optical black region start position - horizontal. 0 - 8191 */
- unsigned short ob_start_h;
- /* Optical black region start position - vertical 0 - 8191 */
- unsigned short ob_start_v;
-};
-
-/**
- * Structure for Black Clamp configuration params
- */
-struct vpfe_isif_black_clamp {
- /**
- * this offset value is added irrespective of the clamp
- * enable status. S13
- */
- unsigned short dc_offset;
- /**
- * Enable black/digital clamp value to be subtracted
- * from the image data
- */
- unsigned char en;
- /**
- * black clamp mode. same/separate clamp for 4 colors
- * 0 - disable - same clamp value for all colors
- * 1 - clamp value calculated separately for all colors
- */
- unsigned char bc_mode_color;
- /* Vertical start position for bc subtraction */
- unsigned short vert_start_sub;
- /* Black clamp for horizontal direction */
- struct vpfe_isif_horz_bclamp horz;
- /* Black clamp for vertical direction */
- struct vpfe_isif_vert_bclamp vert;
-};
-
-/*************************************************************************
- ** Color Space Conversion (CSC)
- *************************************************************************/
-/**
- * Number of Coefficient values used for CSC
- */
-#define VPFE_ISIF_CSC_NUM_COEFF 16
-
-struct float_8_bit {
- /* 8 bit integer part */
- __u8 integer;
- /* 8 bit decimal part */
- __u8 decimal;
-};
-
-struct float_16_bit {
- /* 16 bit integer part */
- __u16 integer;
- /* 16 bit decimal part */
- __u16 decimal;
-};
-
-/*************************************************************************
- ** Color Space Conversion parameters
- *************************************************************************/
-/**
- * Structure used for CSC config params
- */
-struct vpfe_isif_color_space_conv {
- /* Enable color space conversion */
- unsigned char en;
- /**
- * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
- * so forth
- */
- struct float_8_bit coeff[VPFE_ISIF_CSC_NUM_COEFF];
-};
-
-enum vpfe_isif_datasft {
- /* No Shift */
- VPFE_ISIF_NO_SHIFT,
- /* 1 bit Shift */
- VPFE_ISIF_1BIT_SHIFT,
- /* 2 bit Shift */
- VPFE_ISIF_2BIT_SHIFT,
- /* 3 bit Shift */
- VPFE_ISIF_3BIT_SHIFT,
- /* 4 bit Shift */
- VPFE_ISIF_4BIT_SHIFT,
- /* 5 bit Shift */
- VPFE_ISIF_5BIT_SHIFT,
- /* 6 bit Shift */
- VPFE_ISIF_6BIT_SHIFT
-};
-
-#define VPFE_ISIF_LINEAR_TAB_SIZE 192
-/*************************************************************************
- ** Linearization parameters
- *************************************************************************/
-/**
- * Structure for Sensor data linearization
- */
-struct vpfe_isif_linearize {
- /* Enable or Disable linearization of data */
- unsigned char en;
- /* Shift value applied */
- enum vpfe_isif_datasft corr_shft;
- /* scale factor applied U11Q10 */
- struct float_16_bit scale_fact;
- /* Size of the linear table */
- unsigned short table[VPFE_ISIF_LINEAR_TAB_SIZE];
-};
-
-/*************************************************************************
- ** ISIF Raw configuration parameters
- *************************************************************************/
-enum vpfe_isif_fmt_mode {
- VPFE_ISIF_SPLIT,
- VPFE_ISIF_COMBINE
-};
-
-enum vpfe_isif_lnum {
- VPFE_ISIF_1LINE,
- VPFE_ISIF_2LINES,
- VPFE_ISIF_3LINES,
- VPFE_ISIF_4LINES
-};
-
-enum vpfe_isif_line {
- VPFE_ISIF_1STLINE,
- VPFE_ISIF_2NDLINE,
- VPFE_ISIF_3RDLINE,
- VPFE_ISIF_4THLINE
-};
-
-struct vpfe_isif_fmtplen {
- /**
- * number of program entries for SET0, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen0;
- /**
- * number of program entries for SET1, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen1;
- /**
- * number of program entries for SET2, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen2;
- /**
- * number of program entries for SET3, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- unsigned short plen3;
-};
-
-struct vpfe_isif_fmt_cfg {
- /* Split or combine or line alternate */
- enum vpfe_isif_fmt_mode fmtmode;
- /* enable or disable line alternating mode */
- unsigned char ln_alter_en;
- /* Split/combine line number */
- enum vpfe_isif_lnum lnum;
- /* Address increment Range 1 - 16 */
- unsigned int addrinc;
-};
-
-struct vpfe_isif_fmt_addr_ptr {
- /* Initial address */
- unsigned int init_addr;
- /* output line number */
- enum vpfe_isif_line out_line;
-};
-
-struct vpfe_isif_fmtpgm_ap {
- /* program address pointer */
- unsigned char pgm_aptr;
- /* program address increment or decrement */
- unsigned char pgmupdt;
-};
-
-struct vpfe_isif_data_formatter {
- /* Enable/Disable data formatter */
- unsigned char en;
- /* data formatter configuration */
- struct vpfe_isif_fmt_cfg cfg;
- /* Formatter program entries length */
- struct vpfe_isif_fmtplen plen;
- /* first pixel in a line fed to formatter */
- unsigned short fmtrlen;
- /* HD interval for output line. Only valid when split line */
- unsigned short fmthcnt;
- /* formatter address pointers */
- struct vpfe_isif_fmt_addr_ptr fmtaddr_ptr[16];
- /* program enable/disable */
- unsigned char pgm_en[32];
- /* program address pointers */
- struct vpfe_isif_fmtpgm_ap fmtpgm_ap[32];
-};
-
-struct vpfe_isif_df_csc {
- /* Select Data Formatter or CSC: 0 - csc, 1 - df */
- unsigned int df_or_csc;
- /* csc configuration valid if df_or_csc is 0 */
- struct vpfe_isif_color_space_conv csc;
- /* data formatter configuration valid if df_or_csc is 1 */
- struct vpfe_isif_data_formatter df;
- /* start pixel in a line at the input */
- unsigned int start_pix;
- /* number of pixels in input line */
- unsigned int num_pixels;
- /* start line at the input */
- unsigned int start_line;
- /* number of lines at the input */
- unsigned int num_lines;
-};
-
-struct vpfe_isif_gain_offsets_adj {
- /* Enable or Disable Gain adjustment for SDRAM data */
- unsigned char gain_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- unsigned char gain_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- unsigned char gain_h3a_en;
- /* Enable or Disable Offset adjustment for SDRAM data */
- unsigned char offset_sdram_en;
- /* Enable or Disable Offset adjustment for IPIPE data */
- unsigned char offset_ipipe_en;
- /* Enable or Disable Offset adjustment for H3A data */
- unsigned char offset_h3a_en;
-};
-
-struct vpfe_isif_cul {
- /* Horizontal Cull pattern for odd lines */
- unsigned char hcpat_odd;
- /* Horizontal Cull pattern for even lines */
- unsigned char hcpat_even;
- /* Vertical Cull pattern */
- unsigned char vcpat;
- /* Enable or disable lpf. Apply when cull is enabled */
- unsigned char en_lpf;
-};
-
-/* all fields in this struct are provided by userland */
-struct vpfe_isif_raw_config {
- /* Linearization parameters for image sensor data input */
- struct vpfe_isif_linearize linearize;
- /* Data formatter or CSC */
- struct vpfe_isif_df_csc df_csc;
- /* Defect Pixel Correction (DFC) configuration */
- struct vpfe_isif_dfc dfc;
- /* Black/Digital Clamp configuration */
- struct vpfe_isif_black_clamp bclamp;
- /* Gain, offset adjustments */
- struct vpfe_isif_gain_offsets_adj gain_offset;
- /* Culling */
- struct vpfe_isif_cul culling;
- /* horizontal offset for Gain/LSC/DFC */
- unsigned short horz_offset;
- /* vertical offset for Gain/LSC/DFC */
- unsigned short vert_offset;
-};
-
-/**********************************************************************
- * IPIPE API Structures
- **********************************************************************/
-
-/* IPIPE module configurations */
-
-/* IPIPE input configuration */
-#define VPFE_IPIPE_INPUT_CONFIG BIT(0)
-/* LUT based Defect Pixel Correction */
-#define VPFE_IPIPE_LUTDPC BIT(1)
-/* On the fly (OTF) Defect Pixel Correction */
-#define VPFE_IPIPE_OTFDPC BIT(2)
-/* Noise Filter - 1 */
-#define VPFE_IPIPE_NF1 BIT(3)
-/* Noise Filter - 2 */
-#define VPFE_IPIPE_NF2 BIT(4)
-/* White Balance. Also a control ID */
-#define VPFE_IPIPE_WB BIT(5)
-/* 1st RGB to RGB Blend module */
-#define VPFE_IPIPE_RGB2RGB_1 BIT(6)
-/* 2nd RGB to RGB Blend module */
-#define VPFE_IPIPE_RGB2RGB_2 BIT(7)
-/* Gamma Correction */
-#define VPFE_IPIPE_GAMMA BIT(8)
-/* 3D LUT color conversion */
-#define VPFE_IPIPE_3D_LUT BIT(9)
-/* RGB to YCbCr module */
-#define VPFE_IPIPE_RGB2YUV BIT(10)
-/* YUV 422 conversion module */
-#define VPFE_IPIPE_YUV422_CONV BIT(11)
-/* Edge Enhancement */
-#define VPFE_IPIPE_YEE BIT(12)
-/* Green Imbalance Correction */
-#define VPFE_IPIPE_GIC BIT(13)
-/* CFA Interpolation */
-#define VPFE_IPIPE_CFA BIT(14)
-/* Chroma Artifact Reduction */
-#define VPFE_IPIPE_CAR BIT(15)
-/* Chroma Gain Suppression */
-#define VPFE_IPIPE_CGS BIT(16)
-/* Global brightness and contrast control */
-#define VPFE_IPIPE_GBCE BIT(17)
-
-#define VPFE_IPIPE_MAX_MODULES 18
-
-struct ipipe_float_u16 {
- unsigned short integer;
- unsigned short decimal;
-};
-
-struct ipipe_float_s16 {
- short integer;
- unsigned short decimal;
-};
-
-struct ipipe_float_u8 {
- unsigned char integer;
- unsigned char decimal;
-};
-
-/* Copy method selection for vertical correction
- * Used when ipipe_dfc_corr_meth is IPIPE_DPC_CTORB_AFTER_HINT
- */
-enum vpfe_ipipe_dpc_corr_meth {
- /* replace by black or white dot specified by repl_white */
- VPFE_IPIPE_DPC_REPL_BY_DOT = 0,
- /* Copy from left */
- VPFE_IPIPE_DPC_CL = 1,
- /* Copy from right */
- VPFE_IPIPE_DPC_CR = 2,
- /* Horizontal interpolation */
- VPFE_IPIPE_DPC_H_INTP = 3,
- /* Vertical interpolation */
- VPFE_IPIPE_DPC_V_INTP = 4,
- /* Copy from top */
- VPFE_IPIPE_DPC_CT = 5,
- /* Copy from bottom */
- VPFE_IPIPE_DPC_CB = 6,
- /* 2D interpolation */
- VPFE_IPIPE_DPC_2D_INTP = 7,
-};
-
-struct vpfe_ipipe_lutdpc_entry {
- /* Horizontal position */
- unsigned short horz_pos;
- /* vertical position */
- unsigned short vert_pos;
- enum vpfe_ipipe_dpc_corr_meth method;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_DPC 256
-
-/* Structure for configuring DPC module */
-struct vpfe_ipipe_lutdpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* 0 - replace with black dot, 1 - replace with white dot; used
- * when the correction method is VPFE_IPIPE_DPC_REPL_BY_DOT
- */
- unsigned char repl_white;
- /* number of entries in the correction table. Currently only
- * up to 256 entries are supported; infinite mode is not supported
- */
- unsigned short dpc_size;
- struct vpfe_ipipe_lutdpc_entry table[VPFE_IPIPE_MAX_SIZE_DPC];
-};
-
-enum vpfe_ipipe_otfdpc_det_meth {
- VPFE_IPIPE_DPC_OTF_MIN_MAX,
- VPFE_IPIPE_DPC_OTF_MIN_MAX2
-};
-
-struct vpfe_ipipe_otfdpc_thr {
- unsigned short r;
- unsigned short gr;
- unsigned short gb;
- unsigned short b;
-};
-
-enum vpfe_ipipe_otfdpc_alg {
- VPFE_IPIPE_OTFDPC_2_0,
- VPFE_IPIPE_OTFDPC_3_0
-};
-
-struct vpfe_ipipe_otfdpc_2_0_cfg {
- /* defect detection threshold for MIN_MAX2 method (DPC 2.0 alg) */
- struct vpfe_ipipe_otfdpc_thr det_thr;
- /* defect correction threshold for MIN_MAX2 method (DPC 2.0 alg) or
- * maximum value for MIN_MAX method
- */
- struct vpfe_ipipe_otfdpc_thr corr_thr;
-};
-
-struct vpfe_ipipe_otfdpc_3_0_cfg {
- /* DPC3.0 activity adjustment shift: activity = (max2 - min2) >> (6 - shf)
- */
- unsigned char act_adj_shf;
- /* DPC3.0 detection threshold, THR */
- unsigned short det_thr;
- /* DPC3.0 detection threshold slope, SLP */
- unsigned short det_slp;
- /* DPC3.0 detection threshold min, MIN */
- unsigned short det_thr_min;
- /* DPC3.0 detection threshold max, MAX */
- unsigned short det_thr_max;
- /* DPC3.0 correction threshold, THR */
- unsigned short corr_thr;
- /* DPC3.0 correction threshold slope, SLP */
- unsigned short corr_slp;
- /* DPC3.0 correction threshold min, MIN */
- unsigned short corr_thr_min;
- /* DPC3.0 correction threshold max, MAX */
- unsigned short corr_thr_max;
-};
-
-struct vpfe_ipipe_otfdpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- union {
- /* if alg is IPIPE_OTFDPC_2_0 */
- struct vpfe_ipipe_otfdpc_2_0_cfg dpc_2_0;
- /* if alg is IPIPE_OTFDPC_3_0 */
- struct vpfe_ipipe_otfdpc_3_0_cfg dpc_3_0;
- } alg_cfg;
-};
-
-/* Threshold values table size */
-#define VPFE_IPIPE_NF_THR_TABLE_SIZE 8
-/* Intensity values table size */
-#define VPFE_IPIPE_NF_STR_TABLE_SIZE 8
-
-/* NF, sampling method for green pixels */
-enum vpfe_ipipe_nf_sampl_meth {
- /* Same as R or B */
- VPFE_IPIPE_NF_BOX,
- /* Diamond mode */
- VPFE_IPIPE_NF_DIAMOND
-};
-
-/* Structure for configuring NF module */
-struct vpfe_ipipe_nf {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* Sampling method for green pixels */
- enum vpfe_ipipe_nf_sampl_meth gr_sample_meth;
- /* Down shift value in LUT reference address
- */
- unsigned char shft_val;
- /* Spread value in NF algorithm
- */
- unsigned char spread_val;
- /* Apply LSC gain to threshold. Enable this only if
- * LSC is enabled in ISIF
- */
- unsigned char apply_lsc_gain;
- /* Threshold values table */
- unsigned short thr[VPFE_IPIPE_NF_THR_TABLE_SIZE];
- /* intensity values table */
- unsigned char str[VPFE_IPIPE_NF_STR_TABLE_SIZE];
- /* Edge detection minimum threshold */
- unsigned short edge_det_min_thr;
- /* Edge detection maximum threshold */
- unsigned short edge_det_max_thr;
-};
-
-enum vpfe_ipipe_gic_alg {
- VPFE_IPIPE_GIC_ALG_CONST_GAIN,
- VPFE_IPIPE_GIC_ALG_ADAPT_GAIN
-};
-
-enum vpfe_ipipe_gic_thr_sel {
- VPFE_IPIPE_GIC_THR_REG,
- VPFE_IPIPE_GIC_THR_NF
-};
-
-enum vpfe_ipipe_gic_wt_fn_type {
- /* Use difference as index */
- VPFE_IPIPE_GIC_WT_FN_TYP_DIF,
- /* Use weight function as index */
- VPFE_IPIPE_GIC_WT_FN_TYP_HP_VAL
-};
-
-/* structure for Green Imbalance Correction */
-struct vpfe_ipipe_gic {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* 0 - Constant gain , 1 - Adaptive gain algorithm */
- enum vpfe_ipipe_gic_alg gic_alg;
- /* GIC gain or weight. Used for Constant gain and Adaptive algorithms
- */
- unsigned short gain;
- /* Threshold selection. GIC register values or NF2 thr table */
- enum vpfe_ipipe_gic_thr_sel thr_sel;
- /* thr1. Used when thr_sel is IPIPE_GIC_THR_REG */
- unsigned short thr;
- /* this value is used for thr2-thr1, thr3-thr2 or
- * thr4-thr3 when wt_fn_type is index. Otherwise it
- * is the
- */
- unsigned short slope;
- /* Apply LSC gain to threshold. Enable this only if
- * LSC is enabled in ISIF & thr_sel is IPIPE_GIC_THR_REG
- */
- unsigned char apply_lsc_gain;
- /* Multiply Nf2 threshold by this gain. Use this when thr_sel
- * is IPIPE_GIC_THR_NF
- */
- struct ipipe_float_u8 nf2_thr_gain;
- /* Weight function uses difference as index or high pass value.
- * Used for adaptive gain algorithm
- */
- enum vpfe_ipipe_gic_wt_fn_type wt_fn_type;
-};
-
-/* Structure for configuring WB module */
-struct vpfe_ipipe_wb {
- /* Offset (S12) for R */
- short ofst_r;
- /* Offset (S12) for Gr */
- short ofst_gr;
- /* Offset (S12) for Gb */
- short ofst_gb;
- /* Offset (S12) for B */
- short ofst_b;
- /* Gain (U13Q9) for Red */
- struct ipipe_float_u16 gain_r;
- /* Gain (U13Q9) for Gr */
- struct ipipe_float_u16 gain_gr;
- /* Gain (U13Q9) for Gb */
- struct ipipe_float_u16 gain_gb;
- /* Gain (U13Q9) for Blue */
- struct ipipe_float_u16 gain_b;
-};
-
-enum vpfe_ipipe_cfa_alg {
- /* Algorithm is 2DirAC */
- VPFE_IPIPE_CFA_ALG_2DIRAC,
- /* Algorithm is 2DirAC + Digital Antialiasing (DAA) */
- VPFE_IPIPE_CFA_ALG_2DIRAC_DAA,
- /* Algorithm is DAA */
- VPFE_IPIPE_CFA_ALG_DAA
-};
-
-/* Structure for CFA Interpolation */
-struct vpfe_ipipe_cfa {
- /* 2DirAC or 2DirAC + DAA */
- enum vpfe_ipipe_cfa_alg alg;
- /* 2Dir CFA HP value Low Threshold */
- unsigned short hpf_thr_2dir;
- /* 2Dir CFA HP value slope */
- unsigned short hpf_slp_2dir;
- /* 2Dir CFA HP mix threshold */
- unsigned short hp_mix_thr_2dir;
- /* 2Dir CFA HP mix slope */
- unsigned short hp_mix_slope_2dir;
- /* 2Dir Direction threshold */
- unsigned short dir_thr_2dir;
- /* 2Dir Direction slope */
- unsigned short dir_slope_2dir;
- /* 2Dir Non Directional Weight */
- unsigned short nd_wt_2dir;
- /* DAA Mono Hue Fraction */
- unsigned short hue_fract_daa;
- /* DAA Mono Edge threshold */
- unsigned short edge_thr_daa;
- /* DAA Mono threshold minimum */
- unsigned short thr_min_daa;
- /* DAA Mono threshold slope */
- unsigned short thr_slope_daa;
- /* DAA Mono slope minimum */
- unsigned short slope_min_daa;
- /* DAA Mono slope slope */
- unsigned short slope_slope_daa;
- /* DAA Mono LP weight */
- unsigned short lp_wt_daa;
-};
-
-/* Struct for configuring RGB2RGB blending module */
-struct vpfe_ipipe_rgb2rgb {
- /* Matrix coefficient for RR S12Q8 for ID = 1 and S11Q8 for ID = 2 */
- struct ipipe_float_s16 coef_rr;
- /* Matrix coefficient for GR S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gr;
- /* Matrix coefficient for BR S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_br;
- /* Matrix coefficient for RG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_rg;
- /* Matrix coefficient for GG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gg;
- /* Matrix coefficient for BG S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_bg;
- /* Matrix coefficient for RB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_rb;
- /* Matrix coefficient for GB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_gb;
- /* Matrix coefficient for BB S12Q8/S11Q8 */
- struct ipipe_float_s16 coef_bb;
- /* Output offset for R S13/S11 */
- int out_ofst_r;
- /* Output offset for G S13/S11 */
- int out_ofst_g;
- /* Output offset for B S13/S11 */
- int out_ofst_b;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_GAMMA 512
-
-enum vpfe_ipipe_gamma_tbl_size {
- VPFE_IPIPE_GAMMA_TBL_SZ_64 = 64,
- VPFE_IPIPE_GAMMA_TBL_SZ_128 = 128,
- VPFE_IPIPE_GAMMA_TBL_SZ_256 = 256,
- VPFE_IPIPE_GAMMA_TBL_SZ_512 = 512,
-};
-
-enum vpfe_ipipe_gamma_tbl_sel {
- VPFE_IPIPE_GAMMA_TBL_RAM = 0,
- VPFE_IPIPE_GAMMA_TBL_ROM = 1,
-};
-
-struct vpfe_ipipe_gamma_entry {
- /* 10 bit slope */
- short slope;
- /* 10 bit offset */
- unsigned short offset;
-};
-
-/* Structure for configuring Gamma correction module */
-struct vpfe_ipipe_gamma {
- /* 0 - Enable Gamma correction for Red
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_r;
- /* 0 - Enable Gamma correction for Blue
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_b;
- /* 0 - Enable Gamma correction for Green
- * 1 - bypass Gamma correction. Data is divided by 16
- */
- unsigned char bypass_g;
- /* IPIPE_GAMMA_TBL_RAM or IPIPE_GAMMA_TBL_ROM */
- enum vpfe_ipipe_gamma_tbl_sel tbl_sel;
- /* Table size for RAM gamma table.
- */
- enum vpfe_ipipe_gamma_tbl_size tbl_size;
- /* R table */
- struct vpfe_ipipe_gamma_entry table_r[VPFE_IPIPE_MAX_SIZE_GAMMA];
- /* Blue table */
- struct vpfe_ipipe_gamma_entry table_b[VPFE_IPIPE_MAX_SIZE_GAMMA];
- /* Green table */
- struct vpfe_ipipe_gamma_entry table_g[VPFE_IPIPE_MAX_SIZE_GAMMA];
-};
-
-#define VPFE_IPIPE_MAX_SIZE_3D_LUT 729
-
-struct vpfe_ipipe_3d_lut_entry {
- /* 10 bit entry for red */
- unsigned short r;
- /* 10 bit entry for green */
- unsigned short g;
- /* 10 bit entry for blue */
- unsigned short b;
-};
-
-/* structure for 3D-LUT */
-struct vpfe_ipipe_3d_lut {
- /* enable/disable 3D lut */
- unsigned char en;
- /* 3D - LUT table entry */
- struct vpfe_ipipe_3d_lut_entry table[VPFE_IPIPE_MAX_SIZE_3D_LUT];
-};
-
-/* Struct for configuring rgb2ycbcr module */
-struct vpfe_ipipe_rgb2yuv {
- /* Matrix coefficient for RY S12Q8 */
- struct ipipe_float_s16 coef_ry;
- /* Matrix coefficient for GY S12Q8 */
- struct ipipe_float_s16 coef_gy;
- /* Matrix coefficient for BY S12Q8 */
- struct ipipe_float_s16 coef_by;
- /* Matrix coefficient for RCb S12Q8 */
- struct ipipe_float_s16 coef_rcb;
- /* Matrix coefficient for GCb S12Q8 */
- struct ipipe_float_s16 coef_gcb;
- /* Matrix coefficient for BCb S12Q8 */
- struct ipipe_float_s16 coef_bcb;
- /* Matrix coefficient for RCr S12Q8 */
- struct ipipe_float_s16 coef_rcr;
- /* Matrix coefficient for GCr S12Q8 */
- struct ipipe_float_s16 coef_gcr;
- /* Matrix coefficient for BCr S12Q8 */
- struct ipipe_float_s16 coef_bcr;
- /* Output offset for R S11 */
- int out_ofst_y;
- /* Output offset for Cb S11 */
- int out_ofst_cb;
- /* Output offset for Cr S11 */
- int out_ofst_cr;
-};
-
-enum vpfe_ipipe_gbce_type {
- VPFE_IPIPE_GBCE_Y_VAL_TBL = 0,
- VPFE_IPIPE_GBCE_GAIN_TBL = 1,
-};
-
-#define VPFE_IPIPE_MAX_SIZE_GBCE_LUT 1024
-
-/* structure for Global brightness and Contrast */
-struct vpfe_ipipe_gbce {
- /* enable/disable GBCE */
- unsigned char en;
- /* Y - value table or Gain table */
- enum vpfe_ipipe_gbce_type type;
- /* LUT for GBCE with 1024 entries */
- unsigned short table[VPFE_IPIPE_MAX_SIZE_GBCE_LUT];
-};
-
-/* Chrominance position. Applicable only for YCbCr input
- * Applied after edge enhancement
- */
-enum vpfe_chr_pos {
- /* Co-siting, same position with luminance */
- VPFE_IPIPE_YUV422_CHR_POS_COSITE = 0,
- /* Centering, In the middle of luminance */
- VPFE_IPIPE_YUV422_CHR_POS_CENTRE = 1,
-};
-
-/* Structure for configuring yuv422 conversion module */
-struct vpfe_ipipe_yuv422_conv {
- /* 1 - enable LPF for chrominance, 0 - disable */
- unsigned char en_chrom_lpf;
- /* Chrominance position */
- enum vpfe_chr_pos chrom_pos;
-};
-
-#define VPFE_IPIPE_MAX_SIZE_YEE_LUT 1024
-
-enum vpfe_ipipe_yee_merge_meth {
- VPFE_IPIPE_YEE_ABS_MAX = 0,
- VPFE_IPIPE_YEE_EE_ES = 1,
-};
-
-/* Structure for configuring YUV Edge Enhancement module */
-struct vpfe_ipipe_yee {
- /* 1 - enable enhancement, 0 - disable */
- unsigned char en;
- /* enable/disable halo reduction in edge sharpener */
- unsigned char en_halo_red;
- /* Merge method between Edge Enhancer and Edge sharpener */
- enum vpfe_ipipe_yee_merge_meth merge_meth;
- /* HPF Shift length */
- unsigned char hpf_shft;
- /* HPF Coefficient 00, S10 */
- short hpf_coef_00;
- /* HPF Coefficient 01, S10 */
- short hpf_coef_01;
- /* HPF Coefficient 02, S10 */
- short hpf_coef_02;
- /* HPF Coefficient 10, S10 */
- short hpf_coef_10;
- /* HPF Coefficient 11, S10 */
- short hpf_coef_11;
- /* HPF Coefficient 12, S10 */
- short hpf_coef_12;
- /* HPF Coefficient 20, S10 */
- short hpf_coef_20;
- /* HPF Coefficient 21, S10 */
- short hpf_coef_21;
- /* HPF Coefficient 22, S10 */
- short hpf_coef_22;
- /* Lower threshold before referring to LUT */
- unsigned short yee_thr;
- /* Edge sharpener Gain */
- unsigned short es_gain;
- /* Edge sharpener lower threshold */
- unsigned short es_thr1;
- /* Edge sharpener upper threshold */
- unsigned short es_thr2;
- /* Edge sharpener gain on gradient */
- unsigned short es_gain_grad;
- /* Edge sharpener offset on gradient */
- unsigned short es_ofst_grad;
- /* EE table. Must have 1024 entries */
- short table[VPFE_IPIPE_MAX_SIZE_YEE_LUT];
-};
-
-enum vpfe_ipipe_car_meth {
- /* Chromatic Gain Control */
- VPFE_IPIPE_CAR_CHR_GAIN_CTRL = 0,
- /* Dynamic switching between CHR_GAIN_CTRL
- * and MED_FLTR
- */
- VPFE_IPIPE_CAR_DYN_SWITCH = 1,
- /* Median Filter */
- VPFE_IPIPE_CAR_MED_FLTR = 2,
-};
-
-enum vpfe_ipipe_car_hpf_type {
- VPFE_IPIPE_CAR_HPF_Y = 0,
- VPFE_IPIPE_CAR_HPF_H = 1,
- VPFE_IPIPE_CAR_HPF_V = 2,
- VPFE_IPIPE_CAR_HPF_2D = 3,
- /* 2D HPF from YUV Edge Enhancement */
- VPFE_IPIPE_CAR_HPF_2D_YEE = 4,
-};
-
-struct vpfe_ipipe_car_gain {
- /* csup_gain */
- unsigned char gain;
- /* csup_shf. */
- unsigned char shft;
- /* gain minimum */
- unsigned short gain_min;
-};
-
-/* Structure for Chromatic Artifact Reduction */
-struct vpfe_ipipe_car {
- /* enable/disable */
- unsigned char en;
- /* Gain control or Dynamic switching */
- enum vpfe_ipipe_car_meth meth;
- /* Gain1 function configuration for Gain control */
- struct vpfe_ipipe_car_gain gain1;
- /* Gain2 function configuration for Gain control */
- struct vpfe_ipipe_car_gain gain2;
- /* HPF type used for CAR */
- enum vpfe_ipipe_car_hpf_type hpf;
- /* csup_thr: HPF threshold for Gain control */
- unsigned char hpf_thr;
- /* Down shift value for hpf. 2 bits */
- unsigned char hpf_shft;
- /* switch limit for median filter */
- unsigned char sw0;
- /* switch coefficient for Gain control */
- unsigned char sw1;
-};
-
-/* structure for Chromatic Gain Suppression */
-struct vpfe_ipipe_cgs {
- /* enable/disable */
- unsigned char en;
- /* gain1 bright side threshold */
- unsigned char h_thr;
- /* gain1 bright side slope */
- unsigned char h_slope;
- /* gain1 down shift value for bright side */
- unsigned char h_shft;
- /* gain1 bright side minimum gain */
- unsigned char h_min;
-};
-
-/* Max pixels allowed in the input. If above this, either decimation
- * or frame division mode must be enabled
- */
-#define VPFE_IPIPE_MAX_INPUT_WIDTH 2600
-
-struct vpfe_ipipe_input_config {
- unsigned int vst;
- unsigned int hst;
-};
-
-/**
- * struct vpfe_ipipe_config - IPIPE engine configuration (user)
- * @input_config: Pointer to structure for ipipe configuration.
- * @flag: Specifies which ISP IPIPE functions should be enabled.
- * @lutdpc: Pointer to LUT-based defect pixel correction structure.
- * @otfdpc: Pointer to structure for defect correction.
- * @nf1: Pointer to structure for Noise Filter.
- * @nf2: Pointer to structure for Noise Filter.
- * @gic: Pointer to structure for Green Imbalance.
- * @wbal: Pointer to structure for White Balance.
- * @cfa: Pointer to structure containing the CFA interpolation.
- * @rgb2rgb1: Pointer to structure for RGB to RGB Blending.
- * @rgb2rgb2: Pointer to structure for RGB to RGB Blending.
- * @gamma: Pointer to gamma structure.
- * @lut: Pointer to structure for 3D LUT.
- * @rgb2yuv: Pointer to structure for RGB-YCbCr conversion.
- * @gbce: Pointer to structure for Global Brightness, Contrast Control.
- * @yuv422_conv: Pointer to structure for YUV 422 conversion.
- * @yee: Pointer to structure for Edge Enhancer.
- * @car: Pointer to structure for Chromatic Artifact Reduction.
- * @cgs: Pointer to structure for Chromatic Gain Suppression.
- */
-struct vpfe_ipipe_config {
- __u32 flag;
- struct vpfe_ipipe_input_config __user *input_config;
- struct vpfe_ipipe_lutdpc __user *lutdpc;
- struct vpfe_ipipe_otfdpc __user *otfdpc;
- struct vpfe_ipipe_nf __user *nf1;
- struct vpfe_ipipe_nf __user *nf2;
- struct vpfe_ipipe_gic __user *gic;
- struct vpfe_ipipe_wb __user *wbal;
- struct vpfe_ipipe_cfa __user *cfa;
- struct vpfe_ipipe_rgb2rgb __user *rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb __user *rgb2rgb2;
- struct vpfe_ipipe_gamma __user *gamma;
- struct vpfe_ipipe_3d_lut __user *lut;
- struct vpfe_ipipe_rgb2yuv __user *rgb2yuv;
- struct vpfe_ipipe_gbce __user *gbce;
- struct vpfe_ipipe_yuv422_conv __user *yuv422_conv;
- struct vpfe_ipipe_yee __user *yee;
- struct vpfe_ipipe_car __user *car;
- struct vpfe_ipipe_cgs __user *cgs;
-};
-
-/*******************************************************************
- ** Resizer API structures
- *******************************************************************/
-/* Interpolation types used for horizontal rescale */
-enum vpfe_rsz_intp_t {
- VPFE_RSZ_INTP_CUBIC,
- VPFE_RSZ_INTP_LINEAR
-};
-
-/* Horizontal LPF intensity selection */
-enum vpfe_rsz_h_lpf_lse_t {
- VPFE_RSZ_H_LPF_LSE_INTERN,
- VPFE_RSZ_H_LPF_LSE_USER_VAL
-};
-
-enum vpfe_rsz_down_scale_ave_sz {
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- VPFE_IPIPE_DWN_SCALE_1_OVER_4,
- VPFE_IPIPE_DWN_SCALE_1_OVER_8,
- VPFE_IPIPE_DWN_SCALE_1_OVER_16,
- VPFE_IPIPE_DWN_SCALE_1_OVER_32,
- VPFE_IPIPE_DWN_SCALE_1_OVER_64,
- VPFE_IPIPE_DWN_SCALE_1_OVER_128,
- VPFE_IPIPE_DWN_SCALE_1_OVER_256
-};
-
-struct vpfe_rsz_output_spec {
- /* enable horizontal flip */
- unsigned char h_flip;
- /* enable vertical flip */
- unsigned char v_flip;
- /* line start offset for y. */
- unsigned int vst_y;
- /* line start offset for c. Only for 420 */
- unsigned int vst_c;
- /* vertical rescale interpolation type, YCbCr or Luminance */
- enum vpfe_rsz_intp_t v_typ_y;
- /* vertical rescale interpolation type for Chrominance */
- enum vpfe_rsz_intp_t v_typ_c;
- /* vertical lpf intensity - Luminance */
- unsigned char v_lpf_int_y;
- /* vertical lpf intensity - Chrominance */
- unsigned char v_lpf_int_c;
- /* horizontal rescale interpolation types, YCbCr or Luminance */
- enum vpfe_rsz_intp_t h_typ_y;
- /* horizontal rescale interpolation types, Chrominance */
- enum vpfe_rsz_intp_t h_typ_c;
- /* horizontal lpf intensity - Luminance */
- unsigned char h_lpf_int_y;
- /* horizontal lpf intensity - Chrominance */
- unsigned char h_lpf_int_c;
- /* Use down-scale mode for scaling down */
- unsigned char en_down_scale;
- /* if downscaling, set the down-scale mode averaging size for the
- * horizontal direction. Used only if the output width and height are
- * less than the input sizes
- */
- enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
- /* if downscaling, set the down-scale mode averaging size for the
- * vertical direction. Used only if the output width and height are
- * less than the input sizes
- */
- enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
- /* Y offset. If set, the offset would be added to the base address
- */
- unsigned int user_y_ofst;
- /* C offset. If set, the offset would be added to the base address
- */
- unsigned int user_c_ofst;
-};
-
-struct vpfe_rsz_config_params {
- unsigned int vst;
- /* horizontal start position of the image
- * data to IPIPE
- */
- unsigned int hst;
- /* output spec of the image data coming out of resizer - 0(UYVY).
- */
- struct vpfe_rsz_output_spec output1;
- /* output spec of the image data coming out of resizer - 1(UYVY).
- */
- struct vpfe_rsz_output_spec output2;
- /* 0 - chroma sample at odd pixel, 1 - even pixel */
- unsigned char chroma_sample_even;
- unsigned char frame_div_mode_en;
- unsigned char yuv_y_min;
- unsigned char yuv_y_max;
- unsigned char yuv_c_min;
- unsigned char yuv_c_max;
- enum vpfe_chr_pos out_chr_pos;
- unsigned char bypass;
-};
-
-/* Structure for VIDIOC_VPFE_RSZ_[S/G]_CONFIG IOCTLs */
-struct vpfe_rsz_config {
- struct vpfe_rsz_config_params *config;
-};
-
-#endif /* _DAVINCI_VPFE_USER_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
deleted file mode 100644
index 52397ad0e3e2..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ /dev/null
@@ -1,1852 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * IPIPE allows fine tuning of the input image using different
- * tuning modules in IPIPE. Some examples: noise filter, defect
- * pixel correction, etc. It essentially operates on Bayer raw data
- * or YUV raw data. To do image tuning, application call,
- *
- */
-
-#include <linux/slab.h>
-#include <linux/bitops.h>
-
-#include "dm365_ipipe.h"
-#include "dm365_ipipe_hw.h"
-#include "vpfe_mc_capture.h"
-
-#define MIN_OUT_WIDTH 32
-#define MIN_OUT_HEIGHT 32
-
-/* ipipe input formats */
-static const unsigned int ipipe_input_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
-};
-
-/* ipipe output formats */
-static const unsigned int ipipe_output_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
-};
-
-static int ipipe_validate_lutdpc_params(struct vpfe_ipipe_lutdpc *lutdpc)
-{
- int i;
-
- if (lutdpc->en > 1 || lutdpc->repl_white > 1 ||
- lutdpc->dpc_size > LUT_DPC_MAX_SIZE)
- return -EINVAL;
-
- if (lutdpc->en)
- return -EINVAL;
-
- for (i = 0; i < lutdpc->dpc_size; i++)
- if (lutdpc->table[i].horz_pos > LUT_DPC_H_POS_MASK ||
- lutdpc->table[i].vert_pos > LUT_DPC_V_POS_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
- struct vpfe_ipipe_lutdpc *dpc_param;
-
- if (!param) {
- memset((void *)lutdpc, 0, sizeof(struct vpfe_ipipe_lutdpc));
- goto success;
- }
-
- dpc_param = param;
- lutdpc->en = dpc_param->en;
- lutdpc->repl_white = dpc_param->repl_white;
- lutdpc->dpc_size = dpc_param->dpc_size;
- memcpy(&lutdpc->table, &dpc_param->table,
- (dpc_param->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
- if (ipipe_validate_lutdpc_params(lutdpc) < 0)
- return -EINVAL;
-
-success:
- ipipe_set_lutdpc_regs(ipipe->base_addr, ipipe->isp5_base_addr, lutdpc);
-
- return 0;
-}
-
-static int ipipe_get_lutdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_lutdpc *lut_param = param;
- struct vpfe_ipipe_lutdpc *lutdpc = &ipipe->config.lutdpc;
-
- lut_param->en = lutdpc->en;
- lut_param->repl_white = lutdpc->repl_white;
- lut_param->dpc_size = lutdpc->dpc_size;
- memcpy(&lut_param->table, &lutdpc->table,
- (lutdpc->dpc_size * sizeof(struct vpfe_ipipe_lutdpc_entry)));
-
- return 0;
-}
-
-static int ipipe_set_input_config(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
-
- if (!param)
- memset(config, 0, sizeof(struct vpfe_ipipe_input_config));
- else
- memcpy(config, param, sizeof(struct vpfe_ipipe_input_config));
- return 0;
-}
-
-static int ipipe_get_input_config(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
-
- if (!param)
- return -EINVAL;
-
- memcpy(param, config, sizeof(struct vpfe_ipipe_input_config));
-
- return 0;
-}
-
-static int ipipe_validate_otfdpc_params(struct vpfe_ipipe_otfdpc *dpc_param)
-{
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0;
- struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0;
-
- if (dpc_param->en > 1)
- return -EINVAL;
-
- if (dpc_param->alg == VPFE_IPIPE_OTFDPC_2_0) {
- dpc_2_0 = &dpc_param->alg_cfg.dpc_2_0;
- if (dpc_2_0->det_thr.r > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.gr > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.gb > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->det_thr.b > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.r > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.gr > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.gb > OTFDPC_DPC2_THR_MASK ||
- dpc_2_0->corr_thr.b > OTFDPC_DPC2_THR_MASK)
- return -EINVAL;
- return 0;
- }
-
- dpc_3_0 = &dpc_param->alg_cfg.dpc_3_0;
-
- if (dpc_3_0->act_adj_shf > OTF_DPC3_0_SHF_MASK ||
- dpc_3_0->det_thr > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->det_slp > OTF_DPC3_0_SLP_MASK ||
- dpc_3_0->det_thr_min > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->det_thr_max > OTF_DPC3_0_DET_MASK ||
- dpc_3_0->corr_thr > OTF_DPC3_0_CORR_MASK ||
- dpc_3_0->corr_slp > OTF_DPC3_0_SLP_MASK ||
- dpc_3_0->corr_thr_min > OTF_DPC3_0_CORR_MASK ||
- dpc_3_0->corr_thr_max > OTF_DPC3_0_CORR_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_otfdpc *dpc_param = param;
- struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
- struct device *dev;
-
- if (!param) {
- memset((void *)otfdpc, 0, sizeof(struct ipipe_otfdpc_2_0));
- goto success;
- }
- dev = ipipe->subdev.v4l2_dev->dev;
- memcpy(otfdpc, dpc_param, sizeof(struct vpfe_ipipe_otfdpc));
- if (ipipe_validate_otfdpc_params(otfdpc) < 0) {
- dev_err(dev, "Invalid otfdpc params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_otfdpc_regs(ipipe->base_addr, otfdpc);
-
- return 0;
-}
-
-static int ipipe_get_otfdpc_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_otfdpc *dpc_param = param;
- struct vpfe_ipipe_otfdpc *otfdpc = &ipipe->config.otfdpc;
-
- memcpy(dpc_param, otfdpc, sizeof(struct vpfe_ipipe_otfdpc));
- return 0;
-}
-
-static int ipipe_validate_nf_params(struct vpfe_ipipe_nf *nf_param)
-{
- int i;
-
- if (nf_param->en > 1 || nf_param->shft_val > D2F_SHFT_VAL_MASK ||
- nf_param->spread_val > D2F_SPR_VAL_MASK ||
- nf_param->apply_lsc_gain > 1 ||
- nf_param->edge_det_min_thr > D2F_EDGE_DET_THR_MASK ||
- nf_param->edge_det_max_thr > D2F_EDGE_DET_THR_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_NF_THR_TABLE_SIZE; i++)
- if (nf_param->thr[i] > D2F_THR_VAL_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_NF_STR_TABLE_SIZE; i++)
- if (nf_param->str[i] > D2F_STR_VAL_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_nf_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_nf *nf_param = param;
- struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
- struct device *dev;
-
- if (id == IPIPE_D2F_2ND)
- nf = &ipipe->config.nf2;
-
- if (!nf_param) {
- memset((void *)nf, 0, sizeof(struct vpfe_ipipe_nf));
- goto success;
- }
-
- dev = ipipe->subdev.v4l2_dev->dev;
- memcpy(nf, nf_param, sizeof(struct vpfe_ipipe_nf));
- if (ipipe_validate_nf_params(nf) < 0) {
- dev_err(dev, "Invalid nf params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_d2f_regs(ipipe->base_addr, id, nf);
-
- return 0;
-}
-
-static int ipipe_set_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_nf_params(ipipe, IPIPE_D2F_1ST, param);
-}
-
-static int ipipe_set_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_nf_params(ipipe, IPIPE_D2F_2ND, param);
-}
-
-static int ipipe_get_nf_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_nf *nf_param = param;
- struct vpfe_ipipe_nf *nf = &ipipe->config.nf1;
-
- if (id == IPIPE_D2F_2ND)
- nf = &ipipe->config.nf2;
-
- memcpy(nf_param, nf, sizeof(struct vpfe_ipipe_nf));
-
- return 0;
-}
-
-static int ipipe_get_nf1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_nf_params(ipipe, IPIPE_D2F_1ST, param);
-}
-
-static int ipipe_get_nf2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_nf_params(ipipe, IPIPE_D2F_2ND, param);
-}
-
-static int ipipe_validate_gic_params(struct vpfe_ipipe_gic *gic)
-{
- if (gic->en > 1 || gic->gain > GIC_GAIN_MASK ||
- gic->thr > GIC_THR_MASK || gic->slope > GIC_SLOPE_MASK ||
- gic->apply_lsc_gain > 1 ||
- gic->nf2_thr_gain.integer > GIC_NFGAN_INT_MASK ||
- gic->nf2_thr_gain.decimal > GIC_NFGAN_DECI_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gic *gic_param = param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
-
- if (!gic_param) {
- memset((void *)gic, 0, sizeof(struct vpfe_ipipe_gic));
- goto success;
- }
-
- memcpy(gic, gic_param, sizeof(struct vpfe_ipipe_gic));
- if (ipipe_validate_gic_params(gic) < 0) {
- dev_err(dev, "Invalid gic params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_gic_regs(ipipe->base_addr, gic);
-
- return 0;
-}
-
-static int ipipe_get_gic_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gic *gic_param = param;
- struct vpfe_ipipe_gic *gic = &ipipe->config.gic;
-
- memcpy(gic_param, gic, sizeof(struct vpfe_ipipe_gic));
-
- return 0;
-}
-
-static int ipipe_validate_wb_params(struct vpfe_ipipe_wb *wbal)
-{
- if (wbal->ofst_r > WB_OFFSET_MASK ||
- wbal->ofst_gr > WB_OFFSET_MASK ||
- wbal->ofst_gb > WB_OFFSET_MASK ||
- wbal->ofst_b > WB_OFFSET_MASK ||
- wbal->gain_r.integer > WB_GAIN_INT_MASK ||
- wbal->gain_r.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_gr.integer > WB_GAIN_INT_MASK ||
- wbal->gain_gr.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_gb.integer > WB_GAIN_INT_MASK ||
- wbal->gain_gb.decimal > WB_GAIN_DECI_MASK ||
- wbal->gain_b.integer > WB_GAIN_INT_MASK ||
- wbal->gain_b.decimal > WB_GAIN_DECI_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_wb *wb_param = param;
- struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
-
- if (!wb_param) {
- const struct vpfe_ipipe_wb wb_defaults = {
- .gain_r = {2, 0x0},
- .gain_gr = {2, 0x0},
- .gain_gb = {2, 0x0},
- .gain_b = {2, 0x0}
- };
- memcpy(wbal, &wb_defaults, sizeof(struct vpfe_ipipe_wb));
- goto success;
- }
-
- memcpy(wbal, wb_param, sizeof(struct vpfe_ipipe_wb));
- if (ipipe_validate_wb_params(wbal) < 0)
- return -EINVAL;
-
-success:
- ipipe_set_wb_regs(ipipe->base_addr, wbal);
-
- return 0;
-}
-
-static int ipipe_get_wb_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_wb *wb_param = param;
- struct vpfe_ipipe_wb *wbal = &ipipe->config.wbal;
-
- memcpy(wb_param, wbal, sizeof(struct vpfe_ipipe_wb));
- return 0;
-}
-
-static int ipipe_validate_cfa_params(struct vpfe_ipipe_cfa *cfa)
-{
- if (cfa->hpf_thr_2dir > CFA_HPF_THR_2DIR_MASK ||
- cfa->hpf_slp_2dir > CFA_HPF_SLOPE_2DIR_MASK ||
- cfa->hp_mix_thr_2dir > CFA_HPF_MIX_THR_2DIR_MASK ||
- cfa->hp_mix_slope_2dir > CFA_HPF_MIX_SLP_2DIR_MASK ||
- cfa->dir_thr_2dir > CFA_DIR_THR_2DIR_MASK ||
- cfa->dir_slope_2dir > CFA_DIR_SLP_2DIR_MASK ||
- cfa->nd_wt_2dir > CFA_ND_WT_2DIR_MASK ||
- cfa->hue_fract_daa > CFA_DAA_HUE_FRA_MASK ||
- cfa->edge_thr_daa > CFA_DAA_EDG_THR_MASK ||
- cfa->thr_min_daa > CFA_DAA_THR_MIN_MASK ||
- cfa->thr_slope_daa > CFA_DAA_THR_SLP_MASK ||
- cfa->slope_min_daa > CFA_DAA_SLP_MIN_MASK ||
- cfa->slope_slope_daa > CFA_DAA_SLP_SLP_MASK ||
- cfa->lp_wt_daa > CFA_DAA_LP_WT_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cfa *cfa_param = param;
- struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
-
- if (!cfa_param) {
- memset(cfa, 0, sizeof(struct vpfe_ipipe_cfa));
- cfa->alg = VPFE_IPIPE_CFA_ALG_2DIRAC;
- goto success;
- }
-
- memcpy(cfa, cfa_param, sizeof(struct vpfe_ipipe_cfa));
- if (ipipe_validate_cfa_params(cfa) < 0)
- return -EINVAL;
-
-success:
- ipipe_set_cfa_regs(ipipe->base_addr, cfa);
-
- return 0;
-}
-
-static int ipipe_get_cfa_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cfa *cfa_param = param;
- struct vpfe_ipipe_cfa *cfa = &ipipe->config.cfa;
-
- memcpy(cfa_param, cfa, sizeof(struct vpfe_ipipe_cfa));
- return 0;
-}
-
-static int
-ipipe_validate_rgb2rgb_params(struct vpfe_ipipe_rgb2rgb *rgb2rgb,
- unsigned int id)
-{
- u32 gain_int_upper = RGB2RGB_1_GAIN_INT_MASK;
- u32 offset_upper = RGB2RGB_1_OFST_MASK;
-
- if (id == IPIPE_RGB2RGB_2) {
- offset_upper = RGB2RGB_2_OFST_MASK;
- gain_int_upper = RGB2RGB_2_GAIN_INT_MASK;
- }
-
- if (rgb2rgb->coef_rr.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rr.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gr.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gr.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_br.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_br.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_rg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_bg.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_bg.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_rb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_rb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_gb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_gb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->coef_bb.decimal > RGB2RGB_GAIN_DECI_MASK ||
- rgb2rgb->coef_bb.integer > gain_int_upper)
- return -EINVAL;
-
- if (rgb2rgb->out_ofst_r > offset_upper ||
- rgb2rgb->out_ofst_g > offset_upper ||
- rgb2rgb->out_ofst_b > offset_upper)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
-
- rgb2rgb_param = param;
-
- if (id == IPIPE_RGB2RGB_2)
- rgb2rgb = &ipipe->config.rgb2rgb2;
-
- if (!rgb2rgb_param) {
- const struct vpfe_ipipe_rgb2rgb rgb2rgb_defaults = {
- .coef_rr = {1, 0}, /* 256 */
- .coef_gr = {0, 0},
- .coef_br = {0, 0},
- .coef_rg = {0, 0},
- .coef_gg = {1, 0}, /* 256 */
- .coef_bg = {0, 0},
- .coef_rb = {0, 0},
- .coef_gb = {0, 0},
- .coef_bb = {1, 0}, /* 256 */
- };
- /* Copy defaults for rgb2rgb conversion */
- memcpy(rgb2rgb, &rgb2rgb_defaults,
- sizeof(struct vpfe_ipipe_rgb2rgb));
- goto success;
- }
-
- memcpy(rgb2rgb, rgb2rgb_param, sizeof(struct vpfe_ipipe_rgb2rgb));
- if (ipipe_validate_rgb2rgb_params(rgb2rgb, id) < 0) {
- dev_err(dev, "Invalid rgb2rgb params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_rgb2rgb_regs(ipipe->base_addr, id, rgb2rgb);
-
- return 0;
-}
-
-static int
-ipipe_set_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
-}
-
-static int
-ipipe_set_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_set_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
-}
-
-static int ipipe_get_rgb2rgb_params(struct vpfe_ipipe_device *ipipe,
- unsigned int id, void *param)
-{
- struct vpfe_ipipe_rgb2rgb *rgb2rgb = &ipipe->config.rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb *rgb2rgb_param;
-
- rgb2rgb_param = param;
-
- if (id == IPIPE_RGB2RGB_2)
- rgb2rgb = &ipipe->config.rgb2rgb2;
-
- memcpy(rgb2rgb_param, rgb2rgb, sizeof(struct vpfe_ipipe_rgb2rgb));
-
- return 0;
-}
-
-static int
-ipipe_get_rgb2rgb_1_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_1, param);
-}
-
-static int
-ipipe_get_rgb2rgb_2_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- return ipipe_get_rgb2rgb_params(ipipe, IPIPE_RGB2RGB_2, param);
-}
-
-static int
-ipipe_validate_gamma_entry(struct vpfe_ipipe_gamma_entry *table, int size)
-{
- int i;
-
- if (!table)
- return -EINVAL;
-
- for (i = 0; i < size; i++)
- if (table[i].slope > GAMMA_MASK ||
- table[i].offset > GAMMA_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_validate_gamma_params(struct vpfe_ipipe_gamma *gamma, struct device *dev)
-{
- int table_size;
- int err;
-
- if (gamma->bypass_r > 1 ||
- gamma->bypass_b > 1 ||
- gamma->bypass_g > 1)
- return -EINVAL;
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return 0;
-
- table_size = gamma->tbl_size;
- if (!gamma->bypass_r) {
- err = ipipe_validate_gamma_entry(gamma->table_r, table_size);
- if (err) {
- dev_err(dev, "GAMMA R - table entry invalid\n");
- return err;
- }
- }
-
- if (!gamma->bypass_b) {
- err = ipipe_validate_gamma_entry(gamma->table_b, table_size);
- if (err) {
- dev_err(dev, "GAMMA B - table entry invalid\n");
- return err;
- }
- }
-
- if (!gamma->bypass_g) {
- err = ipipe_validate_gamma_entry(gamma->table_g, table_size);
- if (err) {
- dev_err(dev, "GAMMA G - table entry invalid\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static int
-ipipe_set_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gamma *gamma_param = param;
- struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- int table_size;
-
- if (!gamma_param) {
- memset(gamma, 0, sizeof(struct vpfe_ipipe_gamma));
- gamma->tbl_sel = VPFE_IPIPE_GAMMA_TBL_ROM;
- goto success;
- }
-
- gamma->bypass_r = gamma_param->bypass_r;
- gamma->bypass_b = gamma_param->bypass_b;
- gamma->bypass_g = gamma_param->bypass_g;
- gamma->tbl_sel = gamma_param->tbl_sel;
- gamma->tbl_size = gamma_param->tbl_size;
-
- if (ipipe_validate_gamma_params(gamma, dev) < 0)
- return -EINVAL;
-
- if (gamma_param->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- goto success;
-
- table_size = gamma->tbl_size;
- if (!gamma_param->bypass_r)
- memcpy(&gamma->table_r, &gamma_param->table_r,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma_param->bypass_b)
- memcpy(&gamma->table_b, &gamma_param->table_b,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma_param->bypass_g)
- memcpy(&gamma->table_g, &gamma_param->table_g,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
-success:
- ipipe_set_gamma_regs(ipipe->base_addr, ipipe->isp5_base_addr, gamma);
-
- return 0;
-}
-
-static int ipipe_get_gamma_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gamma *gamma_param = param;
- struct vpfe_ipipe_gamma *gamma = &ipipe->config.gamma;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- int table_size;
-
- gamma_param->bypass_r = gamma->bypass_r;
- gamma_param->bypass_g = gamma->bypass_g;
- gamma_param->bypass_b = gamma->bypass_b;
- gamma_param->tbl_sel = gamma->tbl_sel;
- gamma_param->tbl_size = gamma->tbl_size;
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return 0;
-
- table_size = gamma->tbl_size;
-
- if (!gamma->bypass_r) {
- dev_err(dev,
- "%s: table ptr empty for R\n", __func__);
- return -EINVAL;
- }
- memcpy(gamma_param->table_r, gamma->table_r,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma->bypass_g) {
- dev_err(dev, "%s: table ptr empty for G\n", __func__);
- return -EINVAL;
- }
- memcpy(gamma_param->table_g, gamma->table_g,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- if (!gamma->bypass_b) {
- dev_err(dev, "%s: table ptr empty for B\n", __func__);
- return -EINVAL;
- }
- memcpy(gamma_param->table_b, gamma->table_b,
- (table_size * sizeof(struct vpfe_ipipe_gamma_entry)));
-
- return 0;
-}
-
-static int ipipe_validate_3d_lut_params(struct vpfe_ipipe_3d_lut *lut)
-{
- int i;
-
- if (!lut->en)
- return 0;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++)
- if (lut->table[i].r > D3_LUT_ENTRY_MASK ||
- lut->table[i].g > D3_LUT_ENTRY_MASK ||
- lut->table[i].b > D3_LUT_ENTRY_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_get_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_3d_lut *lut_param = param;
- struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
-
- lut_param->en = lut->en;
-
- memcpy(lut_param->table, &lut->table,
- (VPFE_IPIPE_MAX_SIZE_3D_LUT *
- sizeof(struct vpfe_ipipe_3d_lut_entry)));
-
- return 0;
-}
-
-static int
-ipipe_set_3d_lut_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_3d_lut *lut_param = param;
- struct vpfe_ipipe_3d_lut *lut = &ipipe->config.lut;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- if (!lut_param) {
- memset(lut, 0, sizeof(struct vpfe_ipipe_3d_lut));
- goto success;
- }
-
- memcpy(lut, lut_param, sizeof(struct vpfe_ipipe_3d_lut));
- if (ipipe_validate_3d_lut_params(lut) < 0) {
- dev_err(dev, "Invalid 3D-LUT Params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_3d_lut_regs(ipipe->base_addr, ipipe->isp5_base_addr, lut);
-
- return 0;
-}
-
-static int ipipe_validate_rgb2yuv_params(struct vpfe_ipipe_rgb2yuv *rgb2yuv)
-{
- if (rgb2yuv->coef_ry.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_ry.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gy.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gy.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_by.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_by.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_rcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_rcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_bcb.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_bcb.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_rcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_rcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_gcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_gcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->coef_bcr.decimal > RGB2YCBCR_COEF_DECI_MASK ||
- rgb2yuv->coef_bcr.integer > RGB2YCBCR_COEF_INT_MASK)
- return -EINVAL;
-
- if (rgb2yuv->out_ofst_y > RGB2YCBCR_OFST_MASK ||
- rgb2yuv->out_ofst_cb > RGB2YCBCR_OFST_MASK ||
- rgb2yuv->out_ofst_cr > RGB2YCBCR_OFST_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_set_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
-
- rgb2yuv_param = param;
- if (!rgb2yuv_param) {
- /* Defaults for rgb2yuv conversion */
- const struct vpfe_ipipe_rgb2yuv rgb2yuv_defaults = {
- .coef_ry = {0, 0x4d},
- .coef_gy = {0, 0x96},
- .coef_by = {0, 0x1d},
- .coef_rcb = {0xf, 0xd5},
- .coef_gcb = {0xf, 0xab},
- .coef_bcb = {0, 0x80},
- .coef_rcr = {0, 0x80},
- .coef_gcr = {0xf, 0x95},
- .coef_bcr = {0xf, 0xeb},
- .out_ofst_cb = 0x80,
- .out_ofst_cr = 0x80,
- };
- /* Copy defaults for rgb2yuv conversion */
- memcpy(rgb2yuv, &rgb2yuv_defaults,
- sizeof(struct vpfe_ipipe_rgb2yuv));
- goto success;
- }
-
- memcpy(rgb2yuv, rgb2yuv_param, sizeof(struct vpfe_ipipe_rgb2yuv));
- if (ipipe_validate_rgb2yuv_params(rgb2yuv) < 0) {
- dev_err(dev, "Invalid rgb2yuv params\n");
- return -EINVAL;
- }
-
-success:
- ipipe_set_rgb2ycbcr_regs(ipipe->base_addr, rgb2yuv);
-
- return 0;
-}
-
-static int
-ipipe_get_rgb2yuv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_rgb2yuv *rgb2yuv = &ipipe->config.rgb2yuv;
- struct vpfe_ipipe_rgb2yuv *rgb2yuv_param;
-
- rgb2yuv_param = param;
- memcpy(rgb2yuv_param, rgb2yuv, sizeof(struct vpfe_ipipe_rgb2yuv));
- return 0;
-}
-
-static int ipipe_validate_gbce_params(struct vpfe_ipipe_gbce *gbce)
-{
- u32 max = GBCE_Y_VAL_MASK;
- int i;
-
- if (!gbce->en)
- return 0;
-
- if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
- max = GBCE_GAIN_VAL_MASK;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; i++)
- if (gbce->table[i] > max)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gbce *gbce_param = param;
- struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- if (!gbce_param) {
- memset(gbce, 0, sizeof(struct vpfe_ipipe_gbce));
- } else {
- memcpy(gbce, gbce_param, sizeof(struct vpfe_ipipe_gbce));
- if (ipipe_validate_gbce_params(gbce) < 0) {
- dev_err(dev, "Invalid gbce params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_gbce_regs(ipipe->base_addr, ipipe->isp5_base_addr, gbce);
-
- return 0;
-}
-
-static int ipipe_get_gbce_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_gbce *gbce_param = param;
- struct vpfe_ipipe_gbce *gbce = &ipipe->config.gbce;
-
- gbce_param->en = gbce->en;
- gbce_param->type = gbce->type;
-
- memcpy(gbce_param->table, gbce->table,
- (VPFE_IPIPE_MAX_SIZE_GBCE_LUT * sizeof(unsigned short)));
-
- return 0;
-}
-
-static int
-ipipe_validate_yuv422_conv_params(struct vpfe_ipipe_yuv422_conv *yuv422_conv)
-{
- if (yuv422_conv->en_chrom_lpf > 1)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-ipipe_set_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
- struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
-
- yuv422_conv_param = param;
- if (!yuv422_conv_param) {
- memset(yuv422_conv, 0, sizeof(struct vpfe_ipipe_yuv422_conv));
- yuv422_conv->chrom_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE;
- } else {
- memcpy(yuv422_conv, yuv422_conv_param,
- sizeof(struct vpfe_ipipe_yuv422_conv));
- if (ipipe_validate_yuv422_conv_params(yuv422_conv) < 0) {
- dev_err(dev, "Invalid yuv422 params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_yuv422_conv_regs(ipipe->base_addr, yuv422_conv);
-
- return 0;
-}
-
-static int
-ipipe_get_yuv422_conv_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yuv422_conv *yuv422_conv = &ipipe->config.yuv422_conv;
- struct vpfe_ipipe_yuv422_conv *yuv422_conv_param;
-
- yuv422_conv_param = param;
- memcpy(yuv422_conv_param, yuv422_conv,
- sizeof(struct vpfe_ipipe_yuv422_conv));
-
- return 0;
-}
-
-static int ipipe_validate_yee_params(struct vpfe_ipipe_yee *yee)
-{
- int i;
-
- if (yee->en > 1 ||
- yee->en_halo_red > 1 ||
- yee->hpf_shft > YEE_HPF_SHIFT_MASK)
- return -EINVAL;
-
- if (yee->hpf_coef_00 > YEE_COEF_MASK ||
- yee->hpf_coef_01 > YEE_COEF_MASK ||
- yee->hpf_coef_02 > YEE_COEF_MASK ||
- yee->hpf_coef_10 > YEE_COEF_MASK ||
- yee->hpf_coef_11 > YEE_COEF_MASK ||
- yee->hpf_coef_12 > YEE_COEF_MASK ||
- yee->hpf_coef_20 > YEE_COEF_MASK ||
- yee->hpf_coef_21 > YEE_COEF_MASK ||
- yee->hpf_coef_22 > YEE_COEF_MASK)
- return -EINVAL;
-
- if (yee->yee_thr > YEE_THR_MASK ||
- yee->es_gain > YEE_ES_GAIN_MASK ||
- yee->es_thr1 > YEE_ES_THR1_MASK ||
- yee->es_thr2 > YEE_THR_MASK ||
- yee->es_gain_grad > YEE_THR_MASK ||
- yee->es_ofst_grad > YEE_THR_MASK)
- return -EINVAL;
-
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_YEE_LUT; i++)
- if (yee->table[i] > YEE_ENTRY_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yee *yee_param = param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
-
- if (!yee_param) {
- memset(yee, 0, sizeof(struct vpfe_ipipe_yee));
- } else {
- memcpy(yee, yee_param, sizeof(struct vpfe_ipipe_yee));
- if (ipipe_validate_yee_params(yee) < 0) {
- dev_err(dev, "Invalid yee params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_ee_regs(ipipe->base_addr, ipipe->isp5_base_addr, yee);
-
- return 0;
-}
-
-static int ipipe_get_yee_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_yee *yee_param = param;
- struct vpfe_ipipe_yee *yee = &ipipe->config.yee;
-
- yee_param->en = yee->en;
- yee_param->en_halo_red = yee->en_halo_red;
- yee_param->merge_meth = yee->merge_meth;
- yee_param->hpf_shft = yee->hpf_shft;
- yee_param->hpf_coef_00 = yee->hpf_coef_00;
- yee_param->hpf_coef_01 = yee->hpf_coef_01;
- yee_param->hpf_coef_02 = yee->hpf_coef_02;
- yee_param->hpf_coef_10 = yee->hpf_coef_10;
- yee_param->hpf_coef_11 = yee->hpf_coef_11;
- yee_param->hpf_coef_12 = yee->hpf_coef_12;
- yee_param->hpf_coef_20 = yee->hpf_coef_20;
- yee_param->hpf_coef_21 = yee->hpf_coef_21;
- yee_param->hpf_coef_22 = yee->hpf_coef_22;
- yee_param->yee_thr = yee->yee_thr;
- yee_param->es_gain = yee->es_gain;
- yee_param->es_thr1 = yee->es_thr1;
- yee_param->es_thr2 = yee->es_thr2;
- yee_param->es_gain_grad = yee->es_gain_grad;
- yee_param->es_ofst_grad = yee->es_ofst_grad;
- memcpy(yee_param->table, &yee->table,
- (VPFE_IPIPE_MAX_SIZE_YEE_LUT * sizeof(short)));
-
- return 0;
-}
-
-static int ipipe_validate_car_params(struct vpfe_ipipe_car *car)
-{
- if (car->en > 1 || car->hpf_shft > CAR_HPF_SHIFT_MASK ||
- car->gain1.shft > CAR_GAIN1_SHFT_MASK ||
- car->gain1.gain_min > CAR_GAIN_MIN_MASK ||
- car->gain2.shft > CAR_GAIN2_SHFT_MASK ||
- car->gain2.gain_min > CAR_GAIN_MIN_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_car_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_car *car_param = param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_car *car = &ipipe->config.car;
-
- if (!car_param) {
- memset(car, 0, sizeof(struct vpfe_ipipe_car));
- } else {
- memcpy(car, car_param, sizeof(struct vpfe_ipipe_car));
- if (ipipe_validate_car_params(car) < 0) {
- dev_err(dev, "Invalid car params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_car_regs(ipipe->base_addr, car);
-
- return 0;
-}
-
-static int ipipe_get_car_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_car *car_param = param;
- struct vpfe_ipipe_car *car = &ipipe->config.car;
-
- memcpy(car_param, car, sizeof(struct vpfe_ipipe_car));
- return 0;
-}
-
-static int ipipe_validate_cgs_params(struct vpfe_ipipe_cgs *cgs)
-{
- if (cgs->en > 1 || cgs->h_shft > CAR_SHIFT_MASK)
- return -EINVAL;
-
- return 0;
-}
-
-static int ipipe_set_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cgs *cgs_param = param;
- struct device *dev = ipipe->subdev.v4l2_dev->dev;
- struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
-
- if (!cgs_param) {
- memset(cgs, 0, sizeof(struct vpfe_ipipe_cgs));
- } else {
- memcpy(cgs, cgs_param, sizeof(struct vpfe_ipipe_cgs));
- if (ipipe_validate_cgs_params(cgs) < 0) {
- dev_err(dev, "Invalid cgs params\n");
- return -EINVAL;
- }
- }
-
- ipipe_set_cgs_regs(ipipe->base_addr, cgs);
-
- return 0;
-}
-
-static int ipipe_get_cgs_params(struct vpfe_ipipe_device *ipipe, void *param)
-{
- struct vpfe_ipipe_cgs *cgs_param = param;
- struct vpfe_ipipe_cgs *cgs = &ipipe->config.cgs;
-
- memcpy(cgs_param, cgs, sizeof(struct vpfe_ipipe_cgs));
-
- return 0;
-}
-
-static const struct ipipe_module_if ipipe_modules[VPFE_IPIPE_MAX_MODULES] = {
- /* VPFE_IPIPE_INPUT_CONFIG */ {
- offsetof(struct ipipe_module_params, input_config),
- FIELD_SIZEOF(struct ipipe_module_params, input_config),
- offsetof(struct vpfe_ipipe_config, input_config),
- ipipe_set_input_config,
- ipipe_get_input_config,
- }, /* VPFE_IPIPE_LUTDPC */ {
- offsetof(struct ipipe_module_params, lutdpc),
- FIELD_SIZEOF(struct ipipe_module_params, lutdpc),
- offsetof(struct vpfe_ipipe_config, lutdpc),
- ipipe_set_lutdpc_params,
- ipipe_get_lutdpc_params,
- }, /* VPFE_IPIPE_OTFDPC */ {
- offsetof(struct ipipe_module_params, otfdpc),
- FIELD_SIZEOF(struct ipipe_module_params, otfdpc),
- offsetof(struct vpfe_ipipe_config, otfdpc),
- ipipe_set_otfdpc_params,
- ipipe_get_otfdpc_params,
- }, /* VPFE_IPIPE_NF1 */ {
- offsetof(struct ipipe_module_params, nf1),
- FIELD_SIZEOF(struct ipipe_module_params, nf1),
- offsetof(struct vpfe_ipipe_config, nf1),
- ipipe_set_nf1_params,
- ipipe_get_nf1_params,
- }, /* VPFE_IPIPE_NF2 */ {
- offsetof(struct ipipe_module_params, nf2),
- FIELD_SIZEOF(struct ipipe_module_params, nf2),
- offsetof(struct vpfe_ipipe_config, nf2),
- ipipe_set_nf2_params,
- ipipe_get_nf2_params,
- }, /* VPFE_IPIPE_WB */ {
- offsetof(struct ipipe_module_params, wbal),
- FIELD_SIZEOF(struct ipipe_module_params, wbal),
- offsetof(struct vpfe_ipipe_config, wbal),
- ipipe_set_wb_params,
- ipipe_get_wb_params,
- }, /* VPFE_IPIPE_RGB2RGB_1 */ {
- offsetof(struct ipipe_module_params, rgb2rgb1),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb1),
- offsetof(struct vpfe_ipipe_config, rgb2rgb1),
- ipipe_set_rgb2rgb_1_params,
- ipipe_get_rgb2rgb_1_params,
- }, /* VPFE_IPIPE_RGB2RGB_2 */ {
- offsetof(struct ipipe_module_params, rgb2rgb2),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2rgb2),
- offsetof(struct vpfe_ipipe_config, rgb2rgb2),
- ipipe_set_rgb2rgb_2_params,
- ipipe_get_rgb2rgb_2_params,
- }, /* VPFE_IPIPE_GAMMA */ {
- offsetof(struct ipipe_module_params, gamma),
- FIELD_SIZEOF(struct ipipe_module_params, gamma),
- offsetof(struct vpfe_ipipe_config, gamma),
- ipipe_set_gamma_params,
- ipipe_get_gamma_params,
- }, /* VPFE_IPIPE_3D_LUT */ {
- offsetof(struct ipipe_module_params, lut),
- FIELD_SIZEOF(struct ipipe_module_params, lut),
- offsetof(struct vpfe_ipipe_config, lut),
- ipipe_set_3d_lut_params,
- ipipe_get_3d_lut_params,
- }, /* VPFE_IPIPE_RGB2YUV */ {
- offsetof(struct ipipe_module_params, rgb2yuv),
- FIELD_SIZEOF(struct ipipe_module_params, rgb2yuv),
- offsetof(struct vpfe_ipipe_config, rgb2yuv),
- ipipe_set_rgb2yuv_params,
- ipipe_get_rgb2yuv_params,
- }, /* VPFE_IPIPE_YUV422_CONV */ {
- offsetof(struct ipipe_module_params, yuv422_conv),
- FIELD_SIZEOF(struct ipipe_module_params, yuv422_conv),
- offsetof(struct vpfe_ipipe_config, yuv422_conv),
- ipipe_set_yuv422_conv_params,
- ipipe_get_yuv422_conv_params,
- }, /* VPFE_IPIPE_YEE */ {
- offsetof(struct ipipe_module_params, yee),
- FIELD_SIZEOF(struct ipipe_module_params, yee),
- offsetof(struct vpfe_ipipe_config, yee),
- ipipe_set_yee_params,
- ipipe_get_yee_params,
- }, /* VPFE_IPIPE_GIC */ {
- offsetof(struct ipipe_module_params, gic),
- FIELD_SIZEOF(struct ipipe_module_params, gic),
- offsetof(struct vpfe_ipipe_config, gic),
- ipipe_set_gic_params,
- ipipe_get_gic_params,
- }, /* VPFE_IPIPE_CFA */ {
- offsetof(struct ipipe_module_params, cfa),
- FIELD_SIZEOF(struct ipipe_module_params, cfa),
- offsetof(struct vpfe_ipipe_config, cfa),
- ipipe_set_cfa_params,
- ipipe_get_cfa_params,
- }, /* VPFE_IPIPE_CAR */ {
- offsetof(struct ipipe_module_params, car),
- FIELD_SIZEOF(struct ipipe_module_params, car),
- offsetof(struct vpfe_ipipe_config, car),
- ipipe_set_car_params,
- ipipe_get_car_params,
- }, /* VPFE_IPIPE_CGS */ {
- offsetof(struct ipipe_module_params, cgs),
- FIELD_SIZEOF(struct ipipe_module_params, cgs),
- offsetof(struct vpfe_ipipe_config, cgs),
- ipipe_set_cgs_params,
- ipipe_get_cgs_params,
- }, /* VPFE_IPIPE_GBCE */ {
- offsetof(struct ipipe_module_params, gbce),
- FIELD_SIZEOF(struct ipipe_module_params, gbce),
- offsetof(struct vpfe_ipipe_config, gbce),
- ipipe_set_gbce_params,
- ipipe_get_gbce_params,
- },
-};
-
-static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- unsigned int i;
- int rval = 0;
- struct ipipe_module_params *params;
-
- for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
- const struct ipipe_module_if *module_if;
- void *from, *to;
- size_t size;
-
- if (!(cfg->flag & BIT(i)))
- continue;
-
- module_if = &ipipe_modules[i];
- from = *(void **)((void *)cfg + module_if->config_offset);
-
- params = kmalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
- to = (void *)params + module_if->param_offset;
- size = module_if->param_size;
-
- if (to && from && size) {
- if (copy_from_user(to, (void __user *)from, size)) {
- rval = -EFAULT;
- goto error_free;
- }
- rval = module_if->set(ipipe, to);
- if (rval)
- goto error_free;
- } else if (to && !from && size) {
- rval = module_if->set(ipipe, NULL);
- if (rval)
- goto error_free;
- }
- kfree(params);
- }
- return rval;
-
-error_free:
- kfree(params);
- return rval;
-}
-
-static int ipipe_g_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- unsigned int i;
- int rval = 0;
-
- for (i = 1; i < ARRAY_SIZE(ipipe_modules); i++) {
- const struct ipipe_module_if *module_if;
- struct ipipe_module_params *params;
- void *from, *to;
- size_t size;
-
- if (!(cfg->flag & BIT(i)))
- continue;
-
- module_if = &ipipe_modules[i];
- to = *(void **)((void *)cfg + module_if->config_offset);
-
- params = kmalloc(sizeof(*params), GFP_KERNEL);
- from = (void *)params + module_if->param_offset;
- size = module_if->param_size;
-
- if (to && from && size) {
- rval = module_if->get(ipipe, from);
- if (rval)
- goto error;
- if (copy_to_user((void __user *)to, from, size)) {
- rval = -EFAULT;
- break;
- }
- }
- kfree(params);
- }
-error:
- return rval;
-}
-
-/*
- * ipipe_ioctl() - Handle ipipe module private ioctl's
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case VIDIOC_VPFE_IPIPE_S_CONFIG:
- return ipipe_s_config(sd, arg);
-
- case VIDIOC_VPFE_IPIPE_G_CONFIG:
- return ipipe_g_config(sd, arg);
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- struct vpfe_ipipe_device *ipipe = &vpfe_dev->vpfe_ipipe;
- unsigned char val;
-
- if (ipipe->input == IPIPE_INPUT_NONE)
- return;
-
- /* ipipe is set to single shot */
- if (ipipeif->input == IPIPEIF_INPUT_MEMORY && en) {
- /* for single-shot mode, need to wait for h/w to
- * reset many register bits
- */
- do {
- val = regr_ip(vpfe_dev->vpfe_ipipe.base_addr,
- IPIPE_SRC_EN);
- } while (val);
- }
- regw_ip(vpfe_dev->vpfe_ipipe.base_addr, en, IPIPE_SRC_EN);
-}
-
-/*
- * ipipe_set_stream() - Enable/Disable streaming on the ipipe subdevice
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
-
- if (enable && ipipe->input != IPIPE_INPUT_NONE &&
- ipipe->output != IPIPE_OUTPUT_NONE) {
- if (config_ipipe_hw(ipipe) < 0)
- return -EINVAL;
- }
-
- vpfe_ipipe_enable(vpfe_dev, enable);
-
- return 0;
-}
-
-/*
- * __ipipe_get_format() - helper function for getting ipipe format
- * @ipipe: pointer to ipipe private structure.
- * @pad: pad number.
- * @cfg: V4L2 subdev pad config
- * @which: wanted subdev format.
- *
- */
-static struct v4l2_mbus_framefmt *
-__ipipe_get_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, cfg, pad);
-
- return &ipipe->formats[pad];
-}
-
-/*
- * ipipe_try_format() - Handle try format by pad subdev method
- * @ipipe: VPFE ipipe device.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which : wanted subdev format
- */
-static void
-ipipe_try_format(struct vpfe_ipipe_device *ipipe,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- if (pad == IPIPE_PAD_SINK) {
- for (i = 0; i < ARRAY_SIZE(ipipe_input_fmts); i++)
- if (fmt->code == ipipe_input_fmts[i])
- break;
-
- /* If not found, use SGRBG12 as default */
- if (i >= ARRAY_SIZE(ipipe_input_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
- } else if (pad == IPIPE_PAD_SOURCE) {
- for (i = 0; i < ARRAY_SIZE(ipipe_output_fmts); i++)
- if (fmt->code == ipipe_output_fmts[i])
- break;
-
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ipipe_output_fmts))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
-}
-
-/*
- * ipipe_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int
-ipipe_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipe_get_format(ipipe, cfg, fmt->pad, fmt->which);
- if (!format)
- return -EINVAL;
-
- ipipe_try_format(ipipe, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == IPIPE_PAD_SINK &&
- (ipipe->input == IPIPE_INPUT_CCDC ||
- ipipe->input == IPIPE_INPUT_MEMORY))
- ipipe->formats[fmt->pad] = fmt->format;
- else if (fmt->pad == IPIPE_PAD_SOURCE &&
- ipipe->output == IPIPE_OUTPUT_RESIZER)
- ipipe->formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * ipipe_get_format() - Handle get format by pads subdev method.
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure.
- */
-static int
-ipipe_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- fmt->format = ipipe->formats[fmt->pad];
- else
- fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
-
- return 0;
-}
-
-/*
- * ipipe_enum_frame_size() - enum frame sizes on pads
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure.
- */
-static int
-ipipe_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipe_try_format(ipipe, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * ipipe_enum_mbus_code() - enum mbus codes for pads
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int
-ipipe_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case IPIPE_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipe_input_fmts))
- return -EINVAL;
- code->code = ipipe_input_fmts[code->index];
- break;
-
- case IPIPE_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(ipipe_output_fmts))
- return -EINVAL;
- code->code = ipipe_output_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipe_s_ctrl() - Handle set control subdev method
- * @ctrl: pointer to v4l2 control structure
- */
-static int ipipe_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_ipipe_device *ipipe =
- container_of(ctrl->handler, struct vpfe_ipipe_device, ctrls);
- struct ipipe_lum_adj *lum_adj = &ipipe->config.lum_adj;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- lum_adj->brightness = ctrl->val;
- ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
- break;
-
- case V4L2_CID_CONTRAST:
- lum_adj->contrast = ctrl->val;
- ipipe_set_lum_adj_regs(ipipe->base_addr, lum_adj);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipe_init_formats() - Initialize formats on all pads
- * @sd: pointer to v4l2 subdev structure.
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPE_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPE_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipe_set_format(sd, fh->pad, &format);
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops ipipe_v4l2_core_ops = {
- .ioctl = ipipe_ioctl,
-};
-
-static const struct v4l2_ctrl_ops ipipe_ctrl_ops = {
- .s_ctrl = ipipe_s_ctrl,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
- .open = ipipe_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
- .s_stream = ipipe_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
- .enum_mbus_code = ipipe_enum_mbus_code,
- .enum_frame_size = ipipe_enum_frame_size,
- .get_fmt = ipipe_get_format,
- .set_fmt = ipipe_set_format,
-};
-
-/* v4l2 subdev operation */
-static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
- .core = &ipipe_v4l2_core_ops,
- .video = &ipipe_v4l2_video_ops,
- .pad = &ipipe_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * ipipe_link_setup() - Setup ipipe connections
- * @entity: ipipe media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int
-ipipe_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
-
- if (!is_media_entity_v4l2_subdev(remote->entity))
- return -EINVAL;
-
- switch (local->index) {
- case IPIPE_PAD_SINK:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipe->input = IPIPE_INPUT_NONE;
- break;
- }
- if (ipipe->input != IPIPE_INPUT_NONE)
- return -EBUSY;
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
- ipipe->input = IPIPE_INPUT_MEMORY;
- else
- ipipe->input = IPIPE_INPUT_CCDC;
- break;
-
- case IPIPE_PAD_SOURCE:
- /* out to RESIZER */
- if (flags & MEDIA_LNK_FL_ENABLED)
- ipipe->output = IPIPE_OUTPUT_RESIZER;
- else
- ipipe->output = IPIPE_OUTPUT_NONE;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations ipipe_media_ops = {
- .link_setup = ipipe_link_setup,
-};
-
-/*
- * vpfe_ipipe_unregister_entities() - ipipe unregister entity
- * @vpfe_ipipe: pointer to ipipe subdevice structure.
- */
-void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *vpfe_ipipe)
-{
- /* unregister subdev */
- v4l2_device_unregister_subdev(&vpfe_ipipe->subdev);
- /* cleanup entity */
- media_entity_cleanup(&vpfe_ipipe->subdev.entity);
-}
-
-/*
- * vpfe_ipipe_register_entities() - ipipe register entity
- * @ipipe: pointer to ipipe subdevice structure.
- * @vdev: pointer to v4l2 device structure.
- */
-int
-vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
- struct v4l2_device *vdev)
-{
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);
- if (ret) {
- pr_err("Failed to register ipipe as v4l2 subdevice\n");
- return ret;
- }
-
- return ret;
-}
-
-#define IPIPE_CONTRAST_HIGH 0xff
-#define IPIPE_BRIGHT_HIGH 0xff
-
-/*
- * vpfe_ipipe_init() - ipipe module initialization.
- * @ipipe: pointer to ipipe subdevice structure.
- * @pdev: platform device pointer.
- */
-int
-vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
-{
- struct media_pad *pads = &ipipe->pads[0];
- struct v4l2_subdev *sd = &ipipe->subdev;
- struct media_entity *me = &sd->entity;
- struct resource *res, *res2, *memres;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (!res)
- return -ENOENT;
-
- memres = request_mem_region(res->start, resource_size(res), res->name);
- if (!memres)
- return -EBUSY;
- ipipe->base_addr = ioremap_nocache(memres->start,
- resource_size(memres));
- if (!ipipe->base_addr)
- goto error_release;
-
- res2 = platform_get_resource(pdev, IORESOURCE_MEM, 6);
- if (!res2)
- goto error_unmap;
- ipipe->isp5_base_addr = ioremap_nocache(res2->start,
- resource_size(res2));
- if (!ipipe->isp5_base_addr)
- goto error_unmap;
-
- v4l2_subdev_init(sd, &ipipe_v4l2_ops);
- sd->internal_ops = &ipipe_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI IPIPE", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, ipipe);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- ipipe->input = IPIPE_INPUT_NONE;
- ipipe->output = IPIPE_OUTPUT_NONE;
-
- me->ops = &ipipe_media_ops;
- v4l2_ctrl_handler_init(&ipipe->ctrls, 2);
- v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0,
- IPIPE_BRIGHT_HIGH, 1, 16);
- v4l2_ctrl_new_std(&ipipe->ctrls, &ipipe_ctrl_ops,
- V4L2_CID_CONTRAST, 0,
- IPIPE_CONTRAST_HIGH, 1, 16);
- v4l2_ctrl_handler_setup(&ipipe->ctrls);
- sd->ctrl_handler = &ipipe->ctrls;
-
- return media_entity_pads_init(me, IPIPE_PADS_NUM, pads);
-
-error_unmap:
- iounmap(ipipe->base_addr);
-error_release:
- release_mem_region(memres->start, resource_size(memres));
- return -ENOMEM;
-}
-
-/*
- * vpfe_ipipe_cleanup() - ipipe subdevice cleanup.
- * @ipipe: pointer to ipipe subdevice
- * @pdev: pointer to platform device
- */
-void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- v4l2_ctrl_handler_free(&ipipe->ctrls);
-
- iounmap(ipipe->base_addr);
- iounmap(ipipe->isp5_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res)
- release_mem_region(res->start, resource_size(res));
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
deleted file mode 100644
index 866ae12aeb07..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPE_H
-#define _DAVINCI_VPFE_DM365_IPIPE_H
-
-#include <linux/platform_device.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#include "davinci_vpfe_user.h"
-#include "vpfe_video.h"
-
-enum ipipe_noise_filter {
- IPIPE_D2F_1ST = 0,
- IPIPE_D2F_2ND = 1,
-};
-
-/* Used for driver storage */
-struct ipipe_otfdpc_2_0 {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- struct vpfe_ipipe_otfdpc_2_0_cfg otfdpc_2_0;
-};
-
-struct ipipe_otfdpc_3_0 {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* defect detection method */
- enum vpfe_ipipe_otfdpc_det_meth det_method;
- /* Algorithm used. Applicable only when IPIPE_DPC_OTF_MIN_MAX2 is
- * used
- */
- enum vpfe_ipipe_otfdpc_alg alg;
- struct vpfe_ipipe_otfdpc_3_0_cfg otfdpc_3_0;
-};
-
-/* Structure for configuring Luminance Adjustment module */
-struct ipipe_lum_adj {
- /* Brightness adjustments */
- unsigned char brightness;
- /* contrast adjustments */
- unsigned char contrast;
-};
-
-enum ipipe_rgb2rgb {
- IPIPE_RGB2RGB_1 = 0,
- IPIPE_RGB2RGB_2 = 1,
-};
-
-struct ipipe_module_params {
- __u32 flag;
- struct vpfe_ipipe_input_config input_config;
- struct vpfe_ipipe_lutdpc lutdpc;
- struct vpfe_ipipe_otfdpc otfdpc;
- struct vpfe_ipipe_nf nf1;
- struct vpfe_ipipe_nf nf2;
- struct vpfe_ipipe_gic gic;
- struct vpfe_ipipe_wb wbal;
- struct vpfe_ipipe_cfa cfa;
- struct vpfe_ipipe_rgb2rgb rgb2rgb1;
- struct vpfe_ipipe_rgb2rgb rgb2rgb2;
- struct vpfe_ipipe_gamma gamma;
- struct vpfe_ipipe_3d_lut lut;
- struct vpfe_ipipe_rgb2yuv rgb2yuv;
- struct vpfe_ipipe_gbce gbce;
- struct vpfe_ipipe_yuv422_conv yuv422_conv;
- struct vpfe_ipipe_yee yee;
- struct vpfe_ipipe_car car;
- struct vpfe_ipipe_cgs cgs;
- struct ipipe_lum_adj lum_adj;
-};
-
-#define IPIPE_PAD_SINK 0
-#define IPIPE_PAD_SOURCE 1
-
-#define IPIPE_PADS_NUM 2
-
-#define IPIPE_OUTPUT_NONE 0
-#define IPIPE_OUTPUT_RESIZER (1 << 0)
-
-enum ipipe_input_entity {
- IPIPE_INPUT_NONE = 0,
- IPIPE_INPUT_MEMORY = 1,
- IPIPE_INPUT_CCDC = 2,
-};
-
-
-struct vpfe_ipipe_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPE_PADS_NUM];
- struct v4l2_mbus_framefmt formats[IPIPE_PADS_NUM];
- enum ipipe_input_entity input;
- unsigned int output;
- struct v4l2_ctrl_handler ctrls;
- void __iomem *base_addr;
- void __iomem *isp5_base_addr;
- struct ipipe_module_params config;
-};
-
-struct ipipe_module_if {
- unsigned int param_offset;
- unsigned int param_size;
- unsigned int config_offset;
- int (*set)(struct vpfe_ipipe_device *ipipe, void *param);
- int (*get)(struct vpfe_ipipe_device *ipipe, void *param);
-};
-
-/* data paths */
-enum ipipe_data_paths {
- IPIPE_RAW2YUV,
- /* Bayer RAW input to YCbCr output */
- IPIPE_RAW2RAW,
- /* Bayer Raw to Bayer output */
- IPIPE_RAW2BOX,
- /* Bayer Raw to Boxcar output */
- IPIPE_YUV2YUV
- /* YUV Raw to YUV Raw output */
-};
-
-#define IPIPE_COLPTN_R_Ye 0x0
-#define IPIPE_COLPTN_Gr_Cy 0x1
-#define IPIPE_COLPTN_Gb_G 0x2
-#define IPIPE_COLPTN_B_Mg 0x3
-
-#define COLPAT_EE_SHIFT 0
-#define COLPAT_EO_SHIFT 2
-#define COLPAT_OE_SHIFT 4
-#define COLPAT_OO_SHIFT 6
-
-#define ipipe_sgrbg_pattern \
- (IPIPE_COLPTN_Gr_Cy << COLPAT_EE_SHIFT | \
- IPIPE_COLPTN_R_Ye << COLPAT_EO_SHIFT | \
- IPIPE_COLPTN_B_Mg << COLPAT_OE_SHIFT | \
- IPIPE_COLPTN_Gb_G << COLPAT_OO_SHIFT)
-
-#define ipipe_srggb_pattern \
- (IPIPE_COLPTN_R_Ye << COLPAT_EE_SHIFT | \
- IPIPE_COLPTN_Gr_Cy << COLPAT_EO_SHIFT | \
- IPIPE_COLPTN_Gb_G << COLPAT_OE_SHIFT | \
- IPIPE_COLPTN_B_Mg << COLPAT_OO_SHIFT)
-
-int vpfe_ipipe_register_entities(struct vpfe_ipipe_device *ipipe,
- struct v4l2_device *v4l2_dev);
-int vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev);
-void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *ipipe);
-void vpfe_ipipe_cleanup(struct vpfe_ipipe_device *ipipe,
- struct platform_device *pdev);
-void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPE_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
deleted file mode 100644
index 110473c30577..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ /dev/null
@@ -1,1038 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include "dm365_ipipe_hw.h"
-
-#define IPIPE_MODE_CONTINUOUS 0
-#define IPIPE_MODE_SINGLE_SHOT 1
-
-static void ipipe_clock_enable(void __iomem *base_addr)
-{
- /* enable IPIPE MMR for register write access */
- regw_ip(base_addr, IPIPE_GCK_MMR_DEFAULT, IPIPE_GCK_MMR);
-
- /* enable the clock wb,cfa,dfc,d2f,pre modules */
- regw_ip(base_addr, IPIPE_GCK_PIX_DEFAULT, IPIPE_GCK_PIX);
-}
-
-static void
-rsz_set_common_params(void __iomem *rsz_base, struct resizer_params *params)
-{
- struct rsz_common_params *rsz_common = &params->rsz_common;
- u32 val;
-
- /* Set mode */
- regw_rsz(rsz_base, params->oper_mode, RSZ_SRC_MODE);
-
- /* data source selection and bypass */
- val = (rsz_common->passthrough << RSZ_BYPASS_SHIFT) |
- rsz_common->source;
- regw_rsz(rsz_base, val, RSZ_SRC_FMT0);
-
- /* src image selection */
- val = (rsz_common->raw_flip & 1) |
- (rsz_common->src_img_fmt << RSZ_SRC_IMG_FMT_SHIFT) |
- ((rsz_common->y_c & 1) << RSZ_SRC_Y_C_SEL_SHIFT);
- regw_rsz(rsz_base, val, RSZ_SRC_FMT1);
-
- regw_rsz(rsz_base, rsz_common->vps & IPIPE_RSZ_VPS_MASK, RSZ_SRC_VPS);
- regw_rsz(rsz_base, rsz_common->hps & IPIPE_RSZ_HPS_MASK, RSZ_SRC_HPS);
- regw_rsz(rsz_base, rsz_common->vsz & IPIPE_RSZ_VSZ_MASK, RSZ_SRC_VSZ);
- regw_rsz(rsz_base, rsz_common->hsz & IPIPE_RSZ_HSZ_MASK, RSZ_SRC_HSZ);
- regw_rsz(rsz_base, rsz_common->yuv_y_min, RSZ_YUV_Y_MIN);
- regw_rsz(rsz_base, rsz_common->yuv_y_max, RSZ_YUV_Y_MAX);
- regw_rsz(rsz_base, rsz_common->yuv_c_min, RSZ_YUV_C_MIN);
- regw_rsz(rsz_base, rsz_common->yuv_c_max, RSZ_YUV_C_MAX);
- /* chromatic position */
- regw_rsz(rsz_base, rsz_common->out_chr_pos, RSZ_YUV_PHS);
-}
-
-static void
-rsz_set_rsz_regs(void __iomem *rsz_base, unsigned int rsz_id,
- struct resizer_params *params)
-{
- struct resizer_scale_param *rsc_params;
- struct rsz_ext_mem_param *ext_mem;
- struct resizer_rgb *rgb;
- u32 reg_base;
- u32 val;
-
- rsc_params = &params->rsz_rsc_param[rsz_id];
- rgb = &params->rsz2rgb[rsz_id];
- ext_mem = &params->ext_mem_param[rsz_id];
-
- if (rsz_id == RSZ_A) {
- val = rsc_params->h_flip << RSZA_H_FLIP_SHIFT;
- val |= rsc_params->v_flip << RSZA_V_FLIP_SHIFT;
- reg_base = RSZ_EN_A;
- } else {
- val = rsc_params->h_flip << RSZB_H_FLIP_SHIFT;
- val |= rsc_params->v_flip << RSZB_V_FLIP_SHIFT;
- reg_base = RSZ_EN_B;
- }
- /* update flip settings */
- regw_rsz(rsz_base, val, RSZ_SEQ);
-
- regw_rsz(rsz_base, params->oper_mode, reg_base + RSZ_MODE);
-
- val = (rsc_params->cen << RSZ_CEN_SHIFT) | rsc_params->yen;
- regw_rsz(rsz_base, val, reg_base + RSZ_420);
-
- regw_rsz(rsz_base, rsc_params->i_vps & RSZ_VPS_MASK,
- reg_base + RSZ_I_VPS);
- regw_rsz(rsz_base, rsc_params->i_hps & RSZ_HPS_MASK,
- reg_base + RSZ_I_HPS);
- regw_rsz(rsz_base, rsc_params->o_vsz & RSZ_O_VSZ_MASK,
- reg_base + RSZ_O_VSZ);
- regw_rsz(rsz_base, rsc_params->o_hsz & RSZ_O_HSZ_MASK,
- reg_base + RSZ_O_HSZ);
- regw_rsz(rsz_base, rsc_params->v_phs_y & RSZ_V_PHS_MASK,
- reg_base + RSZ_V_PHS_Y);
- regw_rsz(rsz_base, rsc_params->v_phs_c & RSZ_V_PHS_MASK,
- reg_base + RSZ_V_PHS_C);
-
- /* keep this additional adjustment to zero for now */
- regw_rsz(rsz_base, rsc_params->v_dif & RSZ_V_DIF_MASK,
- reg_base + RSZ_V_DIF);
-
- val = (rsc_params->v_typ_y & 1) |
- ((rsc_params->v_typ_c & 1) << RSZ_TYP_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_V_TYP);
-
- val = (rsc_params->v_lpf_int_y & RSZ_LPF_INT_MASK) |
- ((rsc_params->v_lpf_int_c & RSZ_LPF_INT_MASK) <<
- RSZ_LPF_INT_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_V_LPF);
-
- regw_rsz(rsz_base, rsc_params->h_phs &
- RSZ_H_PHS_MASK, reg_base + RSZ_H_PHS);
-
- regw_rsz(rsz_base, 0, reg_base + RSZ_H_PHS_ADJ);
- regw_rsz(rsz_base, rsc_params->h_dif &
- RSZ_H_DIF_MASK, reg_base + RSZ_H_DIF);
-
- val = (rsc_params->h_typ_y & 1) |
- ((rsc_params->h_typ_c & 1) << RSZ_TYP_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_H_TYP);
-
- val = (rsc_params->h_lpf_int_y & RSZ_LPF_INT_MASK) |
- ((rsc_params->h_lpf_int_c & RSZ_LPF_INT_MASK) <<
- RSZ_LPF_INT_C_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_H_LPF);
-
- regw_rsz(rsz_base, rsc_params->dscale_en & 1, reg_base + RSZ_DWN_EN);
-
- val = (rsc_params->h_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) |
- ((rsc_params->v_dscale_ave_sz & RSZ_DWN_SCALE_AV_SZ_MASK) <<
- RSZ_DWN_SCALE_AV_SZ_V_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_DWN_AV);
-
- /* setting rgb conversion parameters */
- regw_rsz(rsz_base, rgb->rgb_en, reg_base + RSZ_RGB_EN);
-
- val = (rgb->rgb_typ << RSZ_RGB_TYP_SHIFT) |
- (rgb->rgb_msk0 << RSZ_RGB_MSK0_SHIFT) |
- (rgb->rgb_msk1 << RSZ_RGB_MSK1_SHIFT);
- regw_rsz(rsz_base, val, reg_base + RSZ_RGB_TYP);
-
- regw_rsz(rsz_base, rgb->rgb_alpha_val & RSZ_RGB_ALPHA_MASK,
- reg_base + RSZ_RGB_BLD);
-
- /* setting external memory parameters */
- regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_y, reg_base + RSZ_SDR_Y_OFT);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_y,
- reg_base + RSZ_SDR_Y_PTR_S);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_e_y,
- reg_base + RSZ_SDR_Y_PTR_E);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_oft_c, reg_base + RSZ_SDR_C_OFT);
- regw_rsz(rsz_base, ext_mem->rsz_sdr_ptr_s_c,
- reg_base + RSZ_SDR_C_PTR_S);
- regw_rsz(rsz_base, (ext_mem->rsz_sdr_ptr_e_c >> 1),
- reg_base + RSZ_SDR_C_PTR_E);
-}
-
-/* set the registers of either RSZ_A or RSZ_B */
-static void
-ipipe_setup_resizer(void __iomem *rsz_base, struct resizer_params *params)
-{
- /* enable MMR gate to write to Resizer */
- regw_rsz(rsz_base, 1, RSZ_GCK_MMR);
-
- /* Enable resizer if it is not in bypass mode */
- if (params->rsz_common.passthrough)
- regw_rsz(rsz_base, 0, RSZ_GCK_SDR);
- else
- regw_rsz(rsz_base, 1, RSZ_GCK_SDR);
-
- rsz_set_common_params(rsz_base, params);
-
- regw_rsz(rsz_base, params->rsz_en[RSZ_A], RSZ_EN_A);
-
- if (params->rsz_en[RSZ_A])
- /*setting rescale parameters */
- rsz_set_rsz_regs(rsz_base, RSZ_A, params);
-
- regw_rsz(rsz_base, params->rsz_en[RSZ_B], RSZ_EN_B);
-
- if (params->rsz_en[RSZ_B])
- rsz_set_rsz_regs(rsz_base, RSZ_B, params);
-}
-
-static u32 ipipe_get_color_pat(u32 pix)
-{
- switch (pix) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return ipipe_sgrbg_pattern;
-
- default:
- return ipipe_srggb_pattern;
- }
-}
-
-static int ipipe_get_data_path(struct vpfe_ipipe_device *ipipe)
-{
- u32 temp_pix_fmt;
-
- switch (ipipe->formats[IPIPE_PAD_SINK].code) {
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- temp_pix_fmt = MEDIA_BUS_FMT_SGRBG12_1X12;
- break;
-
- default:
- temp_pix_fmt = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
- if (temp_pix_fmt == MEDIA_BUS_FMT_SGRBG12_1X12) {
- if (ipipe->formats[IPIPE_PAD_SOURCE].code ==
- MEDIA_BUS_FMT_SGRBG12_1X12)
- return IPIPE_RAW2RAW;
- return IPIPE_RAW2YUV;
- }
-
- return IPIPE_YUV2YUV;
-}
-
-static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipe);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
-
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
- return IPIPE_MODE_SINGLE_SHOT;
- if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
- return IPIPE_MODE_CONTINUOUS;
-
- return -EINVAL;
-}
-
-int config_ipipe_hw(struct vpfe_ipipe_device *ipipe)
-{
- struct vpfe_ipipe_input_config *config = &ipipe->config.input_config;
- void __iomem *ipipe_base = ipipe->base_addr;
- struct v4l2_mbus_framefmt *outformat;
- u32 color_pat;
- int ipipe_mode;
- u32 data_path;
-
- /* enable clock to IPIPE */
- vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
- ipipe_clock_enable(ipipe_base);
-
- if (ipipe->input == IPIPE_INPUT_NONE) {
- regw_ip(ipipe_base, 0, IPIPE_SRC_EN);
- return 0;
- }
-
- ipipe_mode = get_ipipe_mode(ipipe);
- if (ipipe_mode < 0) {
- pr_err("Failed to get ipipe mode");
- return -EINVAL;
- }
- regw_ip(ipipe_base, ipipe_mode, IPIPE_SRC_MODE);
-
- data_path = ipipe_get_data_path(ipipe);
- regw_ip(ipipe_base, data_path, IPIPE_SRC_FMT);
-
- regw_ip(ipipe_base, config->vst & IPIPE_RSZ_VPS_MASK, IPIPE_SRC_VPS);
- regw_ip(ipipe_base, config->hst & IPIPE_RSZ_HPS_MASK, IPIPE_SRC_HPS);
-
- outformat = &ipipe->formats[IPIPE_PAD_SOURCE];
- regw_ip(ipipe_base, (outformat->height + 1) & IPIPE_RSZ_VSZ_MASK,
- IPIPE_SRC_VSZ);
- regw_ip(ipipe_base, (outformat->width + 1) & IPIPE_RSZ_HSZ_MASK,
- IPIPE_SRC_HSZ);
-
- if (data_path == IPIPE_RAW2YUV ||
- data_path == IPIPE_RAW2RAW) {
- color_pat =
- ipipe_get_color_pat(ipipe->formats[IPIPE_PAD_SINK].code);
- regw_ip(ipipe_base, color_pat, IPIPE_SRC_COL);
- }
-
- return 0;
-}
-
-/*
- * config_rsz_hw() - Performs hardware setup of resizer.
- */
-int config_rsz_hw(struct vpfe_resizer_device *resizer,
- struct resizer_params *config)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- void __iomem *ipipe_base = vpfe_dev->vpfe_ipipe.base_addr;
- void __iomem *rsz_base = vpfe_dev->vpfe_resizer.base_addr;
-
- /* enable VPSS clock */
- vpss_enable_clock(VPSS_IPIPE_CLOCK, 1);
- ipipe_clock_enable(ipipe_base);
-
- ipipe_setup_resizer(rsz_base, config);
-
- return 0;
-}
-
-static void
-rsz_set_y_address(void __iomem *rsz_base, unsigned int address,
- unsigned int offset)
-{
- u32 val;
-
- val = address & SET_LOW_ADDR;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_L);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_L);
-
- val = (address & SET_HIGH_ADDR) >> 16;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_BAD_H);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_Y_SAD_H);
-}
-
-static void
-rsz_set_c_address(void __iomem *rsz_base, unsigned int address,
- unsigned int offset)
-{
- u32 val;
-
- val = address & SET_LOW_ADDR;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_L);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_L);
-
- val = (address & SET_HIGH_ADDR) >> 16;
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_BAD_H);
- regw_rsz(rsz_base, val, offset + RSZ_SDR_C_SAD_H);
-}
-
-/*
- * resizer_set_outaddr() - set the address for given resize_no
- * @rsz_base: resizer base address
- * @params: pointer to resizer_params structure
- * @resize_no: 0 - Resizer-A, 1 - Resizer B
- * @address: the address to set
- */
-int
-resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
- int resize_no, unsigned int address)
-{
- struct resizer_scale_param *rsc_param;
- struct rsz_ext_mem_param *mem_param;
- struct rsz_common_params *rsz_common;
- unsigned int rsz_start_add;
- unsigned int val;
-
- if (resize_no != RSZ_A && resize_no != RSZ_B)
- return -EINVAL;
-
- mem_param = &params->ext_mem_param[resize_no];
- rsc_param = &params->rsz_rsc_param[resize_no];
- rsz_common = &params->rsz_common;
-
- if (resize_no == RSZ_A)
- rsz_start_add = RSZ_EN_A;
- else
- rsz_start_add = RSZ_EN_B;
-
- /* y_c = 0 for y, = 1 for c */
- if (rsz_common->src_img_fmt == RSZ_IMG_420) {
- if (rsz_common->y_c) {
- /* C channel */
- val = address + mem_param->flip_ofst_c;
- rsz_set_c_address(rsz_base, val, rsz_start_add);
- } else {
- val = address + mem_param->flip_ofst_y;
- rsz_set_y_address(rsz_base, val, rsz_start_add);
- }
- } else {
- if (rsc_param->cen && rsc_param->yen) {
- /* 420 */
- val = address + mem_param->c_offset +
- mem_param->flip_ofst_c +
- mem_param->user_y_ofst +
- mem_param->user_c_ofst;
- if (resize_no == RSZ_B)
- val +=
- params->ext_mem_param[RSZ_A].user_y_ofst +
- params->ext_mem_param[RSZ_A].user_c_ofst;
- /* set C address */
- rsz_set_c_address(rsz_base, val, rsz_start_add);
- }
- val = address + mem_param->flip_ofst_y + mem_param->user_y_ofst;
- if (resize_no == RSZ_B)
- val += params->ext_mem_param[RSZ_A].user_y_ofst +
- params->ext_mem_param[RSZ_A].user_c_ofst;
- /* set Y address */
- rsz_set_y_address(rsz_base, val, rsz_start_add);
- }
- /* resizer must be enabled */
- regw_rsz(rsz_base, params->rsz_en[resize_no], rsz_start_add);
-
- return 0;
-}
-
-void
-ipipe_set_lutdpc_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_lutdpc *dpc)
-{
- u32 max_tbl_size = LUT_DPC_MAX_SIZE >> 1;
- u32 lut_start_addr = DPC_TB0_START_ADDR;
- u32 val;
- u32 count;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, dpc->en, DPC_LUT_EN);
-
- if (dpc->en != 1)
- return;
-
- val = LUTDPC_TBL_256_EN | (dpc->repl_white & 1);
- regw_ip(base_addr, val, DPC_LUT_SEL);
- regw_ip(base_addr, LUT_DPC_START_ADDR, DPC_LUT_ADR);
- regw_ip(base_addr, dpc->dpc_size, DPC_LUT_SIZ & LUT_DPC_SIZE_MASK);
-
- for (count = 0; count < dpc->dpc_size; count++) {
- if (count >= max_tbl_size)
- lut_start_addr = DPC_TB1_START_ADDR;
- val = (dpc->table[count].horz_pos & LUT_DPC_H_POS_MASK) |
- ((dpc->table[count].vert_pos & LUT_DPC_V_POS_MASK) <<
- LUT_DPC_V_POS_SHIFT) | (dpc->table[count].method <<
- LUT_DPC_CORR_METH_SHIFT);
- w_ip_table(isp5_base_addr, val, (lut_start_addr +
- ((count % max_tbl_size) << 2)));
- }
-}
-
-static void
-set_dpc_thresholds(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_thr)
-{
- regw_ip(base_addr, dpc_thr->corr_thr.r & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_R);
- regw_ip(base_addr, dpc_thr->corr_thr.gr & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_GR);
- regw_ip(base_addr, dpc_thr->corr_thr.gb & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_GB);
- regw_ip(base_addr, dpc_thr->corr_thr.b & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2C_THR_B);
- regw_ip(base_addr, dpc_thr->det_thr.r & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_R);
- regw_ip(base_addr, dpc_thr->det_thr.gr & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_GR);
- regw_ip(base_addr, dpc_thr->det_thr.gb & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_GB);
- regw_ip(base_addr, dpc_thr->det_thr.b & OTFDPC_DPC2_THR_MASK,
- DPC_OTF_2D_THR_B);
-}
-
-void ipipe_set_otfdpc_regs(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc *otfdpc)
-{
- struct vpfe_ipipe_otfdpc_2_0_cfg *dpc_2_0 = &otfdpc->alg_cfg.dpc_2_0;
- struct vpfe_ipipe_otfdpc_3_0_cfg *dpc_3_0 = &otfdpc->alg_cfg.dpc_3_0;
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- regw_ip(base_addr, (otfdpc->en & 1), DPC_OTF_EN);
- if (!otfdpc->en)
- return;
-
- /* dpc enabled */
- val = (otfdpc->det_method << OTF_DET_METHOD_SHIFT) | otfdpc->alg;
- regw_ip(base_addr, val, DPC_OTF_TYP);
-
- if (otfdpc->det_method == VPFE_IPIPE_DPC_OTF_MIN_MAX) {
- /* ALG = 0, TYP = 0, DPC_OTF_2D_THR_[x] = 0
- * DPC_OTF_2C_THR_[x] = Maximum threshold
- * MinMax method
- */
- dpc_2_0->det_thr.r = dpc_2_0->det_thr.gb =
- dpc_2_0->det_thr.gr = dpc_2_0->det_thr.b = 0;
- set_dpc_thresholds(base_addr, dpc_2_0);
- return;
- }
- /* MinMax2 */
- if (otfdpc->alg == VPFE_IPIPE_OTFDPC_2_0) {
- set_dpc_thresholds(base_addr, dpc_2_0);
- return;
- }
- regw_ip(base_addr, dpc_3_0->act_adj_shf &
- OTF_DPC3_0_SHF_MASK, DPC_OTF_3_SHF);
- /* Detection thresholds */
- regw_ip(base_addr, ((dpc_3_0->det_thr & OTF_DPC3_0_THR_MASK) <<
- OTF_DPC3_0_THR_SHIFT), DPC_OTF_3D_THR);
- regw_ip(base_addr, dpc_3_0->det_slp &
- OTF_DPC3_0_SLP_MASK, DPC_OTF_3D_SLP);
- regw_ip(base_addr, dpc_3_0->det_thr_min &
- OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MIN);
- regw_ip(base_addr, dpc_3_0->det_thr_max &
- OTF_DPC3_0_DET_MASK, DPC_OTF_3D_MAX);
- /* Correction thresholds */
- regw_ip(base_addr, ((dpc_3_0->corr_thr & OTF_DPC3_0_THR_MASK) <<
- OTF_DPC3_0_THR_SHIFT), DPC_OTF_3C_THR);
- regw_ip(base_addr, dpc_3_0->corr_slp &
- OTF_DPC3_0_SLP_MASK, DPC_OTF_3C_SLP);
- regw_ip(base_addr, dpc_3_0->corr_thr_min &
- OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MIN);
- regw_ip(base_addr, dpc_3_0->corr_thr_max &
- OTF_DPC3_0_CORR_MASK, DPC_OTF_3C_MAX);
-}
-
-/* 2D Noise filter */
-void
-ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_nf *noise_filter)
-{
-
- u32 offset = D2F_1ST;
- int count;
- u32 val;
-
- if (id == IPIPE_D2F_2ND)
- offset = D2F_2ND;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, noise_filter->en & 1, offset + D2F_EN);
- if (!noise_filter->en)
- return;
-
- /*noise filter enabled */
- /* Combine all the fields to make D2F_CFG register of IPIPE */
- val = ((noise_filter->spread_val & D2F_SPR_VAL_MASK) <<
- D2F_SPR_VAL_SHIFT) | ((noise_filter->shft_val &
- D2F_SHFT_VAL_MASK) << D2F_SHFT_VAL_SHIFT) |
- (noise_filter->gr_sample_meth << D2F_SAMPLE_METH_SHIFT) |
- ((noise_filter->apply_lsc_gain & 1) <<
- D2F_APPLY_LSC_GAIN_SHIFT) | D2F_USE_SPR_REG_VAL;
- regw_ip(base_addr, val, offset + D2F_TYP);
-
- /* edge detection minimum */
- regw_ip(base_addr, noise_filter->edge_det_min_thr &
- D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MIN);
-
- /* edge detection maximum */
- regw_ip(base_addr, noise_filter->edge_det_max_thr &
- D2F_EDGE_DET_THR_MASK, offset + D2F_EDG_MAX);
-
- for (count = 0; count < VPFE_IPIPE_NF_STR_TABLE_SIZE; count++)
- regw_ip(base_addr,
- (noise_filter->str[count] & D2F_STR_VAL_MASK),
- offset + D2F_STR + count * 4);
-
- for (count = 0; count < VPFE_IPIPE_NF_THR_TABLE_SIZE; count++)
- regw_ip(base_addr, noise_filter->thr[count] & D2F_THR_VAL_MASK,
- offset + D2F_THR + count * 4);
-}
-
-#define IPIPE_U8Q5(decimal, integer) \
- (((decimal & 0x1f) | ((integer & 0x7) << 5)))
-
-/* Green Imbalance Correction */
-void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, gic->en & 1, GIC_EN);
-
- if (!gic->en)
- return;
-
- /*gic enabled */
- val = (gic->wt_fn_type << GIC_TYP_SHIFT) |
- (gic->thr_sel << GIC_THR_SEL_SHIFT) |
- ((gic->apply_lsc_gain & 1) << GIC_APPLY_LSC_GAIN_SHIFT);
- regw_ip(base_addr, val, GIC_TYP);
-
- regw_ip(base_addr, gic->gain & GIC_GAIN_MASK, GIC_GAN);
-
- if (gic->gic_alg != VPFE_IPIPE_GIC_ALG_ADAPT_GAIN) {
- /* Constant Gain. Set threshold to maximum */
- regw_ip(base_addr, GIC_THR_MASK, GIC_THR);
- return;
- }
-
- if (gic->thr_sel == VPFE_IPIPE_GIC_THR_REG) {
- regw_ip(base_addr, gic->thr & GIC_THR_MASK, GIC_THR);
- regw_ip(base_addr, gic->slope & GIC_SLOPE_MASK, GIC_SLP);
- } else {
- /* Use NF thresholds */
- val = IPIPE_U8Q5(gic->nf2_thr_gain.decimal,
- gic->nf2_thr_gain.integer);
- regw_ip(base_addr, val, GIC_NFGAN);
- }
-}
-
-#define IPIPE_U13Q9(decimal, integer) \
- (((decimal & 0x1ff) | ((integer & 0xf) << 9)))
-/* White balance */
-void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
- /* Offsets. S12 */
- regw_ip(base_addr, wb->ofst_r & WB_OFFSET_MASK, WB2_OFT_R);
- regw_ip(base_addr, wb->ofst_gr & WB_OFFSET_MASK, WB2_OFT_GR);
- regw_ip(base_addr, wb->ofst_gb & WB_OFFSET_MASK, WB2_OFT_GB);
- regw_ip(base_addr, wb->ofst_b & WB_OFFSET_MASK, WB2_OFT_B);
-
- /* Gains. U13Q9 */
- val = IPIPE_U13Q9(wb->gain_r.decimal, wb->gain_r.integer);
- regw_ip(base_addr, val, WB2_WGN_R);
-
- val = IPIPE_U13Q9(wb->gain_gr.decimal, wb->gain_gr.integer);
- regw_ip(base_addr, val, WB2_WGN_GR);
-
- val = IPIPE_U13Q9(wb->gain_gb.decimal, wb->gain_gb.integer);
- regw_ip(base_addr, val, WB2_WGN_GB);
-
- val = IPIPE_U13Q9(wb->gain_b.decimal, wb->gain_b.integer);
- regw_ip(base_addr, val, WB2_WGN_B);
-}
-
-/* CFA */
-void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa)
-{
- ipipe_clock_enable(base_addr);
-
- regw_ip(base_addr, cfa->alg, CFA_MODE);
- regw_ip(base_addr, cfa->hpf_thr_2dir & CFA_HPF_THR_2DIR_MASK,
- CFA_2DIR_HPF_THR);
- regw_ip(base_addr, cfa->hpf_slp_2dir & CFA_HPF_SLOPE_2DIR_MASK,
- CFA_2DIR_HPF_SLP);
- regw_ip(base_addr, cfa->hp_mix_thr_2dir & CFA_HPF_MIX_THR_2DIR_MASK,
- CFA_2DIR_MIX_THR);
- regw_ip(base_addr, cfa->hp_mix_slope_2dir & CFA_HPF_MIX_SLP_2DIR_MASK,
- CFA_2DIR_MIX_SLP);
- regw_ip(base_addr, cfa->dir_thr_2dir & CFA_DIR_THR_2DIR_MASK,
- CFA_2DIR_DIR_THR);
- regw_ip(base_addr, cfa->dir_slope_2dir & CFA_DIR_SLP_2DIR_MASK,
- CFA_2DIR_DIR_SLP);
- regw_ip(base_addr, cfa->nd_wt_2dir & CFA_ND_WT_2DIR_MASK,
- CFA_2DIR_NDWT);
- regw_ip(base_addr, cfa->hue_fract_daa & CFA_DAA_HUE_FRA_MASK,
- CFA_MONO_HUE_FRA);
- regw_ip(base_addr, cfa->edge_thr_daa & CFA_DAA_EDG_THR_MASK,
- CFA_MONO_EDG_THR);
- regw_ip(base_addr, cfa->thr_min_daa & CFA_DAA_THR_MIN_MASK,
- CFA_MONO_THR_MIN);
- regw_ip(base_addr, cfa->thr_slope_daa & CFA_DAA_THR_SLP_MASK,
- CFA_MONO_THR_SLP);
- regw_ip(base_addr, cfa->slope_min_daa & CFA_DAA_SLP_MIN_MASK,
- CFA_MONO_SLP_MIN);
- regw_ip(base_addr, cfa->slope_slope_daa & CFA_DAA_SLP_SLP_MASK,
- CFA_MONO_SLP_SLP);
- regw_ip(base_addr, cfa->lp_wt_daa & CFA_DAA_LP_WT_MASK,
- CFA_MONO_LPWT);
-}
-
-void
-ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_rgb2rgb *rgb)
-{
- u32 offset_mask = RGB2RGB_1_OFST_MASK;
- u32 offset = RGB1_MUL_BASE;
- u32 integ_mask = 0xf;
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- if (id == IPIPE_RGB2RGB_2) {
- /*
- * For second RGB module, gain integer is 3 bits instead
- * of 4, offset has 11 bits instead of 13
- */
- offset = RGB2_MUL_BASE;
- integ_mask = 0x7;
- offset_mask = RGB2RGB_2_OFST_MASK;
- }
- /* Gains */
- val = (rgb->coef_rr.decimal & 0xff) |
- ((rgb->coef_rr.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RR);
- val = (rgb->coef_gr.decimal & 0xff) |
- ((rgb->coef_gr.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GR);
- val = (rgb->coef_br.decimal & 0xff) |
- ((rgb->coef_br.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BR);
- val = (rgb->coef_rg.decimal & 0xff) |
- ((rgb->coef_rg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RG);
- val = (rgb->coef_gg.decimal & 0xff) |
- ((rgb->coef_gg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GG);
- val = (rgb->coef_bg.decimal & 0xff) |
- ((rgb->coef_bg.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BG);
- val = (rgb->coef_rb.decimal & 0xff) |
- ((rgb->coef_rb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_RB);
- val = (rgb->coef_gb.decimal & 0xff) |
- ((rgb->coef_gb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_GB);
- val = (rgb->coef_bb.decimal & 0xff) |
- ((rgb->coef_bb.integer & integ_mask) << 8);
- regw_ip(base_addr, val, offset + RGB_MUL_BB);
-
- /* Offsets */
- regw_ip(base_addr, rgb->out_ofst_r & offset_mask, offset + RGB_OFT_OR);
- regw_ip(base_addr, rgb->out_ofst_g & offset_mask, offset + RGB_OFT_OG);
- regw_ip(base_addr, rgb->out_ofst_b & offset_mask, offset + RGB_OFT_OB);
-}
-
-static void
-ipipe_update_gamma_tbl(void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gamma_entry *table, int size, u32 addr)
-{
- int count;
- u32 val;
-
- for (count = 0; count < size; count++) {
- val = table[count].slope & GAMMA_MASK;
- val |= (table[count].offset & GAMMA_MASK) << GAMMA_SHIFT;
- w_ip_table(isp5_base_addr, val, (addr + (count * 4)));
- }
-}
-
-void
-ipipe_set_gamma_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gamma *gamma)
-{
- int table_size;
- u32 val;
-
- ipipe_clock_enable(base_addr);
- val = (gamma->bypass_r << GAMMA_BYPR_SHIFT) |
- (gamma->bypass_b << GAMMA_BYPG_SHIFT) |
- (gamma->bypass_g << GAMMA_BYPB_SHIFT) |
- (gamma->tbl_sel << GAMMA_TBL_SEL_SHIFT) |
- (gamma->tbl_size << GAMMA_TBL_SIZE_SHIFT);
-
- regw_ip(base_addr, val, GMM_CFG);
-
- if (gamma->tbl_sel != VPFE_IPIPE_GAMMA_TBL_RAM)
- return;
-
- table_size = gamma->tbl_size;
-
- if (!gamma->bypass_r)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_r,
- table_size, GAMMA_R_START_ADDR);
- if (!gamma->bypass_b)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_b,
- table_size, GAMMA_B_START_ADDR);
- if (!gamma->bypass_g)
- ipipe_update_gamma_tbl(isp5_base_addr, gamma->table_g,
- table_size, GAMMA_G_START_ADDR);
-}
-
-void
-ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_3d_lut *lut_3d)
-{
- struct vpfe_ipipe_3d_lut_entry *tbl;
- u32 bnk_index;
- u32 tbl_index;
- u32 val;
- u32 i;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, lut_3d->en, D3LUT_EN);
-
- if (!lut_3d->en)
- return;
-
- /* valid table */
- tbl = lut_3d->table;
- for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
- /*
- * Each entry has 0-9 (B), 10-19 (G) and
- * 20-29 R values
- */
- val = tbl[i].b & D3_LUT_ENTRY_MASK;
- val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
- D3_LUT_ENTRY_G_SHIFT;
- val |= (tbl[i].r & D3_LUT_ENTRY_MASK) <<
- D3_LUT_ENTRY_R_SHIFT;
- bnk_index = i % 4;
- tbl_index = i >> 2;
- tbl_index <<= 2;
- if (bnk_index == 0)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB0_START_ADDR);
- else if (bnk_index == 1)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB1_START_ADDR);
- else if (bnk_index == 2)
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB2_START_ADDR);
- else
- w_ip_table(isp5_base_addr, val,
- tbl_index + D3L_TB3_START_ADDR);
- }
-}
-
-/* Luminance adjustments */
-void
-ipipe_set_lum_adj_regs(void __iomem *base_addr, struct ipipe_lum_adj *lum_adj)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- /* combine fields of YUV_ADJ to set brightness and contrast */
- val = lum_adj->contrast << LUM_ADJ_CONTR_SHIFT |
- lum_adj->brightness << LUM_ADJ_BRIGHT_SHIFT;
- regw_ip(base_addr, val, YUV_ADJ);
-}
-
-inline u32 ipipe_s12q8(unsigned short decimal, short integer)
-{
- return (decimal & 0xff) | ((integer & 0xf) << 8);
-}
-
-void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
- struct vpfe_ipipe_rgb2yuv *yuv)
-{
- u32 val;
-
- /* S10Q8 */
- ipipe_clock_enable(base_addr);
- val = ipipe_s12q8(yuv->coef_ry.decimal, yuv->coef_ry.integer);
- regw_ip(base_addr, val, YUV_MUL_RY);
- val = ipipe_s12q8(yuv->coef_gy.decimal, yuv->coef_gy.integer);
- regw_ip(base_addr, val, YUV_MUL_GY);
- val = ipipe_s12q8(yuv->coef_by.decimal, yuv->coef_by.integer);
- regw_ip(base_addr, val, YUV_MUL_BY);
- val = ipipe_s12q8(yuv->coef_rcb.decimal, yuv->coef_rcb.integer);
- regw_ip(base_addr, val, YUV_MUL_RCB);
- val = ipipe_s12q8(yuv->coef_gcb.decimal, yuv->coef_gcb.integer);
- regw_ip(base_addr, val, YUV_MUL_GCB);
- val = ipipe_s12q8(yuv->coef_bcb.decimal, yuv->coef_bcb.integer);
- regw_ip(base_addr, val, YUV_MUL_BCB);
- val = ipipe_s12q8(yuv->coef_rcr.decimal, yuv->coef_rcr.integer);
- regw_ip(base_addr, val, YUV_MUL_RCR);
- val = ipipe_s12q8(yuv->coef_gcr.decimal, yuv->coef_gcr.integer);
- regw_ip(base_addr, val, YUV_MUL_GCR);
- val = ipipe_s12q8(yuv->coef_bcr.decimal, yuv->coef_bcr.integer);
- regw_ip(base_addr, val, YUV_MUL_BCR);
- regw_ip(base_addr, yuv->out_ofst_y & RGB2YCBCR_OFST_MASK, YUV_OFT_Y);
- regw_ip(base_addr, yuv->out_ofst_cb & RGB2YCBCR_OFST_MASK, YUV_OFT_CB);
- regw_ip(base_addr, yuv->out_ofst_cr & RGB2YCBCR_OFST_MASK, YUV_OFT_CR);
-}
-
-/* YUV 422 conversion */
-void
-ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
- struct vpfe_ipipe_yuv422_conv *conv)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
-
- /* Combine all the fields to make YUV_PHS register of IPIPE */
- val = (conv->chrom_pos << 0) | (conv->en_chrom_lpf << 1);
- regw_ip(base_addr, val, YUV_PHS);
-}
-
-void
-ipipe_set_gbce_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_gbce *gbce)
-{
- unsigned int count;
- u32 mask = GBCE_Y_VAL_MASK;
-
- if (gbce->type == VPFE_IPIPE_GBCE_GAIN_TBL)
- mask = GBCE_GAIN_VAL_MASK;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, gbce->en & 1, GBCE_EN);
-
- if (!gbce->en)
- return;
-
- regw_ip(base_addr, gbce->type, GBCE_TYP);
-
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_GBCE_LUT; count += 2)
- w_ip_table(isp5_base_addr, ((gbce->table[count + 1] & mask) <<
- GBCE_ENTRY_SHIFT) | (gbce->table[count] & mask),
- ((count/2) << 2) + GBCE_TB_START_ADDR);
-}
-
-void
-ipipe_set_ee_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
- struct vpfe_ipipe_yee *ee)
-{
- unsigned int count;
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, ee->en, YEE_EN);
-
- if (!ee->en)
- return;
-
- val = ee->en_halo_red & 1;
- val |= ee->merge_meth << YEE_HALO_RED_EN_SHIFT;
- regw_ip(base_addr, val, YEE_TYP);
-
- regw_ip(base_addr, ee->hpf_shft, YEE_SHF);
- regw_ip(base_addr, ee->hpf_coef_00 & YEE_COEF_MASK, YEE_MUL_00);
- regw_ip(base_addr, ee->hpf_coef_01 & YEE_COEF_MASK, YEE_MUL_01);
- regw_ip(base_addr, ee->hpf_coef_02 & YEE_COEF_MASK, YEE_MUL_02);
- regw_ip(base_addr, ee->hpf_coef_10 & YEE_COEF_MASK, YEE_MUL_10);
- regw_ip(base_addr, ee->hpf_coef_11 & YEE_COEF_MASK, YEE_MUL_11);
- regw_ip(base_addr, ee->hpf_coef_12 & YEE_COEF_MASK, YEE_MUL_12);
- regw_ip(base_addr, ee->hpf_coef_20 & YEE_COEF_MASK, YEE_MUL_20);
- regw_ip(base_addr, ee->hpf_coef_21 & YEE_COEF_MASK, YEE_MUL_21);
- regw_ip(base_addr, ee->hpf_coef_22 & YEE_COEF_MASK, YEE_MUL_22);
- regw_ip(base_addr, ee->yee_thr & YEE_THR_MASK, YEE_THR);
- regw_ip(base_addr, ee->es_gain & YEE_ES_GAIN_MASK, YEE_E_GAN);
- regw_ip(base_addr, ee->es_thr1 & YEE_ES_THR1_MASK, YEE_E_THR1);
- regw_ip(base_addr, ee->es_thr2 & YEE_THR_MASK, YEE_E_THR2);
- regw_ip(base_addr, ee->es_gain_grad & YEE_THR_MASK, YEE_G_GAN);
- regw_ip(base_addr, ee->es_ofst_grad & YEE_THR_MASK, YEE_G_OFT);
-
- for (count = 0; count < VPFE_IPIPE_MAX_SIZE_YEE_LUT; count += 2)
- w_ip_table(isp5_base_addr, ((ee->table[count + 1] &
- YEE_ENTRY_MASK) << YEE_ENTRY_SHIFT) |
- (ee->table[count] & YEE_ENTRY_MASK),
- ((count/2) << 2) + YEE_TB_START_ADDR);
-}
-
-/* Chromatic Artifact Correction. CAR */
-static void ipipe_set_mf(void __iomem *base_addr)
-{
- /* typ to dynamic switch */
- regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
- /* Set SW0 to maximum */
- regw_ip(base_addr, CAR_MF_THR, CAR_SW);
-}
-
-static void
-ipipe_set_gain_ctrl(void __iomem *base_addr, struct vpfe_ipipe_car *car)
-{
- regw_ip(base_addr, VPFE_IPIPE_CAR_CHR_GAIN_CTRL, CAR_TYP);
- regw_ip(base_addr, car->hpf, CAR_HPF_TYP);
- regw_ip(base_addr, car->hpf_shft & CAR_HPF_SHIFT_MASK, CAR_HPF_SHF);
- regw_ip(base_addr, car->hpf_thr, CAR_HPF_THR);
- regw_ip(base_addr, car->gain1.gain, CAR_GN1_GAN);
- regw_ip(base_addr, car->gain1.shft & CAR_GAIN1_SHFT_MASK, CAR_GN1_SHF);
- regw_ip(base_addr, car->gain1.gain_min & CAR_GAIN_MIN_MASK,
- CAR_GN1_MIN);
- regw_ip(base_addr, car->gain2.gain, CAR_GN2_GAN);
- regw_ip(base_addr, car->gain2.shft & CAR_GAIN2_SHFT_MASK, CAR_GN2_SHF);
- regw_ip(base_addr, car->gain2.gain_min & CAR_GAIN_MIN_MASK,
- CAR_GN2_MIN);
-}
-
-void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car)
-{
- u32 val;
-
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, car->en, CAR_EN);
-
- if (!car->en)
- return;
-
- switch (car->meth) {
- case VPFE_IPIPE_CAR_MED_FLTR:
- ipipe_set_mf(base_addr);
- break;
-
- case VPFE_IPIPE_CAR_CHR_GAIN_CTRL:
- ipipe_set_gain_ctrl(base_addr, car);
- break;
-
- default:
- /* Dynamic switch between MF and Gain Ctrl. */
- ipipe_set_mf(base_addr);
- ipipe_set_gain_ctrl(base_addr, car);
- /* Set the threshold for switching between the two.
- * Here we overwrite the MF SW0 value.
- */
- regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
- val = car->sw1;
- val <<= CAR_SW1_SHIFT;
- val |= car->sw0;
- regw_ip(base_addr, val, CAR_SW);
- }
-}
-
-/* Chromatic Gain Suppression */
-void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs)
-{
- ipipe_clock_enable(base_addr);
- regw_ip(base_addr, cgs->en, CGS_EN);
-
- if (!cgs->en)
- return;
-
- /* Set the bright side parameters */
- regw_ip(base_addr, cgs->h_thr, CGS_GN1_H_THR);
- regw_ip(base_addr, cgs->h_slope, CGS_GN1_H_GAN);
- regw_ip(base_addr, cgs->h_shft & CAR_SHIFT_MASK, CGS_GN1_H_SHF);
- regw_ip(base_addr, cgs->h_min, CGS_GN1_H_MIN);
-}
-
-void rsz_src_enable(void __iomem *rsz_base, int enable)
-{
- regw_rsz(rsz_base, enable, RSZ_SRC_EN);
-}
-
-int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable)
-{
- if (rsz_id == RSZ_A) {
- regw_rsz(rsz_base, enable, RSZ_EN_A);
- /* We always enable RSZ_A. RSZ_B is enabled upon request from
- * the application, so enable RSZ_SRC_EN along with RSZ_A.
- */
- regw_rsz(rsz_base, enable, RSZ_SRC_EN);
- } else if (rsz_id == RSZ_B) {
- regw_rsz(rsz_base, enable, RSZ_EN_B);
- } else {
- BUG();
- }
-
- return 0;
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
deleted file mode 100644
index 16b6a14b7058..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPE_HW_H
-#define _DAVINCI_VPFE_DM365_IPIPE_HW_H
-
-#include "vpfe_mc_capture.h"
-
-#define SET_LOW_ADDR 0x0000ffff
-#define SET_HIGH_ADDR 0xffff0000
-
-/* Below are the internal tables */
-#define DPC_TB0_START_ADDR 0x8000
-#define DPC_TB1_START_ADDR 0x8400
-
-#define GAMMA_R_START_ADDR 0xa800
-#define GAMMA_G_START_ADDR 0xb000
-#define GAMMA_B_START_ADDR 0xb800
-
-/* RAM table addresses for edge enhancement correction */
-#define YEE_TB_START_ADDR 0x8800
-
-/* RAM table address for GBCE LUT */
-#define GBCE_TB_START_ADDR 0x9000
-
-/* RAM table for 3D NF LUT */
-#define D3L_TB0_START_ADDR 0x9800
-#define D3L_TB1_START_ADDR 0x9c00
-#define D3L_TB2_START_ADDR 0xa000
-#define D3L_TB3_START_ADDR 0xa400
-
-/* IPIPE Register Offsets from the base address */
-#define IPIPE_SRC_EN 0x0000
-#define IPIPE_SRC_MODE 0x0004
-#define IPIPE_SRC_FMT 0x0008
-#define IPIPE_SRC_COL 0x000c
-#define IPIPE_SRC_VPS 0x0010
-#define IPIPE_SRC_VSZ 0x0014
-#define IPIPE_SRC_HPS 0x0018
-#define IPIPE_SRC_HSZ 0x001c
-
-#define IPIPE_SEL_SBU 0x0020
-
-#define IPIPE_DMA_STA 0x0024
-#define IPIPE_GCK_MMR 0x0028
-#define IPIPE_GCK_PIX 0x002c
-#define IPIPE_RESERVED0 0x0030
-
-/* Defect Correction */
-#define DPC_LUT_EN 0x0034
-#define DPC_LUT_SEL 0x0038
-#define DPC_LUT_ADR 0x003c
-#define DPC_LUT_SIZ 0x0040
-#define DPC_OTF_EN 0x0044
-#define DPC_OTF_TYP 0x0048
-#define DPC_OTF_2D_THR_R 0x004c
-#define DPC_OTF_2D_THR_GR 0x0050
-#define DPC_OTF_2D_THR_GB 0x0054
-#define DPC_OTF_2D_THR_B 0x0058
-#define DPC_OTF_2C_THR_R 0x005c
-#define DPC_OTF_2C_THR_GR 0x0060
-#define DPC_OTF_2C_THR_GB 0x0064
-#define DPC_OTF_2C_THR_B 0x0068
-#define DPC_OTF_3_SHF 0x006c
-#define DPC_OTF_3D_THR 0x0070
-#define DPC_OTF_3D_SLP 0x0074
-#define DPC_OTF_3D_MIN 0x0078
-#define DPC_OTF_3D_MAX 0x007c
-#define DPC_OTF_3C_THR 0x0080
-#define DPC_OTF_3C_SLP 0x0084
-#define DPC_OTF_3C_MIN 0x0088
-#define DPC_OTF_3C_MAX 0x008c
-
-/* Lens Shading Correction */
-#define LSC_VOFT 0x90
-#define LSC_VA2 0x94
-#define LSC_VA1 0x98
-#define LSC_VS 0x9c
-#define LSC_HOFT 0xa0
-#define LSC_HA2 0xa4
-#define LSC_HA1 0xa8
-#define LSC_HS 0xac
-#define LSC_GAIN_R 0xb0
-#define LSC_GAIN_GR 0xb4
-#define LSC_GAIN_GB 0xb8
-#define LSC_GAIN_B 0xbc
-#define LSC_OFT_R 0xc0
-#define LSC_OFT_GR 0xc4
-#define LSC_OFT_GB 0xc8
-#define LSC_OFT_B 0xcc
-#define LSC_SHF 0xd0
-#define LSC_MAX 0xd4
-
-/* Noise Filter 1. Offsets from the start address given below */
-#define D2F_1ST 0xd8
-#define D2F_EN 0x0
-#define D2F_TYP 0x4
-#define D2F_THR 0x8
-#define D2F_STR 0x28
-#define D2F_SPR 0x48
-#define D2F_EDG_MIN 0x68
-#define D2F_EDG_MAX 0x6c
-
-/* Noise Filter 2 */
-#define D2F_2ND 0x148
-
-/* GIC */
-#define GIC_EN 0x1b8
-#define GIC_TYP 0x1bc
-#define GIC_GAN 0x1c0
-#define GIC_NFGAN 0x1c4
-#define GIC_THR 0x1c8
-#define GIC_SLP 0x1cc
-
-/* White Balance */
-#define WB2_OFT_R 0x1d0
-#define WB2_OFT_GR 0x1d4
-#define WB2_OFT_GB 0x1d8
-#define WB2_OFT_B 0x1dc
-#define WB2_WGN_R 0x1e0
-#define WB2_WGN_GR 0x1e4
-#define WB2_WGN_GB 0x1e8
-#define WB2_WGN_B 0x1ec
-
-/* CFA interpolation */
-#define CFA_MODE 0x1f0
-#define CFA_2DIR_HPF_THR 0x1f4
-#define CFA_2DIR_HPF_SLP 0x1f8
-#define CFA_2DIR_MIX_THR 0x1fc
-#define CFA_2DIR_MIX_SLP 0x200
-#define CFA_2DIR_DIR_THR 0x204
-#define CFA_2DIR_DIR_SLP 0x208
-#define CFA_2DIR_NDWT 0x20c
-#define CFA_MONO_HUE_FRA 0x210
-#define CFA_MONO_EDG_THR 0x214
-#define CFA_MONO_THR_MIN 0x218
-#define CFA_MONO_THR_SLP 0x21c
-#define CFA_MONO_SLP_MIN 0x220
-#define CFA_MONO_SLP_SLP 0x224
-#define CFA_MONO_LPWT 0x228
-
-/* RGB to RGB conversion - 1st */
-#define RGB1_MUL_BASE 0x22c
-/* Offsets from base */
-#define RGB_MUL_RR 0x0
-#define RGB_MUL_GR 0x4
-#define RGB_MUL_BR 0x8
-#define RGB_MUL_RG 0xc
-#define RGB_MUL_GG 0x10
-#define RGB_MUL_BG 0x14
-#define RGB_MUL_RB 0x18
-#define RGB_MUL_GB 0x1c
-#define RGB_MUL_BB 0x20
-#define RGB_OFT_OR 0x24
-#define RGB_OFT_OG 0x28
-#define RGB_OFT_OB 0x2c
-
-/* Gamma */
-#define GMM_CFG 0x25c
-
-/* RGB to RGB conversion - 2nd */
-#define RGB2_MUL_BASE 0x260
-
-/* 3D LUT */
-#define D3LUT_EN 0x290
-
-/* RGB to YUV(YCbCr) conversion */
-#define YUV_ADJ 0x294
-#define YUV_MUL_RY 0x298
-#define YUV_MUL_GY 0x29c
-#define YUV_MUL_BY 0x2a0
-#define YUV_MUL_RCB 0x2a4
-#define YUV_MUL_GCB 0x2a8
-#define YUV_MUL_BCB 0x2ac
-#define YUV_MUL_RCR 0x2b0
-#define YUV_MUL_GCR 0x2b4
-#define YUV_MUL_BCR 0x2b8
-#define YUV_OFT_Y 0x2bc
-#define YUV_OFT_CB 0x2c0
-#define YUV_OFT_CR 0x2c4
-#define YUV_PHS 0x2c8
-
-/* Global Brightness and Contrast */
-#define GBCE_EN 0x2cc
-#define GBCE_TYP 0x2d0
-
-/* Edge Enhancer */
-#define YEE_EN 0x2d4
-#define YEE_TYP 0x2d8
-#define YEE_SHF 0x2dc
-#define YEE_MUL_00 0x2e0
-#define YEE_MUL_01 0x2e4
-#define YEE_MUL_02 0x2e8
-#define YEE_MUL_10 0x2ec
-#define YEE_MUL_11 0x2f0
-#define YEE_MUL_12 0x2f4
-#define YEE_MUL_20 0x2f8
-#define YEE_MUL_21 0x2fc
-#define YEE_MUL_22 0x300
-#define YEE_THR 0x304
-#define YEE_E_GAN 0x308
-#define YEE_E_THR1 0x30c
-#define YEE_E_THR2 0x310
-#define YEE_G_GAN 0x314
-#define YEE_G_OFT 0x318
-
-/* Chroma Artifact Reduction */
-#define CAR_EN 0x31c
-#define CAR_TYP 0x320
-#define CAR_SW 0x324
-#define CAR_HPF_TYP 0x328
-#define CAR_HPF_SHF 0x32c
-#define CAR_HPF_THR 0x330
-#define CAR_GN1_GAN 0x334
-#define CAR_GN1_SHF 0x338
-#define CAR_GN1_MIN 0x33c
-#define CAR_GN2_GAN 0x340
-#define CAR_GN2_SHF 0x344
-#define CAR_GN2_MIN 0x348
-
-/* Chroma Gain Suppression */
-#define CGS_EN 0x34c
-#define CGS_GN1_L_THR 0x350
-#define CGS_GN1_L_GAN 0x354
-#define CGS_GN1_L_SHF 0x358
-#define CGS_GN1_L_MIN 0x35c
-#define CGS_GN1_H_THR 0x360
-#define CGS_GN1_H_GAN 0x364
-#define CGS_GN1_H_SHF 0x368
-#define CGS_GN1_H_MIN 0x36c
-#define CGS_GN2_L_THR 0x370
-#define CGS_GN2_L_GAN 0x374
-#define CGS_GN2_L_SHF 0x378
-#define CGS_GN2_L_MIN 0x37c
-
-/* Resizer */
-#define RSZ_SRC_EN 0x0
-#define RSZ_SRC_MODE 0x4
-#define RSZ_SRC_FMT0 0x8
-#define RSZ_SRC_FMT1 0xc
-#define RSZ_SRC_VPS 0x10
-#define RSZ_SRC_VSZ 0x14
-#define RSZ_SRC_HPS 0x18
-#define RSZ_SRC_HSZ 0x1c
-#define RSZ_DMA_RZA 0x20
-#define RSZ_DMA_RZB 0x24
-#define RSZ_DMA_STA 0x28
-#define RSZ_GCK_MMR 0x2c
-#define RSZ_RESERVED0 0x30
-#define RSZ_GCK_SDR 0x34
-#define RSZ_IRQ_RZA 0x38
-#define RSZ_IRQ_RZB 0x3c
-#define RSZ_YUV_Y_MIN 0x40
-#define RSZ_YUV_Y_MAX 0x44
-#define RSZ_YUV_C_MIN 0x48
-#define RSZ_YUV_C_MAX 0x4c
-#define RSZ_YUV_PHS 0x50
-#define RSZ_SEQ 0x54
-
-/* Resizer Rescale Parameters */
-#define RSZ_EN_A 0x58
-#define RSZ_EN_B 0xe8
-/*
- * offset of the registers to be added with base register of
- * either RSZ0 or RSZ1
- */
-#define RSZ_MODE 0x4
-#define RSZ_420 0x8
-#define RSZ_I_VPS 0xc
-#define RSZ_I_HPS 0x10
-#define RSZ_O_VSZ 0x14
-#define RSZ_O_HSZ 0x18
-#define RSZ_V_PHS_Y 0x1c
-#define RSZ_V_PHS_C 0x20
-#define RSZ_V_DIF 0x24
-#define RSZ_V_TYP 0x28
-#define RSZ_V_LPF 0x2c
-#define RSZ_H_PHS 0x30
-#define RSZ_H_PHS_ADJ 0x34
-#define RSZ_H_DIF 0x38
-#define RSZ_H_TYP 0x3c
-#define RSZ_H_LPF 0x40
-#define RSZ_DWN_EN 0x44
-#define RSZ_DWN_AV 0x48
-
-/* Resizer RGB Conversion Parameters */
-#define RSZ_RGB_EN 0x4c
-#define RSZ_RGB_TYP 0x50
-#define RSZ_RGB_BLD 0x54
-
-/* Resizer External Memory Parameters */
-#define RSZ_SDR_Y_BAD_H 0x58
-#define RSZ_SDR_Y_BAD_L 0x5c
-#define RSZ_SDR_Y_SAD_H 0x60
-#define RSZ_SDR_Y_SAD_L 0x64
-#define RSZ_SDR_Y_OFT 0x68
-#define RSZ_SDR_Y_PTR_S 0x6c
-#define RSZ_SDR_Y_PTR_E 0x70
-#define RSZ_SDR_C_BAD_H 0x74
-#define RSZ_SDR_C_BAD_L 0x78
-#define RSZ_SDR_C_SAD_H 0x7c
-#define RSZ_SDR_C_SAD_L 0x80
-#define RSZ_SDR_C_OFT 0x84
-#define RSZ_SDR_C_PTR_S 0x88
-#define RSZ_SDR_C_PTR_E 0x8c
-
-/* Macro for resizer */
-#define RSZ_YUV_Y_MIN 0x40
-#define RSZ_YUV_Y_MAX 0x44
-#define RSZ_YUV_C_MIN 0x48
-#define RSZ_YUV_C_MAX 0x4c
-
-#define IPIPE_GCK_MMR_DEFAULT 1
-#define IPIPE_GCK_PIX_DEFAULT 0xe
-#define RSZ_GCK_MMR_DEFAULT 1
-#define RSZ_GCK_SDR_DEFAULT 1
-
-/* LUTDPC */
-#define LUTDPC_TBL_256_EN 0
-#define LUTDPC_INF_TBL_EN 1
-#define LUT_DPC_START_ADDR 0
-#define LUT_DPC_H_POS_MASK 0x1fff
-#define LUT_DPC_V_POS_MASK 0x1fff
-#define LUT_DPC_V_POS_SHIFT 13
-#define LUT_DPC_CORR_METH_SHIFT 26
-#define LUT_DPC_MAX_SIZE 256
-#define LUT_DPC_SIZE_MASK 0x3ff
-
-/* OTFDPC */
-#define OTFDPC_DPC2_THR_MASK 0xfff
-#define OTF_DET_METHOD_SHIFT 1
-#define OTF_DPC3_0_SHF_MASK 3
-#define OTF_DPC3_0_THR_SHIFT 6
-#define OTF_DPC3_0_THR_MASK 0x3f
-#define OTF_DPC3_0_SLP_MASK 0x3f
-#define OTF_DPC3_0_DET_MASK 0xfff
-#define OTF_DPC3_0_CORR_MASK 0xfff
-
-/* NF (D2F) */
-#define D2F_SPR_VAL_MASK 0x1f
-#define D2F_SPR_VAL_SHIFT 0
-#define D2F_SHFT_VAL_MASK 3
-#define D2F_SHFT_VAL_SHIFT 5
-#define D2F_SAMPLE_METH_SHIFT 7
-#define D2F_APPLY_LSC_GAIN_SHIFT 8
-#define D2F_USE_SPR_REG_VAL 0
-#define D2F_STR_VAL_MASK 0x1f
-#define D2F_THR_VAL_MASK 0x3ff
-#define D2F_EDGE_DET_THR_MASK 0x7ff
-
-/* Green Imbalance Correction */
-#define GIC_TYP_SHIFT 0
-#define GIC_THR_SEL_SHIFT 1
-#define GIC_APPLY_LSC_GAIN_SHIFT 2
-#define GIC_GAIN_MASK 0xff
-#define GIC_THR_MASK 0xfff
-#define GIC_SLOPE_MASK 0xfff
-#define GIC_NFGAN_INT_MASK 7
-#define GIC_NFGAN_DECI_MASK 0x1f
-
-/* WB */
-#define WB_OFFSET_MASK 0xfff
-#define WB_GAIN_INT_MASK 0xf
-#define WB_GAIN_DECI_MASK 0x1ff
-
-/* CFA */
-#define CFA_HPF_THR_2DIR_MASK 0x1fff
-#define CFA_HPF_SLOPE_2DIR_MASK 0x3ff
-#define CFA_HPF_MIX_THR_2DIR_MASK 0x1fff
-#define CFA_HPF_MIX_SLP_2DIR_MASK 0x3ff
-#define CFA_DIR_THR_2DIR_MASK 0x3ff
-#define CFA_DIR_SLP_2DIR_MASK 0x7f
-#define CFA_ND_WT_2DIR_MASK 0x3f
-#define CFA_DAA_HUE_FRA_MASK 0x3f
-#define CFA_DAA_EDG_THR_MASK 0xff
-#define CFA_DAA_THR_MIN_MASK 0x3ff
-#define CFA_DAA_THR_SLP_MASK 0x3ff
-#define CFA_DAA_SLP_MIN_MASK 0x3ff
-#define CFA_DAA_SLP_SLP_MASK 0x3ff
-#define CFA_DAA_LP_WT_MASK 0x3f
-
-/* RGB2RGB */
-#define RGB2RGB_1_OFST_MASK 0x1fff
-#define RGB2RGB_1_GAIN_INT_MASK 0xf
-#define RGB2RGB_GAIN_DECI_MASK 0xff
-#define RGB2RGB_2_OFST_MASK 0x7ff
-#define RGB2RGB_2_GAIN_INT_MASK 0x7
-
-/* Gamma */
-#define GAMMA_BYPR_SHIFT 0
-#define GAMMA_BYPG_SHIFT 1
-#define GAMMA_BYPB_SHIFT 2
-#define GAMMA_TBL_SEL_SHIFT 4
-#define GAMMA_TBL_SIZE_SHIFT 5
-#define GAMMA_MASK 0x3ff
-#define GAMMA_SHIFT 10
-
-/* 3D LUT */
-#define D3_LUT_ENTRY_MASK 0x3ff
-#define D3_LUT_ENTRY_R_SHIFT 20
-#define D3_LUT_ENTRY_G_SHIFT 10
-#define D3_LUT_ENTRY_B_SHIFT 0
-
-/* Lumina adj */
-#define LUM_ADJ_CONTR_SHIFT 0
-#define LUM_ADJ_BRIGHT_SHIFT 8
-
-/* RGB2YCbCr */
-#define RGB2YCBCR_OFST_MASK 0x7ff
-#define RGB2YCBCR_COEF_INT_MASK 0xf
-#define RGB2YCBCR_COEF_DECI_MASK 0xff
-
-/* GBCE */
-#define GBCE_Y_VAL_MASK 0xff
-#define GBCE_GAIN_VAL_MASK 0x3ff
-#define GBCE_ENTRY_SHIFT 10
-
-/* Edge Enhancements */
-#define YEE_HALO_RED_EN_SHIFT 1
-#define YEE_HPF_SHIFT_MASK 0xf
-#define YEE_COEF_MASK 0x3ff
-#define YEE_THR_MASK 0x3f
-#define YEE_ES_GAIN_MASK 0xfff
-#define YEE_ES_THR1_MASK 0xfff
-#define YEE_ENTRY_SHIFT 9
-#define YEE_ENTRY_MASK 0x1ff
-
-/* CAR */
-#define CAR_MF_THR 0xff
-#define CAR_SW1_SHIFT 8
-#define CAR_GAIN1_SHFT_MASK 7
-#define CAR_GAIN_MIN_MASK 0x1ff
-#define CAR_GAIN2_SHFT_MASK 0xf
-#define CAR_HPF_SHIFT_MASK 3
-
-/* CGS */
-#define CAR_SHIFT_MASK 3
-
-/* Resizer */
-#define RSZ_BYPASS_SHIFT 1
-#define RSZ_SRC_IMG_FMT_SHIFT 1
-#define RSZ_SRC_Y_C_SEL_SHIFT 2
-#define IPIPE_RSZ_VPS_MASK 0xffff
-#define IPIPE_RSZ_HPS_MASK 0xffff
-#define IPIPE_RSZ_VSZ_MASK 0x1fff
-#define IPIPE_RSZ_HSZ_MASK 0x1fff
-#define RSZ_HPS_MASK 0x1fff
-#define RSZ_VPS_MASK 0x1fff
-#define RSZ_O_HSZ_MASK 0x1fff
-#define RSZ_O_VSZ_MASK 0x1fff
-#define RSZ_V_PHS_MASK 0x3fff
-#define RSZ_V_DIF_MASK 0x3fff
-
-#define RSZA_H_FLIP_SHIFT 0
-#define RSZA_V_FLIP_SHIFT 1
-#define RSZB_H_FLIP_SHIFT 2
-#define RSZB_V_FLIP_SHIFT 3
-#define RSZ_A 0
-#define RSZ_B 1
-#define RSZ_CEN_SHIFT 1
-#define RSZ_YEN_SHIFT 0
-#define RSZ_TYP_Y_SHIFT 0
-#define RSZ_TYP_C_SHIFT 1
-#define RSZ_LPF_INT_MASK 0x3f
-#define RSZ_LPF_INT_C_SHIFT 6
-#define RSZ_H_PHS_MASK 0x3fff
-#define RSZ_H_DIF_MASK 0x3fff
-#define RSZ_DIFF_DOWN_THR 256
-#define RSZ_DWN_SCALE_AV_SZ_V_SHIFT 3
-#define RSZ_DWN_SCALE_AV_SZ_MASK 7
-#define RSZ_RGB_MSK1_SHIFT 2
-#define RSZ_RGB_MSK0_SHIFT 1
-#define RSZ_RGB_TYP_SHIFT 0
-#define RSZ_RGB_ALPHA_MASK 0xff
-
-static inline u32 regr_ip(void __iomem *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline void regw_ip(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-}
-
-static inline u32 w_ip_table(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-
- return val;
-}
-
-static inline u32 regr_rsz(void __iomem *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline u32 regw_rsz(void __iomem *addr, u32 val, u32 offset)
-{
- writel(val, addr + offset);
-
- return val;
-}
-
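The accessors above are thin wrappers around readl()/writel() taking a byte offset into the IPIPE or resizer register window. A hedged sketch of a read-modify-write helper built on them, should one be wanted; it is not part of the original driver:

/* Sketch only: read-modify-write an IPIPE register using the helpers above. */
static inline void regm_ip(void __iomem *addr, u32 mask, u32 val, u32 offset)
{
	u32 tmp = regr_ip(addr, offset);

	tmp = (tmp & ~mask) | (val & mask);	/* update only the masked bits */
	regw_ip(addr, tmp, offset);
}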
-int config_ipipe_hw(struct vpfe_ipipe_device *ipipe);
-int resizer_set_outaddr(void __iomem *rsz_base, struct resizer_params *params,
- int resize_no, unsigned int address);
-int rsz_enable(void __iomem *rsz_base, int rsz_id, int enable);
-void rsz_src_enable(void __iomem *rsz_base, int enable);
-void rsz_set_in_pix_format(unsigned char y_c);
-int config_rsz_hw(struct vpfe_resizer_device *resizer,
- struct resizer_params *config);
-void ipipe_set_d2f_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_nf *noise_filter);
-void ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
- struct vpfe_ipipe_rgb2rgb *rgb);
-void ipipe_set_yuv422_conv_regs(void __iomem *base_addr,
- struct vpfe_ipipe_yuv422_conv *conv);
-void ipipe_set_lum_adj_regs(void __iomem *base_addr,
- struct ipipe_lum_adj *lum_adj);
-void ipipe_set_rgb2ycbcr_regs(void __iomem *base_addr,
- struct vpfe_ipipe_rgb2yuv *yuv);
-void ipipe_set_lutdpc_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_lutdpc *lutdpc);
-void ipipe_set_otfdpc_regs(void __iomem *base_addr,
- struct vpfe_ipipe_otfdpc *otfdpc);
-void ipipe_set_3d_lut_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_3d_lut *lut_3d);
-void ipipe_set_gamma_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_gamma *gamma);
-void ipipe_set_ee_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_yee *ee);
-void ipipe_set_gbce_regs(void __iomem *base_addr,
- void __iomem *isp5_base_addr, struct vpfe_ipipe_gbce *gbce);
-void ipipe_set_gic_regs(void __iomem *base_addr, struct vpfe_ipipe_gic *gic);
-void ipipe_set_cfa_regs(void __iomem *base_addr, struct vpfe_ipipe_cfa *cfa);
-void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car);
-void ipipe_set_cgs_regs(void __iomem *base_addr, struct vpfe_ipipe_cgs *cgs);
-void ipipe_set_wb_regs(void __iomem *base_addr, struct vpfe_ipipe_wb *wb);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPE_HW_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
deleted file mode 100644
index 51d4cd1bdb97..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ /dev/null
@@ -1,1070 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include "dm365_ipipeif.h"
-#include "vpfe_mc_capture.h"
-
-static const unsigned int ipipeif_input_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SBGGR8_1X8,
-};
-
-static const unsigned int ipipeif_output_fmts[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
-};
-
-static int
-ipipeif_get_pack_mode(u32 in_pix_fmt)
-{
- switch (in_pix_fmt) {
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- return IPIPEIF_5_1_PACK_8_BIT;
-
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- return IPIPEIF_5_1_PACK_8_BIT_A_LAW;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return IPIPEIF_5_1_PACK_16_BIT;
-
- case MEDIA_BUS_FMT_SBGGR12_1X12:
- return IPIPEIF_5_1_PACK_12_BIT;
-
- default:
- return IPIPEIF_5_1_PACK_16_BIT;
- }
-}
-
-static inline u32 ipipeif_read(void __iomem *addr, u32 offset)
-{
- return readl(addr + offset);
-}
-
-static inline void ipipeif_write(u32 val, void __iomem *addr, u32 offset)
-{
- writel(val, addr + offset);
-}
-
-static void ipipeif_config_dpc(void __iomem *addr, struct ipipeif_dpc *dpc)
-{
- u32 val = 0;
-
- if (dpc->en) {
- val = (dpc->en & 1) << IPIPEIF_DPC2_EN_SHIFT;
- val |= dpc->thr & IPIPEIF_DPC2_THR_MASK;
- }
- ipipeif_write(val, addr, IPIPEIF_DPC2);
-}
-
-#define IPIPEIF_MODE_CONTINUOUS 0
-#define IPIPEIF_MODE_ONE_SHOT 1
-
-static int get_oneshot_mode(enum ipipeif_input_entity input)
-{
- if (input == IPIPEIF_INPUT_MEMORY)
- return IPIPEIF_MODE_ONE_SHOT;
- if (input == IPIPEIF_INPUT_ISIF)
- return IPIPEIF_MODE_CONTINUOUS;
-
- return -EINVAL;
-}
-
-static int
-ipipeif_get_cfg_src1(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- if (ipipeif->input == IPIPEIF_INPUT_MEMORY &&
- (informat->code == MEDIA_BUS_FMT_Y8_1X8 ||
- informat->code == MEDIA_BUS_FMT_UV8_1X8))
- return IPIPEIF_CCDC;
-
- return IPIPEIF_SRC1_PARALLEL_PORT;
-}
-
-static int
-ipipeif_get_data_shift(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
-
- switch (informat->code) {
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return IPIPEIF_5_1_BITS11_0;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- return IPIPEIF_5_1_BITS11_0;
-
- default:
- return IPIPEIF_5_1_BITS7_0;
- }
-}
-
-static enum ipipeif_input_source
-ipipeif_get_source(struct vpfe_ipipeif_device *ipipeif)
-{
- struct v4l2_mbus_framefmt *informat;
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- if (ipipeif->input == IPIPEIF_INPUT_ISIF)
- return IPIPEIF_CCDC;
-
- if (informat->code == MEDIA_BUS_FMT_UYVY8_2X8)
- return IPIPEIF_SDRAM_YUV;
-
- return IPIPEIF_SDRAM_RAW;
-}
-
-void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif)
-{
- struct vpfe_video_device *video_in = &ipipeif->video_in;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return;
-
- spin_lock(&video_in->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_in);
- video_in->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_in);
- spin_unlock(&video_in->dma_queue_lock);
-}
-
-int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
-
- return ipipeif->config.decimation;
-}
-
-int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
-
- return ipipeif->config.rsz;
-}
-
-#define RD_DATA_15_2 0x7
-
-/*
- * ipipeif_hw_setup() - This function sets up IPIPEIF
- * @sd: pointer to v4l2 subdev structure
- * return -EINVAL or zero on success
- */
-static int ipipeif_hw_setup(struct v4l2_subdev *sd)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *informat, *outformat;
- struct ipipeif_params params = ipipeif->config;
- enum ipipeif_input_source ipipeif_source;
- u32 isif_port_if;
- void __iomem *ipipeif_base_addr;
- unsigned long val;
- int data_shift;
- int pack_mode;
- int source1;
- int tmp;
-
- ipipeif_base_addr = ipipeif->ipipeif_base_addr;
-
- /* Enable clock to IPIPEIF and IPIPE */
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
-
- informat = &ipipeif->formats[IPIPEIF_PAD_SINK];
- outformat = &ipipeif->formats[IPIPEIF_PAD_SOURCE];
-
- /* Combine all the fields to make CFG1 register of IPIPEIF */
- tmp = val = get_oneshot_mode(ipipeif->input);
- if (tmp < 0) {
- dev_err(&sd->devnode->dev, "ipipeif: links setup required\n");
- return -EINVAL;
- }
- val <<= ONESHOT_SHIFT;
-
- ipipeif_source = ipipeif_get_source(ipipeif);
- val |= ipipeif_source << INPSRC_SHIFT;
-
- val |= params.clock_select << CLKSEL_SHIFT;
- val |= params.avg_filter << AVGFILT_SHIFT;
- val |= params.decimation << DECIM_SHIFT;
-
- pack_mode = ipipeif_get_pack_mode(informat->code);
- val |= pack_mode << PACK8IN_SHIFT;
-
- source1 = ipipeif_get_cfg_src1(ipipeif);
- val |= source1 << INPSRC1_SHIFT;
-
- data_shift = ipipeif_get_data_shift(ipipeif);
- if (ipipeif_source != IPIPEIF_SDRAM_YUV)
- val |= data_shift << DATASFT_SHIFT;
- else
- val &= ~(RD_DATA_15_2 << DATASFT_SHIFT);
-
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG1);
-
- switch (ipipeif_source) {
- case IPIPEIF_CCDC:
- ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
- break;
-
- case IPIPEIF_SDRAM_RAW:
- case IPIPEIF_CCDC_DARKFM:
- ipipeif_write(ipipeif->gain, ipipeif_base_addr, IPIPEIF_GAIN);
- /* fall through */
- case IPIPEIF_SDRAM_YUV:
- val |= data_shift << DATASFT_SHIFT;
- ipipeif_write(params.ppln, ipipeif_base_addr, IPIPEIF_PPLN);
- ipipeif_write(params.lpfr, ipipeif_base_addr, IPIPEIF_LPFR);
- ipipeif_write(informat->width, ipipeif_base_addr, IPIPEIF_HNUM);
- ipipeif_write(informat->height,
- ipipeif_base_addr, IPIPEIF_VNUM);
- break;
-
- default:
- return -EINVAL;
- }
-
- /* check if decimation is enabled or not */
- if (params.decimation)
- ipipeif_write(params.rsz, ipipeif_base_addr, IPIPEIF_RSZ);
-
- /* Setup sync alignment and initial rsz position */
- val = params.if_5_1.align_sync & 1;
- val <<= IPIPEIF_INIRSZ_ALNSYNC_SHIFT;
- val |= params.if_5_1.rsz_start & IPIPEIF_INIRSZ_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_INIRSZ);
- isif_port_if = informat->code;
-
- if (isif_port_if == MEDIA_BUS_FMT_Y8_1X8)
- isif_port_if = MEDIA_BUS_FMT_YUYV8_1X16;
- else if (isif_port_if == MEDIA_BUS_FMT_UV8_1X8)
- isif_port_if = MEDIA_BUS_FMT_SGRBG12_1X12;
-
- /* Enable DPCM decompression */
- switch (ipipeif_source) {
- case IPIPEIF_SDRAM_RAW:
- val = 0;
- if (outformat->code == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8) {
- val = 1;
- val |= (IPIPEIF_DPCM_8BIT_10BIT & 1) <<
- IPIPEIF_DPCM_BITS_SHIFT;
- val |= (ipipeif->dpcm_predictor & 1) <<
- IPIPEIF_DPCM_PRED_SHIFT;
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DPCM);
-
- /* set DPC */
- ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
-
- ipipeif_write(params.if_5_1.clip,
- ipipeif_base_addr, IPIPEIF_OCLIP);
-
- /* fall through for SDRAM YUV mode */
- /* configure CFG2 */
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CFG2);
- switch (isif_port_if) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
- set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
-
- default:
- clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
- clear_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
- }
- /* fall through */
-
- case IPIPEIF_SDRAM_YUV:
- /* Set clock divider */
- if (params.clock_select == IPIPEIF_SDRAM_CLK) {
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_CLKDIV);
- val |= (params.if_5_1.clk_div.m - 1) <<
- IPIPEIF_CLKDIV_M_SHIFT;
- val |= (params.if_5_1.clk_div.n - 1);
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CLKDIV);
- }
- break;
-
- case IPIPEIF_CCDC:
- case IPIPEIF_CCDC_DARKFM:
- /* set DPC */
- ipipeif_config_dpc(ipipeif_base_addr, &params.if_5_1.dpc);
-
- /* Set DF gain & threshold control */
- val = 0;
- if (params.if_5_1.df_gain_en) {
- val = params.if_5_1.df_gain_thr &
- IPIPEIF_DF_GAIN_THR_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGTH);
- val = (params.if_5_1.df_gain_en & 1) <<
- IPIPEIF_DF_GAIN_EN_SHIFT;
- val |= params.if_5_1.df_gain &
- IPIPEIF_DF_GAIN_MASK;
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_DFSGVL);
- /* configure CFG2 */
- val = VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_HDPOL_SHIFT;
- val |= VPFE_PINPOL_POSITIVE << IPIPEIF_CFG2_VDPOL_SHIFT;
-
- switch (isif_port_if) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- clear_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
- set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
- break;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- set_bit(IPIPEIF_CFG2_YUV8_SHIFT, &val);
- set_bit(IPIPEIF_CFG2_YUV16_SHIFT, &val);
- val |= IPIPEIF_CBCR_Y << IPIPEIF_CFG2_YUV8P_SHIFT;
- break;
-
- default:
- /* Bayer */
- ipipeif_write(params.if_5_1.clip, ipipeif_base_addr,
- IPIPEIF_OCLIP);
- }
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
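ipipeif_hw_setup() above assembles the CFG1 word field by field from the shift constants declared in dm365_ipipeif.h. A compressed, purely illustrative composition for one-shot capture of SDRAM RAW data on the SDRAM clock (the driver derives the real values at run time from the link setup and formats):

/* Illustrative CFG1 composition only, using the constants defined below. */
u32 cfg1 = (IPIPEIF_MODE_ONE_SHOT   << ONESHOT_SHIFT) |
	   (IPIPEIF_SDRAM_RAW       << INPSRC_SHIFT)  |
	   (IPIPEIF_SDRAM_CLK       << CLKSEL_SHIFT)  |
	   (IPIPEIF_5_1_PACK_16_BIT << PACK8IN_SHIFT) |
	   (IPIPEIF_5_1_BITS11_0    << DATASFT_SHIFT);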
-static int
-ipipeif_set_config(struct v4l2_subdev *sd, struct ipipeif_params *config)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct device *dev = ipipeif->subdev.v4l2_dev->dev;
-
- if (!config) {
- dev_err(dev, "Invalid configuration pointer\n");
- return -EINVAL;
- }
-
- ipipeif->config.clock_select = config->clock_select;
- ipipeif->config.ppln = config->ppln;
- ipipeif->config.lpfr = config->lpfr;
- ipipeif->config.rsz = config->rsz;
- ipipeif->config.decimation = config->decimation;
- if (ipipeif->config.decimation &&
- (ipipeif->config.rsz < IPIPEIF_RSZ_MIN ||
- ipipeif->config.rsz > IPIPEIF_RSZ_MAX)) {
- dev_err(dev, "rsz range is %d to %d\n",
- IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
- return -EINVAL;
- }
-
- ipipeif->config.avg_filter = config->avg_filter;
-
- ipipeif->config.if_5_1.df_gain_thr = config->if_5_1.df_gain_thr;
- ipipeif->config.if_5_1.df_gain = config->if_5_1.df_gain;
- ipipeif->config.if_5_1.df_gain_en = config->if_5_1.df_gain_en;
-
- ipipeif->config.if_5_1.rsz_start = config->if_5_1.rsz_start;
- ipipeif->config.if_5_1.align_sync = config->if_5_1.align_sync;
- ipipeif->config.if_5_1.clip = config->if_5_1.clip;
-
- ipipeif->config.if_5_1.dpc.en = config->if_5_1.dpc.en;
- ipipeif->config.if_5_1.dpc.thr = config->if_5_1.dpc.thr;
-
- ipipeif->config.if_5_1.clk_div.m = config->if_5_1.clk_div.m;
- ipipeif->config.if_5_1.clk_div.n = config->if_5_1.clk_div.n;
-
- return 0;
-}
-
-static int
-ipipeif_get_config(struct v4l2_subdev *sd, void *arg)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct ipipeif_params *config = arg;
- struct device *dev = ipipeif->subdev.v4l2_dev->dev;
-
- if (!arg) {
- dev_err(dev, "Invalid configuration pointer\n");
- return -EINVAL;
- }
-
- config->clock_select = ipipeif->config.clock_select;
- config->ppln = ipipeif->config.ppln;
- config->lpfr = ipipeif->config.lpfr;
- config->rsz = ipipeif->config.rsz;
- config->decimation = ipipeif->config.decimation;
- config->avg_filter = ipipeif->config.avg_filter;
-
- config->if_5_1.df_gain_thr = ipipeif->config.if_5_1.df_gain_thr;
- config->if_5_1.df_gain = ipipeif->config.if_5_1.df_gain;
- config->if_5_1.df_gain_en = ipipeif->config.if_5_1.df_gain_en;
-
- config->if_5_1.rsz_start = ipipeif->config.if_5_1.rsz_start;
- config->if_5_1.align_sync = ipipeif->config.if_5_1.align_sync;
- config->if_5_1.clip = ipipeif->config.if_5_1.clip;
-
- config->if_5_1.dpc.en = ipipeif->config.if_5_1.dpc.en;
- config->if_5_1.dpc.thr = ipipeif->config.if_5_1.dpc.thr;
-
- config->if_5_1.clk_div.m = ipipeif->config.if_5_1.clk_div.m;
- config->if_5_1.clk_div.n = ipipeif->config.if_5_1.clk_div.n;
-
- return 0;
-}
-
-/*
- * ipipeif_ioctl() - Handle ipipeif module private ioctl's
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long ipipeif_ioctl(struct v4l2_subdev *sd,
- unsigned int cmd, void *arg)
-{
- struct ipipeif_params *config = arg;
- int ret = -ENOIOCTLCMD;
-
- switch (cmd) {
- case VIDIOC_VPFE_IPIPEIF_S_CONFIG:
- ret = ipipeif_set_config(sd, config);
- break;
-
- case VIDIOC_VPFE_IPIPEIF_G_CONFIG:
- ret = ipipeif_get_config(sd, arg);
- break;
- }
- return ret;
-}
-
-/*
- * ipipeif_s_ctrl() - Handle set control subdev method
- * @ctrl: pointer to v4l2 control structure
- */
-static int ipipeif_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_ipipeif_device *ipipeif =
- container_of(ctrl->handler, struct vpfe_ipipeif_device, ctrls);
-
- switch (ctrl->id) {
- case VPFE_CID_DPCM_PREDICTOR:
- ipipeif->dpcm_predictor = ctrl->val;
- break;
-
- case V4L2_CID_GAIN:
- ipipeif->gain = ctrl->val;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-#define ENABLE_IPIPEIF 0x1
-
-void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- void __iomem *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
- unsigned char val;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return;
-
- do {
- val = ipipeif_read(ipipeif_base_addr, IPIPEIF_ENABLE);
- } while (val & 0x1);
-
- ipipeif_write(ENABLE_IPIPEIF, ipipeif_base_addr, IPIPEIF_ENABLE);
-}
-
-/*
- * ipipeif_set_stream() - Enable/Disable streaming on ipipeif subdev
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
- int ret = 0;
-
- if (!enable)
- return ret;
-
- ret = ipipeif_hw_setup(sd);
- if (!ret)
- vpfe_ipipeif_enable(vpfe_dev);
-
- return ret;
-}
-
-/*
- * ipipeif_enum_mbus_code() - Handle pixel format enumeration
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- * return -EINVAL or zero on success
- */
-static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case IPIPEIF_PAD_SINK:
- if (code->index >= ARRAY_SIZE(ipipeif_input_fmts))
- return -EINVAL;
-
- code->code = ipipeif_input_fmts[code->index];
- break;
-
- case IPIPEIF_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(ipipeif_output_fmts))
- return -EINVAL;
-
- code->code = ipipeif_output_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * ipipeif_get_format() - Handle get format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- */
-static int
-ipipeif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- fmt->format = ipipeif->formats[fmt->pad];
- else
- fmt->format = *(v4l2_subdev_get_try_format(sd, cfg, fmt->pad));
-
- return 0;
-}
-
-#define MIN_OUT_WIDTH 32
-#define MIN_OUT_HEIGHT 32
-
-/*
- * ipipeif_try_format() - Handle try format by pad subdev method
- * @ipipeif: VPFE ipipeif device.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which : wanted subdev format
- */
-static void
-ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- if (pad == IPIPEIF_PAD_SINK) {
- for (i = 0; i < ARRAY_SIZE(ipipeif_input_fmts); i++)
- if (fmt->code == ipipeif_input_fmts[i])
- break;
-
- /* If not found, use SGRBG12 as default */
- if (i >= ARRAY_SIZE(ipipeif_input_fmts))
- fmt->code = MEDIA_BUS_FMT_SGRBG12_1X12;
- } else if (pad == IPIPEIF_PAD_SOURCE) {
- for (i = 0; i < ARRAY_SIZE(ipipeif_output_fmts); i++)
- if (fmt->code == ipipeif_output_fmts[i])
- break;
-
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(ipipeif_output_fmts))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH, max_out_width);
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT, max_out_height);
-}
-
-static int
-ipipeif_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- ipipeif_try_format(ipipeif, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
-
-/*
- * __ipipeif_get_format() - helper function for getting ipipeif format
- * @ipipeif: pointer to ipipeif private structure.
- * @cfg: V4L2 subdev pad config
- * @pad: pad number.
- * @which: wanted subdev format.
- *
- */
-static struct v4l2_mbus_framefmt *
-__ipipeif_get_format(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, cfg, pad);
-
- return &ipipeif->formats[pad];
-}
-
-/*
- * ipipeif_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int
-ipipeif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __ipipeif_get_format(ipipeif, cfg, fmt->pad, fmt->which);
- if (!format)
- return -EINVAL;
-
- ipipeif_try_format(ipipeif, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == IPIPEIF_PAD_SINK &&
- ipipeif->input != IPIPEIF_INPUT_NONE)
- ipipeif->formats[fmt->pad] = fmt->format;
- else if (fmt->pad == IPIPEIF_PAD_SOURCE &&
- ipipeif->output != IPIPEIF_OUTPUT_NONE)
- ipipeif->formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static void ipipeif_set_default_config(struct vpfe_ipipeif_device *ipipeif)
-{
-#define WIDTH_I 640
-#define HEIGHT_I 480
-
- const struct ipipeif_params ipipeif_defaults = {
- .clock_select = IPIPEIF_SDRAM_CLK,
- .ppln = WIDTH_I + 8,
- .lpfr = HEIGHT_I + 10,
- .rsz = 16, /* resize ratio 16/rsz */
- .decimation = IPIPEIF_DECIMATION_OFF,
- .avg_filter = IPIPEIF_AVG_OFF,
- .if_5_1 = {
- .clk_div = {
- .m = 1, /* clock = sdram clock * (m/n) */
- .n = 6
- },
- .clip = 4095,
- },
- };
- memcpy(&ipipeif->config, &ipipeif_defaults,
- sizeof(struct ipipeif_params));
-}
-
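The defaults above encode two ratios that the inline comments only hint at. A worked example with illustrative figures (not taken from hardware documentation):

/*
 * clk_div m = 1, n = 6  ->  IPIPEIF clock = SDRAM clock * 1/6
 * rsz = 16              ->  decimation ratio 16/16 = 1 (no scaling)
 * rsz = 32              ->  decimation ratio 16/32 = 1/2 (halve the width)
 */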
-/*
- * ipipeif_init_formats() - Initialize formats on all pads
- * @sd: VPFE ipipeif V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-ipipeif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPEIF_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = IPIPEIF_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- ipipeif_set_format(sd, fh->pad, &format);
-
- ipipeif_set_default_config(ipipeif);
-
- return 0;
-}
-
-/*
- * ipipeif_video_in_queue() - ipipeif video in queue
- * @vpfe_dev: vpfe device pointer
- * @addr: buffer address
- */
-static int
-ipipeif_video_in_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
-{
- struct vpfe_ipipeif_device *ipipeif = &vpfe_dev->vpfe_ipipeif;
- void __iomem *ipipeif_base_addr = ipipeif->ipipeif_base_addr;
- unsigned int adofs;
- u32 val;
-
- if (ipipeif->input != IPIPEIF_INPUT_MEMORY)
- return -EINVAL;
-
- switch (ipipeif->formats[IPIPEIF_PAD_SINK].code) {
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_UV8_1X8:
- case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
- adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width;
- break;
-
- default:
- adofs = ipipeif->formats[IPIPEIF_PAD_SINK].width << 1;
- break;
- }
-
- /* adjust the line length to be a multiple of 32 */
- adofs += 31;
- adofs &= ~0x1f;
- val = (adofs >> 5) & IPIPEIF_ADOFS_LSB_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADOFS);
-
- /* lower sixteen bits */
- val = (addr >> IPIPEIF_ADDRL_SHIFT) & IPIPEIF_ADDRL_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRL);
-
- /* next eleven bits of the address */
- val = (addr >> IPIPEIF_ADDRU_SHIFT) & IPIPEIF_ADDRU_MASK;
- ipipeif_write(val, ipipeif_base_addr, IPIPEIF_ADDRU);
-
- return 0;
-}
-
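ipipeif_video_in_queue() above splits the 32-byte-aligned buffer address across ADDRL/ADDRU and programs the line offset in units of 32 bytes. A worked example with an assumed address and line width (illustrative numbers only):

/*
 * addr = 0x87654320 (assumed 32-byte aligned):
 *   ADDRL = (addr >> 5)  & 0xffff   -> address bits  5..20
 *   ADDRU = (addr >> 21) & 0x7ff    -> address bits 21..31
 *
 * width = 642 bytes per line:
 *   adofs = (642 + 31) & ~0x1f = 672, written as 672 >> 5 = 21
 *   (i.e. 21 units of 32 bytes).
 */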
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops ipipeif_v4l2_core_ops = {
- .ioctl = ipipeif_ioctl,
-};
-
-static const struct v4l2_ctrl_ops ipipeif_ctrl_ops = {
- .s_ctrl = ipipeif_s_ctrl,
-};
-
-static const struct v4l2_ctrl_config vpfe_ipipeif_dpcm_pred = {
- .ops = &ipipeif_ctrl_ops,
- .id = VPFE_CID_DPCM_PREDICTOR,
- .name = "DPCM Predictor",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
- .open = ipipeif_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
- .s_stream = ipipeif_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
- .enum_mbus_code = ipipeif_enum_mbus_code,
- .enum_frame_size = ipipeif_enum_frame_size,
- .get_fmt = ipipeif_get_format,
- .set_fmt = ipipeif_set_format,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
- .core = &ipipeif_v4l2_core_ops,
- .video = &ipipeif_v4l2_video_ops,
- .pad = &ipipeif_v4l2_pad_ops,
-};
-
-static const struct vpfe_video_operations video_in_ops = {
- .queue = ipipeif_video_in_queue,
-};
-
-static int
-ipipeif_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe = to_vpfe_device(ipipeif);
- unsigned int index = local->index;
-
- /* FIXME: this is actually a hack! */
- if (is_media_entity_v4l2_subdev(remote->entity))
- index |= 2 << 16;
-
- switch (index) {
- case IPIPEIF_PAD_SINK:
- /* Single shot mode */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->input = IPIPEIF_INPUT_NONE;
- break;
- }
- ipipeif->input = IPIPEIF_INPUT_MEMORY;
- break;
-
- case IPIPEIF_PAD_SINK | 2 << 16:
- /* read from isif */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->input = IPIPEIF_INPUT_NONE;
- break;
- }
- if (ipipeif->input != IPIPEIF_INPUT_NONE)
- return -EBUSY;
-
- ipipeif->input = IPIPEIF_INPUT_ISIF;
- break;
-
- case IPIPEIF_PAD_SOURCE | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ipipeif->output = IPIPEIF_OUTPUT_NONE;
- break;
- }
- if (remote->entity == &vpfe->vpfe_ipipe.subdev.entity)
- /* connected to ipipe */
- ipipeif->output = IPIPEIF_OUTPUT_IPIPE;
- else if (remote->entity == &vpfe->vpfe_resizer.crop_resizer.subdev.entity)
- /* connected to resizer */
- ipipeif->output = IPIPEIF_OUTPUT_RESIZER;
- else
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations ipipeif_media_ops = {
- .link_setup = ipipeif_link_setup,
-};
-
-/*
- * vpfe_ipipeif_unregister_entities() - Unregister entity
- * @ipipeif - pointer to ipipeif subdevice structure.
- */
-void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif)
-{
- /* unregister video device */
- vpfe_video_unregister(&ipipeif->video_in);
-
- /* unregister subdev */
- v4l2_device_unregister_subdev(&ipipeif->subdev);
- /* cleanup entity */
- media_entity_cleanup(&ipipeif->subdev.entity);
-}
-
-int
-vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(ipipeif);
- unsigned int flags;
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
- if (ret < 0)
- return ret;
-
- ret = vpfe_video_register(&ipipeif->video_in, vdev);
- if (ret) {
- pr_err("Failed to register ipipeif video-in device\n");
- goto fail;
- }
- ipipeif->video_in.vpfe_dev = vpfe_dev;
-
- flags = 0;
- ret = media_create_pad_link(&ipipeif->video_in.video_dev.entity, 0,
- &ipipeif->subdev.entity, 0, flags);
- if (ret < 0)
- goto fail;
-
- return 0;
-fail:
- v4l2_device_unregister_subdev(&ipipeif->subdev);
-
- return ret;
-}
-
-#define IPIPEIF_GAIN_HIGH 0x3ff
-#define IPIPEIF_DEFAULT_GAIN 0x200
-
-int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &ipipeif->subdev;
- struct media_pad *pads = &ipipeif->pads[0];
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (!res)
- return -ENOENT;
-
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res)
- return -EBUSY;
-
- ipipeif->ipipeif_base_addr = ioremap_nocache(res->start, res_len);
- if (!ipipeif->ipipeif_base_addr) {
- ret = -EBUSY;
- goto fail;
- }
-
- v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
-
- sd->internal_ops = &ipipeif_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI IPIPEIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
-
- v4l2_set_subdevdata(sd, ipipeif);
-
- sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[IPIPEIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ipipeif->input = IPIPEIF_INPUT_NONE;
- ipipeif->output = IPIPEIF_OUTPUT_NONE;
- me->ops = &ipipeif_media_ops;
-
- ret = media_entity_pads_init(me, IPIPEIF_NUM_PADS, pads);
- if (ret)
- goto fail;
-
- v4l2_ctrl_handler_init(&ipipeif->ctrls, 2);
- v4l2_ctrl_new_std(&ipipeif->ctrls, &ipipeif_ctrl_ops,
- V4L2_CID_GAIN, 0,
- IPIPEIF_GAIN_HIGH, 1, IPIPEIF_DEFAULT_GAIN);
- v4l2_ctrl_new_custom(&ipipeif->ctrls, &vpfe_ipipeif_dpcm_pred, NULL);
- v4l2_ctrl_handler_setup(&ipipeif->ctrls);
- sd->ctrl_handler = &ipipeif->ctrls;
-
- ipipeif->video_in.ops = &video_in_ops;
- ipipeif->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = vpfe_video_init(&ipipeif->video_in, "IPIPEIF");
- if (ret) {
- pr_err("Failed to init IPIPEIF video-in device\n");
- goto fail;
- }
- ipipeif_set_default_config(ipipeif);
- return 0;
-fail:
- release_mem_region(res->start, res_len);
- return ret;
-}
-
-void
-vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- v4l2_ctrl_handler_free(&ipipeif->ctrls);
- iounmap(ipipeif->ipipeif_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
deleted file mode 100644
index 4d126fc871f3..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_H
-#define _DAVINCI_VPFE_DM365_IPIPEIF_H
-
-#include <linux/platform_device.h>
-
-#include <media/davinci/vpss.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#include "dm365_ipipeif_user.h"
-#include "vpfe_video.h"
-
-/* IPIPE base specific types */
-enum ipipeif_data_shift {
- IPIPEIF_BITS15_2 = 0,
- IPIPEIF_BITS14_1 = 1,
- IPIPEIF_BITS13_0 = 2,
- IPIPEIF_BITS12_0 = 3,
- IPIPEIF_BITS11_0 = 4,
- IPIPEIF_BITS10_0 = 5,
- IPIPEIF_BITS9_0 = 6,
-};
-
-enum ipipeif_clkdiv {
- IPIPEIF_DIVIDE_HALF = 0,
- IPIPEIF_DIVIDE_THIRD = 1,
- IPIPEIF_DIVIDE_FOURTH = 2,
- IPIPEIF_DIVIDE_FIFTH = 3,
- IPIPEIF_DIVIDE_SIXTH = 4,
- IPIPEIF_DIVIDE_EIGHTH = 5,
- IPIPEIF_DIVIDE_SIXTEENTH = 6,
- IPIPEIF_DIVIDE_THIRTY = 7,
-};
-
-enum ipipeif_pack_mode {
- IPIPEIF_PACK_16_BIT = 0,
- IPIPEIF_PACK_8_BIT = 1,
-};
-
-enum ipipeif_5_1_pack_mode {
- IPIPEIF_5_1_PACK_16_BIT = 0,
- IPIPEIF_5_1_PACK_8_BIT = 1,
- IPIPEIF_5_1_PACK_8_BIT_A_LAW = 2,
- IPIPEIF_5_1_PACK_12_BIT = 3
-};
-
-enum ipipeif_input_source {
- IPIPEIF_CCDC = 0,
- IPIPEIF_SDRAM_RAW = 1,
- IPIPEIF_CCDC_DARKFM = 2,
- IPIPEIF_SDRAM_YUV = 3,
-};
-
-enum ipipeif_ialaw {
- IPIPEIF_ALAW_OFF = 0,
- IPIPEIF_ALAW_ON = 1,
-};
-
-enum ipipeif_input_src1 {
- IPIPEIF_SRC1_PARALLEL_PORT = 0,
- IPIPEIF_SRC1_SDRAM_RAW = 1,
- IPIPEIF_SRC1_ISIF_DARKFM = 2,
- IPIPEIF_SRC1_SDRAM_YUV = 3,
-};
-
-enum ipipeif_dfs_dir {
- IPIPEIF_PORT_MINUS_SDRAM = 0,
- IPIPEIF_SDRAM_MINUS_PORT = 1,
-};
-
-enum ipipeif_chroma_phase {
- IPIPEIF_CBCR_Y = 0,
- IPIPEIF_Y_CBCR = 1,
-};
-
-enum ipipeif_dpcm_type {
- IPIPEIF_DPCM_8BIT_10BIT = 0,
- IPIPEIF_DPCM_8BIT_12BIT = 1,
-};
-
-/* data shift for IPIPE 5.1 */
-enum ipipeif_5_1_data_shift {
- IPIPEIF_5_1_BITS11_0 = 0,
- IPIPEIF_5_1_BITS10_0 = 1,
- IPIPEIF_5_1_BITS9_0 = 2,
- IPIPEIF_5_1_BITS8_0 = 3,
- IPIPEIF_5_1_BITS7_0 = 4,
- IPIPEIF_5_1_BITS15_4 = 5,
-};
-
-#define IPIPEIF_PAD_SINK 0
-#define IPIPEIF_PAD_SOURCE 1
-
-#define IPIPEIF_NUM_PADS 2
-
-enum ipipeif_input_entity {
- IPIPEIF_INPUT_NONE = 0,
- IPIPEIF_INPUT_ISIF = 1,
- IPIPEIF_INPUT_MEMORY = 2,
-};
-
-enum ipipeif_output_entity {
- IPIPEIF_OUTPUT_NONE = 0,
- IPIPEIF_OUTPUT_IPIPE = 1,
- IPIPEIF_OUTPUT_RESIZER = 2,
-};
-
-struct vpfe_ipipeif_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[IPIPEIF_NUM_PADS];
- struct v4l2_mbus_framefmt formats[IPIPEIF_NUM_PADS];
- enum ipipeif_input_entity input;
- unsigned int output;
- struct vpfe_video_device video_in;
- struct v4l2_ctrl_handler ctrls;
- void __iomem *ipipeif_base_addr;
- struct ipipeif_params config;
- int dpcm_predictor;
- int gain;
-};
-
-/* IPIPEIF Register Offsets from the base address */
-#define IPIPEIF_ENABLE 0x00
-#define IPIPEIF_CFG1 0x04
-#define IPIPEIF_PPLN 0x08
-#define IPIPEIF_LPFR 0x0c
-#define IPIPEIF_HNUM 0x10
-#define IPIPEIF_VNUM 0x14
-#define IPIPEIF_ADDRU 0x18
-#define IPIPEIF_ADDRL 0x1c
-#define IPIPEIF_ADOFS 0x20
-#define IPIPEIF_RSZ 0x24
-#define IPIPEIF_GAIN 0x28
-
-/* Below registers are available only on IPIPE 5.1 */
-#define IPIPEIF_DPCM 0x2c
-#define IPIPEIF_CFG2 0x30
-#define IPIPEIF_INIRSZ 0x34
-#define IPIPEIF_OCLIP 0x38
-#define IPIPEIF_DTUDF 0x3c
-#define IPIPEIF_CLKDIV 0x40
-#define IPIPEIF_DPC1 0x44
-#define IPIPEIF_DPC2 0x48
-#define IPIPEIF_DFSGVL 0x4c
-#define IPIPEIF_DFSGTH 0x50
-#define IPIPEIF_RSZ3A 0x54
-#define IPIPEIF_INIRSZ3A 0x58
-#define IPIPEIF_RSZ_MIN 16
-#define IPIPEIF_RSZ_MAX 112
-#define IPIPEIF_RSZ_CONST 16
-
-#define IPIPEIF_ADOFS_LSB_MASK 0x1ff
-#define IPIPEIF_ADOFS_LSB_SHIFT 5
-#define IPIPEIF_ADOFS_MSB_MASK 0x200
-#define IPIPEIF_ADDRU_MASK 0x7ff
-#define IPIPEIF_ADDRL_SHIFT 5
-#define IPIPEIF_ADDRL_MASK 0xffff
-#define IPIPEIF_ADDRU_SHIFT 21
-#define IPIPEIF_ADDRMSB_SHIFT 31
-#define IPIPEIF_ADDRMSB_LEFT_SHIFT 10
-
-/* CFG1 Masks and shifts */
-#define ONESHOT_SHIFT 0
-#define DECIM_SHIFT 1
-#define INPSRC_SHIFT 2
-#define CLKDIV_SHIFT 4
-#define AVGFILT_SHIFT 7
-#define PACK8IN_SHIFT 8
-#define IALAW_SHIFT 9
-#define CLKSEL_SHIFT 10
-#define DATASFT_SHIFT 11
-#define INPSRC1_SHIFT 14
-
-/* DPC2 */
-#define IPIPEIF_DPC2_EN_SHIFT 12
-#define IPIPEIF_DPC2_THR_MASK 0xfff
-/* Applicable for IPIPE 5.1 */
-#define IPIPEIF_DF_GAIN_EN_SHIFT 10
-#define IPIPEIF_DF_GAIN_MASK 0x3ff
-#define IPIPEIF_DF_GAIN_THR_MASK 0xfff
-/* DPCM */
-#define IPIPEIF_DPCM_BITS_SHIFT 2
-#define IPIPEIF_DPCM_PRED_SHIFT 1
-/* CFG2 */
-#define IPIPEIF_CFG2_HDPOL_SHIFT 1
-#define IPIPEIF_CFG2_VDPOL_SHIFT 2
-#define IPIPEIF_CFG2_YUV8_SHIFT 6
-#define IPIPEIF_CFG2_YUV16_SHIFT 3
-#define IPIPEIF_CFG2_YUV8P_SHIFT 7
-
-/* INIRSZ */
-#define IPIPEIF_INIRSZ_ALNSYNC_SHIFT 13
-#define IPIPEIF_INIRSZ_MASK 0x1fff
-
-/* CLKDIV */
-#define IPIPEIF_CLKDIV_M_SHIFT 8
-
-void vpfe_ipipeif_enable(struct vpfe_device *vpfe_dev);
-void vpfe_ipipeif_ss_buffer_isr(struct vpfe_ipipeif_device *ipipeif);
-int vpfe_ipipeif_decimation_enabled(struct vpfe_device *vpfe_dev);
-int vpfe_ipipeif_get_rsz(struct vpfe_device *vpfe_dev);
-void vpfe_ipipeif_cleanup(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev);
-int vpfe_ipipeif_init(struct vpfe_ipipeif_device *ipipeif,
- struct platform_device *pdev);
-int vpfe_ipipeif_register_entities(struct vpfe_ipipeif_device *ipipeif,
- struct v4l2_device *vdev);
-void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif);
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h b/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
deleted file mode 100644
index 046dbdec67d8..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif_user.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
-#define _DAVINCI_VPFE_DM365_IPIPEIF_USER_H
-
-/* clockdiv for IPIPE 5.1 */
-struct ipipeif_5_1_clkdiv {
- unsigned char m;
- unsigned char n;
-};
-
-enum ipipeif_decimation {
- IPIPEIF_DECIMATION_OFF,
- IPIPEIF_DECIMATION_ON
-};
-
-/* DPC at the if for IPIPE 5.1 */
-struct ipipeif_dpc {
- /* 0 - disable, 1 - enable */
- unsigned char en;
- /* threshold */
- unsigned short thr;
-};
-
-enum ipipeif_clock {
- IPIPEIF_PIXCEL_CLK,
- IPIPEIF_SDRAM_CLK
-};
-
-enum ipipeif_avg_filter {
- IPIPEIF_AVG_OFF,
- IPIPEIF_AVG_ON
-};
-
-struct ipipeif_5_1 {
- struct ipipeif_5_1_clkdiv clk_div;
- /* Defect pixel correction */
- struct ipipeif_dpc dpc;
- /* clipped to this value */
- unsigned short clip;
- /* Align HSync and VSync to rsz_start */
- unsigned char align_sync;
- /* resizer start position */
- unsigned int rsz_start;
- /* DF gain enable */
- unsigned char df_gain_en;
- /* DF gain value */
- unsigned short df_gain;
- /* DF gain threshold value */
- unsigned short df_gain_thr;
-};
-
-struct ipipeif_params {
- enum ipipeif_clock clock_select;
- unsigned int ppln;
- unsigned int lpfr;
- unsigned char rsz;
- enum ipipeif_decimation decimation;
- enum ipipeif_avg_filter avg_filter;
- /* IPIPE 5.1 */
- struct ipipeif_5_1 if_5_1;
-};
-
-/*
- * Private IOCTL
- * VIDIOC_VPFE_IPIPEIF_S_CONFIG: Set IPIPEIF configuration
- * VIDIOC_VPFE_IPIPEIF_G_CONFIG: Get IPIPEIF configuration
- */
-#define VIDIOC_VPFE_IPIPEIF_S_CONFIG \
- _IOWR('I', BASE_VIDIOC_PRIVATE + 1, struct ipipeif_params)
-#define VIDIOC_VPFE_IPIPEIF_G_CONFIG \
- _IOWR('I', BASE_VIDIOC_PRIVATE + 2, struct ipipeif_params)
-
-#endif /* _DAVINCI_VPFE_DM365_IPIPEIF_USER_H */
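The two private ioctls above exchange a struct ipipeif_params with the IPIPEIF sub-device node. A hypothetical user-space sketch; the sub-device path and the chosen rsz value are assumptions for illustration, not taken from the driver:

/* Hypothetical usage of VIDIOC_VPFE_IPIPEIF_G_CONFIG / _S_CONFIG. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int ipipeif_halve_width(const char *subdev) /* e.g. "/dev/v4l-subdev2" */
{
	struct ipipeif_params cfg;
	int fd = open(subdev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_VPFE_IPIPEIF_G_CONFIG, &cfg) == 0) {
		cfg.decimation = IPIPEIF_DECIMATION_ON;
		cfg.rsz = 32;	/* scale = 16/32, i.e. downscale by 2 */
		ioctl(fd, VIDIOC_VPFE_IPIPEIF_S_CONFIG, &cfg);
	}
	close(fd);
	return 0;
}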
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
deleted file mode 100644
index 05a997f7aa5d..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ /dev/null
@@ -1,2097 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include <linux/delay.h>
-#include "dm365_isif.h"
-#include "vpfe_mc_capture.h"
-
-#define MAX_WIDTH 4096
-#define MAX_HEIGHT 4096
-
-static const unsigned int isif_fmts[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YUYV8_1X16,
- MEDIA_BUS_FMT_YUYV10_1X20,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
- MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
-};
-
-#define ISIF_COLPTN_R_Ye 0x0
-#define ISIF_COLPTN_Gr_Cy 0x1
-#define ISIF_COLPTN_Gb_G 0x2
-#define ISIF_COLPTN_B_Mg 0x3
-
-#define ISIF_CCOLP_CP01_0 0
-#define ISIF_CCOLP_CP03_2 2
-#define ISIF_CCOLP_CP05_4 4
-#define ISIF_CCOLP_CP07_6 6
-#define ISIF_CCOLP_CP11_0 8
-#define ISIF_CCOLP_CP13_2 10
-#define ISIF_CCOLP_CP15_4 12
-#define ISIF_CCOLP_CP17_6 14
-
-static const u32 isif_sgrbg_pattern =
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP01_0 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP03_2 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP05_4 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP07_6 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP11_0 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP13_2 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP15_4 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP17_6;
-
-static const u32 isif_srggb_pattern =
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP01_0 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP03_2 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP05_4 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP07_6 |
- ISIF_COLPTN_R_Ye << ISIF_CCOLP_CP11_0 |
- ISIF_COLPTN_Gr_Cy << ISIF_CCOLP_CP13_2 |
- ISIF_COLPTN_Gb_G << ISIF_CCOLP_CP15_4 |
- ISIF_COLPTN_B_Mg << ISIF_CCOLP_CP17_6;
-
-static inline u32 isif_read(void __iomem *base_addr, u32 offset)
-{
- return readl(base_addr + offset);
-}
-
-static inline void isif_write(void __iomem *base_addr, u32 val, u32 offset)
-{
- writel(val, base_addr + offset);
-}
-
-static inline u32 isif_merge(void __iomem *base_addr, u32 mask, u32 val,
- u32 offset)
-{
- u32 new_val = (isif_read(base_addr, offset) & ~mask) | (val & mask);
-
- isif_write(base_addr, new_val, offset);
-
- return new_val;
-}
-
-static void isif_enable_output_to_sdram(struct vpfe_isif_device *isif, int en)
-{
- isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_WEN_MASK,
- en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
-}
-
-static inline void
-isif_regw_lin_tbl(struct vpfe_isif_device *isif, u32 val, u32 offset, int i)
-{
- if (!i)
- writel(val, isif->isif_cfg.linear_tbl0_addr + offset);
- else
- writel(val, isif->isif_cfg.linear_tbl1_addr + offset);
-}
-
-static void isif_disable_all_modules(struct vpfe_isif_device *isif)
-{
- /* disable BC */
- isif_write(isif->isif_cfg.base_addr, 0, CLAMPCFG);
- /* disable vdfc */
- isif_write(isif->isif_cfg.base_addr, 0, DFCCTL);
- /* disable CSC */
- isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
- /* disable linearization */
- isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
-}
-
-static void isif_enable(struct vpfe_isif_device *isif, int en)
-{
- if (!en)
-		/* Before disabling the ISIF, disable all ISIF modules */
- isif_disable_all_modules(isif);
-
- /*
- * wait for next VD. Assume lowest scan rate is 12 Hz. So
- * 100 msec delay is good enough
- */
- msleep(100);
- isif_merge(isif->isif_cfg.base_addr, ISIF_SYNCEN_VDHDEN_MASK,
- en, SYNCEN);
-}
-
-/*
- * ISIF helper functions
- */
-
-#define DM365_ISIF_MDFS_OFFSET 15
-#define DM365_ISIF_MDFS_MASK 0x1
-
-/* get field id in isif hardware */
-enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
- u32 field_status;
-
- field_status = isif_read(isif->isif_cfg.base_addr, MODESET);
- return (field_status >> DM365_ISIF_MDFS_OFFSET) &
- DM365_ISIF_MDFS_MASK;
-}
-
-static int
-isif_set_pixel_format(struct vpfe_isif_device *isif, unsigned int pixfmt)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
- if (pixfmt == V4L2_PIX_FMT_SBGGR16)
- isif->isif_cfg.data_pack = ISIF_PACK_16BIT;
- else if ((pixfmt == V4L2_PIX_FMT_SGRBG10DPCM8) ||
- (pixfmt == V4L2_PIX_FMT_SGRBG10ALAW8))
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
- else
- return -EINVAL;
-
- isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
- isif->isif_cfg.bayer.v4l2_pix_fmt = pixfmt;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
- else
- return -EINVAL;
-
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
- isif->isif_cfg.ycbcr.v4l2_pix_fmt = pixfmt;
- }
-
- return 0;
-}
-
-static int
-isif_set_frame_format(struct vpfe_isif_device *isif,
- enum isif_frmfmt frm_fmt)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
- isif->isif_cfg.bayer.frm_fmt = frm_fmt;
- else
- isif->isif_cfg.ycbcr.frm_fmt = frm_fmt;
-
- return 0;
-}
-
-static int isif_set_image_window(struct vpfe_isif_device *isif)
-{
- struct v4l2_rect *win = &isif->crop;
-
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12) {
- isif->isif_cfg.bayer.win.top = win->top;
- isif->isif_cfg.bayer.win.left = win->left;
- isif->isif_cfg.bayer.win.width = win->width;
- isif->isif_cfg.bayer.win.height = win->height;
- return 0;
- }
- isif->isif_cfg.ycbcr.win.top = win->top;
- isif->isif_cfg.ycbcr.win.left = win->left;
- isif->isif_cfg.ycbcr.win.width = win->width;
- isif->isif_cfg.ycbcr.win.height = win->height;
-
- return 0;
-}
-
-static int
-isif_set_buftype(struct vpfe_isif_device *isif, enum isif_buftype buf_type)
-{
- if (isif->formats[ISIF_PAD_SINK].code == MEDIA_BUS_FMT_SGRBG12_1X12)
- isif->isif_cfg.bayer.buf_type = buf_type;
- else
- isif->isif_cfg.ycbcr.buf_type = buf_type;
-
- return 0;
-}
-
-/* configure format in isif hardware */
-static int
-isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
-{
- struct vpfe_isif_device *vpfe_isif = &vpfe_dev->vpfe_isif;
- enum isif_frmfmt frm_fmt = ISIF_FRMFMT_INTERLACED;
- struct v4l2_pix_format format;
- int ret = 0;
-
- v4l2_fill_pix_format(&format, &vpfe_dev->vpfe_isif.formats[pad]);
- mbus_to_pix(&vpfe_dev->vpfe_isif.formats[pad], &format);
-
- if (isif_set_pixel_format(vpfe_isif, format.pixelformat) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Failed to set pixel format in isif\n");
- return -EINVAL;
- }
-
-	/* a later call to s_crop will override these values */
- vpfe_isif->crop.left = 0;
- vpfe_isif->crop.top = 0;
- vpfe_isif->crop.width = format.width;
- vpfe_isif->crop.height = format.height;
-
- /* configure the image window */
- isif_set_image_window(vpfe_isif);
-
- switch (vpfe_dev->vpfe_isif.formats[pad].field) {
- case V4L2_FIELD_INTERLACED:
- /* do nothing, since it is default */
- ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_INTERLEAVED);
- break;
-
- case V4L2_FIELD_NONE:
- frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
- /* buffer type only applicable for interlaced scan */
- break;
-
- case V4L2_FIELD_SEQ_TB:
- ret = isif_set_buftype(vpfe_isif, ISIF_BUFTYPE_FLD_SEPARATED);
- break;
-
- default:
- return -EINVAL;
- }
-
- /* set the frame format */
- if (!ret)
- ret = isif_set_frame_format(vpfe_isif, frm_fmt);
-
- return ret;
-}
-
-/*
- * isif_try_format() - Try video format on a pad
- * @isif: VPFE isif device
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- */
-static void
-isif_try_format(struct vpfe_isif_device *isif,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- unsigned int width = fmt->format.width;
- unsigned int height = fmt->format.height;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(isif_fmts); i++) {
- if (fmt->format.code == isif_fmts[i])
- break;
- }
-
- /* If not found, use YUYV8_2x8 as default */
- if (i >= ARRAY_SIZE(isif_fmts))
- fmt->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
-
- /* Clamp the size. */
- fmt->format.width = clamp_t(u32, width, 32, MAX_WIDTH);
- fmt->format.height = clamp_t(u32, height, 32, MAX_HEIGHT);
-
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- if (fmt->pad == ISIF_PAD_SOURCE)
- fmt->format.width &= ~15;
-}
-
-/*
- * vpfe_isif_buffer_isr() - isif module non-progressive buffer scheduling isr
- * @isif: Pointer to isif subdevice.
- */
-void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- struct vpfe_video_device *video = &isif->video_out;
- enum v4l2_field field;
- int fid;
-
- if (!video->started)
- return;
-
- field = video->fmt.fmt.pix.field;
-
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- if (video->cur_frm != video->next_frm)
- vpfe_video_process_buffer_complete(video);
- return;
- }
-
-	/* interlaced or TB capture: check which field we
-	 * are in from the hardware
- */
- fid = vpfe_isif_get_fid(vpfe_dev);
-
- /* switch the software maintained field id */
- video->field_id ^= 1;
- if (fid == video->field_id) {
-		/* we are in sync here, continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the current
- * frame and move on
- */
- if (video->cur_frm != video->next_frm)
- vpfe_video_process_buffer_complete(video);
- /*
- * based on whether the two fields are stored
-			 * interleaved or separately in memory,
- * reconfigure the ISIF memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_video_schedule_bottom_field(video);
- return;
- }
- /*
-		 * if one field is just being captured, configure
-		 * the next frame: get the next frame from the
-		 * empty queue; if no frame is available, hold on
-		 * to the current buffer
- */
- spin_lock(&video->dma_queue_lock);
- if (!list_empty(&video->dma_queue) &&
- video->cur_frm == video->next_frm)
- vpfe_video_schedule_next_buffer(video);
- spin_unlock(&video->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
-		 * May lose one frame
- */
- video->field_id = fid;
- }
-}
-
-/*
- * vpfe_isif_vidint1_isr() - ISIF module progressive buffer scheduling isr
- * @isif: Pointer to isif subdevice.
- */
-void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif)
-{
- struct vpfe_video_device *video = &isif->video_out;
-
- if (!video->started)
- return;
-
- spin_lock(&video->dma_queue_lock);
- if (video->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&video->dma_queue) && video->cur_frm == video->next_frm)
- vpfe_video_schedule_next_buffer(video);
-
- spin_unlock(&video->dma_queue_lock);
-}
-
-/*
- * VPFE video operations
- */
-
-static int isif_video_queue(struct vpfe_device *vpfe_dev, unsigned long addr)
-{
- struct vpfe_isif_device *isif = &vpfe_dev->vpfe_isif;
-
- isif_write(isif->isif_cfg.base_addr, (addr >> 21) &
- ISIF_CADU_BITS, CADU);
- isif_write(isif->isif_cfg.base_addr, (addr >> 5) &
- ISIF_CADL_BITS, CADL);
-
- return 0;
-}
-
-static const struct vpfe_video_operations isif_video_ops = {
- .queue = isif_video_queue,
-};
-
-/*
- * V4L2 subdev operations
- */
-
-/* Parameter operations */
-static int isif_get_params(struct v4l2_subdev *sd, void *params)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
-
- /* only raw module parameters can be set through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
- return -EINVAL;
- memcpy(params, &isif->isif_cfg.bayer.config_params,
- sizeof(isif->isif_cfg.bayer.config_params));
- return 0;
-}
-
-static int isif_validate_df_csc_params(const struct vpfe_isif_df_csc *df_csc)
-{
- const struct vpfe_isif_color_space_conv *csc;
- int err = -EINVAL;
- int i;
-
- if (!df_csc->df_or_csc) {
- /* csc configuration */
- csc = &df_csc->csc;
- if (csc->en) {
- for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++)
- if (csc->coeff[i].integer >
- ISIF_CSC_COEF_INTEG_MASK ||
- csc->coeff[i].decimal >
- ISIF_CSC_COEF_DECIMAL_MASK) {
- pr_err("Invalid CSC coefficients\n");
- return err;
- }
- }
- }
- if (df_csc->start_pix > ISIF_DF_CSC_SPH_MASK) {
- pr_err("Invalid df_csc start pix value\n");
- return err;
- }
-
- if (df_csc->num_pixels > ISIF_DF_NUMPIX) {
- pr_err("Invalid df_csc num pixels value\n");
- return err;
- }
-
- if (df_csc->start_line > ISIF_DF_CSC_LNH_MASK) {
- pr_err("Invalid df_csc start_line value\n");
- return err;
- }
-
- if (df_csc->num_lines > ISIF_DF_NUMLINES) {
- pr_err("Invalid df_csc num_lines value\n");
- return err;
- }
-
- return 0;
-}
-
-#define DM365_ISIF_MAX_VDFLSFT 4
-#define DM365_ISIF_MAX_VDFSLV 4095
-#define DM365_ISIF_MAX_DFCMEM0 0x1fff
-#define DM365_ISIF_MAX_DFCMEM1 0x1fff
-
-static int isif_validate_dfc_params(const struct vpfe_isif_dfc *dfc)
-{
- int err = -EINVAL;
- int i;
-
- if (!dfc->en)
- return 0;
-
- if (dfc->corr_whole_line > 1) {
- pr_err("Invalid corr_whole_line value\n");
- return err;
- }
-
- if (dfc->def_level_shift > DM365_ISIF_MAX_VDFLSFT) {
- pr_err("Invalid def_level_shift value\n");
- return err;
- }
-
- if (dfc->def_sat_level > DM365_ISIF_MAX_VDFSLV) {
- pr_err("Invalid def_sat_level value\n");
- return err;
- }
-
- if (!dfc->num_vdefects ||
- dfc->num_vdefects > VPFE_ISIF_VDFC_TABLE_SIZE) {
- pr_err("Invalid num_vdefects value\n");
- return err;
- }
-
- for (i = 0; i < VPFE_ISIF_VDFC_TABLE_SIZE; i++) {
- if (dfc->table[i].pos_vert > DM365_ISIF_MAX_DFCMEM0) {
- pr_err("Invalid pos_vert value\n");
- return err;
- }
- if (dfc->table[i].pos_horz > DM365_ISIF_MAX_DFCMEM1) {
- pr_err("Invalid pos_horz value\n");
- return err;
- }
- }
-
- return 0;
-}
-
-#define DM365_ISIF_MAX_CLVRV 0xfff
-#define DM365_ISIF_MAX_CLDC 0x1fff
-#define DM365_ISIF_MAX_CLHSH 0x1fff
-#define DM365_ISIF_MAX_CLHSV 0x1fff
-#define DM365_ISIF_MAX_CLVSH 0x1fff
-#define DM365_ISIF_MAX_CLVSV 0x1fff
-#define DM365_ISIF_MAX_HEIGHT_BLACK_REGION 0x1fff
-
-static int isif_validate_bclamp_params(const struct vpfe_isif_black_clamp *bclamp)
-{
- int err = -EINVAL;
-
- if (bclamp->dc_offset > DM365_ISIF_MAX_CLDC) {
- pr_err("Invalid bclamp dc_offset value\n");
- return err;
- }
- if (!bclamp->en)
- return 0;
- if (bclamp->horz.clamp_pix_limit > 1) {
- pr_err("Invalid bclamp horz clamp_pix_limit value\n");
- return err;
- }
- if (bclamp->horz.win_count_calc < 1 ||
- bclamp->horz.win_count_calc > 32) {
- pr_err("Invalid bclamp horz win_count_calc value\n");
- return err;
- }
- if (bclamp->horz.win_start_h_calc > DM365_ISIF_MAX_CLHSH) {
-		pr_err("Invalid bclamp win_start_h_calc value\n");
- return err;
- }
-
- if (bclamp->horz.win_start_v_calc > DM365_ISIF_MAX_CLHSV) {
- pr_err("Invalid bclamp win_start_v_calc value\n");
- return err;
- }
- if (bclamp->vert.reset_clamp_val > DM365_ISIF_MAX_CLVRV) {
- pr_err("Invalid bclamp reset_clamp_val value\n");
- return err;
- }
- if (bclamp->vert.ob_v_sz_calc > DM365_ISIF_MAX_HEIGHT_BLACK_REGION) {
- pr_err("Invalid bclamp ob_v_sz_calc value\n");
- return err;
- }
- if (bclamp->vert.ob_start_h > DM365_ISIF_MAX_CLVSH) {
- pr_err("Invalid bclamp ob_start_h value\n");
- return err;
- }
- if (bclamp->vert.ob_start_v > DM365_ISIF_MAX_CLVSV) {
-		pr_err("Invalid bclamp ob_start_v value\n");
- return err;
- }
- return 0;
-}
-
-static int
-isif_validate_raw_params(const struct vpfe_isif_raw_config *params)
-{
- int ret;
-
- ret = isif_validate_df_csc_params(&params->df_csc);
- if (ret)
- return ret;
- ret = isif_validate_dfc_params(&params->dfc);
- if (ret)
- return ret;
- return isif_validate_bclamp_params(&params->bclamp);
-}
-
-static int isif_set_params(struct v4l2_subdev *sd, const struct vpfe_isif_raw_config *params)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- int ret = -EINVAL;
-
- /* only raw module parameters can be set through the IOCTL */
- if (isif->formats[ISIF_PAD_SINK].code != MEDIA_BUS_FMT_SGRBG12_1X12)
- return ret;
-
- if (!isif_validate_raw_params(params)) {
- memcpy(&isif->isif_cfg.bayer.config_params, params,
- sizeof(*params));
- ret = 0;
- }
- return ret;
-}
-/*
- * isif_ioctl() - isif module private ioctl's
- * @sd: VPFE isif V4L2 subdevice
- * @cmd: ioctl command
- * @arg: ioctl argument
- *
- * Return 0 on success or a negative error code otherwise.
- */
-static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
- return isif_set_params(sd, arg);
-
- case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
- return isif_get_params(sd, arg);
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static void isif_config_gain_offset(struct vpfe_isif_device *isif)
-{
- struct vpfe_isif_gain_offsets_adj *gain_off_ptr =
- &isif->isif_cfg.bayer.config_params.gain_offset;
- void __iomem *base = isif->isif_cfg.base_addr;
- u32 val;
-
- val = ((gain_off_ptr->gain_sdram_en & 1) << GAIN_SDRAM_EN_SHIFT) |
- ((gain_off_ptr->gain_ipipe_en & 1) << GAIN_IPIPE_EN_SHIFT) |
- ((gain_off_ptr->gain_h3a_en & 1) << GAIN_H3A_EN_SHIFT) |
- ((gain_off_ptr->offset_sdram_en & 1) << OFST_SDRAM_EN_SHIFT) |
- ((gain_off_ptr->offset_ipipe_en & 1) << OFST_IPIPE_EN_SHIFT) |
- ((gain_off_ptr->offset_h3a_en & 1) << OFST_H3A_EN_SHIFT);
- isif_merge(base, GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
-
- isif_write(base, isif->isif_cfg.isif_gain_params.cr_gain, CRGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cgr_gain, CGRGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cgb_gain, CGBGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.cb_gain, CBGAIN);
- isif_write(base, isif->isif_cfg.isif_gain_params.offset & OFFSET_MASK,
- COFSTA);
-
-}
-
-static void isif_config_bclamp(struct vpfe_isif_device *isif,
- struct vpfe_isif_black_clamp *bc)
-{
- u32 val;
-
- /**
- * DC Offset is always added to image data irrespective of bc enable
- * status
- */
- val = bc->dc_offset & ISIF_BC_DCOFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLDCOFST);
-
- if (!bc->en)
- return;
-
- val = (bc->bc_mode_color & ISIF_BC_MODE_COLOR_MASK) <<
- ISIF_BC_MODE_COLOR_SHIFT;
-
- /* Enable BC and horizontal clamp calculation parameters */
- val = val | 1 | ((bc->horz.mode & ISIF_HORZ_BC_MODE_MASK) <<
- ISIF_HORZ_BC_MODE_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CLAMPCFG);
-
- if (bc->horz.mode != VPFE_ISIF_HORZ_BC_DISABLE) {
- /*
- * Window count for calculation
- * Base window selection
- * pixel limit
- * Horizontal size of window
- * vertical size of the window
- * Horizontal start position of the window
- * Vertical start position of the window
- */
- val = (bc->horz.win_count_calc & ISIF_HORZ_BC_WIN_COUNT_MASK) |
- ((bc->horz.base_win_sel_calc & 1) <<
- ISIF_HORZ_BC_WIN_SEL_SHIFT) |
- ((bc->horz.clamp_pix_limit & 1) <<
- ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
- ((bc->horz.win_h_sz_calc &
- ISIF_HORZ_BC_WIN_H_SIZE_MASK) <<
- ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
- ((bc->horz.win_v_sz_calc &
- ISIF_HORZ_BC_WIN_V_SIZE_MASK) <<
- ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN0);
-
- val = bc->horz.win_start_h_calc & ISIF_HORZ_BC_WIN_START_H_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN1);
-
- val = bc->horz.win_start_v_calc & ISIF_HORZ_BC_WIN_START_V_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLHWIN2);
- }
-
- /* vertical clamp calculation parameters */
- /* OB H Valid */
- val = bc->vert.ob_h_sz_calc & ISIF_VERT_BC_OB_H_SZ_MASK;
-
- /* Reset clamp value sel for previous line */
- val |= (bc->vert.reset_val_sel & ISIF_VERT_BC_RST_VAL_SEL_MASK) <<
- ISIF_VERT_BC_RST_VAL_SEL_SHIFT;
-
- /* Line average coefficient */
- val |= bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN0);
-
- /* Configured reset value */
- if (bc->vert.reset_val_sel == VPFE_ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL) {
- val = bc->vert.reset_clamp_val & ISIF_VERT_BC_RST_VAL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVRV);
- }
-
- /* Optical Black horizontal start position */
- val = bc->vert.ob_start_h & ISIF_VERT_BC_OB_START_HORZ_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN1);
-
- /* Optical Black vertical start position */
- val = bc->vert.ob_start_v & ISIF_VERT_BC_OB_START_VERT_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN2);
-
- val = bc->vert.ob_v_sz_calc & ISIF_VERT_BC_OB_VERT_SZ_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLVWIN3);
-
- /* Vertical start position for BC subtraction */
- val = bc->vert_start_sub & ISIF_BC_VERT_START_SUB_V_MASK;
- isif_write(isif->isif_cfg.base_addr, val, CLSV);
-}
-
-/* This function will configure the window size to be captured in the ISIF registers */
-static void
-isif_setwin(struct vpfe_isif_device *isif, struct v4l2_rect *image_win,
- enum isif_frmfmt frm_fmt, int ppc, int mode)
-{
- int horz_nr_pixels;
- int vert_nr_lines;
- int horz_start;
- int vert_start;
- int mid_img;
-
- /*
-	 * ppc - per pixel count. Indicates how many pixels per cell are
-	 * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2.
-	 * For raw capture this is 1.
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- isif_write(isif->isif_cfg.base_addr,
- horz_start & START_PX_HOR_MASK, SPH);
- isif_write(isif->isif_cfg.base_addr,
- horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
- vert_start = image_win->top;
-
- if (frm_fmt == ISIF_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- } else {
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- isif_write(isif->isif_cfg.base_addr, mid_img, VDINT1);
- }
-
- if (!mode)
- isif_write(isif->isif_cfg.base_addr, 0, VDINT0);
- else
- isif_write(isif->isif_cfg.base_addr, vert_nr_lines, VDINT0);
- isif_write(isif->isif_cfg.base_addr,
- vert_start & START_VER_ONE_MASK, SLV0);
- isif_write(isif->isif_cfg.base_addr,
- vert_start & START_VER_TWO_MASK, SLV1);
- isif_write(isif->isif_cfg.base_addr,
- vert_nr_lines & NUM_LINES_VER, LNV);
-}
-
-#define DM365_ISIF_DFCMWR_MEMORY_WRITE 1
-#define DM365_ISIF_DFCMRD_MEMORY_READ 0x2
-
-static void
-isif_config_dfc(struct vpfe_isif_device *isif, struct vpfe_isif_dfc *vdfc)
-{
-#define DFC_WRITE_WAIT_COUNT 1000
- u32 count = DFC_WRITE_WAIT_COUNT;
- u32 val;
- int i;
-
- if (!vdfc->en)
- return;
-
- /* Correction mode */
- val = (vdfc->corr_mode & ISIF_VDFC_CORR_MOD_MASK) <<
- ISIF_VDFC_CORR_MOD_SHIFT;
-
- /* Correct whole line or partial */
- if (vdfc->corr_whole_line)
- val |= BIT(ISIF_VDFC_CORR_WHOLE_LN_SHIFT);
-
- /* level shift value */
- val |= (vdfc->def_level_shift & ISIF_VDFC_LEVEL_SHFT_MASK) <<
- ISIF_VDFC_LEVEL_SHFT_SHIFT;
-
- isif_write(isif->isif_cfg.base_addr, val, DFCCTL);
-
- /* Defect saturation level */
- val = vdfc->def_sat_level & ISIF_VDFC_SAT_LEVEL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, VDFSATLV);
-
- isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_vert &
- ISIF_VDFC_POS_MASK, DFCMEM0);
- isif_write(isif->isif_cfg.base_addr, vdfc->table[0].pos_horz &
- ISIF_VDFC_POS_MASK, DFCMEM1);
- if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_at_pos, DFCMEM2);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_up_pixels, DFCMEM3);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[0].level_low_pixels, DFCMEM4);
- }
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- /* set DFCMARST and set DFCMWR */
- val |= BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
-
- while (count && (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x01))
- count--;
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
- pr_debug("defect table write timeout !!\n");
- return;
- }
-
- for (i = 1; i < vdfc->num_vdefects; i++) {
- isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_vert &
- ISIF_VDFC_POS_MASK, DFCMEM0);
-
- isif_write(isif->isif_cfg.base_addr, vdfc->table[i].pos_horz &
- ISIF_VDFC_POS_MASK, DFCMEM1);
-
- if (vdfc->corr_mode == VPFE_ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == VPFE_ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_at_pos, DFCMEM2);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_up_pixels, DFCMEM3);
- isif_write(isif->isif_cfg.base_addr,
- vdfc->table[i].level_low_pixels, DFCMEM4);
- }
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- /* clear DFCMARST and set DFCMWR */
- val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, DFCMEMCTL);
-
- count = DFC_WRITE_WAIT_COUNT;
- while (count && (isif_read(isif->isif_cfg.base_addr,
- DFCMEMCTL) & 0x01))
- count--;
-
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
- pr_debug("defect table write timeout !!\n");
- return;
- }
- }
- if (vdfc->num_vdefects < VPFE_ISIF_VDFC_TABLE_SIZE) {
- /* Extra cycle needed */
- isif_write(isif->isif_cfg.base_addr, 0, DFCMEM0);
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_MAX_DFCMEM1, DFCMEM1);
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_DFCMWR_MEMORY_WRITE, DFCMEMCTL);
- }
- /* enable VDFC */
- isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
- (1 << ISIF_VDFC_EN_SHIFT), DFCCTL);
-
- isif_merge(isif->isif_cfg.base_addr, (1 << ISIF_VDFC_EN_SHIFT),
- (0 << ISIF_VDFC_EN_SHIFT), DFCCTL);
-
- isif_write(isif->isif_cfg.base_addr, 0x6, DFCMEMCTL);
- for (i = 0; i < vdfc->num_vdefects; i++) {
- count = DFC_WRITE_WAIT_COUNT;
- while (count &&
- (isif_read(isif->isif_cfg.base_addr, DFCMEMCTL) & 0x2))
- count--;
- val = isif_read(isif->isif_cfg.base_addr, DFCMEMCTL);
- if (!count) {
- pr_debug("defect table write timeout !!\n");
- return;
- }
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_DFCMRD_MEMORY_READ, DFCMEMCTL);
- }
-}
-
-static void
-isif_config_csc(struct vpfe_isif_device *isif, struct vpfe_isif_df_csc *df_csc)
-{
- u32 val1;
- u32 val2;
- u32 i;
-
- if (!df_csc->csc.en) {
- isif_write(isif->isif_cfg.base_addr, 0, CSCCTL);
- return;
- }
- /* initialize all bits to 0 */
- val1 = 0;
- for (i = 0; i < VPFE_ISIF_CSC_NUM_COEFF; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = ((df_csc->csc.coeff[i].integer &
- ISIF_CSC_COEF_INTEG_MASK) <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- ((df_csc->csc.coeff[i].decimal &
- ISIF_CSC_COEF_DECIMAL_MASK));
- } else {
-
- /* CSCM - MSB */
- val2 = ((df_csc->csc.coeff[i].integer &
- ISIF_CSC_COEF_INTEG_MASK) <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- ((df_csc->csc.coeff[i].decimal &
- ISIF_CSC_COEF_DECIMAL_MASK));
- val2 <<= ISIF_CSCM_MSB_SHIFT;
- val2 |= val1;
- isif_write(isif->isif_cfg.base_addr, val2,
- (CSCM0 + ((i-1) << 1)));
- }
- }
- /* program the active area */
- isif_write(isif->isif_cfg.base_addr, df_csc->start_pix &
- ISIF_DF_CSC_SPH_MASK, FMTSPH);
- /*
-	 * one extra pixel as required for CSC. Actually the number of
-	 * pixels - 1 should be configured in this register. So we
-	 * would need to subtract 1 before writing to FMTLNH, but we do
-	 * not do this since CSC requires one extra pixel
- */
- isif_write(isif->isif_cfg.base_addr, df_csc->num_pixels &
- ISIF_DF_CSC_SPH_MASK, FMTLNH);
- isif_write(isif->isif_cfg.base_addr, df_csc->start_line &
- ISIF_DF_CSC_SPH_MASK, FMTSLV);
- /*
- * one extra line as required for CSC. See reason documented for
- * num_pixels
- */
- isif_write(isif->isif_cfg.base_addr, df_csc->num_lines &
- ISIF_DF_CSC_SPH_MASK, FMTLNV);
- /* Enable CSC */
- isif_write(isif->isif_cfg.base_addr, 1, CSCCTL);
-}
-
-static void
-isif_config_linearization(struct vpfe_isif_device *isif,
- struct vpfe_isif_linearize *linearize)
-{
- u32 val;
- u32 i;
-
- if (!linearize->en) {
- isif_write(isif->isif_cfg.base_addr, 0, LINCFG0);
- return;
- }
- /* shift value for correction */
- val = (linearize->corr_shft & ISIF_LIN_CORRSFT_MASK) <<
- ISIF_LIN_CORRSFT_SHIFT;
- /* enable */
- val |= 1;
- isif_write(isif->isif_cfg.base_addr, val, LINCFG0);
- /* Scale factor */
- val = (linearize->scale_fact.integer & 1) <<
- ISIF_LIN_SCALE_FACT_INTEG_SHIFT;
- val |= linearize->scale_fact.decimal & ISIF_LIN_SCALE_FACT_DECIMAL_MASK;
- isif_write(isif->isif_cfg.base_addr, val, LINCFG1);
-
- for (i = 0; i < VPFE_ISIF_LINEAR_TAB_SIZE; i++) {
- val = linearize->table[i] & ISIF_LIN_ENTRY_MASK;
- if (i%2)
- isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 1);
- else
- isif_regw_lin_tbl(isif, val, ((i >> 1) << 2), 0);
- }
-}
-
-static void
-isif_config_culling(struct vpfe_isif_device *isif, struct vpfe_isif_cul *cul)
-{
- u32 val;
-
- /* Horizontal pattern */
- val = cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT;
- val |= cul->hcpat_odd;
- isif_write(isif->isif_cfg.base_addr, val, CULH);
- /* vertical pattern */
- isif_write(isif->isif_cfg.base_addr, cul->vcpat, CULV);
- /* LPF */
- isif_merge(isif->isif_cfg.base_addr, ISIF_LPF_MASK << ISIF_LPF_SHIFT,
- cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
-}
-
-static int isif_get_pix_fmt(u32 mbus_code)
-{
- switch (mbus_code) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return ISIF_PIXFMT_RAW;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- case MEDIA_BUS_FMT_Y8_1X8:
- return ISIF_PIXFMT_YCBCR_8BIT;
-
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- return ISIF_PIXFMT_YCBCR_16BIT;
-
- default:
- break;
- }
- return -EINVAL;
-}
-
-#define ISIF_INTERLACE_INVERSE_MODE 0x4b6d
-#define ISIF_INTERLACE_NON_INVERSE_MODE 0x0b6d
-#define ISIF_PROGRESSIVE_INVERSE_MODE 0x4000
-#define ISIF_PROGRESSIVE_NON_INVERSE_MODE 0x0000
-
-static int isif_config_raw(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct isif_params_raw *params = &isif->isif_cfg.bayer;
- struct vpfe_isif_raw_config *module_params =
- &isif->isif_cfg.bayer.config_params;
- struct v4l2_mbus_framefmt *format;
- int pix_fmt;
- u32 val;
-
- format = &isif->formats[ISIF_PAD_SINK];
-
-	/* In case the user has set BT656IF earlier, it should be reset
- * when configuring for raw input.
- */
- isif_write(isif->isif_cfg.base_addr, 0, REC656IF);
- /* Configure CCDCFG register
- * Set CCD Not to swap input since input is RAW data
- * Set FID detection function to Latch at V-Sync
- * Set WENLOG - isif valid area
- * Set TRGSEL
- * Set EXTRG
- * Packed to 8 or 16 bits
- */
- val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
- ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
- ISIF_CCDCFG_EXTRG_DISABLE | (isif->isif_cfg.data_pack &
- ISIF_DATA_PACK_MASK);
- isif_write(isif->isif_cfg.base_addr, val, CCDCFG);
-
- pix_fmt = isif_get_pix_fmt(format->code);
- if (pix_fmt < 0) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /*
- * Configure the vertical sync polarity(MODESET.VDPOL)
- * Configure the horizontal sync polarity (MODESET.HDPOL)
- * Configure frame id polarity (MODESET.FLDPOL)
- * Configure data polarity
- * Configure External WEN Selection
- * Configure frame format(progressive or interlace)
- * Configure pixel format (Input mode)
- * Configure the data shift
- */
- val = ISIF_VDHDOUT_INPUT | ((params->vd_pol & ISIF_VD_POL_MASK) <<
- ISIF_VD_POL_SHIFT) | ((params->hd_pol & ISIF_HD_POL_MASK) <<
- ISIF_HD_POL_SHIFT) | ((params->fid_pol & ISIF_FID_POL_MASK) <<
- ISIF_FID_POL_SHIFT) | ((ISIF_DATAPOL_NORMAL &
- ISIF_DATAPOL_MASK) << ISIF_DATAPOL_SHIFT) | ((ISIF_EXWEN_DISABLE &
- ISIF_EXWEN_MASK) << ISIF_EXWEN_SHIFT) | ((params->frm_fmt &
- ISIF_FRM_FMT_MASK) << ISIF_FRM_FMT_SHIFT) | ((pix_fmt &
- ISIF_INPUT_MASK) << ISIF_INPUT_SHIFT);
-
- /* currently only MEDIA_BUS_FMT_SGRBG12_1X12 is
-	 * supported. Shift appropriately as support for
-	 * different media bus formats is added
- */
- if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- val |= ((VPFE_ISIF_NO_SHIFT &
- ISIF_DATASFT_MASK) << ISIF_DATASFT_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, MODESET);
- /*
- * Configure GAMMAWD register
- * CFA pattern setting
- */
- val = (params->cfa_pat & ISIF_GAMMAWD_CFA_MASK) <<
- ISIF_GAMMAWD_CFA_SHIFT;
- /* Gamma msb */
- if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10ALAW8)
- val = val | ISIF_ALAW_ENABLE;
-
- val = val | ((params->data_msb & ISIF_ALAW_GAMA_WD_MASK) <<
- ISIF_ALAW_GAMA_WD_SHIFT);
-
- isif_write(isif->isif_cfg.base_addr, val, CGAMMAWD);
- /* Configure DPCM compression settings */
- if (params->v4l2_pix_fmt == V4L2_PIX_FMT_SGRBG10DPCM8) {
- val = BIT(ISIF_DPCM_EN_SHIFT);
- val |= (params->dpcm_predictor &
- ISIF_DPCM_PREDICTOR_MASK) << ISIF_DPCM_PREDICTOR_SHIFT;
- }
- isif_write(isif->isif_cfg.base_addr, val, MISC);
- /* Configure Gain & Offset */
- isif_config_gain_offset(isif);
- /* Configure Color pattern */
- if (format->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- val = isif_sgrbg_pattern;
- else
- /* default set to rggb */
- val = isif_srggb_pattern;
-
- isif_write(isif->isif_cfg.base_addr, val, CCOLP);
-
- /* Configure HSIZE register */
- val = (params->horz_flip_en & ISIF_HSIZE_FLIP_MASK) <<
- ISIF_HSIZE_FLIP_SHIFT;
-
- /* calculate line offset in 32 bytes based on pack value */
- if (isif->isif_cfg.data_pack == ISIF_PACK_8BIT)
- val |= ((params->win.width + 31) >> 5) & ISIF_LINEOFST_MASK;
- else if (isif->isif_cfg.data_pack == ISIF_PACK_12BIT)
- val |= ((((params->win.width + (params->win.width >> 2)) +
- 31) >> 5) & ISIF_LINEOFST_MASK);
- else
- val |= (((params->win.width * 2) + 31) >> 5) &
- ISIF_LINEOFST_MASK;
- isif_write(isif->isif_cfg.base_addr, val, HSIZE);
- /* Configure SDOFST register */
- if (params->frm_fmt == ISIF_FRMFMT_INTERLACED) {
- if (params->image_invert_en)
-			/* For interlaced inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_INTERLACE_INVERSE_MODE, SDOFST);
- else
-			/* For interlaced non-inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_INTERLACE_NON_INVERSE_MODE, SDOFST);
- } else if (params->frm_fmt == ISIF_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_en)
- isif_write(isif->isif_cfg.base_addr,
- ISIF_PROGRESSIVE_INVERSE_MODE, SDOFST);
- else
-			/* For progressive non-inverse mode */
- isif_write(isif->isif_cfg.base_addr,
- ISIF_PROGRESSIVE_NON_INVERSE_MODE, SDOFST);
- }
- /* Configure video window */
- isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
- /* Configure Black Clamp */
- isif_config_bclamp(isif, &module_params->bclamp);
-	/* Configure Vertical Defect Pixel Correction */
- isif_config_dfc(isif, &module_params->dfc);
- if (!module_params->df_csc.df_or_csc)
- /* Configure Color Space Conversion */
- isif_config_csc(isif, &module_params->df_csc);
-
- isif_config_linearization(isif, &module_params->linearize);
- /* Configure Culling */
- isif_config_culling(isif, &module_params->culling);
- /* Configure Horizontal and vertical offsets(DFC,LSC,Gain) */
- val = module_params->horz_offset & ISIF_DATA_H_OFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, DATAHOFST);
-
- val = module_params->vert_offset & ISIF_DATA_V_OFFSET_MASK;
- isif_write(isif->isif_cfg.base_addr, val, DATAVOFST);
-
- return 0;
-}
-
-#define DM365_ISIF_HSIZE_MASK 0xffffffe0
-#define DM365_ISIF_SDOFST_2_LINES 0x00000249
-
-/* This function will configure ISIF for YCbCr parameters. */
-static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct isif_ycbcr_config *params = &isif->isif_cfg.ycbcr;
- struct v4l2_mbus_framefmt *format;
- int pix_fmt;
- u32 modeset;
- u32 ccdcfg;
-
- format = &isif->formats[ISIF_PAD_SINK];
- /*
- * first reset the ISIF
- * all registers have default values after reset
- * This is important since we assume default values to be set in
- * a lot of registers that we didn't touch
- */
- /* start with all bits zero */
- ccdcfg = 0;
- modeset = 0;
- pix_fmt = isif_get_pix_fmt(format->code);
- if (pix_fmt < 0) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* configure pixel format or input mode */
- modeset = modeset | ((pix_fmt & ISIF_INPUT_MASK) <<
- ISIF_INPUT_SHIFT) | ((params->frm_fmt & ISIF_FRM_FMT_MASK) <<
- ISIF_FRM_FMT_SHIFT) | (((params->fid_pol &
- ISIF_FID_POL_MASK) << ISIF_FID_POL_SHIFT)) |
- (((params->hd_pol & ISIF_HD_POL_MASK) << ISIF_HD_POL_SHIFT)) |
- (((params->vd_pol & ISIF_VD_POL_MASK) << ISIF_VD_POL_SHIFT));
-	/* pack the data to 8-bit in CCDCFG */
- switch (format->code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- modeset |= ((VPFE_PINPOL_NEGATIVE & ISIF_VD_POL_MASK) <<
- ISIF_VD_POL_SHIFT);
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_2X10:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* setup BT.656, embedded sync */
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- /* enable 10 bit mode in ccdcfg */
- ccdcfg = ccdcfg | ISIF_PACK_8BIT | ISIF_YCINSWP_YCBCR |
- ISIF_BW656_ENABLE;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_1X20:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- isif_write(isif->isif_cfg.base_addr, 3, REC656IF);
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- ccdcfg |= ISIF_PACK_8BIT;
- ccdcfg |= ISIF_YCINSWP_YCBCR;
- if (pix_fmt != ISIF_PIXFMT_YCBCR_8BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
-
- case MEDIA_BUS_FMT_YUYV8_1X16:
- if (pix_fmt != ISIF_PIXFMT_YCBCR_16BIT) {
- pr_debug("Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
-
- default:
- /* should never come here */
- pr_debug("Invalid interface type\n");
- return -EINVAL;
- }
- isif_write(isif->isif_cfg.base_addr, modeset, MODESET);
- /* Set up pix order */
- ccdcfg |= (params->pix_order & ISIF_PIX_ORDER_MASK) <<
- ISIF_PIX_ORDER_SHIFT;
- isif_write(isif->isif_cfg.base_addr, ccdcfg, CCDCFG);
- /* configure video window */
- if (format->code == MEDIA_BUS_FMT_YUYV10_1X20 ||
- format->code == MEDIA_BUS_FMT_YUYV8_1X16)
- isif_setwin(isif, &params->win, params->frm_fmt, 1, mode);
- else
- isif_setwin(isif, &params->win, params->frm_fmt, 2, mode);
-
- /*
-	 * configure the horizontal line offset:
-	 * this is done by rounding the width up to a multiple of 16 pixels
-	 * and multiplying by two to account for Y:Cb:Cr 4:2:2 data
- */
- isif_write(isif->isif_cfg.base_addr,
- ((((params->win.width * 2) + 31) &
- DM365_ISIF_HSIZE_MASK) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if (params->frm_fmt == ISIF_FRMFMT_INTERLACED &&
- params->buf_type == ISIF_BUFTYPE_FLD_INTERLEAVED)
- /* two fields are interleaved in memory */
- isif_write(isif->isif_cfg.base_addr,
- DM365_ISIF_SDOFST_2_LINES, SDOFST);
- return 0;
-}
-
-static int isif_configure(struct v4l2_subdev *sd, int mode)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = &isif->formats[ISIF_PAD_SINK];
-
- switch (format->code) {
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- return isif_config_raw(sd, mode);
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV10_1X20:
- return isif_config_ycbcr(sd, mode);
-
- default:
- break;
- }
- return -EINVAL;
-}
-
-/*
- * isif_set_stream() - Enable/Disable streaming on the ISIF module
- * @sd: VPFE ISIF V4L2 subdevice
- * @enable: Enable/disable stream
- */
-static int isif_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- int ret;
-
- if (enable) {
- ret = isif_configure(sd,
- (isif->output == ISIF_OUTPUT_MEMORY) ? 0 : 1);
- if (ret)
- return ret;
- if (isif->output == ISIF_OUTPUT_MEMORY)
- isif_enable_output_to_sdram(isif, 1);
- isif_enable(isif, 1);
- } else {
- isif_enable(isif, 0);
- isif_enable_output_to_sdram(isif, 0);
- }
-
- return 0;
-}
-
-/*
- * __isif_get_format() - helper function for getting isif format
- * @isif: pointer to isif private structure.
- * @pad: pad number.
- * @cfg: V4L2 subdev pad config
- * @which: wanted subdev format.
- */
-static struct v4l2_mbus_framefmt *
-__isif_get_format(struct vpfe_isif_device *isif,
- struct v4l2_subdev_pad_config *cfg, unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&isif->subdev, cfg, pad);
-
- return &isif->formats[pad];
-}
-
-/*
- * isif_set_format() - set format on pad
- * @sd : VPFE ISIF device
- * @cfg : V4L2 subdev pad config
- * @fmt : pointer to v4l2 subdev format structure
- *
- * Return 0 on success or -EINVAL if format or pad is invalid
- */
-static int
-isif_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- struct v4l2_mbus_framefmt *format;
-
- format = __isif_get_format(isif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- isif_try_format(isif, cfg, fmt);
- memcpy(format, &fmt->format, sizeof(*format));
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (fmt->pad == ISIF_PAD_SOURCE)
- return isif_config_format(vpfe_dev, fmt->pad);
-
- return 0;
-}
-
-/*
- * isif_get_format() - Retrieve the video format on a pad
- * @sd: VPFE ISIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- *
- * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
- * to the format type.
- */
-static int
-isif_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __isif_get_format(vpfe_isif, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- memcpy(&fmt->format, format, sizeof(fmt->format));
-
- return 0;
-}
-
-/*
- * isif_enum_frame_size() - enum frame sizes on pads
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure
- */
-static int
-isif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.pad = fse->pad;
- format.format.code = fse->code;
- format.format.width = 1;
- format.format.height = 1;
- format.which = fse->which;
- isif_try_format(isif, cfg, &format);
- fse->min_width = format.format.width;
- fse->min_height = format.format.height;
-
- if (format.format.code != fse->code)
- return -EINVAL;
-
- format.pad = fse->pad;
- format.format.code = fse->code;
- format.format.width = -1;
- format.format.height = -1;
- format.which = fse->which;
- isif_try_format(isif, cfg, &format);
- fse->max_width = format.format.width;
- fse->max_height = format.format.height;
-
- return 0;
-}
-
-/*
- * isif_enum_mbus_code() - enum mbus codes for pads
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int
-isif_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- switch (code->pad) {
- case ISIF_PAD_SINK:
- case ISIF_PAD_SOURCE:
- if (code->index >= ARRAY_SIZE(isif_fmts))
- return -EINVAL;
- code->code = isif_fmts[code->index];
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * isif_pad_set_selection() - set crop rectangle on pad
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @sel: pointer to v4l2_subdev_selection structure
- *
- * Return 0 on success, -EINVAL if pad is invalid
- */
-static int
-isif_pad_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- /* check whether it's a valid pad and target */
- if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- format = __isif_get_format(vpfe_isif, cfg, sel->pad, sel->which);
- if (format == NULL)
- return -EINVAL;
-
-	/* check whether crop rect is within limits */
- if (sel->r.top < 0 || sel->r.left < 0 ||
- (sel->r.left + sel->r.width >
- vpfe_isif->formats[ISIF_PAD_SINK].width) ||
- (sel->r.top + sel->r.height >
- vpfe_isif->formats[ISIF_PAD_SINK].height)) {
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = format->width;
- sel->r.height = format->height;
- }
- /* adjust the width to 16 pixel boundary */
- sel->r.width = (sel->r.width + 15) & ~0xf;
- vpfe_isif->crop = sel->r;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- isif_set_image_window(vpfe_isif);
- } else {
- struct v4l2_rect *rect;
-
- rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
- memcpy(rect, &vpfe_isif->crop, sizeof(*rect));
- }
- return 0;
-}
-
-/*
- * isif_pad_get_selection() - get crop rectangle on pad
- * @sd: VPFE isif V4L2 subdevice
- * @cfg: V4L2 subdev pad config
- * @sel: pointer to v4l2_subdev_selection structure
- *
- * Return 0 on success, -EINVAL if pad is invalid
- */
-static int
-isif_pad_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- struct vpfe_isif_device *vpfe_isif = v4l2_get_subdevdata(sd);
-
- /* check whether it's a valid pad and target */
- if (sel->pad != ISIF_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *rect;
-
- rect = v4l2_subdev_get_try_crop(sd, cfg, ISIF_PAD_SINK);
- memcpy(&sel->r, rect, sizeof(*rect));
- } else {
- sel->r = vpfe_isif->crop;
- }
-
- return 0;
-}
-
-/*
- * isif_init_formats() - Initialize formats on all pads
- * @sd: VPFE isif V4L2 subdevice
- * @fh: V4L2 subdev file handle
- *
- * Initialize all pad formats with default values. Try formats are initialized
- * on the file handle.
- */
-static int
-isif_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- struct v4l2_subdev_format format;
- struct v4l2_subdev_selection sel;
-
- memset(&format, 0, sizeof(format));
- format.pad = ISIF_PAD_SINK;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = MAX_WIDTH;
- format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = ISIF_PAD_SOURCE;
- format.which = V4L2_SUBDEV_FORMAT_TRY;
- format.format.code = MEDIA_BUS_FMT_SGRBG12_1X12;
- format.format.width = MAX_WIDTH;
- format.format.height = MAX_HEIGHT;
- isif_set_format(sd, fh->pad, &format);
-
- memset(&sel, 0, sizeof(sel));
- sel.pad = ISIF_PAD_SINK;
- sel.which = V4L2_SUBDEV_FORMAT_TRY;
- sel.target = V4L2_SEL_TGT_CROP;
- sel.r.width = MAX_WIDTH;
- sel.r.height = MAX_HEIGHT;
- isif_pad_set_selection(sd, fh->pad, &sel);
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops isif_v4l2_core_ops = {
- .ioctl = isif_ioctl,
-};
-
-/* subdev file operations */
-static const struct v4l2_subdev_internal_ops isif_v4l2_internal_ops = {
- .open = isif_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops isif_v4l2_video_ops = {
- .s_stream = isif_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops isif_v4l2_pad_ops = {
- .enum_mbus_code = isif_enum_mbus_code,
- .enum_frame_size = isif_enum_frame_size,
- .get_fmt = isif_get_format,
- .set_fmt = isif_set_format,
- .set_selection = isif_pad_set_selection,
- .get_selection = isif_pad_get_selection,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops isif_v4l2_ops = {
- .core = &isif_v4l2_core_ops,
- .video = &isif_v4l2_video_ops,
- .pad = &isif_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * isif_link_setup() - Setup isif connections
- * @entity: isif media entity
- * @local: Pad at the local end of the link
- * @remote: Pad at the remote end of the link
- * @flags: Link flags
- *
- * return -EINVAL or zero on success
- */
-static int
-isif_link_setup(struct media_entity *entity, const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_isif_device *isif = v4l2_get_subdevdata(sd);
- unsigned int index = local->index;
-
- /* FIXME: this is actually a hack! */
- if (is_media_entity_v4l2_subdev(remote->entity))
- index |= 2 << 16;
-
- switch (index) {
- case ISIF_PAD_SINK | 2 << 16:
- /* read from decoder/sensor */
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- isif->input = ISIF_INPUT_NONE;
- break;
- }
- if (isif->input != ISIF_INPUT_NONE)
- return -EBUSY;
- isif->input = ISIF_INPUT_PARALLEL;
- break;
-
- case ISIF_PAD_SOURCE:
- /* write to memory */
- if (flags & MEDIA_LNK_FL_ENABLED)
- isif->output = ISIF_OUTPUT_MEMORY;
- else
- isif->output = ISIF_OUTPUT_NONE;
- break;
-
- case ISIF_PAD_SOURCE | 2 << 16:
- if (flags & MEDIA_LNK_FL_ENABLED)
- isif->output = ISIF_OUTPUT_IPIPEIF;
- else
- isif->output = ISIF_OUTPUT_NONE;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-static const struct media_entity_operations isif_media_ops = {
- .link_setup = isif_link_setup,
-};
-
-/*
- * vpfe_isif_unregister_entities() - isif unregister entity
- * @isif - pointer to isif subdevice structure.
- */
-void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif)
-{
- vpfe_video_unregister(&isif->video_out);
- /* unregister subdev */
- v4l2_device_unregister_subdev(&isif->subdev);
- /* cleanup entity */
- media_entity_cleanup(&isif->subdev.entity);
-}
-
-static void isif_restore_defaults(struct vpfe_isif_device *isif)
-{
- enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
- int i;
-
- memset(&isif->isif_cfg.bayer.config_params, 0,
- sizeof(struct vpfe_isif_raw_config));
-
- isif->isif_cfg.bayer.config_params.linearize.corr_shft =
- VPFE_ISIF_NO_SHIFT;
- isif->isif_cfg.bayer.config_params.linearize.scale_fact.integer = 1;
- isif->isif_cfg.bayer.config_params.culling.hcpat_odd =
- ISIF_CULLING_HCAPT_ODD;
- isif->isif_cfg.bayer.config_params.culling.hcpat_even =
- ISIF_CULLING_HCAPT_EVEN;
- isif->isif_cfg.bayer.config_params.culling.vcpat = ISIF_CULLING_VCAPT;
- /* Enable clock to ISIF, IPIPEIF and BL */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
- vpss_enable_clock(VPSS_BL_CLOCK, 1);
-
- /* set all registers to default value */
- for (i = 0; i <= 0x1f8; i += 4)
- isif_write(isif->isif_cfg.base_addr, 0, i);
- /* no culling support */
- isif_write(isif->isif_cfg.base_addr, 0xffff, CULH);
- isif_write(isif->isif_cfg.base_addr, 0xff, CULV);
-
- /* Set default offset and gain */
- isif_config_gain_offset(isif);
- vpss_select_ccdc_source(source);
-}
-
-/*
- * vpfe_isif_register_entities() - isif register entity
- * @isif - pointer to isif subdevice structure.
- * @vdev: pointer to v4l2 device structure.
- */
-int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(isif);
- unsigned int flags;
- int ret;
-
- /* Register the subdev */
- ret = v4l2_device_register_subdev(vdev, &isif->subdev);
- if (ret < 0)
- return ret;
-
- isif_restore_defaults(isif);
- ret = vpfe_video_register(&isif->video_out, vdev);
- if (ret) {
- pr_err("Failed to register isif video out device\n");
- goto out_video_register;
- }
- isif->video_out.vpfe_dev = vpfe_dev;
- flags = 0;
- /* connect isif to video node */
- ret = media_create_pad_link(&isif->subdev.entity, 1,
- &isif->video_out.video_dev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
- return 0;
-out_create_link:
- vpfe_video_unregister(&isif->video_out);
-out_video_register:
- v4l2_device_unregister_subdev(&isif->subdev);
- return ret;
-}
-
-/* -------------------------------------------------------------------
- * V4L2 subdev control operations
- */
-
-static int vpfe_isif_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vpfe_isif_device *isif =
- container_of(ctrl->handler, struct vpfe_isif_device, ctrls);
- struct isif_oper_config *config = &isif->isif_cfg;
-
- switch (ctrl->id) {
- case VPFE_CID_DPCM_PREDICTOR:
- config->bayer.dpcm_predictor = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CRGAIN:
- config->isif_gain_params.cr_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CGRGAIN:
- config->isif_gain_params.cgr_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CGBGAIN:
- config->isif_gain_params.cgb_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_CBGAIN:
- config->isif_gain_params.cb_gain = ctrl->val;
- break;
-
- case VPFE_ISIF_CID_GAIN_OFFSET:
- config->isif_gain_params.offset = ctrl->val;
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct v4l2_ctrl_ops vpfe_isif_ctrl_ops = {
- .s_ctrl = vpfe_isif_s_ctrl,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_dpcm_pred = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_CID_DPCM_PREDICTOR,
- .name = "DPCM Predictor",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_crgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CRGAIN,
- .name = "CRGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cgrgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CGRGAIN,
- .name = "CGRGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cgbgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CGBGAIN,
- .name = "CGBGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_cbgain = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_CBGAIN,
- .name = "CBGAIN",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static const struct v4l2_ctrl_config vpfe_isif_gain_offset = {
- .ops = &vpfe_isif_ctrl_ops,
- .id = VPFE_ISIF_CID_GAIN_OFFSET,
- .name = "Gain Offset",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = (1 << 12) - 1,
- .step = 1,
- .def = 0,
-};
-
-static void isif_remove(struct vpfe_isif_device *isif,
- struct platform_device *pdev)
-{
- struct resource *res;
- int i = 0;
-
- iounmap(isif->isif_cfg.base_addr);
- iounmap(isif->isif_cfg.linear_tbl0_addr);
- iounmap(isif->isif_cfg.linear_tbl1_addr);
-
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res)
- release_mem_region(res->start,
- resource_size(res));
- i++;
- }
-}
-
-static void isif_config_defaults(struct vpfe_isif_device *isif)
-{
- isif->isif_cfg.ycbcr.v4l2_pix_fmt = V4L2_PIX_FMT_UYVY;
- isif->isif_cfg.ycbcr.pix_fmt = ISIF_PIXFMT_YCBCR_8BIT;
- isif->isif_cfg.ycbcr.frm_fmt = ISIF_FRMFMT_INTERLACED;
- isif->isif_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.ycbcr.pix_order = ISIF_PIXORDER_CBYCRY;
- isif->isif_cfg.ycbcr.buf_type = ISIF_BUFTYPE_FLD_INTERLEAVED;
-
- isif->isif_cfg.bayer.v4l2_pix_fmt = V4L2_PIX_FMT_SGRBG10ALAW8;
- isif->isif_cfg.bayer.pix_fmt = ISIF_PIXFMT_RAW;
- isif->isif_cfg.bayer.frm_fmt = ISIF_FRMFMT_PROGRESSIVE;
- isif->isif_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
- isif->isif_cfg.bayer.cfa_pat = ISIF_CFA_PAT_MOSAIC;
- isif->isif_cfg.bayer.data_msb = ISIF_BIT_MSB_11;
- isif->isif_cfg.data_pack = ISIF_PACK_8BIT;
-}
-/*
- * vpfe_isif_init() - Initialize V4L2 subdev and media entity
- * @isif: VPFE isif module
- * @pdev: Pointer to platform device structure.
- * Return 0 on success and a negative error code on failure.
- */
-int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &isif->subdev;
- struct media_pad *pads = &isif->pads[0];
- struct media_entity *me = &sd->entity;
- static resource_size_t res_len;
- struct resource *res;
- void __iomem *addr;
- int status;
- int i = 0;
-
- /* Get the ISIF base address, linearization table0 and table1 addr. */
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- status = -ENOENT;
- goto fail_nobase_res;
- }
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nobase_res;
- }
- addr = ioremap_nocache(res->start, res_len);
- if (!addr) {
- status = -EBUSY;
- goto fail_base_iomap;
- }
- switch (i) {
- case 0:
- /* ISIF base address */
- isif->isif_cfg.base_addr = addr;
- break;
- case 1:
- /* ISIF linear tbl0 address */
- isif->isif_cfg.linear_tbl0_addr = addr;
- break;
- default:
- /* ISIF linear tbl1 address */
- isif->isif_cfg.linear_tbl1_addr = addr;
- break;
- }
- i++;
- }
- davinci_cfg_reg(DM365_VIN_CAM_WEN);
- davinci_cfg_reg(DM365_VIN_CAM_VD);
- davinci_cfg_reg(DM365_VIN_CAM_HD);
- davinci_cfg_reg(DM365_VIN_YIN4_7_EN);
- davinci_cfg_reg(DM365_VIN_YIN0_3_EN);
-
- /* queue ops */
- isif->video_out.ops = &isif_video_ops;
- v4l2_subdev_init(sd, &isif_v4l2_ops);
- sd->internal_ops = &isif_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI ISIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, isif);
- sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- pads[ISIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[ISIF_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- isif->input = ISIF_INPUT_NONE;
- isif->output = ISIF_OUTPUT_NONE;
- me->ops = &isif_media_ops;
- status = media_entity_pads_init(me, ISIF_PADS_NUM, pads);
- if (status)
- goto isif_fail;
- isif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- status = vpfe_video_init(&isif->video_out, "ISIF");
- if (status) {
- pr_err("Failed to init isif-out video device\n");
- goto isif_fail;
- }
- v4l2_ctrl_handler_init(&isif->ctrls, 6);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_crgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgrgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cgbgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_cbgain, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_gain_offset, NULL);
- v4l2_ctrl_new_custom(&isif->ctrls, &vpfe_isif_dpcm_pred, NULL);
-
- v4l2_ctrl_handler_setup(&isif->ctrls);
- sd->ctrl_handler = &isif->ctrls;
- isif_config_defaults(isif);
- return 0;
-fail_base_iomap:
- release_mem_region(res->start, res_len);
- i--;
-fail_nobase_res:
- if (isif->isif_cfg.base_addr)
- iounmap(isif->isif_cfg.base_addr);
- if (isif->isif_cfg.linear_tbl0_addr)
- iounmap(isif->isif_cfg.linear_tbl0_addr);
-
- while (i >= 0) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- release_mem_region(res->start, res_len);
- i--;
- }
- return status;
-isif_fail:
- v4l2_ctrl_handler_free(&isif->ctrls);
- isif_remove(isif, pdev);
- return status;
-}
-
-/*
- * vpfe_isif_cleanup - isif module cleanup
- * @isif: pointer to isif subdevice
- * @dev: pointer to platform device structure
- */
-void
-vpfe_isif_cleanup(struct vpfe_isif_device *isif, struct platform_device *pdev)
-{
- isif_remove(isif, pdev);
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.h b/drivers/staging/media/davinci_vpfe/dm365_isif.h
deleted file mode 100644
index 0e1fe472fb2b..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_ISIF_H
-#define _DAVINCI_VPFE_DM365_ISIF_H
-
-#include <linux/platform_device.h>
-
-#include <mach/mux.h>
-
-#include <media/davinci/vpfe_types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-#include "davinci_vpfe_user.h"
-#include "dm365_isif_regs.h"
-#include "vpfe_video.h"
-
-#define ISIF_CULLING_HCAPT_ODD 0xff
-#define ISIF_CULLING_HCAPT_EVEN 0xff
-#define ISIF_CULLING_VCAPT 0xff
-
-#define ISIF_CADU_BITS 0x07ff
-#define ISIF_CADL_BITS 0x0ffff
-
-enum isif_pixfmt {
- ISIF_PIXFMT_RAW = 0,
- ISIF_PIXFMT_YCBCR_16BIT = 1,
- ISIF_PIXFMT_YCBCR_8BIT = 2,
-};
-
-enum isif_frmfmt {
- ISIF_FRMFMT_PROGRESSIVE = 0,
- ISIF_FRMFMT_INTERLACED = 1,
-};
-
-/* PIXEL ORDER IN MEMORY from LSB to MSB */
-/* only applicable for 8-bit input mode */
-enum isif_pixorder {
- ISIF_PIXORDER_YCBYCR = 0,
- ISIF_PIXORDER_CBYCRY = 1,
-};
-
-enum isif_buftype {
- ISIF_BUFTYPE_FLD_INTERLEAVED = 0,
- ISIF_BUFTYPE_FLD_SEPARATED = 1,
-};
-
-struct isif_ycbcr_config {
- /* v4l2 pixel format */
- unsigned long v4l2_pix_fmt;
- /* isif pixel format */
- enum isif_pixfmt pix_fmt;
- /* isif frame format */
- enum isif_frmfmt frm_fmt;
- /* isif crop window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* isif pix order. Only used for ycbcr capture */
- enum isif_pixorder pix_order;
- /* isif buffer type. Only used for ycbcr capture */
- enum isif_buftype buf_type;
-};
-
-enum isif_cfa_pattern {
- ISIF_CFA_PAT_MOSAIC = 0,
- ISIF_CFA_PAT_STRIPE = 1,
-};
-
-enum isif_data_msb {
- /* MSB b15 */
- ISIF_BIT_MSB_15 = 0,
- /* MSB b14 */
- ISIF_BIT_MSB_14 = 1,
- /* MSB b13 */
- ISIF_BIT_MSB_13 = 2,
- /* MSB b12 */
- ISIF_BIT_MSB_12 = 3,
- /* MSB b11 */
- ISIF_BIT_MSB_11 = 4,
- /* MSB b10 */
- ISIF_BIT_MSB_10 = 5,
- /* MSB b9 */
- ISIF_BIT_MSB_9 = 6,
- /* MSB b8 */
- ISIF_BIT_MSB_8 = 7,
- /* MSB b7 */
- ISIF_BIT_MSB_7 = 8,
-};
-
-struct isif_params_raw {
- /* v4l2 pixel format */
- unsigned long v4l2_pix_fmt;
- /* isif pixel format */
- enum isif_pixfmt pix_fmt;
- /* isif frame format */
- enum isif_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* buffer type. Applicable for interlaced mode */
- enum isif_buftype buf_type;
- /* cfa pattern */
- enum isif_cfa_pattern cfa_pat;
- /* Data MSB position */
- enum isif_data_msb data_msb;
- /* Enable horizontal flip */
- unsigned char horz_flip_en;
- /* Enable image invert vertically */
- unsigned char image_invert_en;
- unsigned char dpcm_predictor;
- struct vpfe_isif_raw_config config_params;
-};
-
-enum isif_data_pack {
- ISIF_PACK_16BIT = 0,
- ISIF_PACK_12BIT = 1,
- ISIF_PACK_8BIT = 2,
-};
-
-struct isif_gain_values {
- unsigned int cr_gain;
- unsigned int cgr_gain;
- unsigned int cgb_gain;
- unsigned int cb_gain;
- unsigned int offset;
-};
-
-struct isif_oper_config {
- struct isif_ycbcr_config ycbcr;
- struct isif_params_raw bayer;
- enum isif_data_pack data_pack;
- struct isif_gain_values isif_gain_params;
- void __iomem *base_addr;
- void __iomem *linear_tbl0_addr;
- void __iomem *linear_tbl1_addr;
-};
-
-#define ISIF_PAD_SINK 0
-#define ISIF_PAD_SOURCE 1
-
-#define ISIF_PADS_NUM 2
-
-enum isif_input_entity {
- ISIF_INPUT_NONE = 0,
- ISIF_INPUT_PARALLEL = 1,
-};
-
-#define ISIF_OUTPUT_NONE (0)
-#define ISIF_OUTPUT_MEMORY (1 << 0)
-#define ISIF_OUTPUT_IPIPEIF (1 << 1)
-
-struct vpfe_isif_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[ISIF_PADS_NUM];
- struct v4l2_mbus_framefmt formats[ISIF_PADS_NUM];
- enum isif_input_entity input;
- unsigned int output;
- struct v4l2_ctrl_handler ctrls;
- struct v4l2_rect crop;
- struct isif_oper_config isif_cfg;
- struct vpfe_video_device video_out;
-};
-
-enum v4l2_field vpfe_isif_get_fid(struct vpfe_device *vpfe_dev);
-void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif);
-int vpfe_isif_register_entities(struct vpfe_isif_device *isif,
- struct v4l2_device *dev);
-int vpfe_isif_init(struct vpfe_isif_device *isif, struct platform_device *pdev);
-void vpfe_isif_cleanup(struct vpfe_isif_device *vpfe_isif,
- struct platform_device *pdev);
-void vpfe_isif_vidint1_isr(struct vpfe_isif_device *isif);
-void vpfe_isif_buffer_isr(struct vpfe_isif_device *isif);
-
-#endif /* _DAVINCI_VPFE_DM365_ISIF_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
deleted file mode 100644
index 6695680817b9..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_ISIF_REGS_H
-#define _DAVINCI_VPFE_DM365_ISIF_REGS_H
-
-/* ISIF registers relative offsets */
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDW 0x08
-#define VDW 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define LNH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define LNV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define CADU 0x3c
-#define CADL 0x40
-#define LINCFG0 0x44
-#define LINCFG1 0x48
-#define CCOLP 0x4c
-#define CRGAIN 0x50
-#define CGRGAIN 0x54
-#define CGBGAIN 0x58
-#define CBGAIN 0x5c
-#define COFSTA 0x60
-#define FLSHCFG0 0x64
-#define FLSHCFG1 0x68
-#define FLSHCFG2 0x6c
-#define VDINT0 0x70
-#define VDINT1 0x74
-#define VDINT2 0x78
-#define MISC 0x7c
-#define CGAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-/*****************************************************
- * Defect Correction registers
- *****************************************************/
-#define DFCCTL 0x8c
-#define VDFSATLV 0x90
-#define DFCMEMCTL 0x94
-#define DFCMEM0 0x98
-#define DFCMEM1 0x9c
-#define DFCMEM2 0xa0
-#define DFCMEM3 0xa4
-#define DFCMEM4 0xa8
-/****************************************************
- * Black Clamp registers
- ****************************************************/
-#define CLAMPCFG 0xac
-#define CLDCOFST 0xb0
-#define CLSV 0xb4
-#define CLHWIN0 0xb8
-#define CLHWIN1 0xbc
-#define CLHWIN2 0xc0
-#define CLVRV 0xc4
-#define CLVWIN0 0xc8
-#define CLVWIN1 0xcc
-#define CLVWIN2 0xd0
-#define CLVWIN3 0xd4
-/****************************************************
- * Lens Shading Correction
- ****************************************************/
-#define DATAHOFST 0xd8
-#define DATAVOFST 0xdc
-#define LSCHVAL 0xe0
-#define LSCVVAL 0xe4
-#define TWODLSCCFG 0xe8
-#define TWODLSCOFST 0xec
-#define TWODLSCINI 0xf0
-#define TWODLSCGRBU 0xf4
-#define TWODLSCGRBL 0xf8
-#define TWODLSCGROF 0xfc
-#define TWODLSCORBU 0x100
-#define TWODLSCORBL 0x104
-#define TWODLSCOROF 0x108
-#define TWODLSCIRQEN 0x10c
-#define TWODLSCIRQST 0x110
-/****************************************************
- * Data formatter
- ****************************************************/
-#define FMTCFG 0x114
-#define FMTPLEN 0x118
-#define FMTSPH 0x11c
-#define FMTLNH 0x120
-#define FMTSLV 0x124
-#define FMTLNV 0x128
-#define FMTRLEN 0x12c
-#define FMTHCNT 0x130
-#define FMTAPTR_BASE 0x134
-/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
-#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
-#define FMTPGMVF0 0x174
-#define FMTPGMVF1 0x178
-#define FMTPGMAPU0 0x17c
-#define FMTPGMAPU1 0x180
-#define FMTPGMAPS0 0x184
-#define FMTPGMAPS1 0x188
-#define FMTPGMAPS2 0x18c
-#define FMTPGMAPS3 0x190
-#define FMTPGMAPS4 0x194
-#define FMTPGMAPS5 0x198
-#define FMTPGMAPS6 0x19c
-#define FMTPGMAPS7 0x1a0
-/************************************************
- * Color Space Converter
- ************************************************/
-#define CSCCTL 0x1a4
-#define CSCM0 0x1a8
-#define CSCM1 0x1ac
-#define CSCM2 0x1b0
-#define CSCM3 0x1b4
-#define CSCM4 0x1b8
-#define CSCM5 0x1bc
-#define CSCM6 0x1c0
-#define CSCM7 0x1c4
-#define OBWIN0 0x1c8
-#define OBWIN1 0x1cc
-#define OBWIN2 0x1d0
-#define OBWIN3 0x1d4
-#define OBVAL0 0x1d8
-#define OBVAL1 0x1dc
-#define OBVAL2 0x1e0
-#define OBVAL3 0x1e4
-#define OBVAL4 0x1e8
-#define OBVAL5 0x1ec
-#define OBVAL6 0x1f0
-#define OBVAL7 0x1f4
-#define CLKCTL 0x1f8
-
-/* Masks & Shifts below */
-#define START_PX_HOR_MASK 0x7fff
-#define NUM_PX_HOR_MASK 0x7fff
-#define START_VER_ONE_MASK 0x7fff
-#define START_VER_TWO_MASK 0x7fff
-#define NUM_LINES_VER 0x7fff
-
-/* gain - offset masks */
-#define OFFSET_MASK 0xfff
-#define GAIN_SDRAM_EN_SHIFT 12
-#define GAIN_IPIPE_EN_SHIFT 13
-#define GAIN_H3A_EN_SHIFT 14
-#define OFST_SDRAM_EN_SHIFT 8
-#define OFST_IPIPE_EN_SHIFT 9
-#define OFST_H3A_EN_SHIFT 10
-#define GAIN_OFFSET_EN_MASK 0x7700
-
-/* Culling */
-#define CULL_PAT_EVEN_LINE_SHIFT 8
-
-/* CCDCFG register */
-#define ISIF_YCINSWP_RAW (0x00 << 4)
-#define ISIF_YCINSWP_YCBCR (0x01 << 4)
-#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
-#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
-#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
-#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
-#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
-#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
-#define ISIF_DATA_PACK_MASK 0x03
-#define ISIF_PIX_ORDER_SHIFT 11
-#define ISIF_PIX_ORDER_MASK 0x01
-#define ISIF_BW656_ENABLE (0x01 << 5)
-
-/* MODESET registers */
-#define ISIF_VDHDOUT_INPUT (0x00 << 0)
-#define ISIF_INPUT_MASK 0x03
-#define ISIF_INPUT_SHIFT 12
-#define ISIF_FID_POL_MASK 0x01
-#define ISIF_FID_POL_SHIFT 4
-#define ISIF_HD_POL_MASK 0x01
-#define ISIF_HD_POL_SHIFT 3
-#define ISIF_VD_POL_MASK 0x01
-#define ISIF_VD_POL_SHIFT 2
-#define ISIF_DATAPOL_NORMAL 0x00
-#define ISIF_DATAPOL_MASK 0x01
-#define ISIF_DATAPOL_SHIFT 6
-#define ISIF_EXWEN_DISABLE 0x00
-#define ISIF_EXWEN_MASK 0x01
-#define ISIF_EXWEN_SHIFT 5
-#define ISIF_FRM_FMT_MASK 0x01
-#define ISIF_FRM_FMT_SHIFT 7
-#define ISIF_DATASFT_MASK 0x07
-#define ISIF_DATASFT_SHIFT 8
-#define ISIF_LPF_SHIFT 14
-#define ISIF_LPF_MASK 0x1
-
-/* GAMMAWD registers */
-#define ISIF_ALAW_GAMA_WD_MASK 0xf
-#define ISIF_ALAW_GAMA_WD_SHIFT 1
-#define ISIF_ALAW_ENABLE 0x01
-#define ISIF_GAMMAWD_CFA_MASK 0x01
-#define ISIF_GAMMAWD_CFA_SHIFT 5
-
-/* HSIZE registers */
-#define ISIF_HSIZE_FLIP_MASK 0x01
-#define ISIF_HSIZE_FLIP_SHIFT 12
-#define ISIF_LINEOFST_MASK 0xfff
-
-/* MISC registers */
-#define ISIF_DPCM_EN_SHIFT 12
-#define ISIF_DPCM_PREDICTOR_SHIFT 13
-#define ISIF_DPCM_PREDICTOR_MASK 1
-
-/* Black clamp related */
-#define ISIF_BC_DCOFFSET_MASK 0x1fff
-#define ISIF_BC_MODE_COLOR_MASK 1
-#define ISIF_BC_MODE_COLOR_SHIFT 4
-#define ISIF_HORZ_BC_MODE_MASK 3
-#define ISIF_HORZ_BC_MODE_SHIFT 1
-#define ISIF_HORZ_BC_WIN_COUNT_MASK 0x1f
-#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
-#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
-#define ISIF_HORZ_BC_WIN_H_SIZE_MASK 3
-#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
-#define ISIF_HORZ_BC_WIN_V_SIZE_MASK 3
-#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
-#define ISIF_HORZ_BC_WIN_START_H_MASK 0x1fff
-#define ISIF_HORZ_BC_WIN_START_V_MASK 0x1fff
-#define ISIF_VERT_BC_OB_H_SZ_MASK 7
-#define ISIF_VERT_BC_RST_VAL_SEL_MASK 3
-#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
-#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
-#define ISIF_VERT_BC_OB_START_HORZ_MASK 0x1fff
-#define ISIF_VERT_BC_OB_START_VERT_MASK 0x1fff
-#define ISIF_VERT_BC_OB_VERT_SZ_MASK 0x1fff
-#define ISIF_VERT_BC_RST_VAL_MASK 0xfff
-#define ISIF_BC_VERT_START_SUB_V_MASK 0x1fff
-
-/* VDFC registers */
-#define ISIF_VDFC_EN_SHIFT 4
-#define ISIF_VDFC_CORR_MOD_MASK 3
-#define ISIF_VDFC_CORR_MOD_SHIFT 5
-#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
-#define ISIF_VDFC_LEVEL_SHFT_MASK 7
-#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
-#define ISIF_VDFC_SAT_LEVEL_MASK 0xfff
-#define ISIF_VDFC_POS_MASK 0x1fff
-#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
-
-/* CSC registers */
-#define ISIF_CSC_COEF_INTEG_MASK 7
-#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
-#define ISIF_CSC_COEF_INTEG_SHIFT 5
-#define ISIF_CSCM_MSB_SHIFT 8
-#define ISIF_DF_CSC_SPH_MASK 0x1fff
-#define ISIF_DF_CSC_LNH_MASK 0x1fff
-#define ISIF_DF_CSC_SLV_MASK 0x1fff
-#define ISIF_DF_CSC_LNV_MASK 0x1fff
-#define ISIF_DF_NUMLINES 0x7fff
-#define ISIF_DF_NUMPIX 0x1fff
-
-/* Offsets for LSC/DFC/Gain */
-#define ISIF_DATA_H_OFFSET_MASK 0x1fff
-#define ISIF_DATA_V_OFFSET_MASK 0x1fff
-
-/* Linearization */
-#define ISIF_LIN_CORRSFT_MASK 7
-#define ISIF_LIN_CORRSFT_SHIFT 4
-#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
-#define ISIF_LIN_SCALE_FACT_DECIMAL_MASK 0x3ff
-#define ISIF_LIN_ENTRY_MASK 0x3ff
-
-/* masks and shifts */
-#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
-#define ISIF_SYNCEN_WEN_MASK (1 << 1)
-#define ISIF_SYNCEN_WEN_SHIFT 1
-
-#endif /* _DAVINCI_VPFE_DM365_ISIF_REGS_H */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
deleted file mode 100644
index 7adf1fae43f6..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ /dev/null
@@ -1,1995 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * Resizer allows upscaling or downscaling an image to a desired
- * resolution. There are two resizer modules, both operating on the
- * same input image, but each can have a different output resolution.
- */
-
-#include "dm365_ipipe_hw.h"
-#include "dm365_resizer.h"
-
-#define MIN_IN_WIDTH 32
-#define MIN_IN_HEIGHT 32
-#define MAX_IN_WIDTH 4095
-#define MAX_IN_HEIGHT 4095
-#define MIN_OUT_WIDTH 16
-#define MIN_OUT_HEIGHT 2
-
-static const unsigned int resizer_input_formats[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_SGRBG12_1X12,
-};
-
-static const unsigned int resizer_output_formats[] = {
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_Y8_1X8,
- MEDIA_BUS_FMT_UV8_1X8,
- MEDIA_BUS_FMT_YDYUYDYV8_1X16,
- MEDIA_BUS_FMT_SGRBG12_1X12,
-};
-
-/* resizer_calculate_line_length() - This function calculates the line length of
- * various image planes at the input and
- * output.
- */
-static void
-resizer_calculate_line_length(u32 pix, int width, int height,
- int *line_len, int *line_len_c)
-{
- *line_len = 0;
- *line_len_c = 0;
-
- if (pix == MEDIA_BUS_FMT_UYVY8_2X8 ||
- pix == MEDIA_BUS_FMT_SGRBG12_1X12) {
- *line_len = width << 1;
- } else {
- *line_len = width;
- *line_len_c = width;
- }
-
- /* adjust the line len to be a multiple of 32 */
- *line_len += 31;
- *line_len &= ~0x1f;
- *line_len_c += 31;
- *line_len_c &= ~0x1f;
-}
-
-static inline int
-resizer_validate_output_image_format(struct device *dev,
- struct v4l2_mbus_framefmt *format,
- int *in_line_len, int *in_line_len_c)
-{
- if (format->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
- format->code != MEDIA_BUS_FMT_Y8_1X8 &&
- format->code != MEDIA_BUS_FMT_UV8_1X8 &&
- format->code != MEDIA_BUS_FMT_YDYUYDYV8_1X16 &&
- format->code != MEDIA_BUS_FMT_SGRBG12_1X12) {
- dev_err(dev, "Invalid Mbus format, %d\n", format->code);
- return -EINVAL;
- }
- if (!format->width || !format->height) {
- dev_err(dev, "invalid width or height\n");
- return -EINVAL;
- }
- resizer_calculate_line_length(format->code, format->width,
- format->height, in_line_len, in_line_len_c);
- return 0;
-}
-
-static void
-resizer_configure_passthru(struct vpfe_resizer_device *resizer, int bypass)
-{
- struct resizer_params *param = &resizer->config;
-
- param->rsz_rsc_param[RSZ_A].cen = DISABLE;
- param->rsz_rsc_param[RSZ_A].yen = DISABLE;
- param->rsz_rsc_param[RSZ_A].v_phs_y = 0;
- param->rsz_rsc_param[RSZ_A].v_phs_c = 0;
- param->rsz_rsc_param[RSZ_A].v_dif = 256;
- param->rsz_rsc_param[RSZ_A].v_lpf_int_y = 0;
- param->rsz_rsc_param[RSZ_A].v_lpf_int_c = 0;
- param->rsz_rsc_param[RSZ_A].h_phs = 0;
- param->rsz_rsc_param[RSZ_A].h_dif = 256;
- param->rsz_rsc_param[RSZ_A].h_lpf_int_y = 0;
- param->rsz_rsc_param[RSZ_A].h_lpf_int_c = 0;
- param->rsz_rsc_param[RSZ_A].dscale_en = DISABLE;
- param->rsz2rgb[RSZ_A].rgb_en = DISABLE;
- param->rsz_en[RSZ_A] = ENABLE;
- param->rsz_en[RSZ_B] = DISABLE;
- if (bypass) {
- param->rsz_rsc_param[RSZ_A].i_vps = 0;
- param->rsz_rsc_param[RSZ_A].i_hps = 0;
- /* Raw Bypass */
- param->rsz_common.passthrough = BYPASS_ON;
- }
-}
-
-static void
-configure_resizer_out_params(struct vpfe_resizer_device *resizer, int index,
- void *output_spec, unsigned char partial,
- unsigned int flag)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *outformat;
- struct vpfe_rsz_output_spec *output;
-
- if (index == RSZ_A &&
- resizer->resizer_a.output == RESIZER_OUTPUT_NONE) {
- param->rsz_en[index] = DISABLE;
- return;
- }
- if (index == RSZ_B &&
- resizer->resizer_b.output == RESIZER_OUTPUT_NONE) {
- param->rsz_en[index] = DISABLE;
- return;
- }
- output = output_spec;
- param->rsz_en[index] = ENABLE;
- if (partial) {
- param->rsz_rsc_param[index].h_flip = output->h_flip;
- param->rsz_rsc_param[index].v_flip = output->v_flip;
- param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
- param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
- param->rsz_rsc_param[index].v_lpf_int_y =
- output->v_lpf_int_y;
- param->rsz_rsc_param[index].v_lpf_int_c =
- output->v_lpf_int_c;
- param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
- param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
- param->rsz_rsc_param[index].h_lpf_int_y =
- output->h_lpf_int_y;
- param->rsz_rsc_param[index].h_lpf_int_c =
- output->h_lpf_int_c;
- param->rsz_rsc_param[index].dscale_en =
- output->en_down_scale;
- param->rsz_rsc_param[index].h_dscale_ave_sz =
- output->h_dscale_ave_sz;
- param->rsz_rsc_param[index].v_dscale_ave_sz =
- output->v_dscale_ave_sz;
- param->ext_mem_param[index].user_y_ofst =
- (output->user_y_ofst + 31) & ~0x1f;
- param->ext_mem_param[index].user_c_ofst =
- (output->user_c_ofst + 31) & ~0x1f;
- return;
- }
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
- param->rsz_rsc_param[index].o_vsz = outformat->height - 1;
- param->rsz_rsc_param[index].o_hsz = outformat->width - 1;
- param->ext_mem_param[index].rsz_sdr_ptr_s_y = output->vst_y;
- param->ext_mem_param[index].rsz_sdr_ptr_e_y = outformat->height;
- param->ext_mem_param[index].rsz_sdr_ptr_s_c = output->vst_c;
- param->ext_mem_param[index].rsz_sdr_ptr_e_c = outformat->height;
-
- if (!flag)
- return;
- /* update common parameters */
- param->rsz_rsc_param[index].h_flip = output->h_flip;
- param->rsz_rsc_param[index].v_flip = output->v_flip;
- param->rsz_rsc_param[index].v_typ_y = output->v_typ_y;
- param->rsz_rsc_param[index].v_typ_c = output->v_typ_c;
- param->rsz_rsc_param[index].v_lpf_int_y = output->v_lpf_int_y;
- param->rsz_rsc_param[index].v_lpf_int_c = output->v_lpf_int_c;
- param->rsz_rsc_param[index].h_typ_y = output->h_typ_y;
- param->rsz_rsc_param[index].h_typ_c = output->h_typ_c;
- param->rsz_rsc_param[index].h_lpf_int_y = output->h_lpf_int_y;
- param->rsz_rsc_param[index].h_lpf_int_c = output->h_lpf_int_c;
- param->rsz_rsc_param[index].dscale_en = output->en_down_scale;
- param->rsz_rsc_param[index].h_dscale_ave_sz = output->h_dscale_ave_sz;
- param->rsz_rsc_param[index].v_dscale_ave_sz = output->v_dscale_ave_sz;
- param->ext_mem_param[index].user_y_ofst =
- (output->user_y_ofst + 31) & ~0x1f;
- param->ext_mem_param[index].user_c_ofst =
- (output->user_c_ofst + 31) & ~0x1f;
-}
-
-/*
- * resizer_calculate_resize_ratios() - Calculates resize ratio for resizer
- * A or B. This is called after setting
- * the input size or output size.
- * @resizer: Pointer to VPFE resizer subdevice.
- * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
- */
-static void
-resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *informat, *outformat;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- if (outformat->field != V4L2_FIELD_INTERLACED)
- param->rsz_rsc_param[index].v_dif =
- ((informat->height) * 256) / (outformat->height);
- else
- param->rsz_rsc_param[index].v_dif =
- ((informat->height >> 1) * 256) / (outformat->height);
- param->rsz_rsc_param[index].h_dif =
- ((informat->width) * 256) / (outformat->width);
-}
-
-static void resizer_enable_422_420_conversion(struct resizer_params *param,
- int index, bool en)
-{
- param->rsz_rsc_param[index].cen = en;
- param->rsz_rsc_param[index].yen = en;
-}
-
-/* resizer_calculate_sdram_offsets() - This function calculates the offsets from
- * start of buffer for the C plane when
- * output format is YUV420SP. It also
- * calculates the offsets from the start of
- * the buffer when the image is flipped
- * vertically or horizontally for ycbcr/y/c
- * planes.
- * @resizer: Pointer to resizer subdevice.
- * @index: index RSZ_A-resizer-A RSZ_B-resizer-B.
- */
-static int
-resizer_calculate_sdram_offsets(struct vpfe_resizer_device *resizer, int index)
-{
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *outformat;
- int bytesperpixel = 2;
- int image_height;
- int image_width;
- int yuv_420 = 0;
- int offset = 0;
-
- if (index == RSZ_A)
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- else
- outformat = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- image_height = outformat->height + 1;
- image_width = outformat->width + 1;
- param->ext_mem_param[index].c_offset = 0;
- param->ext_mem_param[index].flip_ofst_y = 0;
- param->ext_mem_param[index].flip_ofst_c = 0;
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16) {
- /* YUV 420 */
- yuv_420 = 1;
- bytesperpixel = 1;
- }
-
- if (param->rsz_rsc_param[index].h_flip)
- /* width * bytesperpixel - 1 */
- offset = (image_width * bytesperpixel) - 1;
- if (param->rsz_rsc_param[index].v_flip)
- offset += (image_height - 1) *
- param->ext_mem_param[index].rsz_sdr_oft_y;
- param->ext_mem_param[index].flip_ofst_y = offset;
- if (!yuv_420)
- return 0;
- offset = 0;
- /* half height for c-plane */
- if (param->rsz_rsc_param[index].h_flip)
- /* width * bytesperpixel - 1 */
- offset = image_width - 1;
- if (param->rsz_rsc_param[index].v_flip)
- offset += (((image_height >> 1) - 1) *
- param->ext_mem_param[index].rsz_sdr_oft_c);
- param->ext_mem_param[index].flip_ofst_c = offset;
- param->ext_mem_param[index].c_offset =
- param->ext_mem_param[index].rsz_sdr_oft_y * image_height;
- return 0;
-}
-
-static int resizer_configure_output_win(struct vpfe_resizer_device *resizer)
-{
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_output_spec output_specs;
- struct v4l2_mbus_framefmt *outformat;
- int line_len_c;
- int line_len;
- int ret;
-
- outformat = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
-
- memset(&output_specs, 0x0, sizeof(struct vpfe_rsz_output_spec));
- output_specs.vst_y = param->user_config.vst;
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- output_specs.vst_c = param->user_config.vst;
-
- configure_resizer_out_params(resizer, RSZ_A, &output_specs, 0, 0);
- resizer_calculate_line_length(outformat->code,
- param->rsz_rsc_param[0].o_hsz + 1,
- param->rsz_rsc_param[0].o_vsz + 1,
- &line_len, &line_len_c);
- param->ext_mem_param[0].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[0].rsz_sdr_oft_c = line_len_c;
- resizer_calculate_resize_ratios(resizer, RSZ_A);
- if (param->rsz_en[RSZ_B])
- resizer_calculate_resize_ratios(resizer, RSZ_B);
-
- if (outformat->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param, RSZ_A, ENABLE);
- else
- resizer_enable_422_420_conversion(param, RSZ_A, DISABLE);
-
- ret = resizer_calculate_sdram_offsets(resizer, RSZ_A);
- if (!ret && param->rsz_en[RSZ_B])
- ret = resizer_calculate_sdram_offsets(resizer, RSZ_B);
-
- if (ret)
- pr_err("Error in calculating sdram offsets\n");
- return ret;
-}
-
-static int
-resizer_calculate_down_scale_f_div_param(struct device *dev,
- int input_width, int output_width,
- struct resizer_scale_param *param)
-{
- /* rsz = R, input_width = H, output width = h in the equation */
- unsigned int two_power;
- unsigned int upper_h1;
- unsigned int upper_h2;
- unsigned int val1;
- unsigned int val;
- unsigned int rsz;
- unsigned int h1;
- unsigned int h2;
- unsigned int o;
- unsigned int n;
-
- upper_h1 = input_width >> 1;
- n = param->h_dscale_ave_sz;
- /* 2 ^ (scale+1) */
- two_power = 1 << (n + 1);
- upper_h1 = (upper_h1 >> (n + 1)) << (n + 1);
- upper_h2 = input_width - upper_h1;
- if (upper_h2 % two_power) {
- dev_err(dev, "frame halves to be a multiple of 2 power n+1\n");
- return -EINVAL;
- }
- two_power = 1 << n;
- rsz = (input_width << 8) / output_width;
- val = rsz * two_power;
- val = ((upper_h1 << 8) / val) + 1;
- if (!(val % 2)) {
- h1 = val;
- } else {
- val = upper_h1 << 8;
- val >>= n + 1;
- val -= rsz >> 1;
- val /= rsz << 1;
- val <<= 1;
- val += 2;
- h1 = val;
- }
- o = 10 + (two_power << 2);
- if (((input_width << 7) / rsz) % 2)
- o += ((DIV_ROUND_UP(rsz, 1024) << 1) << n);
- h2 = output_width - h1;
- /* phi */
- val = (h1 * rsz) - (((upper_h1 - (o - 10)) / two_power) << 8);
- /* skip */
- val1 = ((val - 1024) >> 9) << 1;
- param->f_div.num_passes = MAX_PASSES;
- param->f_div.pass[0].o_hsz = h1 - 1;
- param->f_div.pass[0].i_hps = 0;
- param->f_div.pass[0].h_phs = 0;
- param->f_div.pass[0].src_hps = 0;
- param->f_div.pass[0].src_hsz = upper_h1 + o;
- param->f_div.pass[1].o_hsz = h2 - 1;
- param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
- param->f_div.pass[1].h_phs = val - (val1 << 8);
- param->f_div.pass[1].src_hps = upper_h1 - o;
- param->f_div.pass[1].src_hsz = upper_h2 + o;
-
- return 0;
-}
-
-static int
-resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_config_params *user_config;
- struct v4l2_mbus_framefmt *informat;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
- user_config = &resizer->config.user_config;
- param->rsz_common.vps = param->user_config.vst;
- param->rsz_common.hps = param->user_config.hst;
-
- if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
- param->rsz_common.hsz = ((informat->width - 1) *
- IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev);
- else
- param->rsz_common.hsz = informat->width - 1;
-
- if (informat->field == V4L2_FIELD_INTERLACED)
- param->rsz_common.vsz = (informat->height - 1) >> 1;
- else
- param->rsz_common.vsz = informat->height - 1;
-
- param->rsz_common.raw_flip = 0;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF)
- param->rsz_common.source = IPIPEIF_DATA;
- else
- param->rsz_common.source = IPIPE_DATA;
-
- switch (informat->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_422;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_420;
- /* Select y */
- param->rsz_common.y_c = 0;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_UV8_1X8:
- param->rsz_common.src_img_fmt = RSZ_IMG_420;
- /* Select c */
- param->rsz_common.y_c = 1;
- param->rsz_common.raw_flip = 0;
- break;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- param->rsz_common.raw_flip = 1;
- break;
-
- default:
- param->rsz_common.src_img_fmt = RSZ_IMG_422;
- param->rsz_common.source = IPIPE_DATA;
- }
-
- param->rsz_common.yuv_y_min = user_config->yuv_y_min;
- param->rsz_common.yuv_y_max = user_config->yuv_y_max;
- param->rsz_common.yuv_c_min = user_config->yuv_c_min;
- param->rsz_common.yuv_c_max = user_config->yuv_c_max;
- param->rsz_common.out_chr_pos = user_config->out_chr_pos;
- param->rsz_common.rsz_seq_crv = user_config->chroma_sample_even;
-
- return 0;
-}
-static int
-resizer_configure_in_continuous_mode(struct vpfe_resizer_device *resizer)
-{
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct resizer_params *param = &resizer->config;
- struct vpfe_rsz_config_params *cont_config;
- int line_len_c;
- int line_len;
- int ret;
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) {
- dev_err(dev, "enable resizer - Resizer-A\n");
- return -EINVAL;
- }
-
- cont_config = &resizer->config.user_config;
- param->rsz_en[RSZ_A] = ENABLE;
- configure_resizer_out_params(resizer, RSZ_A,
- &cont_config->output1, 1, 0);
- param->rsz_en[RSZ_B] = DISABLE;
- param->oper_mode = RESIZER_MODE_CONTINUOUS;
-
- if (resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
- struct v4l2_mbus_framefmt *outformat2;
-
- param->rsz_en[RSZ_B] = ENABLE;
- outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
- ret = resizer_validate_output_image_format(dev, outformat2,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_B,
- &cont_config->output2, 0, 1);
- if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_B, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_B, DISABLE);
- }
- resizer_configure_common_in_params(resizer);
- ret = resizer_configure_output_win(resizer);
- if (ret)
- return ret;
-
- param->rsz_common.passthrough = cont_config->bypass;
- if (cont_config->bypass)
- resizer_configure_passthru(resizer, 1);
-
- return 0;
-}
-
-static inline int
-resizer_validate_input_image_format(struct device *dev,
- u32 pix,
- int width, int height, int *line_len)
-{
- int val;
-
- if (pix != MEDIA_BUS_FMT_UYVY8_2X8 &&
- pix != MEDIA_BUS_FMT_Y8_1X8 &&
- pix != MEDIA_BUS_FMT_UV8_1X8 &&
- pix != MEDIA_BUS_FMT_SGRBG12_1X12) {
- dev_err(dev,
- "resizer validate output: pix format not supported, %d\n", pix);
- return -EINVAL;
- }
-
- if (!width || !height) {
- dev_err(dev,
- "resizer validate input: invalid width or height\n");
- return -EINVAL;
- }
-
- if (pix == MEDIA_BUS_FMT_UV8_1X8)
- resizer_calculate_line_length(pix, width,
- height, &val, line_len);
- else
- resizer_calculate_line_length(pix, width,
- height, line_len, &val);
-
- return 0;
-}
-
-static int
-resizer_validate_decimation(struct device *dev, enum ipipeif_decimation dec_en,
- unsigned char rsz, unsigned char frame_div_mode_en,
- int width)
-{
- if (dec_en && frame_div_mode_en) {
- dev_err(dev,
- "dec_en & frame_div_mode_en can not enabled simultaneously\n");
- return -EINVAL;
- }
-
- if (frame_div_mode_en) {
- dev_err(dev, "frame_div_mode mode not supported\n");
- return -EINVAL;
- }
-
- if (!dec_en)
- return 0;
-
- if (width <= VPFE_IPIPE_MAX_INPUT_WIDTH) {
- dev_err(dev,
- "image width to be more than %d for decimation\n",
- VPFE_IPIPE_MAX_INPUT_WIDTH);
- return -EINVAL;
- }
-
- if (rsz < IPIPEIF_RSZ_MIN || rsz > IPIPEIF_RSZ_MAX) {
- dev_err(dev, "rsz range is %d to %d\n",
- IPIPEIF_RSZ_MIN, IPIPEIF_RSZ_MAX);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* resizer_calculate_normal_f_div_param() - Algorithm to calculate the frame
- * division parameters for the resizer
- * in normal mode.
- */
-static int
-resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
- int output_width, struct resizer_scale_param *param)
-{
- /* rsz = R, input_width = H, output width = h in the equation */
- unsigned int val1;
- unsigned int rsz;
- unsigned int val;
- unsigned int h1;
- unsigned int h2;
- unsigned int o;
-
- if (output_width > input_width) {
- dev_err(dev, "frame div mode is used for scale down only\n");
- return -EINVAL;
- }
-
- rsz = (input_width << 8) / output_width;
- val = rsz << 1;
- val = ((input_width << 8) / val) + 1;
- o = 14;
- if (!(val % 2)) {
- h1 = val;
- } else {
- val = input_width << 7;
- val -= rsz >> 1;
- val /= rsz << 1;
- val <<= 1;
- val += 2;
- o += (DIV_ROUND_UP(rsz, 1024) << 1);
- h1 = val;
- }
- h2 = output_width - h1;
- /* phi */
- val = (h1 * rsz) - (((input_width >> 1) - o) << 8);
- /* skip */
- val1 = ((val - 1024) >> 9) << 1;
- param->f_div.num_passes = MAX_PASSES;
- param->f_div.pass[0].o_hsz = h1 - 1;
- param->f_div.pass[0].i_hps = 0;
- param->f_div.pass[0].h_phs = 0;
- param->f_div.pass[0].src_hps = 0;
- param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
- param->f_div.pass[1].o_hsz = h2 - 1;
- param->f_div.pass[1].i_hps = val1;
- param->f_div.pass[1].h_phs = val - (val1 << 8);
- param->f_div.pass[1].src_hps = (input_width >> 2) - o;
- param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
-
- return 0;
-}
-
-static int
-resizer_configure_in_single_shot_mode(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_rsz_config_params *config = &resizer->config.user_config;
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct v4l2_mbus_framefmt *outformat1, *outformat2;
- struct resizer_params *param = &resizer->config;
- struct v4l2_mbus_framefmt *informat;
- int decimation;
- int line_len_c;
- int line_len;
- int rsz;
- int ret;
-
- informat = &resizer->crop_resizer.formats[RESIZER_CROP_PAD_SINK];
- outformat1 = &resizer->resizer_a.formats[RESIZER_PAD_SOURCE];
- outformat2 = &resizer->resizer_b.formats[RESIZER_PAD_SOURCE];
-
- decimation = vpfe_ipipeif_decimation_enabled(vpfe_dev);
- rsz = vpfe_ipipeif_get_rsz(vpfe_dev);
- if (decimation && param->user_config.frame_div_mode_en) {
- dev_err(dev,
- "dec_en & frame_div_mode_en cannot enabled simultaneously\n");
- return -EINVAL;
- }
-
- ret = resizer_validate_decimation(dev, decimation, rsz,
- param->user_config.frame_div_mode_en, informat->width);
- if (ret)
- return -EINVAL;
-
- ret = resizer_validate_input_image_format(dev, informat->code,
- informat->width, informat->height, &line_len);
- if (ret)
- return -EINVAL;
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- param->rsz_en[RSZ_A] = ENABLE;
- ret = resizer_validate_output_image_format(dev, outformat1,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_A].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_A].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_A,
- &param->user_config.output1, 0, 1);
-
- if (outformat1->code == MEDIA_BUS_FMT_SGRBG12_1X12)
- param->rsz_common.raw_flip = 1;
- else
- param->rsz_common.raw_flip = 0;
-
- if (outformat1->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_A, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_A, DISABLE);
- }
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- param->rsz_en[RSZ_B] = ENABLE;
- ret = resizer_validate_output_image_format(dev, outformat2,
- &line_len, &line_len_c);
- if (ret)
- return ret;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_y = line_len;
- param->ext_mem_param[RSZ_B].rsz_sdr_oft_c = line_len_c;
- configure_resizer_out_params(resizer, RSZ_B,
- &param->user_config.output2, 0, 1);
- if (outformat2->code == MEDIA_BUS_FMT_YDYUYDYV8_1X16)
- resizer_enable_422_420_conversion(param,
- RSZ_B, ENABLE);
- else
- resizer_enable_422_420_conversion(param,
- RSZ_B, DISABLE);
- }
-
- resizer_configure_common_in_params(resizer);
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- resizer_calculate_resize_ratios(resizer, RSZ_A);
- resizer_calculate_sdram_offsets(resizer, RSZ_A);
- /* Overriding resize ratio calculation */
- if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
- param->rsz_rsc_param[RSZ_A].v_dif =
- (((informat->height + 1) * 2) * 256) /
- (param->rsz_rsc_param[RSZ_A].o_vsz + 1);
- }
- }
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- resizer_calculate_resize_ratios(resizer, RSZ_B);
- resizer_calculate_sdram_offsets(resizer, RSZ_B);
- /* Overriding resize ratio calculation */
- if (informat->code == MEDIA_BUS_FMT_UV8_1X8) {
- param->rsz_rsc_param[RSZ_B].v_dif =
- (((informat->height + 1) * 2) * 256) /
- (param->rsz_rsc_param[RSZ_B].o_vsz + 1);
- }
- }
- if (param->user_config.frame_div_mode_en &&
- param->rsz_en[RSZ_A]) {
- if (!param->rsz_rsc_param[RSZ_A].dscale_en)
- ret = resizer_calculate_normal_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_A].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_A]);
- else
- ret = resizer_calculate_down_scale_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_A].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_A]);
- if (ret)
- return -EINVAL;
- }
- if (param->user_config.frame_div_mode_en &&
- param->rsz_en[RSZ_B]) {
- if (!param->rsz_rsc_param[RSZ_B].dscale_en)
- ret = resizer_calculate_normal_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_B].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_B]);
- else
- ret = resizer_calculate_down_scale_f_div_param(dev,
- informat->width,
- param->rsz_rsc_param[RSZ_B].o_vsz + 1,
- &param->rsz_rsc_param[RSZ_B]);
- if (ret)
- return -EINVAL;
- }
- param->rsz_common.passthrough = config->bypass;
- if (config->bypass)
- resizer_configure_passthru(resizer, 1);
- return 0;
-}
-
-static void
-resizer_set_default_configuration(struct vpfe_resizer_device *resizer)
-{
-#define WIDTH_I 640
-#define HEIGHT_I 480
-#define WIDTH_O 640
-#define HEIGHT_O 480
- const struct resizer_params rsz_default_config = {
- .oper_mode = RESIZER_MODE_ONE_SHOT,
- .rsz_common = {
- .vsz = HEIGHT_I - 1,
- .hsz = WIDTH_I - 1,
- .src_img_fmt = RSZ_IMG_422,
- .raw_flip = 1, /* flip preserve Raw format */
- .source = IPIPE_DATA,
- .passthrough = BYPASS_OFF,
- .yuv_y_max = 255,
- .yuv_c_max = 255,
- .rsz_seq_crv = DISABLE,
- .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
- },
- .rsz_rsc_param = {
- {
- .h_flip = DISABLE,
- .v_flip = DISABLE,
- .cen = DISABLE,
- .yen = DISABLE,
- .o_vsz = HEIGHT_O - 1,
- .o_hsz = WIDTH_O - 1,
- .v_dif = 256,
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dif = 256,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- {
- .h_flip = DISABLE,
- .v_flip = DISABLE,
- .cen = DISABLE,
- .yen = DISABLE,
- .o_vsz = HEIGHT_O - 1,
- .o_hsz = WIDTH_O - 1,
- .v_dif = 256,
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dif = 256,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- },
- .rsz2rgb = {
- {
- .rgb_en = DISABLE
- },
- {
- .rgb_en = DISABLE
- }
- },
- .ext_mem_param = {
- {
- .rsz_sdr_oft_y = WIDTH_O << 1,
- .rsz_sdr_ptr_e_y = HEIGHT_O,
- .rsz_sdr_oft_c = WIDTH_O,
- .rsz_sdr_ptr_e_c = HEIGHT_O >> 1,
- },
- {
- .rsz_sdr_oft_y = WIDTH_O << 1,
- .rsz_sdr_ptr_e_y = HEIGHT_O,
- .rsz_sdr_oft_c = WIDTH_O,
- .rsz_sdr_ptr_e_c = HEIGHT_O,
- },
- },
- .rsz_en[0] = ENABLE,
- .rsz_en[1] = DISABLE,
- .user_config = {
- .output1 = {
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- .output2 = {
- .v_typ_y = VPFE_RSZ_INTP_CUBIC,
- .v_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_typ_y = VPFE_RSZ_INTP_CUBIC,
- .h_typ_c = VPFE_RSZ_INTP_CUBIC,
- .h_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- .v_dscale_ave_sz =
- VPFE_IPIPE_DWN_SCALE_1_OVER_2,
- },
- .yuv_y_max = 255,
- .yuv_c_max = 255,
- .out_chr_pos = VPFE_IPIPE_YUV422_CHR_POS_COSITE,
- },
- };
- memcpy(&resizer->config, &rsz_default_config,
- sizeof(struct resizer_params));
-}
-
-/*
- * resizer_set_configuration() - set resizer config
- * @resizer: vpfe resizer device pointer.
- * @chan_config: resizer channel configuration.
- */
-static int
-resizer_set_configuration(struct vpfe_resizer_device *resizer,
- struct vpfe_rsz_config *chan_config)
-{
- if (!chan_config->config)
- resizer_set_default_configuration(resizer);
- else
- if (copy_from_user(&resizer->config.user_config,
- (void __user *)chan_config->config,
- sizeof(struct vpfe_rsz_config_params)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * resizer_get_configuration() - get resizer config
- * @resizer: vpfe resizer device pointer.
- * @channel: image processor logical channel.
- * @chan_config: resizer channel configuration.
- */
-static int
-resizer_get_configuration(struct vpfe_resizer_device *resizer,
- struct vpfe_rsz_config *chan_config)
-{
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
-
- if (!chan_config->config) {
- dev_err(dev, "Resizer channel invalid pointer\n");
- return -EINVAL;
- }
-
- if (copy_to_user((void __user *)chan_config->config,
- (void *)&resizer->config.user_config,
- sizeof(struct vpfe_rsz_config_params))) {
- dev_err(dev, "resizer_get_configuration: Error in copy to user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/*
- * VPFE video operations
- */
-
-/*
- * resizer_a_video_out_queue() - RESIZER-A video out queue
- * @vpfe_dev: vpfe device pointer.
- * @addr: buffer address.
- */
-static int resizer_a_video_out_queue(struct vpfe_device *vpfe_dev,
- unsigned long addr)
-{
- struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
-
- return resizer_set_outaddr(resizer->base_addr,
- &resizer->config, RSZ_A, addr);
-}
-
-/*
- * resizer_b_video_out_queue() - RESIZER-B video out queue
- * @vpfe_dev: vpfe device pointer.
- * @addr: buffer address.
- */
-static int resizer_b_video_out_queue(struct vpfe_device *vpfe_dev,
- unsigned long addr)
-{
- struct vpfe_resizer_device *resizer = &vpfe_dev->vpfe_resizer;
-
- return resizer_set_outaddr(resizer->base_addr,
- &resizer->config, RSZ_B, addr);
-}
-
-static const struct vpfe_video_operations resizer_a_video_ops = {
- .queue = resizer_a_video_out_queue,
-};
-
-static const struct vpfe_video_operations resizer_b_video_ops = {
- .queue = resizer_b_video_out_queue,
-};
-
-static void resizer_enable(struct vpfe_resizer_device *resizer, int en)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- unsigned char val;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
- return;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_IPIPEIF &&
- ipipeif_sink == IPIPEIF_INPUT_MEMORY) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_SRC_EN);
- } while (val);
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_A);
- } while (val);
- }
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE) {
- do {
- val = regr_rsz(resizer->base_addr, RSZ_B);
- } while (val);
- }
- }
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
- rsz_enable(resizer->base_addr, RSZ_A, en);
-
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
- rsz_enable(resizer->base_addr, RSZ_B, en);
-}
-
-
-/*
- * resizer_ss_isr() - resizer module single-shot buffer scheduling isr
- * @resizer: vpfe resizer device pointer.
- */
-static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_pipeline *pipe = &video_out->pipe;
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- u32 val;
-
- if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
- return;
-
- if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
- val = vpss_dma_complete_interrupt();
- if (val != 0 && val != 2)
- return;
- }
-
- if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
- spin_lock(&video_out->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_out);
- video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_out);
- spin_unlock(&video_out->dma_queue_lock);
- }
-
- /* If resizer B is enabled */
- if (pipe->output_num > 1 && resizer->resizer_b.output ==
- RESIZER_OUTPUT_MEMORY) {
- spin_lock(&video_out2->dma_queue_lock);
- vpfe_video_process_buffer_complete(video_out2);
- video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
- vpfe_video_schedule_next_buffer(video_out2);
- spin_unlock(&video_out2->dma_queue_lock);
- }
-
- /* start HW if buffers are queued */
- if (vpfe_video_is_pipe_ready(pipe) &&
- resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
- resizer_enable(resizer, 1);
- vpfe_ipipe_enable(vpfe_dev, 1);
- vpfe_ipipeif_enable(vpfe_dev);
- }
-}
-
-/*
- * vpfe_resizer_buffer_isr() - resizer module buffer scheduling isr
- * @resizer: vpfe resizer device pointer.
- */
-void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_pipeline *pipe = &resizer->resizer_a.video_out.pipe;
- enum v4l2_field field;
- int fid;
-
- if (!video_out->started)
- return;
-
- if (resizer->crop_resizer.input == RESIZER_CROP_INPUT_NONE)
- return;
-
- field = video_out->fmt.fmt.pix.field;
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- if (video_out->cur_frm != video_out->next_frm) {
- vpfe_video_process_buffer_complete(video_out);
- if (pipe->output_num > 1)
- vpfe_video_process_buffer_complete(video_out2);
- }
-
- video_out->skip_frame_count--;
- if (!video_out->skip_frame_count) {
- video_out->skip_frame_count =
- video_out->skip_frame_count_init;
- rsz_src_enable(resizer->base_addr, 1);
- } else {
- rsz_src_enable(resizer->base_addr, 0);
- }
- return;
- }
-
- /* handle interlaced frame capture */
- fid = vpfe_isif_get_fid(vpfe_dev);
-
- /* switch the software maintained field id */
- video_out->field_id ^= 1;
- if (fid == video_out->field_id) {
- /*
- * we are in-sync here, continue.
- * One frame is just being captured. If the
- * next frame is available, release the current
- * frame and move on
- */
- if (fid == 0 && video_out->cur_frm != video_out->next_frm) {
- vpfe_video_process_buffer_complete(video_out);
- if (pipe->output_num > 1)
- vpfe_video_process_buffer_complete(video_out2);
- }
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May lose one frame
- */
- video_out->field_id = fid;
- }
-}
-
-/*
- * vpfe_resizer_dma_isr() - resizer module dma isr
- * @resizer: vpfe resizer device pointer.
- */
-void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_video_device *video_out2 = &resizer->resizer_b.video_out;
- struct vpfe_video_device *video_out = &resizer->resizer_a.video_out;
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- struct vpfe_pipeline *pipe = &video_out->pipe;
- int schedule_capture = 0;
- enum v4l2_field field;
- int fid;
-
- if (!video_out->started)
- return;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_SINGLESHOT) {
- resizer_ss_isr(resizer);
- return;
- }
-
- field = video_out->fmt.fmt.pix.field;
- if (field == V4L2_FIELD_NONE) {
- if (!list_empty(&video_out->dma_queue) &&
- video_out->cur_frm == video_out->next_frm)
- schedule_capture = 1;
- } else {
- fid = vpfe_isif_get_fid(vpfe_dev);
- if (fid == video_out->field_id) {
- /* we are in-sync here, continue */
- if (fid == 1 && !list_empty(&video_out->dma_queue) &&
- video_out->cur_frm == video_out->next_frm)
- schedule_capture = 1;
- }
- }
-
- if (!schedule_capture)
- return;
-
- spin_lock(&video_out->dma_queue_lock);
- vpfe_video_schedule_next_buffer(video_out);
- spin_unlock(&video_out->dma_queue_lock);
- if (pipe->output_num > 1) {
- spin_lock(&video_out2->dma_queue_lock);
- vpfe_video_schedule_next_buffer(video_out2);
- spin_unlock(&video_out2->dma_queue_lock);
- }
-}
-
-/*
- * V4L2 subdev operations
- */
-
-/*
- * resizer_ioctl() - Handle resizer module private ioctls
- * @sd: pointer to v4l2 subdev structure
- * @cmd: configuration command
- * @arg: configuration argument
- */
-static long resizer_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct device *dev = resizer->crop_resizer.subdev.v4l2_dev->dev;
- struct vpfe_rsz_config *user_config;
- int ret = -ENOIOCTLCMD;
-
- if (&resizer->crop_resizer.subdev != sd)
- return ret;
-
- switch (cmd) {
- case VIDIOC_VPFE_RSZ_S_CONFIG:
- user_config = arg;
- ret = resizer_set_configuration(resizer, user_config);
- break;
-
- case VIDIOC_VPFE_RSZ_G_CONFIG:
- user_config = arg;
- if (!user_config->config) {
- dev_err(dev, "error in VIDIOC_VPFE_RSZ_G_CONFIG\n");
- return -EINVAL;
- }
- ret = resizer_get_configuration(resizer, user_config);
- break;
- }
- return ret;
-}
-
-static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_sink = vpfe_dev->vpfe_ipipeif.input;
- u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
- struct resizer_params *param = &resizer->config;
- int ret = 0;
-
- if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY ||
- resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
- if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
- ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
- ret = resizer_configure_in_single_shot_mode(resizer);
- else
- ret = resizer_configure_in_continuous_mode(resizer);
- if (ret)
- return ret;
- ret = config_rsz_hw(resizer, param);
- }
- return ret;
-}
-
-/*
- * resizer_set_stream() - Enable/Disable streaming on resizer subdev
- * @sd: pointer to v4l2 subdev structure
- * @enable: 1 == Enable, 0 == Disable
- */
-static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
-
- if (&resizer->crop_resizer.subdev != sd)
- return 0;
-
- if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY)
- return 0;
-
- switch (enable) {
- case 1:
- if (resizer_do_hw_setup(resizer) < 0)
- return -EINVAL;
- resizer_enable(resizer, enable);
- break;
-
- case 0:
- resizer_enable(resizer, enable);
- break;
- }
-
- return 0;
-}
-
-/*
- * __resizer_get_format() - helper function for getting resizer format
- * @sd: pointer to subdev.
- * @cfg: V4L2 subdev pad config
- * @pad: pad number.
- * @which: wanted subdev format.
- * Return wanted mbus frame format.
- */
-static struct v4l2_mbus_framefmt *
-__resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
-
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, cfg, pad);
- if (&resizer->crop_resizer.subdev == sd)
- return &resizer->crop_resizer.formats[pad];
- if (&resizer->resizer_a.subdev == sd)
- return &resizer->resizer_a.formats[pad];
- if (&resizer->resizer_b.subdev == sd)
- return &resizer->resizer_b.formats[pad];
- return NULL;
-}
-
-/*
- * resizer_try_format() - Handle try format by pad subdev method
- * @sd: pointer to subdev.
- * @cfg: V4L2 subdev pad config
- * @pad: pad num.
- * @fmt: pointer to v4l2 format structure.
- * @which: wanted subdev format.
- */
-static void
-resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- unsigned int max_out_height;
- unsigned int max_out_width;
- unsigned int i;
-
- if ((&resizer->resizer_a.subdev == sd && pad == RESIZER_PAD_SINK) ||
- (&resizer->resizer_b.subdev == sd && pad == RESIZER_PAD_SINK) ||
- (&resizer->crop_resizer.subdev == sd &&
- (pad == RESIZER_CROP_PAD_SOURCE ||
- pad == RESIZER_CROP_PAD_SOURCE2 || pad == RESIZER_CROP_PAD_SINK))) {
- for (i = 0; i < ARRAY_SIZE(resizer_input_formats); i++) {
- if (fmt->code == resizer_input_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_input_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH,
- MAX_IN_WIDTH);
- fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT,
- MAX_IN_HEIGHT);
- } else if (&resizer->resizer_a.subdev == sd &&
- pad == RESIZER_PAD_SOURCE) {
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_A;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_A;
-
- for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
- if (fmt->code == resizer_output_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
- max_out_width);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
- max_out_height);
- } else if (&resizer->resizer_b.subdev == sd &&
- pad == RESIZER_PAD_SOURCE) {
- max_out_width = IPIPE_MAX_OUTPUT_WIDTH_B;
- max_out_height = IPIPE_MAX_OUTPUT_HEIGHT_B;
-
- for (i = 0; i < ARRAY_SIZE(resizer_output_formats); i++) {
- if (fmt->code == resizer_output_formats[i])
- break;
- }
- /* If not found, use UYVY as default */
- if (i >= ARRAY_SIZE(resizer_output_formats))
- fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-
- fmt->width = clamp_t(u32, fmt->width, MIN_OUT_WIDTH,
- max_out_width);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, fmt->height, MIN_OUT_HEIGHT,
- max_out_height);
- }
-}
-
-/*
- * resizer_set_format() - Handle set format by pads subdev method
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int resizer_set_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- resizer_try_format(sd, cfg, fmt->pad, &fmt->format, fmt->which);
- *format = fmt->format;
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- if (&resizer->crop_resizer.subdev == sd) {
- if (fmt->pad == RESIZER_CROP_PAD_SINK) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE &&
- resizer->crop_resizer.output == RESIZER_A) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- resizer->crop_resizer.
- formats[RESIZER_CROP_PAD_SOURCE2] = fmt->format;
- } else if (fmt->pad == RESIZER_CROP_PAD_SOURCE2 &&
- resizer->crop_resizer.output2 == RESIZER_B) {
- resizer->crop_resizer.formats[fmt->pad] = fmt->format;
- resizer->crop_resizer.
- formats[RESIZER_CROP_PAD_SOURCE] = fmt->format;
- } else {
- return -EINVAL;
- }
- } else if (&resizer->resizer_a.subdev == sd) {
- if (fmt->pad == RESIZER_PAD_SINK)
- resizer->resizer_a.formats[fmt->pad] = fmt->format;
- else if (fmt->pad == RESIZER_PAD_SOURCE)
- resizer->resizer_a.formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
- } else if (&resizer->resizer_b.subdev == sd) {
- if (fmt->pad == RESIZER_PAD_SINK)
- resizer->resizer_b.formats[fmt->pad] = fmt->format;
- else if (fmt->pad == RESIZER_PAD_SOURCE)
- resizer->resizer_b.formats[fmt->pad] = fmt->format;
- else
- return -EINVAL;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * resizer_get_format() - Retrieve the video format on a pad
- * @sd: pointer to v4l2 subdev structure.
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
-static int resizer_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct v4l2_mbus_framefmt *format;
-
- format = __resizer_get_format(sd, cfg, fmt->pad, fmt->which);
- if (format == NULL)
- return -EINVAL;
-
- fmt->format = *format;
-
- return 0;
-}
-
-/*
- * resizer_enum_frame_size() - enum frame sizes on pads
- * @sd: Pointer to subdevice.
- * @cfg: V4L2 subdev pad config
- * @fse: pointer to v4l2_subdev_frame_size_enum structure.
- */
-static int resizer_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct v4l2_mbus_framefmt format;
-
- if (fse->index != 0)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = 1;
- format.height = 1;
- resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
- fse->min_width = format.width;
- fse->min_height = format.height;
-
- if (format.code != fse->code)
- return -EINVAL;
-
- format.code = fse->code;
- format.width = -1;
- format.height = -1;
- resizer_try_format(sd, cfg, fse->pad, &format, fse->which);
- fse->max_width = format.width;
- fse->max_height = format.height;
-
- return 0;
-}
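For illustration only (not part of the removed file): the 1x1 / -1x-1 probing above is what userspace observes through VIDIOC_SUBDEV_ENUM_FRAME_SIZE. A hedged sketch, assuming the resizer subdev is exposed at a /dev/v4l-subdev* node and that the uapi headers provide the `which` field:

	/* query the source-pad frame size limits of the resizer subdev */
	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/media-bus-format.h>
	#include <linux/v4l2-subdev.h>

	int main(void)
	{
		struct v4l2_subdev_frame_size_enum fse = {
			.index = 0,
			.pad   = 1,			/* RESIZER_PAD_SOURCE */
			.code  = MEDIA_BUS_FMT_UYVY8_2X8,
			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		};
		int fd = open("/dev/v4l-subdev2", O_RDONLY);	/* node number is an assumption */

		if (fd < 0 || ioctl(fd, VIDIOC_SUBDEV_ENUM_FRAME_SIZE, &fse) < 0)
			return 1;
		printf("min %ux%u max %ux%u\n", fse.min_width, fse.min_height,
		       fse.max_width, fse.max_height);
		return 0;
	}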
-
-/*
- * resizer_enum_mbus_code() - enum mbus codes for pads
- * @sd: Pointer to subdevice.
- * @cfg: V4L2 subdev pad config
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- */
-static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad == RESIZER_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(resizer_input_formats))
- return -EINVAL;
-
- code->code = resizer_input_formats[code->index];
- } else if (code->pad == RESIZER_PAD_SOURCE) {
- if (code->index >= ARRAY_SIZE(resizer_output_formats))
- return -EINVAL;
-
- code->code = resizer_output_formats[code->index];
- }
-
- return 0;
-}
-
-/*
- * resizer_init_formats() - Initialize formats on all pads
- * @sd: Pointer to subdevice.
- * @fh: V4L2 subdev file handle.
- *
- * Initialize all pad formats with default values. Try formats are
- * initialized on the file handle.
- */
-static int resizer_init_formats(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh)
-{
- __u32 which = V4L2_SUBDEV_FORMAT_TRY;
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct v4l2_subdev_format format;
-
- if (&resizer->crop_resizer.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_CROP_PAD_SOURCE2;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
- } else if (&resizer->resizer_a.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_A;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_A;
- resizer_set_format(sd, fh->pad, &format);
- } else if (&resizer->resizer_b.subdev == sd) {
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SINK;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- format.format.width = MAX_IN_WIDTH;
- format.format.height = MAX_IN_HEIGHT;
- resizer_set_format(sd, fh->pad, &format);
-
- memset(&format, 0, sizeof(format));
- format.pad = RESIZER_PAD_SOURCE;
- format.which = which;
- format.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
- format.format.width = IPIPE_MAX_OUTPUT_WIDTH_B;
- format.format.height = IPIPE_MAX_OUTPUT_HEIGHT_B;
- resizer_set_format(sd, fh->pad, &format);
- }
-
- return 0;
-}
-
-/* subdev core operations */
-static const struct v4l2_subdev_core_ops resizer_v4l2_core_ops = {
- .ioctl = resizer_ioctl,
-};
-
-/* subdev internal operations */
-static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
- .open = resizer_init_formats,
-};
-
-/* subdev video operations */
-static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
- .s_stream = resizer_set_stream,
-};
-
-/* subdev pad operations */
-static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
- .enum_mbus_code = resizer_enum_mbus_code,
- .enum_frame_size = resizer_enum_frame_size,
- .get_fmt = resizer_get_format,
- .set_fmt = resizer_set_format,
-};
-
-/* subdev operations */
-static const struct v4l2_subdev_ops resizer_v4l2_ops = {
- .core = &resizer_v4l2_core_ops,
- .video = &resizer_v4l2_video_ops,
- .pad = &resizer_v4l2_pad_ops,
-};
-
-/*
- * Media entity operations
- */
-
-/*
- * resizer_link_setup() - Setup resizer connections
- * @entity: Pointer to media entity structure
- * @local: Pointer to local media pad
- * @remote: Pointer to remote media pad
- * @flags: Link flags
- * return -EINVAL or zero on success
- */
-static int resizer_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
- struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- u16 ipipeif_source = vpfe_dev->vpfe_ipipeif.output;
- u16 ipipe_source = vpfe_dev->vpfe_ipipe.output;
- unsigned int index = local->index;
-
- /* FIXME: this is actually a hack - 2 << 16 is ORed in to flag a subdev remote */
- if (is_media_entity_v4l2_subdev(remote->entity))
- index |= 2 << 16;
-
- if (&resizer->crop_resizer.subdev == sd) {
- switch (index) {
- case RESIZER_CROP_PAD_SINK | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_NONE;
- break;
- }
-
- if (resizer->crop_resizer.input !=
- RESIZER_CROP_INPUT_NONE)
- return -EBUSY;
- if (ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_IPIPEIF;
- else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
- resizer->crop_resizer.input =
- RESIZER_CROP_INPUT_IPIPE;
- else
- return -EINVAL;
- break;
-
- case RESIZER_CROP_PAD_SOURCE | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.output =
- RESIZER_CROP_OUTPUT_NONE;
- break;
- }
- if (resizer->crop_resizer.output !=
- RESIZER_CROP_OUTPUT_NONE)
- return -EBUSY;
- resizer->crop_resizer.output = RESIZER_A;
- break;
-
- case RESIZER_CROP_PAD_SOURCE2 | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->crop_resizer.output2 =
- RESIZER_CROP_OUTPUT_NONE;
- break;
- }
- if (resizer->crop_resizer.output2 !=
- RESIZER_CROP_OUTPUT_NONE)
- return -EBUSY;
- resizer->crop_resizer.output2 = RESIZER_B;
- break;
-
- default:
- return -EINVAL;
- }
- } else if (&resizer->resizer_a.subdev == sd) {
- switch (index) {
- case RESIZER_PAD_SINK | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_a.input = RESIZER_INPUT_NONE;
- break;
- }
- if (resizer->resizer_a.input != RESIZER_INPUT_NONE)
- return -EBUSY;
- resizer->resizer_a.input = RESIZER_INPUT_CROP_RESIZER;
- break;
-
- case RESIZER_PAD_SOURCE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_a.output = RESIZER_OUTPUT_NONE;
- break;
- }
- if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
- return -EBUSY;
- resizer->resizer_a.output = RESIZER_OUTPUT_MEMORY;
- break;
-
- default:
- return -EINVAL;
- }
- } else if (&resizer->resizer_b.subdev == sd) {
- switch (index) {
- case RESIZER_PAD_SINK | 2 << 16:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_b.input = RESIZER_INPUT_NONE;
- break;
- }
- if (resizer->resizer_b.input != RESIZER_INPUT_NONE)
- return -EBUSY;
- resizer->resizer_b.input = RESIZER_INPUT_CROP_RESIZER;
- break;
-
- case RESIZER_PAD_SOURCE:
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- resizer->resizer_b.output = RESIZER_OUTPUT_NONE;
- break;
- }
- if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
- return -EBUSY;
- resizer->resizer_b.output = RESIZER_OUTPUT_MEMORY;
- break;
-
- default:
- return -EINVAL;
- }
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct media_entity_operations resizer_media_ops = {
- .link_setup = resizer_link_setup,
-};
-
-/*
- * vpfe_resizer_unregister_entities() - Unregister entity
- * @vpfe_rsz - pointer to resizer subdevice structure.
- */
-void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
-{
- /* unregister video devices */
- vpfe_video_unregister(&vpfe_rsz->resizer_a.video_out);
- vpfe_video_unregister(&vpfe_rsz->resizer_b.video_out);
-
- /* unregister subdev */
- v4l2_device_unregister_subdev(&vpfe_rsz->crop_resizer.subdev);
- v4l2_device_unregister_subdev(&vpfe_rsz->resizer_a.subdev);
- v4l2_device_unregister_subdev(&vpfe_rsz->resizer_b.subdev);
- /* cleanup entity */
- media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
-}
-
-/*
- * vpfe_resizer_register_entities() - Register entity
- * @resizer - pointer to resizer device.
- * @vdev: pointer to v4l2 device structure.
- */
-int vpfe_resizer_register_entities(struct vpfe_resizer_device *resizer,
- struct v4l2_device *vdev)
-{
- struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
- unsigned int flags = 0;
- int ret;
-
- /* Register the crop resizer subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->crop_resizer.subdev);
- if (ret < 0) {
- pr_err("Failed to register crop resizer as v4l2-subdev\n");
- return ret;
- }
- /* Register Resizer-A subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->resizer_a.subdev);
- if (ret < 0) {
- pr_err("Failed to register resizer-a as v4l2-subdev\n");
- return ret;
- }
- /* Register Resizer-B subdev */
- ret = v4l2_device_register_subdev(vdev, &resizer->resizer_b.subdev);
- if (ret < 0) {
- pr_err("Failed to register resizer-b as v4l2-subdev\n");
- return ret;
- }
- /* Register video-out device for resizer-a */
- ret = vpfe_video_register(&resizer->resizer_a.video_out, vdev);
- if (ret) {
- pr_err("Failed to register RSZ-A video-out device\n");
- goto out_video_out2_register;
- }
- resizer->resizer_a.video_out.vpfe_dev = vpfe_dev;
-
- /* Register video-out device for resizer-b */
- ret = vpfe_video_register(&resizer->resizer_b.video_out, vdev);
- if (ret) {
- pr_err("Failed to register RSZ-B video-out device\n");
- goto out_video_out2_register;
- }
- resizer->resizer_b.video_out.vpfe_dev = vpfe_dev;
-
- /* create link between Resizer Crop----> Resizer A*/
- ret = media_create_pad_link(&resizer->crop_resizer.subdev.entity, 1,
- &resizer->resizer_a.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer Crop----> Resizer B*/
- ret = media_create_pad_link(&resizer->crop_resizer.subdev.entity, 2,
- &resizer->resizer_b.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer A ----> video out */
- ret = media_create_pad_link(&resizer->resizer_a.subdev.entity, 1,
- &resizer->resizer_a.video_out.video_dev.entity, 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- /* create link between Resizer B ----> video out */
- ret = media_create_pad_link(&resizer->resizer_b.subdev.entity, 1,
- &resizer->resizer_b.video_out.video_dev.entity, 0, flags);
- if (ret < 0)
- goto out_create_link;
-
- return 0;
-
-out_create_link:
- vpfe_video_unregister(&resizer->resizer_b.video_out);
-out_video_out2_register:
- vpfe_video_unregister(&resizer->resizer_a.video_out);
- v4l2_device_unregister_subdev(&resizer->crop_resizer.subdev);
- v4l2_device_unregister_subdev(&resizer->resizer_a.subdev);
- v4l2_device_unregister_subdev(&resizer->resizer_b.subdev);
- media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
- media_entity_cleanup(&resizer->resizer_a.subdev.entity);
- media_entity_cleanup(&resizer->resizer_b.subdev.entity);
- return ret;
-}
-
-/*
- * vpfe_resizer_init() - resizer device initialization.
- * @vpfe_rsz - pointer to resizer device
- * @pdev: platform device pointer.
- */
-int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = &vpfe_rsz->crop_resizer.subdev;
- struct media_pad *pads = &vpfe_rsz->crop_resizer.pads[0];
- struct media_entity *me = &sd->entity;
- resource_size_t res_len;
- struct resource *res;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (!res)
- return -ENOENT;
-
- res_len = resource_size(res);
- res = request_mem_region(res->start, res_len, res->name);
- if (!res)
- return -EBUSY;
-
- vpfe_rsz->base_addr = ioremap_nocache(res->start, res_len);
- if (!vpfe_rsz->base_addr)
- return -EBUSY;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI RESIZER CROP", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_CROP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_CROP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- pads[RESIZER_CROP_PAD_SOURCE2].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->crop_resizer.input = RESIZER_CROP_INPUT_NONE;
- vpfe_rsz->crop_resizer.output = RESIZER_CROP_OUTPUT_NONE;
- vpfe_rsz->crop_resizer.output2 = RESIZER_CROP_OUTPUT_NONE;
- vpfe_rsz->crop_resizer.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_pads_init(me, RESIZER_CROP_PADS_NUM, pads);
- if (ret)
- return ret;
-
- sd = &vpfe_rsz->resizer_a.subdev;
- pads = &vpfe_rsz->resizer_a.pads[0];
- me = &sd->entity;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI RESIZER A", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->resizer_a.input = RESIZER_INPUT_NONE;
- vpfe_rsz->resizer_a.output = RESIZER_OUTPUT_NONE;
- vpfe_rsz->resizer_a.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_pads_init(me, RESIZER_PADS_NUM, pads);
- if (ret)
- return ret;
-
- sd = &vpfe_rsz->resizer_b.subdev;
- pads = &vpfe_rsz->resizer_b.pads[0];
- me = &sd->entity;
-
- v4l2_subdev_init(sd, &resizer_v4l2_ops);
- sd->internal_ops = &resizer_v4l2_internal_ops;
- strscpy(sd->name, "DAVINCI RESIZER B", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for davinci subdevs */
- v4l2_set_subdevdata(sd, vpfe_rsz);
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- pads[RESIZER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- vpfe_rsz->resizer_b.input = RESIZER_INPUT_NONE;
- vpfe_rsz->resizer_b.output = RESIZER_OUTPUT_NONE;
- vpfe_rsz->resizer_b.rsz_device = vpfe_rsz;
- me->ops = &resizer_media_ops;
- ret = media_entity_pads_init(me, RESIZER_PADS_NUM, pads);
- if (ret)
- return ret;
-
- vpfe_rsz->resizer_a.video_out.ops = &resizer_a_video_ops;
- vpfe_rsz->resizer_a.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = vpfe_video_init(&vpfe_rsz->resizer_a.video_out, "RSZ-A");
- if (ret) {
- pr_err("Failed to init RSZ video-out device\n");
- return ret;
- }
- vpfe_rsz->resizer_b.video_out.ops = &resizer_b_video_ops;
- vpfe_rsz->resizer_b.video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = vpfe_video_init(&vpfe_rsz->resizer_b.video_out, "RSZ-B");
- if (ret) {
- pr_err("Failed to init RSZ video-out2 device\n");
- return ret;
- }
- memset(&vpfe_rsz->config, 0, sizeof(struct resizer_params));
-
- return 0;
-}
-
-void
-vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(vpfe_rsz->base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res)
- release_mem_region(res->start,
- resource_size(res));
-}
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
deleted file mode 100644
index 5e31de96b2c9..000000000000
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_DM365_RESIZER_H
-#define _DAVINCI_VPFE_DM365_RESIZER_H
-
-enum resizer_oper_mode {
- RESIZER_MODE_CONTINUOUS = 0,
- RESIZER_MODE_ONE_SHOT = 1,
-};
-
-struct f_div_pass {
- unsigned int o_hsz;
- unsigned int i_hps;
- unsigned int h_phs;
- unsigned int src_hps;
- unsigned int src_hsz;
-};
-
-#define MAX_PASSES 2
-
-struct f_div_param {
- unsigned char en;
- unsigned int num_passes;
- struct f_div_pass pass[MAX_PASSES];
-};
-
-/* Resizer Rescale Parameters */
-struct resizer_scale_param {
- bool h_flip;
- bool v_flip;
- bool cen;
- bool yen;
- unsigned short i_vps;
- unsigned short i_hps;
- unsigned short o_vsz;
- unsigned short o_hsz;
- unsigned short v_phs_y;
- unsigned short v_phs_c;
- unsigned short v_dif;
- /* resize method - Luminance */
- enum vpfe_rsz_intp_t v_typ_y;
- /* resize method - Chrominance */
- enum vpfe_rsz_intp_t v_typ_c;
- /* vertical lpf intensity - Luminance */
- unsigned char v_lpf_int_y;
- /* vertical lpf intensity - Chrominance */
- unsigned char v_lpf_int_c;
- unsigned short h_phs;
- unsigned short h_dif;
- /* resize method - Luminance */
- enum vpfe_rsz_intp_t h_typ_y;
- /* resize method - Chrominance */
- enum vpfe_rsz_intp_t h_typ_c;
- /* horizontal lpf intensity - Luminance */
- unsigned char h_lpf_int_y;
- /* horizontal lpf intensity - Chrominance */
- unsigned char h_lpf_int_c;
- bool dscale_en;
- enum vpfe_rsz_down_scale_ave_sz h_dscale_ave_sz;
- enum vpfe_rsz_down_scale_ave_sz v_dscale_ave_sz;
- /* store the calculated frame division parameter */
- struct f_div_param f_div;
-};
-
-enum resizer_rgb_t {
- OUTPUT_32BIT,
- OUTPUT_16BIT
-};
-
-enum resizer_rgb_msk_t {
- NOMASK = 0,
- MASKLAST2 = 1,
-};
-
-/* Resizer RGB Conversion Parameters */
-struct resizer_rgb {
- bool rgb_en;
- enum resizer_rgb_t rgb_typ;
- enum resizer_rgb_msk_t rgb_msk0;
- enum resizer_rgb_msk_t rgb_msk1;
- unsigned int rgb_alpha_val;
-};
-
-/* Resizer External Memory Parameters */
-struct rsz_ext_mem_param {
- unsigned int rsz_sdr_oft_y;
- unsigned int rsz_sdr_ptr_s_y;
- unsigned int rsz_sdr_ptr_e_y;
- unsigned int rsz_sdr_oft_c;
- unsigned int rsz_sdr_ptr_s_c;
- unsigned int rsz_sdr_ptr_e_c;
- /* offset to be added to buffer start when flipping for y/ycbcr */
- unsigned int flip_ofst_y;
- /* offset to be added to buffer start when flipping for c */
- unsigned int flip_ofst_c;
- /* c offset for YUV 420SP */
- unsigned int c_offset;
- /* User Defined Y offset for YUV 420SP or YUV420ILE data */
- unsigned int user_y_ofst;
- /* User Defined C offset for YUV 420SP data */
- unsigned int user_c_ofst;
-};
-
-enum rsz_data_source {
- IPIPE_DATA,
- IPIPEIF_DATA
-};
-
-enum rsz_src_img_fmt {
- RSZ_IMG_422,
- RSZ_IMG_420
-};
-
-enum rsz_dpaths_bypass_t {
- BYPASS_OFF = 0,
- BYPASS_ON = 1,
-};
-
-struct rsz_common_params {
- unsigned int vps;
- unsigned int vsz;
- unsigned int hps;
- unsigned int hsz;
- /* 420 or 422 */
- enum rsz_src_img_fmt src_img_fmt;
- /* Y or C when src_fmt is 420, 0 - y, 1 - c */
- unsigned char y_c;
- /* flip raw or ycbcr */
- unsigned char raw_flip;
- /* IPIPE or IPIPEIF data */
- enum rsz_data_source source;
- enum rsz_dpaths_bypass_t passthrough;
- unsigned char yuv_y_min;
- unsigned char yuv_y_max;
- unsigned char yuv_c_min;
- unsigned char yuv_c_max;
- bool rsz_seq_crv;
- enum vpfe_chr_pos out_chr_pos;
-};
-
-struct resizer_params {
- enum resizer_oper_mode oper_mode;
- struct rsz_common_params rsz_common;
- struct resizer_scale_param rsz_rsc_param[2];
- struct resizer_rgb rsz2rgb[2];
- struct rsz_ext_mem_param ext_mem_param[2];
- bool rsz_en[2];
- struct vpfe_rsz_config_params user_config;
-};
-
-#define ENABLE 1
-#define DISABLE (!ENABLE)
-
-#define RESIZER_CROP_PAD_SINK 0
-#define RESIZER_CROP_PAD_SOURCE 1
-#define RESIZER_CROP_PAD_SOURCE2 2
-
-#define RESIZER_CROP_PADS_NUM 3
-
-enum resizer_crop_input_entity {
- RESIZER_CROP_INPUT_NONE = 0,
- RESIZER_CROP_INPUT_IPIPEIF = 1,
- RESIZER_CROP_INPUT_IPIPE = 2,
-};
-
-enum resizer_crop_output_entity {
- RESIZER_CROP_OUTPUT_NONE,
- RESIZER_A,
- RESIZER_B,
-};
-
-struct dm365_crop_resizer_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[RESIZER_CROP_PADS_NUM];
- struct v4l2_mbus_framefmt formats[RESIZER_CROP_PADS_NUM];
- enum resizer_crop_input_entity input;
- enum resizer_crop_output_entity output;
- enum resizer_crop_output_entity output2;
- struct vpfe_resizer_device *rsz_device;
-};
-
-#define RESIZER_PAD_SINK 0
-#define RESIZER_PAD_SOURCE 1
-
-#define RESIZER_PADS_NUM 2
-
-enum resizer_input_entity {
- RESIZER_INPUT_NONE = 0,
- RESIZER_INPUT_CROP_RESIZER = 1,
-};
-
-enum resizer_output_entity {
- RESIZER_OUTPUT_NONE = 0,
- RESIZER_OUTPUT_MEMORY = 1,
-};
-
-struct dm365_resizer_device {
- struct v4l2_subdev subdev;
- struct media_pad pads[RESIZER_PADS_NUM];
- struct v4l2_mbus_framefmt formats[RESIZER_PADS_NUM];
- enum resizer_input_entity input;
- enum resizer_output_entity output;
- struct vpfe_video_device video_out;
- struct vpfe_resizer_device *rsz_device;
-};
-
-struct vpfe_resizer_device {
- struct dm365_crop_resizer_device crop_resizer;
- struct dm365_resizer_device resizer_a;
- struct dm365_resizer_device resizer_b;
- struct resizer_params config;
- void __iomem *base_addr;
-};
-
-int vpfe_resizer_init(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev);
-int vpfe_resizer_register_entities(struct vpfe_resizer_device *vpfe_rsz,
- struct v4l2_device *v4l2_dev);
-void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz);
-void vpfe_resizer_cleanup(struct vpfe_resizer_device *vpfe_rsz,
- struct platform_device *pdev);
-void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer);
-void vpfe_resizer_dma_isr(struct vpfe_resizer_device *resizer);
-
-#endif /* _DAVINCI_VPFE_DM365_RESIZER_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe.h b/drivers/staging/media/davinci_vpfe/vpfe.h
deleted file mode 100644
index 1f8e011fc162..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _VPFE_H
-#define _VPFE_H
-
-#ifdef __KERNEL__
-#include <linux/v4l2-subdev.h>
-#include <linux/clk.h>
-#include <linux/i2c.h>
-
-#include <media/davinci/vpfe_types.h>
-
-#define CAPTURE_DRV_NAME "vpfe-capture"
-
-struct vpfe_route {
- __u32 input;
- __u32 output;
-};
-
-enum vpfe_subdev_id {
- VPFE_SUBDEV_TVP5146 = 1,
- VPFE_SUBDEV_MT9T031 = 2,
- VPFE_SUBDEV_TVP7002 = 3,
- VPFE_SUBDEV_MT9P031 = 4,
-};
-
-struct vpfe_ext_subdev_info {
- /* v4l2 subdev */
- struct v4l2_subdev *subdev;
- /* Sub device module name */
- char module_name[32];
- /* Sub device group id */
- int grp_id;
- /* Number of inputs supported */
- int num_inputs;
- /* inputs available at the sub device */
- struct v4l2_input *inputs;
- /* Sub dev routing information for each input */
- struct vpfe_route *routes;
- /* ccdc bus/interface configuration */
- struct vpfe_hw_if_param ccdc_if_params;
- /* i2c subdevice board info */
- struct i2c_board_info board_info;
- /* Is this a camera sub device ? */
- unsigned is_camera:1;
- /* check if sub dev supports routing */
- unsigned can_route:1;
- /* registered ? */
- unsigned registered:1;
-};
-
-struct vpfe_config {
- /* Number of sub devices connected to vpfe */
- int num_subdevs;
- /* information about each subdev */
- struct vpfe_ext_subdev_info *sub_devs;
- /* evm card info */
- char *card_name;
- /* setup function for the input path */
- int (*setup_input)(enum vpfe_subdev_id id);
- /* number of clocks */
- int num_clocks;
- /* clocks used for vpfe capture */
- char *clocks[];
-};
-#endif
-#endif
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
deleted file mode 100644
index 9dc28ffe38d5..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- *
- *
- * Driver name : VPFE Capture driver
- * The VPFE Capture driver allows applications to capture and stream video
- * frames on DaVinci SoCs (DM6446, DM355 etc.) from a YUV source such as the
- * TVP5146, or raw Bayer RGB image data from an image sensor
- * such as Micron's MT9T001, MT9T031 etc.
- *
- * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
- * consists of a Video Processing Front End (VPFE) for capturing
- * video/raw image data and a Video Processing Back End (VPBE) for displaying
- * YUV data through an in-built analog encoder or digital LCD port. This
- * driver is for capture through the VPFE. A typical EVM using these SoCs has
- * the following high-level configuration.
- *
- * decoder(TVP5146/ YUV/
- * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
- * data input | |
- * V |
- * SDRAM |
- * V
- * Image Processor
- * |
- * V
- * SDRAM
- * Data flows from a decoder connected to the VPFE over a YUV embedded
- * (BT.656/BT.1120), separate sync, or raw Bayer RGB interface, and reaches
- * the input of the VPFE through an optional MUX (if more inputs are
- * to be interfaced on the EVM). The input data is first passed through the
- * CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
- * does little or no processing on YUV data and pre-processes raw
- * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
- * Color Space Conversion (CSC), data gain/offset etc. After this, data
- * can be written to SDRAM or passed on to an image processing
- * block such as the IPIPE (on DM355/DM365 only).
- *
- * Features supported
- * - MMAP IO
- * - USERPTR IO
- * - Capture using TVP5146 over BT.656
- * - Support for interfacing decoders using sub device model
- * - Work with DM365 or DM355 or DM6446 CCDC to do Raw Bayer
- * RGB/YUV data capture to SDRAM.
- * - Chaining of Image Processor
- * - SINGLE-SHOT mode
- */
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "vpfe.h"
-#include "vpfe_mc_capture.h"
-
-static bool debug;
-static bool interface;
-
-module_param(interface, bool, 0444);
-module_param(debug, bool, 0644);
-
-/*
- * VPFE capture can be used for capturing video, such as from the TVP5146 or
- * TVP7002, and for capturing raw Bayer data from camera sensors such as the
- * mt9p031. At this point there is a problem with the co-existence of the
- * mt9p031 and tvp5146 due to an i2c address collision, so set the variable
- * below from bootargs to select either video capture or camera capture.
- * interface = 0 - video capture (from TVP514x or such),
- * interface = 1 - camera capture (from mt9p031 or such)
- * Re-visit this when the co-existence issue is fixed.
- */
-MODULE_PARM_DESC(interface, "interface 0-1 (default:0)");
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
-MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-
-/* map mbus_fmt to pixelformat */
-void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix)
-{
- switch (mbus->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_YUYV8_2X8:
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_YUYV10_1X20:
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_SGRBG12_1X12:
- pix->pixelformat = V4L2_PIX_FMT_SBGGR16;
- pix->bytesperline = pix->width * 2;
- break;
-
- case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
- pix->pixelformat = V4L2_PIX_FMT_NV12;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_Y8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_GREY;
- pix->bytesperline = pix->width;
- break;
-
- case MEDIA_BUS_FMT_UV8_1X8:
- pix->pixelformat = V4L2_PIX_FMT_UV8;
- pix->bytesperline = pix->width;
- break;
-
- default:
- pr_err("Invalid mbus code set\n");
- }
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->bytesperline, 32);
- if (pix->pixelformat == V4L2_PIX_FMT_NV12)
- pix->sizeimage = pix->bytesperline * pix->height +
- ((pix->bytesperline * pix->height) >> 1);
- else
- pix->sizeimage = pix->bytesperline * pix->height;
-}
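For reference, a minimal standalone sketch of the NV12 size computation performed by mbus_to_pix() above; the helper name and the 640x480 figures are illustrative, not from the removed file:

	/* pitch aligned to 32 bytes, then Y plane plus half-size interleaved CbCr plane */
	static unsigned int nv12_sizeimage(unsigned int width, unsigned int height)
	{
		unsigned int bytesperline = (width + 31) & ~31U;

		return bytesperline * height + ((bytesperline * height) >> 1);
	}
	/* e.g. nv12_sizeimage(640, 480) == 307200 + 153600 == 460800 bytes */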
-
-/* ISR for VINT0*/
-static irqreturn_t vpfe_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
- vpfe_isif_buffer_isr(&vpfe_dev->vpfe_isif);
- vpfe_resizer_buffer_isr(&vpfe_dev->vpfe_resizer);
- return IRQ_HANDLED;
-}
-
-/* vpfe_vdint1_isr() - isr handler for VINT1 interrupt */
-static irqreturn_t vpfe_vdint1_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
- vpfe_isif_vidint1_isr(&vpfe_dev->vpfe_isif);
- return IRQ_HANDLED;
-}
-
-/* vpfe_imp_dma_isr() - ISR for ipipe dma completion */
-static irqreturn_t vpfe_imp_dma_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "%s\n", __func__);
- vpfe_ipipeif_ss_buffer_isr(&vpfe_dev->vpfe_ipipeif);
- vpfe_resizer_dma_isr(&vpfe_dev->vpfe_resizer);
- return IRQ_HANDLED;
-}
-
-/*
- * vpfe_disable_clock() - Disable clocks for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Disables clocks defined in vpfe configuration. The function
- * assumes that at least one clock is to be defined which is
- * true as of now.
- */
-static void vpfe_disable_clock(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
- int i;
-
- for (i = 0; i < vpfe_cfg->num_clocks; i++) {
- clk_disable_unprepare(vpfe_dev->clks[i]);
- clk_put(vpfe_dev->clks[i]);
- }
- kzfree(vpfe_dev->clks);
- v4l2_info(vpfe_dev->pdev->driver, "vpfe capture clocks disabled\n");
-}
-
-/*
- * vpfe_enable_clock() - Enable clocks for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Enables clocks defined in vpfe configuration. The function
- * assumes that at least one clock is to be defined which is
- * true as of now.
- */
-static int vpfe_enable_clock(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_config *vpfe_cfg = vpfe_dev->cfg;
- int ret = -EFAULT;
- int i;
-
- if (!vpfe_cfg->num_clocks)
- return 0;
-
- vpfe_dev->clks = kcalloc(vpfe_cfg->num_clocks,
- sizeof(*vpfe_dev->clks), GFP_KERNEL);
- if (!vpfe_dev->clks)
- return -ENOMEM;
-
- for (i = 0; i < vpfe_cfg->num_clocks; i++) {
- if (vpfe_cfg->clocks[i] == NULL) {
- v4l2_err(vpfe_dev->pdev->driver,
- "clock %s is not defined in vpfe config\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- vpfe_dev->clks[i] =
- clk_get(vpfe_dev->pdev, vpfe_cfg->clocks[i]);
- if (IS_ERR(vpfe_dev->clks[i])) {
- v4l2_err(vpfe_dev->pdev->driver,
- "Failed to get clock %s\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- if (clk_prepare_enable(vpfe_dev->clks[i])) {
- v4l2_err(vpfe_dev->pdev->driver,
- "vpfe clock %s not enabled\n",
- vpfe_cfg->clocks[i]);
- goto out;
- }
-
- v4l2_info(vpfe_dev->pdev->driver, "vpss clock %s enabled",
- vpfe_cfg->clocks[i]);
- }
-
- return 0;
-out:
- for (i = 0; i < vpfe_cfg->num_clocks; i++)
- if (!IS_ERR(vpfe_dev->clks[i])) {
- clk_disable_unprepare(vpfe_dev->clks[i]);
- clk_put(vpfe_dev->clks[i]);
- }
-
- v4l2_err(vpfe_dev->pdev->driver, "Failed to enable clocks\n");
- kzfree(vpfe_dev->clks);
-
- return ret;
-}
-
-/*
- * vpfe_detach_irq() - Detach IRQs for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Detach all IRQs defined in vpfe configuration.
- */
-static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
-{
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
- free_irq(vpfe_dev->imp_dma_irq, vpfe_dev);
-}
-
-/*
- * vpfe_attach_irq() - Attach IRQs for vpfe capture driver
- * @vpfe_dev - ptr to vpfe capture device
- *
- * Attach all IRQs defined in vpfe configuration.
- */
-static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
-{
- int ret;
-
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
- "vpfe_capture0", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting VINT0 interrupt\n");
- return ret;
- }
-
- ret = request_irq(vpfe_dev->ccdc_irq1, vpfe_vdint1_isr, 0,
- "vpfe_capture1", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting VINT1 interrupt\n");
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- return ret;
- }
-
- ret = request_irq(vpfe_dev->imp_dma_irq, vpfe_imp_dma_isr,
- 0, "Imp_Sdram_Irq", vpfe_dev);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error: requesting IMP IRQ interrupt\n");
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * register_i2c_devices() - register all i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- *
- * register all i2c v4l2 subdevs
- */
-static int register_i2c_devices(struct vpfe_device *vpfe_dev)
-{
- struct vpfe_ext_subdev_info *sdinfo;
- struct vpfe_config *vpfe_cfg;
- struct i2c_adapter *i2c_adap;
- unsigned int num_subdevs;
- int ret;
- int i;
- int k;
-
- vpfe_cfg = vpfe_dev->cfg;
- i2c_adap = i2c_get_adapter(1);
- num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd =
- kcalloc(num_subdevs, sizeof(struct v4l2_subdev *),
- GFP_KERNEL);
- if (!vpfe_dev->sd)
- return -ENOMEM;
-
- for (i = 0, k = 0; i < num_subdevs; i++) {
- sdinfo = &vpfe_cfg->sub_devs[i];
- /*
- * register subdevices based on interface setting. Currently
- * tvp5146 and mt9p031 cannot co-exist due to i2c address
- * conflicts. So only one of them is registered. Re-visit this
- * once we have support for i2c switch handling in i2c driver
- * framework
- */
- if (interface == sdinfo->is_camera) {
- /* setup input path */
- if (vpfe_cfg->setup_input &&
- vpfe_cfg->setup_input(sdinfo->grp_id) < 0) {
- ret = -EFAULT;
- v4l2_info(&vpfe_dev->v4l2_dev,
- "could not setup input for %s\n",
- sdinfo->module_name);
- goto probe_sd_out;
- }
- /* Load up the subdevice */
- vpfe_dev->sd[k] =
- v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
- i2c_adap, &sdinfo->board_info,
- NULL);
- if (vpfe_dev->sd[k]) {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- sdinfo->module_name);
-
- vpfe_dev->sd[k]->grp_id = sdinfo->grp_id;
- k++;
-
- sdinfo->registered = 1;
- }
- } else {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s is not registered\n",
- sdinfo->module_name);
- }
- }
- vpfe_dev->num_ext_subdevs = k;
-
- return 0;
-
-probe_sd_out:
- kzfree(vpfe_dev->sd);
-
- return ret;
-}
-
-/*
- * vpfe_register_entities() - register all v4l2 subdevs and media entities
- * @vpfe_dev - ptr to vpfe capture device
- *
- * register all v4l2 subdevs and media entities, and create links
- * between entities
- */
-static int vpfe_register_entities(struct vpfe_device *vpfe_dev)
-{
- unsigned int flags = 0;
- int ret;
- int i;
-
- /* register i2c devices first */
- ret = register_i2c_devices(vpfe_dev);
- if (ret)
- return ret;
-
- /* register rest of the sub-devs */
- ret = vpfe_isif_register_entities(&vpfe_dev->vpfe_isif,
- &vpfe_dev->v4l2_dev);
- if (ret)
- return ret;
-
- ret = vpfe_ipipeif_register_entities(&vpfe_dev->vpfe_ipipeif,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_isif_register;
-
- ret = vpfe_ipipe_register_entities(&vpfe_dev->vpfe_ipipe,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_ipipeif_register;
-
- ret = vpfe_resizer_register_entities(&vpfe_dev->vpfe_resizer,
- &vpfe_dev->v4l2_dev);
- if (ret)
- goto out_ipipe_register;
-
- /* create links now, starting with external(i2c) entities */
- for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
- /*
- * if entity has no pads (ex: amplifier),
- * can't establish link
- */
- if (vpfe_dev->sd[i]->entity.num_pads) {
- ret = media_create_pad_link(&vpfe_dev->sd[i]->entity,
- 0, &vpfe_dev->vpfe_isif.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
- }
-
- ret = media_create_pad_link(&vpfe_dev->vpfe_isif.subdev.entity, 1,
- &vpfe_dev->vpfe_ipipeif.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_create_pad_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
- &vpfe_dev->vpfe_ipipe.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_create_pad_link(&vpfe_dev->vpfe_ipipe.subdev.entity,
- 1, &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = media_create_pad_link(&vpfe_dev->vpfe_ipipeif.subdev.entity, 1,
- &vpfe_dev->vpfe_resizer.crop_resizer.subdev.entity,
- 0, flags);
- if (ret < 0)
- goto out_resizer_register;
-
- ret = v4l2_device_register_subdev_nodes(&vpfe_dev->v4l2_dev);
- if (ret < 0)
- goto out_resizer_register;
-
- return 0;
-
-out_resizer_register:
- vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
-out_ipipe_register:
- vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
-out_ipipeif_register:
- vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
-out_isif_register:
- vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
-
- return ret;
-}
-
-/*
- * vpfe_unregister_entities() - unregister all v4l2 subdevs and media entities
- * @vpfe_dev - ptr to vpfe capture device
- *
- * unregister all v4l2 subdevs and media entities
- */
-static void vpfe_unregister_entities(struct vpfe_device *vpfe_dev)
-{
- vpfe_isif_unregister_entities(&vpfe_dev->vpfe_isif);
- vpfe_ipipeif_unregister_entities(&vpfe_dev->vpfe_ipipeif);
- vpfe_ipipe_unregister_entities(&vpfe_dev->vpfe_ipipe);
- vpfe_resizer_unregister_entities(&vpfe_dev->vpfe_resizer);
-}
-
-/*
- * vpfe_cleanup_modules() - cleanup all non-i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- * @pdev - pointer to platform device
- *
- * cleanup all v4l2 subdevs
- */
-static void vpfe_cleanup_modules(struct vpfe_device *vpfe_dev,
- struct platform_device *pdev)
-{
- vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
- vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
- vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
- vpfe_resizer_cleanup(&vpfe_dev->vpfe_resizer, pdev);
-}
-
-/*
- * vpfe_initialize_modules() - initialize all non-i2c v4l2 subdevs
- * @vpfe_dev - ptr to vpfe capture device
- * @pdev - pointer to platform device
- *
- * initialize all v4l2 subdevs and media entities
- */
-static int vpfe_initialize_modules(struct vpfe_device *vpfe_dev,
- struct platform_device *pdev)
-{
- int ret;
-
- ret = vpfe_isif_init(&vpfe_dev->vpfe_isif, pdev);
- if (ret)
- return ret;
-
- ret = vpfe_ipipeif_init(&vpfe_dev->vpfe_ipipeif, pdev);
- if (ret)
- goto out_isif_init;
-
- ret = vpfe_ipipe_init(&vpfe_dev->vpfe_ipipe, pdev);
- if (ret)
- goto out_ipipeif_init;
-
- ret = vpfe_resizer_init(&vpfe_dev->vpfe_resizer, pdev);
- if (ret)
- goto out_ipipe_init;
-
- return 0;
-
-out_ipipe_init:
- vpfe_ipipe_cleanup(&vpfe_dev->vpfe_ipipe, pdev);
-out_ipipeif_init:
- vpfe_ipipeif_cleanup(&vpfe_dev->vpfe_ipipeif, pdev);
-out_isif_init:
- vpfe_isif_cleanup(&vpfe_dev->vpfe_isif, pdev);
-
- return ret;
-}
-
-/*
- * vpfe_probe() : vpfe probe function
- * @pdev: platform device pointer
- *
- * This function creates device entries by registering itself to the V4L2
- * driver and initializes the fields of each device object.
- */
-static int vpfe_probe(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev;
- struct resource *res1;
- int ret = -ENOMEM;
-
- vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
- if (!vpfe_dev)
- return ret;
-
- if (pdev->dev.platform_data == NULL) {
- v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- vpfe_dev->cfg = pdev->dev.platform_data;
- if (vpfe_dev->cfg->card_name == NULL ||
- vpfe_dev->cfg->sub_devs == NULL) {
- v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- /* Get VINT0 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT0\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->ccdc_irq0 = res1->start;
-
- /* Get VINT1 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT1\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->ccdc_irq1 = res1->start;
-
- /* Get DMA irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for DMA\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
- vpfe_dev->imp_dma_irq = res1->start;
-
- vpfe_dev->pdev = &pdev->dev;
-
- /* enable vpss clocks */
- ret = vpfe_enable_clock(vpfe_dev);
- if (ret)
- goto probe_free_dev_mem;
-
- ret = vpfe_initialize_modules(vpfe_dev, pdev);
- if (ret)
- goto probe_disable_clock;
-
- vpfe_dev->media_dev.dev = vpfe_dev->pdev;
- strscpy((char *)&vpfe_dev->media_dev.model, "davinci-media",
- sizeof(vpfe_dev->media_dev.model));
-
- ret = media_device_register(&vpfe_dev->media_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register media device.\n");
- goto probe_out_entities_cleanup;
- }
-
- vpfe_dev->v4l2_dev.mdev = &vpfe_dev->media_dev;
- ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver, "Unable to register v4l2 device.\n");
- goto probe_out_media_unregister;
- }
-
- v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, vpfe_dev);
- /* register subdevs/entities */
- ret = vpfe_register_entities(vpfe_dev);
- if (ret)
- goto probe_out_v4l2_unregister;
-
- ret = vpfe_attach_irq(vpfe_dev);
- if (ret)
- goto probe_out_entities_unregister;
-
- return 0;
-
-probe_out_entities_unregister:
- vpfe_unregister_entities(vpfe_dev);
- kzfree(vpfe_dev->sd);
-probe_out_v4l2_unregister:
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
-probe_out_media_unregister:
- media_device_unregister(&vpfe_dev->media_dev);
-probe_out_entities_cleanup:
- vpfe_cleanup_modules(vpfe_dev, pdev);
-probe_disable_clock:
- vpfe_disable_clock(vpfe_dev);
-probe_free_dev_mem:
- kzfree(vpfe_dev);
-
- return ret;
-}
-
-/*
- * vpfe_remove : This function un-registers device from V4L2 driver
- */
-static int vpfe_remove(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
-
- v4l2_info(pdev->dev.driver, "%s\n", __func__);
-
- kzfree(vpfe_dev->sd);
- vpfe_detach_irq(vpfe_dev);
- vpfe_unregister_entities(vpfe_dev);
- vpfe_cleanup_modules(vpfe_dev, pdev);
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
- media_device_unregister(&vpfe_dev->media_dev);
- vpfe_disable_clock(vpfe_dev);
- kzfree(vpfe_dev);
-
- return 0;
-}
-
-static struct platform_driver vpfe_driver = {
- .driver = {
- .name = CAPTURE_DRV_NAME,
- },
- .probe = vpfe_probe,
- .remove = vpfe_remove,
-};
-
-module_platform_driver(vpfe_driver);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
deleted file mode 100644
index fe4a421b5dba..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_MC_CAPTURE_H
-#define _DAVINCI_VPFE_MC_CAPTURE_H
-
-#include "dm365_ipipe.h"
-#include "dm365_ipipeif.h"
-#include "dm365_isif.h"
-#include "dm365_resizer.h"
-#include "vpfe_video.h"
-
-#define VPFE_MAJOR_RELEASE 0
-#define VPFE_MINOR_RELEASE 0
-#define VPFE_BUILD 1
-#define VPFE_CAPTURE_VERSION_CODE ((VPFE_MAJOR_RELEASE << 16) | \
- (VPFE_MINOR_RELEASE << 8) | \
- VPFE_BUILD)
-
-/* IPIPE hardware limits */
-#define IPIPE_MAX_OUTPUT_WIDTH_A 2176
-#define IPIPE_MAX_OUTPUT_WIDTH_B 640
-
-/* Based on max resolution supported. QXGA */
-#define IPIPE_MAX_OUTPUT_HEIGHT_A 1536
-/* Based on max resolution supported. VGA */
-#define IPIPE_MAX_OUTPUT_HEIGHT_B 480
-
-#define to_vpfe_device(ptr_module) \
- container_of(ptr_module, struct vpfe_device, vpfe_##ptr_module)
-#define to_device(ptr_module) \
- (to_vpfe_device(ptr_module)->dev)
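The token pasting in to_vpfe_device() means it only works when the caller's pointer variable is named after the vpfe_device member minus its vpfe_ prefix, as the resizer code does; a minimal sketch of the expansion (variable names as used by the subdev files):

	/* given: struct vpfe_resizer_device *resizer embedded in a vpfe_device */
	struct vpfe_device *vpfe_dev = to_vpfe_device(resizer);
	/* expands to: container_of(resizer, struct vpfe_device, vpfe_resizer) */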
-
-struct vpfe_device {
- /* external registered sub devices */
- struct v4l2_subdev **sd;
- /* number of registered external subdevs */
- unsigned int num_ext_subdevs;
- /* vpfe cfg */
- struct vpfe_config *cfg;
- /* clock ptrs for vpfe capture */
- struct clk **clks;
- /* V4l2 device */
- struct v4l2_device v4l2_dev;
- /* parent device */
- struct device *pdev;
- /* IRQ number for DMA transfer completion at the image processor */
- unsigned int imp_dma_irq;
- /* CCDC IRQs used when CCDC/ISIF output to SDRAM */
- unsigned int ccdc_irq0;
- unsigned int ccdc_irq1;
- /* media device */
- struct media_device media_dev;
- /* ccdc subdevice */
- struct vpfe_isif_device vpfe_isif;
- /* ipipeif subdevice */
- struct vpfe_ipipeif_device vpfe_ipipeif;
- /* ipipe subdevice */
- struct vpfe_ipipe_device vpfe_ipipe;
- /* resizer subdevice */
- struct vpfe_resizer_device vpfe_resizer;
-};
-
-/* File handle structure */
-struct vpfe_fh {
- struct v4l2_fh vfh;
- struct vpfe_video_device *video;
- /* Indicates whether this file handle is doing IO */
- u8 io_allowed;
-};
-
-void mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix);
-
-#endif /* _DAVINCI_VPFE_MC_CAPTURE_H */
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
deleted file mode 100644
index ab6bc452d9f6..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ /dev/null
@@ -1,1646 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <media/v4l2-ioctl.h>
-
-#include "vpfe.h"
-#include "vpfe_mc_capture.h"
-
-static int debug;
-
-/* get v4l2 subdev pointer to external subdev which is active */
-static struct media_entity *vpfe_get_input_entity
- (struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct media_pad *remote;
-
- remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (!remote) {
- pr_err("Invalid media connection to isif/ccdc\n");
- return NULL;
- }
- return remote->entity;
-}
-
-/* updates external subdev(sensor/decoder) which is active */
-static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_config *vpfe_cfg;
- struct v4l2_subdev *subdev;
- struct media_pad *remote;
- int i;
-
- remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
- if (!remote) {
- pr_err("Invalid media connection to isif/ccdc\n");
- return -EINVAL;
- }
-
- subdev = media_entity_to_v4l2_subdev(remote->entity);
- vpfe_cfg = vpfe_dev->pdev->platform_data;
- for (i = 0; i < vpfe_cfg->num_subdevs; i++) {
- if (!strcmp(vpfe_cfg->sub_devs[i].module_name, subdev->name)) {
- video->current_ext_subdev = &vpfe_cfg->sub_devs[i];
- break;
- }
- }
-
- /* if user not linked decoder/sensor to isif/ccdc */
- if (i == vpfe_cfg->num_subdevs) {
- pr_err("Invalid media chain connection to isif/ccdc\n");
- return -EINVAL;
- }
- /* find the v4l2 subdev pointer */
- for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) {
- if (!strcmp(video->current_ext_subdev->module_name,
- vpfe_dev->sd[i]->name))
- video->current_ext_subdev->subdev = vpfe_dev->sd[i];
- }
- return 0;
-}
-
-/* get the subdev which is connected to the output video node */
-static struct v4l2_subdev *
-vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
-{
- struct media_pad *remote = media_entity_remote_pad(&video->pad);
-
- if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
- return NULL;
- if (pad)
- *pad = remote->index;
- return media_entity_to_v4l2_subdev(remote->entity);
-}
-
-/* get the format set at output pad of the adjacent subdev */
-static int
-__vpfe_video_get_format(struct vpfe_video_device *video,
- struct v4l2_format *format)
-{
- struct v4l2_subdev_format fmt;
- struct v4l2_subdev *subdev;
- struct media_pad *remote;
- u32 pad;
- int ret;
-
- subdev = vpfe_video_remote_subdev(video, &pad);
- if (!subdev)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- remote = media_entity_remote_pad(&video->pad);
- fmt.pad = remote->index;
-
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- if (ret == -ENOIOCTLCMD)
- return -EINVAL;
-
- format->type = video->type;
- /* convert mbus_format to v4l2_format */
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(&fmt.format, &format->fmt.pix);
-
- return 0;
-}
-
-/* make a note of pipeline details */
-static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
-{
- struct media_graph graph;
- struct media_entity *entity = &video->video_dev.entity;
- struct media_device *mdev = entity->graph_obj.mdev;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_video_device *far_end = NULL;
- int ret;
-
- pipe->input_num = 0;
- pipe->output_num = 0;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pipe->inputs[pipe->input_num++] = video;
- else
- pipe->outputs[pipe->output_num++] = video;
-
- mutex_lock(&mdev->graph_mutex);
- ret = media_graph_walk_init(&graph, mdev);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
- return -ENOMEM;
- }
- media_graph_walk_start(&graph, entity);
- while ((entity = media_graph_walk_next(&graph))) {
- if (entity == &video->video_dev.entity)
- continue;
- if (!is_media_entity_v4l2_video_device(entity))
- continue;
- far_end = to_vpfe_video(media_entity_to_video_device(entity));
- if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pipe->inputs[pipe->input_num++] = far_end;
- else
- pipe->outputs[pipe->output_num++] = far_end;
- }
- media_graph_walk_cleanup(&graph);
- mutex_unlock(&mdev->graph_mutex);
-
- return 0;
-}
-
-/* update pipe state selected by user */
-static int vpfe_update_pipe_state(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int ret;
-
- ret = vpfe_prepare_pipeline(video);
- if (ret)
- return ret;
-
- /*
- * Find out whether there is any input video node.
- * If there is, the pipeline runs in single-shot mode.
- */
- if (pipe->input_num == 0) {
- pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- ret = vpfe_update_current_ext_subdev(video);
- if (ret) {
- pr_err("Invalid external subdev\n");
- return ret;
- }
- } else {
- pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
- }
- video->initialized = 1;
- video->skip_frame_count = 1;
- video->skip_frame_count_init = 1;
- return 0;
-}
-
-/* checks whether pipeline is ready for enabling */
-int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
-{
- int i;
-
- for (i = 0; i < pipe->input_num; i++)
- if (!pipe->inputs[i]->started ||
- pipe->inputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
- return 0;
- for (i = 0; i < pipe->output_num; i++)
- if (!pipe->outputs[i]->started ||
- pipe->outputs[i]->state != VPFE_VIDEO_BUFFER_QUEUED)
- return 0;
- return 1;
-}
-
-/*
- * Validate a pipeline by checking both ends of all links for format
- * discrepancies.
- *
- * Return 0 if all formats match, or -EPIPE if at least one link is found with
- * different formats on its two ends.
- */
-static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
-{
- struct v4l2_subdev_format fmt_source;
- struct v4l2_subdev_format fmt_sink;
- struct v4l2_subdev *subdev;
- struct media_pad *pad;
- int ret;
-
- /*
- * It should not matter whether we start from outputs[0] or outputs[1],
- * as the general idea is to traverse backwards and the output video
- * node always carries the format of the connected pad.
- */
- subdev = vpfe_video_remote_subdev(pipe->outputs[0], NULL);
- if (!subdev)
- return -EPIPE;
-
- while (1) {
- /* Retrieve the sink format */
- pad = &subdev->entity.pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_sink.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL,
- &fmt_sink);
-
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Retrieve the source format */
- pad = media_entity_remote_pad(pad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- break;
-
- subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt_source.pad = pad->index;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Check if the two ends match */
- if (fmt_source.format.code != fmt_sink.format.code ||
- fmt_source.format.width != fmt_sink.format.width ||
- fmt_source.format.height != fmt_sink.format.height)
- return -EPIPE;
- }
- return 0;
-}
-
-/*
- * vpfe_pipeline_enable() - Enable streaming on a pipeline
- * @vpfe_dev: vpfe device
- * @pipe: vpfe pipeline
- *
- * Walk the entities chain starting at the pipeline output video node and start
- * all modules in the chain in the given mode.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int vpfe_pipeline_enable(struct vpfe_pipeline *pipe)
-{
- struct media_entity *entity;
- struct v4l2_subdev *subdev;
- struct media_device *mdev;
- int ret;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- entity = vpfe_get_input_entity(pipe->outputs[0]);
- else
- entity = &pipe->inputs[0]->video_dev.entity;
-
- mdev = entity->graph_obj.mdev;
- mutex_lock(&mdev->graph_mutex);
- ret = media_graph_walk_init(&pipe->graph, mdev);
- if (ret)
- goto out;
- media_graph_walk_start(&pipe->graph, entity);
- while ((entity = media_graph_walk_next(&pipe->graph))) {
-
- if (!is_media_entity_v4l2_subdev(entity))
- continue;
- subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- break;
- }
-out:
- if (ret)
- media_graph_walk_cleanup(&pipe->graph);
- mutex_unlock(&mdev->graph_mutex);
- return ret;
-}
-
-/*
- * vpfe_pipeline_disable() - Disable streaming on a pipeline
- * @vpfe_dev: vpfe device
- * @pipe: VPFE pipeline
- *
- * Walk the entities chain starting at the pipeline output video node and stop
- * all modules in the chain.
- *
- * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
- * can't be stopped.
- */
-static int vpfe_pipeline_disable(struct vpfe_pipeline *pipe)
-{
- struct media_entity *entity;
- struct v4l2_subdev *subdev;
- struct media_device *mdev;
- int ret = 0;
-
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- entity = vpfe_get_input_entity(pipe->outputs[0]);
- else
- entity = &pipe->inputs[0]->video_dev.entity;
-
- mdev = entity->graph_obj.mdev;
- mutex_lock(&mdev->graph_mutex);
- media_graph_walk_start(&pipe->graph, entity);
-
- while ((entity = media_graph_walk_next(&pipe->graph))) {
-
- if (!is_media_entity_v4l2_subdev(entity))
- continue;
- subdev = media_entity_to_v4l2_subdev(entity);
- ret = v4l2_subdev_call(subdev, video, s_stream, 0);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- break;
- }
- mutex_unlock(&mdev->graph_mutex);
-
- media_graph_walk_cleanup(&pipe->graph);
- return ret ? -ETIMEDOUT : 0;
-}
-
-/*
- * vpfe_pipeline_set_stream() - Enable/disable streaming on a pipeline
- * @vpfe_dev: VPFE device
- * @pipe: VPFE pipeline
- * @state: Stream state (stopped or active)
- *
- * Set the pipeline to the given stream state.
- *
- * Return 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int vpfe_pipeline_set_stream(struct vpfe_pipeline *pipe,
- enum vpfe_pipeline_stream_state state)
-{
- if (state == VPFE_PIPELINE_STREAM_STOPPED)
- return vpfe_pipeline_disable(pipe);
-
- return vpfe_pipeline_enable(pipe);
-}
-
-static int all_videos_stopped(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int i;
-
- for (i = 0; i < pipe->input_num; i++)
- if (pipe->inputs[i]->started)
- return 0;
- for (i = 0; i < pipe->output_num; i++)
- if (pipe->outputs[i]->started)
- return 0;
- return 1;
-}
-
-/*
- * vpfe_open() - open video device
- * @file: file pointer
- *
- * initialize media pipeline state, allocate memory for file handle
- *
- * Return 0 if successful, or a negative error code (-ENOMEM or -ENODEV) otherwise.
- */
-static int vpfe_open(struct file *file)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_fh *handle;
-
- /* Allocate memory for the file handle object */
- handle = kzalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
-
- if (!handle)
- return -ENOMEM;
-
- v4l2_fh_init(&handle->vfh, &video->video_dev);
- v4l2_fh_add(&handle->vfh);
-
- mutex_lock(&video->lock);
- /* If the decoder is not initialized, initialize it */
- if (!video->initialized && vpfe_update_pipe_state(video)) {
- mutex_unlock(&video->lock);
- v4l2_fh_del(&handle->vfh);
- v4l2_fh_exit(&handle->vfh);
- kfree(handle);
- return -ENODEV;
- }
- /* Increment device users counter */
- video->usrs++;
- /* Set io_allowed member to false */
- handle->io_allowed = 0;
- handle->video = video;
- file->private_data = &handle->vfh;
- mutex_unlock(&video->lock);
-
- return 0;
-}
-
-/* get the next buffer available from dma queue */
-static unsigned long
-vpfe_video_get_next_buffer(struct vpfe_video_device *video)
-{
- video->cur_frm = video->next_frm =
- list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
-
- list_del(&video->next_frm->list);
- video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- return vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
-}
-
-/* schedule the next buffer which is available on dma queue */
-void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- if (list_empty(&video->dma_queue))
- return;
-
- video->next_frm = list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
-
- if (video->pipe.state == VPFE_PIPELINE_STREAM_SINGLESHOT)
- video->cur_frm = video->next_frm;
-
- list_del(&video->next_frm->list);
- video->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- addr = vb2_dma_contig_plane_dma_addr(&video->next_frm->vb.vb2_buf, 0);
- video->ops->queue(vpfe_dev, addr);
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
-}
-
-/* schedule the buffer for capturing bottom field */
-void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video)
-{
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
- addr += video->field_off;
- video->ops->queue(vpfe_dev, addr);
-}
-
-/* make buffer available for dequeue */
-void vpfe_video_process_buffer_complete(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
-
- video->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
- vb2_buffer_done(&video->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
- if (pipe->state == VPFE_PIPELINE_STREAM_CONTINUOUS)
- video->cur_frm = video->next_frm;
-}
-
-/* vpfe_stop_capture() - stop streaming */
-static void vpfe_stop_capture(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
-
- video->started = 0;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return;
- if (all_videos_stopped(video))
- vpfe_pipeline_set_stream(pipe,
- VPFE_PIPELINE_STREAM_STOPPED);
-}
-
-/*
- * vpfe_release() - release video device
- * @file: file pointer
- *
- * deletes buffer queue, frees the buffers and the vpfe file handle
- *
- * Return 0
- */
-static int vpfe_release(struct file *file)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct v4l2_fh *vfh = file->private_data;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = container_of(vfh, struct vpfe_fh, vfh);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
-
- /* Get the device lock */
- mutex_lock(&video->lock);
- /* if this instance is doing IO */
- if (fh->io_allowed) {
- if (video->started) {
- vpfe_stop_capture(video);
- /*
- * mark pipe state as stopped in vpfe_release(),
- * as app might call streamon() after streamoff()
- * in which case driver has to start streaming.
- */
- video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
- vb2_streamoff(&video->buffer_queue,
- video->buffer_queue.type);
- }
- video->io_usrs = 0;
- /* Free buffers allocated */
- vb2_queue_release(&video->buffer_queue);
- }
- /* Decrement device users counter */
- video->usrs--;
- v4l2_fh_del(&fh->vfh);
- v4l2_fh_exit(&fh->vfh);
- /* If this is the last file handle */
- if (!video->usrs)
- video->initialized = 0;
- mutex_unlock(&video->lock);
- file->private_data = NULL;
- /* Free memory allocated to file handle object */
- v4l2_fh_del(vfh);
- kzfree(fh);
- return 0;
-}
-
-/*
- * vpfe_mmap() - It is used to map kernel space buffers
- * into user space
- */
-static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
- return vb2_mmap(&video->buffer_queue, vma);
-}
-
-/*
- * vpfe_poll() - It is used for select/poll system call
- */
-static __poll_t vpfe_poll(struct file *file, poll_table *wait)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
- if (video->started)
- return vb2_poll(&video->buffer_queue, file, wait);
- return 0;
-}
-
-/* vpfe capture driver file operations */
-static const struct v4l2_file_operations vpfe_fops = {
- .owner = THIS_MODULE,
- .open = vpfe_open,
- .release = vpfe_release,
- .unlocked_ioctl = video_ioctl2,
- .mmap = vpfe_mmap,
- .poll = vpfe_poll
-};
-
-/*
- * vpfe_querycap() - query capabilities of video device
- * @file: file pointer
- * @priv: void pointer
- * @cap: pointer to v4l2_capability structure
- *
- * fills v4l2 capabilities structure
- *
- * Return 0
- */
-static int vpfe_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
-
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
- strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
- strscpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
- strscpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
-
- return 0;
-}
-
-/*
- * vpfe_g_fmt() - get the format which is active on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * fills v4l2 format structure with active format
- *
- * Return 0
- */
-static int vpfe_g_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt\n");
- /* Fill in the information about format */
- *fmt = video->fmt;
- return 0;
-}
-
-/*
- * vpfe_enum_fmt() - enum formats supported on media chain
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_fmtdesc structure
- *
- * fills v4l2_fmtdesc structure with output format set on adjacent subdev,
- * only one format is enumerated as the subdevs are already configured
- *
- * Return 0 if successful, error code otherwise
- */
-static int vpfe_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev_format sd_fmt;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_subdev *subdev;
- struct v4l2_format format;
- struct media_pad *remote;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
-
- /*
- * since the subdev pad format is already set,
- * only one pixel format is available
- */
- if (fmt->index > 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
- return -EINVAL;
- }
- /* get the remote pad */
- remote = media_entity_remote_pad(&video->pad);
- if (!remote) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote pad for video node\n");
- return -EINVAL;
- }
- /* get the remote subdev */
- subdev = vpfe_video_remote_subdev(video, NULL);
- if (!subdev) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote subdev for video node\n");
- return -EINVAL;
- }
- sd_fmt.pad = remote->index;
- sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- /* get output format of remote subdev */
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "invalid remote subdev for video node\n");
- return ret;
- }
- /* convert to pix format */
- mbus.code = sd_fmt.format.code;
- mbus_to_pix(&mbus, &format.fmt.pix);
- /* copy the result */
- fmt->pixelformat = format.fmt.pix.pixelformat;
-
- return 0;
-}
-
-/*
- * vpfe_s_fmt() - set the format on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * validate and set the format on video device
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_format format;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
- /* If streaming is started, return error */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
- return -EBUSY;
- }
- /* get adjacent subdev's output pad format */
- ret = __vpfe_video_get_format(video, &format);
- if (ret)
- return ret;
- *fmt = format;
- video->fmt = *fmt;
- return 0;
-}
-
-/*
- * vpfe_try_fmt() - try the format on video device
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_format structure
- *
- * validate the format, update with correct format
- * based on output format set on adjacent subdev
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_format format;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
- /* get adjacent subdev's output pad format */
- ret = __vpfe_video_get_format(video, &format);
- if (ret)
- return ret;
-
- *fmt = format;
- return 0;
-}
-
-/*
- * vpfe_enum_input() - enum inputs supported on media chain
- * @file: file pointer
- * @priv: void pointer
- * @fmt: pointer to v4l2_fmtdesc structure
- *
- * fills v4l2_input structure with input available on media chain,
- * only one input is enumerated as the media chain is set up by this time
- *
- * Return 0 if successful, -EINVAL if the media chain is invalid
- */
-static int vpfe_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_ext_subdev_info *sdinfo = video->current_ext_subdev;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
- /* enumerate from the subdev user has chosen through mc */
- if (inp->index < sdinfo->num_inputs) {
- memcpy(inp, &sdinfo->inputs[inp->index],
- sizeof(struct v4l2_input));
- return 0;
- }
- return -EINVAL;
-}
-
-/*
- * vpfe_g_input() - get index of the input which is active
- * @file: file pointer
- * @priv: void pointer
- * @index: pointer to unsigned int
- *
- * set index with input index which is active
- */
-static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
-
- *index = video->current_input;
- return 0;
-}
-
-/*
- * vpfe_s_input() - set input which is pointed by input index
- * @file: file pointer
- * @priv: void pointer
- * @index: pointer to unsigned int
- *
- * set input on external subdev
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- struct vpfe_route *route;
- struct v4l2_input *inps;
- u32 output;
- u32 input;
- int ret;
- int i;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
- /*
- * If streaming is started return device busy
- * error
- */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- sdinfo = video->current_ext_subdev;
- if (!sdinfo->registered) {
- ret = -EINVAL;
- goto unlock_out;
- }
- if (vpfe_dev->cfg->setup_input &&
- vpfe_dev->cfg->setup_input(sdinfo->grp_id) < 0) {
- ret = -EFAULT;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "couldn't setup input for %s\n",
- sdinfo->module_name);
- goto unlock_out;
- }
- route = &sdinfo->routes[index];
- if (route && sdinfo->can_route) {
- input = route->input;
- output = route->output;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, video,
- s_routing, input, output, 0);
- if (ret) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "s_input:error in setting input in decoder\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- }
- /* set standards set by subdev in video device */
- for (i = 0; i < sdinfo->num_inputs; i++) {
- inps = &sdinfo->inputs[i];
- video->video_dev.tvnorms |= inps->std;
- }
- video->current_input = index;
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_querystd() - query std which is being input on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @std_id: pointer to v4l2_std_id structure
- *
- * call external subdev through v4l2_device_call_until_err to
- * get the std that is currently active.
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
-
- ret = mutex_lock_interruptible(&video->lock);
- sdinfo = video->current_ext_subdev;
- if (ret)
- return ret;
- /* Call querystd function of decoder device */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, querystd, std_id);
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_s_std() - set std on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @std_id: pointer to v4l2_std_id structure
- *
- * set std pointed by std_id on external subdev by calling it using
- * v4l2_device_call_until_err
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_ext_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
-
- /* Call decoder driver function to set the standard */
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
- sdinfo = video->current_ext_subdev;
- /* If streaming is started, return device busy error */
- if (video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_std, std_id);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
- video->stdid = V4L2_STD_UNKNOWN;
- goto unlock_out;
- }
- video->stdid = std_id;
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
- *tvnorm = video->stdid;
- return 0;
-}
-
-/*
- * vpfe_enum_dv_timings() - enumerate dv_timings which are supported by
- * the external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_enum_dv_timings structure
- *
- * enumerate the dv_timings which are supported by the external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_enum_dv_timings(struct file *file, void *fh,
- struct v4l2_enum_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- timings->pad = 0;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_dv_timings\n");
- return v4l2_subdev_call(subdev, pad, enum_dv_timings, timings);
-}
-
-/*
- * vpfe_query_dv_timings() - query the dv_timings which is being input
- * to external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * get dv_timings which is being input on external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_query_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_query_dv_timings\n");
- return v4l2_subdev_call(subdev, video, query_dv_timings, timings);
-}
-
-/*
- * vpfe_s_dv_timings() - set dv_timings on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * set dv_timings pointed by timings on external subdev through
- * v4l2_device_call_until_err; this also configures the amplifier
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_s_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_dv_timings\n");
-
- video->stdid = V4L2_STD_UNKNOWN;
- return v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- video->current_ext_subdev->grp_id,
- video, s_dv_timings, timings);
-}
-
-/*
- * vpfe_g_dv_timings() - get dv_timings which is set on external subdev
- * @file: file pointer
- * @priv: void pointer
- * @timings: pointer to v4l2_dv_timings structure
- *
- * get dv_timings which is set on external subdev through
- * v4l2_subdev_call
- *
- * Return 0 on success, error code otherwise
- */
-static int
-vpfe_g_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct v4l2_subdev *subdev = video->current_ext_subdev->subdev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_dv_timings\n");
- return v4l2_subdev_call(subdev, video, g_dv_timings, timings);
-}
-
-/*
- * Videobuf operations
- */
-/*
- * vpfe_buffer_queue_setup : Callback function for buffer setup.
- * @vq: vb2_queue ptr
- * @fmt: v4l2 format
- * @nbuffers: ptr to number of buffers requested by application
- * @nplanes: contains number of distinct video planes needed to hold a frame
- * @sizes: contains the size (in bytes) of each plane.
- * @alloc_devs: ptr to allocation context
- *
- * This callback function is called when reqbufs() is called to adjust
- * the buffer count and buffer size
- */
-static int
-vpfe_buffer_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long size;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue_setup\n");
- size = video->fmt.fmt.pix.sizeimage;
-
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
-
- *nplanes = 1;
- sizes[0] = size;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "nbuffers=%d, size=%lu\n", *nbuffers, size);
- return 0;
-}
-
-/*
- * vpfe_buffer_prepare : callback function for buffer prepare
- * @vb: ptr to vb2_buffer
- *
- * This is the callback function for buffer prepare, invoked when the
- * vb2_qbuf() function is called. The buffer is prepared and the userspace
- * virtual address is converted into a physical address
- */
-static int vpfe_buffer_prepare(struct vb2_buffer *vb)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
-
- /* Initialize buffer */
- vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
- if (vb2_plane_vaddr(vb, 0) &&
- vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
-
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- /* Make sure user addresses are aligned to 32 bytes */
- if (!ALIGN(addr, 32))
- return -EINVAL;
-
- return 0;
-}
-
-static void vpfe_buffer_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- /* Get the file handle object and device object */
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_cap_buffer *buf = container_of(vbuf,
- struct vpfe_cap_buffer, vb);
- unsigned long flags;
- unsigned long empty;
- unsigned long addr;
-
- spin_lock_irqsave(&video->dma_queue_lock, flags);
- empty = list_empty(&video->dma_queue);
- /* add the buffer to the DMA queue */
- list_add_tail(&buf->list, &video->dma_queue);
- spin_unlock_irqrestore(&video->dma_queue_lock, flags);
- /* this case happens in case of single shot */
- if (empty && video->started && pipe->state ==
- VPFE_PIPELINE_STREAM_SINGLESHOT &&
- video->state == VPFE_VIDEO_BUFFER_NOT_QUEUED) {
- spin_lock(&video->dma_queue_lock);
- addr = vpfe_video_get_next_buffer(video);
- video->ops->queue(vpfe_dev, addr);
-
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
- spin_unlock(&video->dma_queue_lock);
-
- /* enable h/w each time in single shot */
- if (vpfe_video_is_pipe_ready(pipe))
- vpfe_pipeline_set_stream(pipe,
- VPFE_PIPELINE_STREAM_SINGLESHOT);
- }
-}
-
-/* vpfe_start_capture() - start streaming on all the subdevs */
-static int vpfe_start_capture(struct vpfe_video_device *video)
-{
- struct vpfe_pipeline *pipe = &video->pipe;
- int ret = 0;
-
- video->started = 1;
- if (vpfe_video_is_pipe_ready(pipe))
- ret = vpfe_pipeline_set_stream(pipe, pipe->state);
-
- return ret;
-}
-
-static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- unsigned long addr;
- int ret;
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- goto streamoff;
-
- /* Get the next frame from the buffer queue */
- video->cur_frm = video->next_frm =
- list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list);
- /* Remove buffer from the buffer queue */
- list_del(&video->cur_frm->list);
- /* Mark state of the current frame to active */
- video->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- /* Initialize field_id and started member */
- video->field_id = 0;
- addr = vb2_dma_contig_plane_dma_addr(&video->cur_frm->vb.vb2_buf, 0);
- video->ops->queue(vpfe_dev, addr);
- video->state = VPFE_VIDEO_BUFFER_QUEUED;
-
- ret = vpfe_start_capture(video);
- if (ret) {
- struct vpfe_cap_buffer *buf, *tmp;
-
- vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_QUEUED);
- list_for_each_entry_safe(buf, tmp, &video->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf,
- VB2_BUF_STATE_QUEUED);
- }
- goto unlock_out;
- }
-
- mutex_unlock(&video->lock);
-
- return ret;
-unlock_out:
- mutex_unlock(&video->lock);
-streamoff:
- ret = vb2_streamoff(&video->buffer_queue, video->buffer_queue.type);
- return 0;
-}
-
-static int vpfe_buffer_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vpfe_cap_buffer *buf = container_of(vbuf,
- struct vpfe_cap_buffer, vb);
-
- INIT_LIST_HEAD(&buf->list);
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void vpfe_stop_streaming(struct vb2_queue *vq)
-{
- struct vpfe_fh *fh = vb2_get_drv_priv(vq);
- struct vpfe_video_device *video = fh->video;
-
- /* release all active buffers */
- if (video->cur_frm == video->next_frm) {
- vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (video->cur_frm != NULL)
- vb2_buffer_done(&video->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (video->next_frm != NULL)
- vb2_buffer_done(&video->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&video->dma_queue)) {
- video->next_frm = list_entry(video->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&video->next_frm->list);
- vb2_buffer_done(&video->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-}
-
-static void vpfe_buf_cleanup(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vpfe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
- struct vpfe_video_device *video = fh->video;
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_cap_buffer *buf = container_of(vbuf,
- struct vpfe_cap_buffer, vb);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buf_cleanup\n");
- if (vb->state == VB2_BUF_STATE_ACTIVE)
- list_del_init(&buf->list);
-}
-
-static const struct vb2_ops video_qops = {
- .queue_setup = vpfe_buffer_queue_setup,
- .buf_init = vpfe_buffer_init,
- .buf_prepare = vpfe_buffer_prepare,
- .start_streaming = vpfe_start_streaming,
- .stop_streaming = vpfe_stop_streaming,
- .buf_cleanup = vpfe_buf_cleanup,
- .buf_queue = vpfe_buffer_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-/*
- * vpfe_reqbufs() - REQBUFS is supported only once after opening
- * the device.
- */
-static int vpfe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
- struct vb2_queue *q;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
-
- if (req_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- req_buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT){
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
-
- if (video->io_usrs != 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- video->memory = req_buf->memory;
-
- /* Initialize videobuf2 queue as per the buffer type */
- q = &video->buffer_queue;
- q->type = req_buf->type;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
- q->drv_priv = fh;
- q->min_buffers_needed = 1;
- q->ops = &video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->dev = vpfe_dev->pdev;
- q->lock = &video->lock;
-
- ret = vb2_queue_init(q);
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- goto unlock_out;
- }
-
- fh->io_allowed = 1;
- video->io_usrs = 1;
- INIT_LIST_HEAD(&video->dma_queue);
- ret = vb2_reqbufs(&video->buffer_queue, req_buf);
-
-unlock_out:
- mutex_unlock(&video->lock);
- return ret;
-}
-
-/*
- * vpfe_querybuf() - query buffers for exchange
- */
-static int vpfe_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
-
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- if (video->memory != V4L2_MEMORY_MMAP) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
- return -EINVAL;
- }
-
- /* Call vb2_querybuf to get information */
- return vb2_querybuf(&video->buffer_queue, buf);
-}
-
-/*
- * vpfe_qbuf() - queue buffers for capture or processing
- */
-static int vpfe_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
-
- if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- p->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
- /*
- * If this file handle is not allowed to do IO,
- * return error
- */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- return vb2_qbuf(&video->buffer_queue,
- video->video_dev.v4l2_dev->mdev, p);
-}
-
-/*
- * vpfe_dqbuf() - dequeue a buffer which is done with processing
- */
-static int vpfe_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
-
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- return vb2_dqbuf(&video->buffer_queue,
- buf, (file->f_flags & O_NONBLOCK));
-}
-
-/*
- * vpfe_streamon() - start streaming
- * @file: file pointer
- * @priv: void pointer
- * @buf_type: enum v4l2_buf_type
- *
- * queue buffer onto hardware for capture/processing and
- * start all the subdevs which are in media chain
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_pipeline *pipe = &video->pipe;
- struct vpfe_fh *fh = file->private_data;
- int ret = -EINVAL;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
-
- if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return ret;
- }
- /* If file handle is not allowed IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
- /* If buffer queue is empty, return error */
- if (list_empty(&video->buffer_queue.queued_list)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EIO;
- }
- /* Validate the pipeline */
- if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = vpfe_video_validate_pipeline(pipe);
- if (ret < 0)
- return ret;
- }
- /* Call vb2_streamon to start streaming */
- return vb2_streamon(&video->buffer_queue, buf_type);
-}
-
-/*
- * vpfe_streamoff() - stop streaming
- * @file: file pointer
- * @priv: void pointer
- * @buf_type: enum v4l2_buf_type
- *
- * stop all the subdevs which are in media chain
- *
- * Return 0 on success, error code otherwise
- */
-static int vpfe_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_video_device *video = video_drvdata(file);
- struct vpfe_device *vpfe_dev = video->vpfe_dev;
- struct vpfe_fh *fh = file->private_data;
- int ret = 0;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
-
- if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /* If io is not allowed for this file handle, return error */
- if (!fh->io_allowed) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- /* If streaming is not started, return error */
- if (!video->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "device is not started\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&video->lock);
- if (ret)
- return ret;
-
- vpfe_stop_capture(video);
- ret = vb2_streamoff(&video->buffer_queue, buf_type);
- mutex_unlock(&video->lock);
-
- return ret;
-}
-
-/* vpfe capture ioctl operations */
-static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
- .vidioc_querycap = vpfe_querycap,
- .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
- .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
- .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
- .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
- .vidioc_g_fmt_vid_out = vpfe_g_fmt,
- .vidioc_s_fmt_vid_out = vpfe_s_fmt,
- .vidioc_try_fmt_vid_out = vpfe_try_fmt,
- .vidioc_enum_fmt_vid_out = vpfe_enum_fmt,
- .vidioc_enum_input = vpfe_enum_input,
- .vidioc_g_input = vpfe_g_input,
- .vidioc_s_input = vpfe_s_input,
- .vidioc_querystd = vpfe_querystd,
- .vidioc_s_std = vpfe_s_std,
- .vidioc_g_std = vpfe_g_std,
- .vidioc_enum_dv_timings = vpfe_enum_dv_timings,
- .vidioc_query_dv_timings = vpfe_query_dv_timings,
- .vidioc_s_dv_timings = vpfe_s_dv_timings,
- .vidioc_g_dv_timings = vpfe_g_dv_timings,
- .vidioc_reqbufs = vpfe_reqbufs,
- .vidioc_querybuf = vpfe_querybuf,
- .vidioc_qbuf = vpfe_qbuf,
- .vidioc_dqbuf = vpfe_dqbuf,
- .vidioc_streamon = vpfe_streamon,
- .vidioc_streamoff = vpfe_streamoff,
-};
-
-/* VPFE video init function */
-int vpfe_video_init(struct vpfe_video_device *video, const char *name)
-{
- const char *direction;
- int ret;
-
- switch (video->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
- video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
-
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- direction = "input";
- video->pad.flags = MEDIA_PAD_FL_SOURCE;
- video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- break;
-
- default:
- return -EINVAL;
- }
- /* Initialize field of video device */
- mutex_init(&video->lock);
- video->video_dev.release = video_device_release;
- video->video_dev.fops = &vpfe_fops;
- video->video_dev.ioctl_ops = &vpfe_ioctl_ops;
- video->video_dev.minor = -1;
- video->video_dev.tvnorms = 0;
- video->video_dev.lock = &video->lock;
- snprintf(video->video_dev.name, sizeof(video->video_dev.name),
- "DAVINCI VIDEO %s %s", name, direction);
-
- spin_lock_init(&video->irqlock);
- spin_lock_init(&video->dma_queue_lock);
- ret = media_entity_pads_init(&video->video_dev.entity,
- 1, &video->pad);
- if (ret < 0)
- return ret;
-
- video_set_drvdata(&video->video_dev, video);
-
- return 0;
-}
-
-/* vpfe video device register function */
-int vpfe_video_register(struct vpfe_video_device *video,
- struct v4l2_device *vdev)
-{
- int ret;
-
- video->video_dev.v4l2_dev = vdev;
-
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- video->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE;
- else
- video->video_dev.device_caps = V4L2_CAP_VIDEO_OUTPUT;
- video->video_dev.device_caps |= V4L2_CAP_STREAMING;
- ret = video_register_device(&video->video_dev, VFL_TYPE_GRABBER, -1);
- if (ret < 0)
- pr_err("%s: could not register video device (%d)\n",
- __func__, ret);
- return ret;
-}
-
-/* vpfe video device unregister function */
-void vpfe_video_unregister(struct vpfe_video_device *video)
-{
- if (video_is_registered(&video->video_dev)) {
- video_unregister_device(&video->video_dev);
- media_entity_cleanup(&video->video_dev.entity);
- }
-}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
deleted file mode 100644
index 5d01c4883ab4..000000000000
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2012 Texas Instruments Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Contributors:
- * Manjunath Hadli <manjunath.hadli@ti.com>
- * Prabhakar Lad <prabhakar.lad@ti.com>
- */
-
-#ifndef _DAVINCI_VPFE_VIDEO_H
-#define _DAVINCI_VPFE_VIDEO_H
-
-#include <media/videobuf2-v4l2.h>
-#include <media/videobuf2-dma-contig.h>
-
-struct vpfe_device;
-
-/*
- * struct vpfe_video_operations - VPFE video operations
- * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
- * if there was no buffer previously queued.
- */
-struct vpfe_video_operations {
- int (*queue)(struct vpfe_device *vpfe_dev, unsigned long addr);
-};
-
-enum vpfe_pipeline_stream_state {
- VPFE_PIPELINE_STREAM_STOPPED = 0,
- VPFE_PIPELINE_STREAM_CONTINUOUS = 1,
- VPFE_PIPELINE_STREAM_SINGLESHOT = 2,
-};
-
-enum vpfe_video_state {
- /* indicates that buffer is not queued */
- VPFE_VIDEO_BUFFER_NOT_QUEUED = 0,
- /* indicates that buffer is queued */
- VPFE_VIDEO_BUFFER_QUEUED = 1,
-};
-
-struct vpfe_pipeline {
- /* media pipeline */
- struct media_pipeline *pipe;
- struct media_graph graph;
- /* state of the pipeline, continuous,
- * single-shot or stopped
- */
- enum vpfe_pipeline_stream_state state;
- /* number of active input video entities */
- unsigned int input_num;
- /* number of active output video entities */
- unsigned int output_num;
- /* input video nodes in case of single-shot mode */
- struct vpfe_video_device *inputs[10];
- /* capturing video nodes */
- struct vpfe_video_device *outputs[10];
-};
-
-#define to_vpfe_pipeline(__e) \
- container_of((__e)->pipe, struct vpfe_pipeline, pipe)
-
-#define to_vpfe_video(vdev) \
- container_of(vdev, struct vpfe_video_device, video_dev)
-
-struct vpfe_cap_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-struct vpfe_video_device {
- /* vpfe device */
- struct vpfe_device *vpfe_dev;
- /* video dev */
- struct video_device video_dev;
- /* media pad of video entity */
- struct media_pad pad;
- /* video operations supported by video device */
- const struct vpfe_video_operations *ops;
- /* type of the video buffers used by user */
- enum v4l2_buf_type type;
- /* Indicates id of the field which is being captured */
- u32 field_id;
- /* pipeline of which this video device is a part */
- struct vpfe_pipeline pipe;
- /* Indicates whether streaming started */
- u8 started;
- /* Indicates state of the stream */
- unsigned int state;
- /* current input at the sub device */
- int current_input;
- /*
- * This field keeps track of type of buffer exchange mechanism
- * user has selected
- */
- enum v4l2_memory memory;
- /* number of open instances of the channel */
- u32 usrs;
- /* flag to indicate whether decoder is initialized */
- u8 initialized;
- /* skip frame count */
- u8 skip_frame_count;
- /* skip frame count init value */
- u8 skip_frame_count_init;
- /* time per frame for skipping */
- struct v4l2_fract timeperframe;
- /* ptr to currently selected sub device */
- struct vpfe_ext_subdev_info *current_ext_subdev;
- /* Pointer pointing to current vpfe_cap_buffer */
- struct vpfe_cap_buffer *cur_frm;
- /* Pointer pointing to next vpfe_cap_buffer */
- struct vpfe_cap_buffer *next_frm;
- /* Used to store pixel format */
- struct v4l2_format fmt;
- struct vb2_queue buffer_queue;
- /* Queue of filled frames */
- struct list_head dma_queue;
- spinlock_t irqlock;
- /* IRQ lock for DMA queue */
- spinlock_t dma_queue_lock;
- /* lock used to serialize all video4linux ioctls */
- struct mutex lock;
- /* number of users performing IO */
- u32 io_usrs;
- /* Currently selected or default standard */
- v4l2_std_id stdid;
- /*
- * offset where second field starts from the starting of the
- * buffer for field separated YCbCr formats
- */
- u32 field_off;
-};
-
-int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe);
-void vpfe_video_unregister(struct vpfe_video_device *video);
-int vpfe_video_register(struct vpfe_video_device *video,
- struct v4l2_device *vdev);
-int vpfe_video_init(struct vpfe_video_device *video, const char *name);
-void vpfe_video_process_buffer_complete(struct vpfe_video_device *video);
-void vpfe_video_schedule_bottom_field(struct vpfe_video_device *video);
-void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video);
-
-#endif /* _DAVINCI_VPFE_VIDEO_H */
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/staging/media/hantro/Kconfig
index be133bbaa68a..de77fe6554e7 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/staging/media/hantro/Kconfig
@@ -20,4 +20,4 @@ config VIDEO_HANTRO_ROCKCHIP
depends on ARCH_ROCKCHIP || COMPILE_TEST
default y
help
- Enable support for RK3288 and RK3399 SoCs.
+ Enable support for RK3288, RK3328, and RK3399 SoCs.
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/staging/media/hantro/Makefile
index 1584acdbf4a3..5d6b0383d280 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/staging/media/hantro/Makefile
@@ -4,11 +4,16 @@ hantro-vpu-y += \
hantro_drv.o \
hantro_v4l2.o \
hantro_h1_jpeg_enc.o \
+ hantro_g1_h264_dec.o \
hantro_g1_mpeg2_dec.o \
+ hantro_g1_vp8_dec.o \
rk3399_vpu_hw_jpeg_enc.o \
rk3399_vpu_hw_mpeg2_dec.o \
+ rk3399_vpu_hw_vp8_dec.o \
hantro_jpeg.o \
- hantro_mpeg2.o
+ hantro_h264.o \
+ hantro_mpeg2.o \
+ hantro_vp8.o
hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
rk3288_vpu_hw.o \
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index 62dcca9ff19c..f670bbde4159 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -20,11 +20,20 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include "hantro_hw.h"
+#define VP8_MB_DIM 16
+#define VP8_MB_WIDTH(w) DIV_ROUND_UP(w, VP8_MB_DIM)
+#define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM)
+
+#define H264_MB_DIM 16
+#define H264_MB_WIDTH(w) DIV_ROUND_UP(w, H264_MB_DIM)
+#define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM)
+
#define MPEG2_MB_DIM 16
#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
@@ -38,8 +47,9 @@ struct hantro_codec_ops;
#define HANTRO_JPEG_ENCODER BIT(0)
#define HANTRO_ENCODERS 0x0000ffff
-
#define HANTRO_MPEG2_DECODER BIT(16)
+#define HANTRO_VP8_DECODER BIT(17)
+#define HANTRO_H264_DECODER BIT(18)
#define HANTRO_DECODERS 0xffff0000
/**
@@ -96,22 +106,24 @@ struct hantro_variant {
* enum hantro_codec_mode - codec operating mode.
* @HANTRO_MODE_NONE: No operating mode. Used for RAW video formats.
* @HANTRO_MODE_JPEG_ENC: JPEG encoder.
+ * @HANTRO_MODE_H264_DEC: H264 decoder.
* @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
+ * @HANTRO_MODE_VP8_DEC: VP8 decoder.
*/
enum hantro_codec_mode {
HANTRO_MODE_NONE = -1,
HANTRO_MODE_JPEG_ENC,
+ HANTRO_MODE_H264_DEC,
HANTRO_MODE_MPEG2_DEC,
+ HANTRO_MODE_VP8_DEC,
};
/*
* struct hantro_ctrl - helper type to declare supported controls
- * @id: V4L2 control ID (V4L2_CID_xxx)
* @codec: codec id this control belong to (HANTRO_JPEG_ENCODER, etc.)
* @cfg: control configuration
*/
struct hantro_ctrl {
- unsigned int id;
unsigned int codec;
struct v4l2_ctrl_config cfg;
};
@@ -215,6 +227,7 @@ struct hantro_dev {
* @codec_ops: Set of operations related to codec mode.
* @jpeg_enc: JPEG-encoding context.
* @mpeg2_dec: MPEG-2-decoding context.
+ * @vp8_dec: VP8-decoding context.
*/
struct hantro_ctx {
struct hantro_dev *dev;
@@ -239,8 +252,10 @@ struct hantro_ctx {
/* Specific for particular codec modes. */
union {
+ struct hantro_h264_dec_hw_ctx h264_dec;
struct hantro_jpeg_enc_hw_ctx jpeg_enc;
struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
+ struct hantro_vp8_dec_hw_ctx vp8_dec;
};
};
@@ -265,6 +280,12 @@ struct hantro_fmt {
struct v4l2_frmsize_stepwise frmsize;
};
+struct hantro_reg {
+ u32 base;
+ u32 shift;
+ u32 mask;
+};
+
/* Logging helpers */
/**
@@ -343,9 +364,33 @@ static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
return val;
}
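+/*
+ * Read-modify-write helper: updates only the field described by @reg
+ * (reg->mask bits at reg->shift within register reg->base), leaving the
+ * other bits of the register intact.
+ */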
+static inline void hantro_reg_write(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ u32 v;
+
+ v = vdpu_read(vpu, reg->base);
+ v &= ~(reg->mask << reg->shift);
+ v |= ((val & reg->mask) << reg->shift);
+ vdpu_write_relaxed(vpu, v, reg->base);
+}
+
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
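+/* Shorthands for the next queued mem2mem source/destination buffer of a context. */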
+static inline struct vb2_v4l2_buffer *
+hantro_get_src_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+}
+
+static inline struct vb2_v4l2_buffer *
+hantro_get_dst_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+}
+
#endif /* HANTRO_H_ */
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index c3665f0e87a2..6d9d41170832 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -111,8 +111,6 @@ static void hantro_job_finish(struct hantro_dev *vpu,
src->sequence = ctx->sequence_out++;
dst->sequence = ctx->sequence_cap++;
- v4l2_m2m_buf_copy_metadata(src, dst, true);
-
ret = ctx->buf_finish(ctx, &dst->vb2_buf, bytesused);
if (ret)
result = VB2_BUF_STATE_ERROR;
@@ -153,11 +151,37 @@ void hantro_watchdog(struct work_struct *work)
}
}
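+/*
+ * Apply the V4L2 controls attached to the source buffer's media request
+ * before the hardware job is programmed.
+ */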
+void hantro_prepare_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+}
+
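+/*
+ * Complete the media request of the source buffer and kick the watchdog
+ * once the job has been handed to the hardware.
+ */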
+void hantro_finish_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog. */
+ schedule_delayed_work(&ctx->dev->watchdog_work,
+ msecs_to_jiffies(2000));
+}
+
static void device_run(void *priv)
{
struct hantro_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src, *dst;
int ret;
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
if (ret)
goto err_cancel_job;
@@ -165,6 +189,8 @@ static void device_run(void *priv)
if (ret < 0)
goto err_cancel_job;
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
ctx->codec_ops->run(ctx);
return;
@@ -262,28 +288,74 @@ static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
.s_ctrl = hantro_s_ctrl,
};
-static struct hantro_ctrl controls[] = {
+static const struct hantro_ctrl controls[] = {
{
- .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
.codec = HANTRO_JPEG_ENCODER,
.cfg = {
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
.min = 5,
.max = 100,
.step = 1,
.def = 50,
+ .ops = &hantro_ctrl_ops,
},
}, {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
.codec = HANTRO_MPEG2_DECODER,
.cfg = {
- .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
},
}, {
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
.codec = HANTRO_MPEG2_DECODER,
.cfg = {
- .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ },
+ }, {
+ .codec = HANTRO_VP8_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
},
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
+ .min = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+ .def = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+ .max = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
+ },
+ }, {
},
};
@@ -298,22 +370,12 @@ static int hantro_ctrls_setup(struct hantro_dev *vpu,
for (i = 0; i < num_ctrls; i++) {
if (!(allowed_codecs & controls[i].codec))
continue;
- if (!controls[i].cfg.elem_size) {
- v4l2_ctrl_new_std(&ctx->ctrl_handler,
- &hantro_ctrl_ops,
- controls[i].id, controls[i].cfg.min,
- controls[i].cfg.max,
- controls[i].cfg.step,
- controls[i].cfg.def);
- } else {
- controls[i].cfg.id = controls[i].id;
- v4l2_ctrl_new_custom(&ctx->ctrl_handler,
- &controls[i].cfg, NULL);
- }
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &controls[i].cfg, NULL);
if (ctx->ctrl_handler.error) {
vpu_err("Adding control (%d) failed %d\n",
- controls[i].id,
+ controls[i].cfg.id,
ctx->ctrl_handler.error);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
return ctx->ctrl_handler.error;
@@ -419,6 +481,7 @@ static const struct v4l2_file_operations hantro_fops = {
static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
#endif
{ /* sentinel */ }
@@ -724,6 +787,7 @@ static int hantro_probe(struct platform_device *pdev)
dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
return ret;
}
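+ /*
+ * Raise the DMA max segment size so that contiguous buffers can be
+ * mapped as a single segment by the vb2-dma-contig allocator.
+ */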
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
for (i = 0; i < vpu->variant->num_irqs; i++) {
const char *irq_name = vpu->variant->irqs[i].name;
@@ -733,10 +797,8 @@ static int hantro_probe(struct platform_device *pdev)
continue;
irq = platform_get_irq_byname(vpu->pdev, irq_name);
- if (irq <= 0) {
- dev_err(vpu->dev, "Could not get %s IRQ.\n", irq_name);
+ if (irq <= 0)
return -ENXIO;
- }
ret = devm_request_irq(vpu->dev, irq,
vpu->variant->irqs[i].handler, 0,
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
new file mode 100644
index 000000000000..7ab534936843
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_g1_regs.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static void set_params(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_slice_params *slices = ctrls->slices;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ struct vb2_v4l2_buffer *src_buf = hantro_get_src_buf(ctx);
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+
+ /* Decoder control register 0. */
+ reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
+ slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTERLACE_E;
+ if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ reg |= G1_REG_DEC_CTRL0_PIC_FIELDMODE_E;
+ if (!(slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD))
+ reg |= G1_REG_DEC_CTRL0_PIC_TOPFIELD_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Decoder control register 1. */
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(sps->pic_width_in_mbs_minus1 + 1) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(sps->pic_height_in_map_units_minus1 + 1) |
+ G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Decoder control register 2. */
+ reg = G1_REG_DEC_CTRL2_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
+ G1_REG_DEC_CTRL2_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset);
+
+ /* always use the matrix sent from userspace */
+ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
+
+ if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ /* Decoder control register 3. */
+ reg = G1_REG_DEC_CTRL3_START_CODE_E |
+ G1_REG_DEC_CTRL3_INIT_QP(pps->pic_init_qp_minus26 + 26) |
+ G1_REG_DEC_CTRL3_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL3);
+
+ /* Decoder control register 4. */
+ reg = G1_REG_DEC_CTRL4_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
+ G1_REG_DEC_CTRL4_FRAMENUM(slices[0].frame_num) |
+ G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc);
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= G1_REG_DEC_CTRL4_CABAC_E;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
+ if (sps->chroma_format_idc == 0)
+ reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ /* Decoder control register 5. */
+ reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(slices[0].dec_ref_pic_marking_bit_size) |
+ G1_REG_DEC_CTRL5_IDR_PIC_ID(slices[0].idr_pic_id);
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= G1_REG_DEC_CTRL5_CONST_INTRA_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_FILT_CTRL_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_RDPIC_CNT_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E;
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ reg |= G1_REG_DEC_CTRL5_IDR_PIC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL5);
+
+ /* Decoder control register 6. */
+ reg = G1_REG_DEC_CTRL6_PPS_ID(slices[0].pic_parameter_set_id) |
+ G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_POC_LENGTH(slices[0].pic_order_cnt_bit_size);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL6);
+
+ /* Error concealment register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_ERR_CONC);
+
+ /* Prediction filter tap register. */
+ vdpu_write_relaxed(vpu,
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_0(1) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_1(-5 & 0x3ff) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_2(20),
+ G1_REG_PRED_FLT);
+
+ /* Reference picture buffer control register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_REF_BUF_CTRL);
+
+ /* Reference picture buffer control register 2. */
+ vdpu_write_relaxed(vpu, G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(8),
+ G1_REG_REF_BUF_CTRL2);
+}
+
+static void set_ref(struct hantro_ctx *ctx)
+{
+ struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ const u8 *b0_reflist, *b1_reflist, *p_reflist;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 dpb_longterm = 0;
+ u32 dpb_valid = 0;
+ int reg_num;
+ u32 reg;
+ int i;
+
+ /*
+ * Set up bitmaps of valid and long-term DPB entries.
+ * NOTE: The bits are reversed, i.e. MSb is DPB 0.
+ */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; ++i) {
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ dpb_valid |= BIT(HANTRO_H264_DPB_SIZE - 1 - i);
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ dpb_longterm |= BIT(HANTRO_H264_DPB_SIZE - 1 - i);
+ }
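+ /*
+ * With HANTRO_H264_DPB_SIZE equal to 16, the reversed bitmaps occupy
+ * the low 16 bits; the shifts below move them into the upper half of
+ * the registers.
+ */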
+ vdpu_write_relaxed(vpu, dpb_valid << 16, G1_REG_VALID_REF);
+ vdpu_write_relaxed(vpu, dpb_longterm << 16, G1_REG_LT_REF);
+
+ /*
+ * Set up reference frame picture numbers.
+ *
+ * Each G1_REG_REF_PIC(x) register contains the picture numbers of two
+ * consecutive reference pictures.
+ */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i += 2) {
+ reg = 0;
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ reg |= G1_REG_REF_PIC_REFER0_NBR(dpb[i].pic_num);
+ else
+ reg |= G1_REG_REF_PIC_REFER0_NBR(dpb[i].frame_num);
+
+ if (dpb[i + 1].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ reg |= G1_REG_REF_PIC_REFER1_NBR(dpb[i + 1].pic_num);
+ else
+ reg |= G1_REG_REF_PIC_REFER1_NBR(dpb[i + 1].frame_num);
+
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(i / 2));
+ }
+
+ b0_reflist = ctx->h264_dec.reflists.b0;
+ b1_reflist = ctx->h264_dec.reflists.b1;
+ p_reflist = ctx->h264_dec.reflists.p;
+
+ /*
+ * Each G1_REG_BD_REF_PIC(x) register contains three entries
+ * of each forward and backward picture list.
+ */
+ reg_num = 0;
+ for (i = 0; i < 15; i += 3) {
+ reg = G1_REG_BD_REF_PIC_BINIT_RLIST_F0(b0_reflist[i]) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F1(b0_reflist[i + 1]) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F2(b0_reflist[i + 2]) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B0(b1_reflist[i]) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B1(b1_reflist[i + 1]) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B2(b1_reflist[i + 2]);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_REF_PIC(reg_num++));
+ }
+
+ /*
+ * G1_REG_BD_P_REF_PIC register contains last entries (index 15)
+ * of forward and backward reference picture lists and first 4 entries
+ * of P forward picture list.
+ */
+ reg = G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(b0_reflist[15]) |
+ G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(b1_reflist[15]) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(p_reflist[0]) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(p_reflist[1]) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(p_reflist[2]) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(p_reflist[3]);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_P_REF_PIC);
+
+ /*
+ * Each G1_REG_FWD_PIC(x) register contains six consecutive
+ * entries of P forward picture list, starting from index 4.
+ */
+ reg_num = 0;
+ for (i = 4; i < HANTRO_H264_DPB_SIZE; i += 6) {
+ reg = G1_REG_FWD_PIC_PINIT_RLIST_F0(p_reflist[i]) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F1(p_reflist[i + 1]) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F2(p_reflist[i + 2]) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F3(p_reflist[i + 3]) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F4(p_reflist[i + 4]) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F5(p_reflist[i + 5]);
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(reg_num++));
+ }
+
+ /* Set up addresses of DPB buffers. */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
+ struct vb2_buffer *buf = hantro_h264_get_ref_buf(ctx, i);
+
+ vdpu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(buf, 0),
+ G1_REG_ADDR_REF(i));
+ }
+}
+
+static void set_buffers(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma, dst_dma;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
+
+ /* Destination (decoded frame) buffer. */
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+
+ /* Higher profiles require DMV buffer appended to reference frames. */
+ if (ctrls->sps->profile_idc > 66) {
+ size_t pic_size = ctx->h264_dec.pic_size;
+ size_t mv_offset = round_up(pic_size, 8);
+
+ if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ mv_offset += 32 * H264_MB_WIDTH(ctx->dst_fmt.width);
+
+ vdpu_write_relaxed(vpu, dst_dma + mv_offset,
+ G1_REG_ADDR_DIR_MV);
+ }
+
+ /* Auxiliary buffer prepared in hantro_h264_dec_prepare_run(). */
+ vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
+}
+
+void hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ /* Prepare the H264 decoder context. */
+ if (hantro_h264_dec_prepare_run(ctx))
+ return;
+
+ /* Configure hardware registers. */
+ set_params(ctx);
+ set_ref(ctx);
+ set_buffers(ctx);
+
+ hantro_finish_run(ctx);
+
+ /* Start decoding! */
+ vdpu_write_relaxed(vpu,
+ G1_REG_CONFIG_DEC_AXI_RD_ID(0xffu) |
+ G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_MAX_BURST(16) |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E,
+ G1_REG_CONFIG);
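+
+ /*
+ * The interrupt enable below uses the ordered write accessor,
+ * presumably so that the relaxed register writes above reach the
+ * hardware before decoding is started.
+ */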
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+}
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index e592c1b66375..80f0e94f8afa 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -167,12 +167,11 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
const struct v4l2_mpeg2_picture *picture;
u32 reg;
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
/* Apply request controls if any */
- v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
- &ctx->ctrl_handler);
+ hantro_prepare_run(ctx);
slice_params = hantro_get_ctrl(ctx,
V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -248,12 +247,7 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
&dst_buf->vb2_buf,
sequence, picture, slice_params);
- /* Controls no longer in-use, we can complete them */
- v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
- &ctx->ctrl_handler);
-
- /* Kick the watchdog and start decoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ hantro_finish_run(ctx);
reg = G1_REG_DEC_E(1);
vdpu_write(vpu, reg, G1_SWREG(1));
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
new file mode 100644
index 000000000000..6d99c2be01cf
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP8 codec driver
+ *
+ * Copyright (C) 2019 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2019 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/vp8-ctrls.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+/* DCT partition base address regs */
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { G1_REG_ADDR_STR, 0, 0xffffffff },
+ { G1_REG_ADDR_REF(8), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(9), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(10), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(11), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(12), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(14), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(15), 0, 0xffffffff },
+};
+
+/* Loop filter level regs */
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { G1_REG_REF_PIC(2), 18, 0x3f },
+ { G1_REG_REF_PIC(2), 12, 0x3f },
+ { G1_REG_REF_PIC(2), 6, 0x3f },
+ { G1_REG_REF_PIC(2), 0, 0x3f },
+};
+
+/* Macroblock loop filter level adjustment regs */
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { G1_REG_REF_PIC(0), 21, 0x7f },
+ { G1_REG_REF_PIC(0), 14, 0x7f },
+ { G1_REG_REF_PIC(0), 7, 0x7f },
+ { G1_REG_REF_PIC(0), 0, 0x7f },
+};
+
+/* Reference frame adjustment regs */
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { G1_REG_REF_PIC(1), 21, 0x7f },
+ { G1_REG_REF_PIC(1), 14, 0x7f },
+ { G1_REG_REF_PIC(1), 7, 0x7f },
+ { G1_REG_REF_PIC(1), 0, 0x7f },
+};
+
+/* Quantizer */
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { G1_REG_REF_PIC(3), 11, 0x7ff },
+ { G1_REG_REF_PIC(3), 0, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 11, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 0, 0x7ff },
+};
+
+/* Quantizer delta regs */
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { G1_REG_REF_PIC(3), 27, 0x1f },
+ { G1_REG_REF_PIC(3), 22, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 27, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 22, 0x1f },
+ { G1_REG_BD_P_REF_PIC, 27, 0x1f },
+};
+
+/* DCT partition start bits regs */
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { G1_REG_DEC_CTRL2, 26, 0x3f }, { G1_REG_DEC_CTRL4, 26, 0x3f },
+ { G1_REG_DEC_CTRL4, 20, 0x3f }, { G1_REG_DEC_CTRL7, 24, 0x3f },
+ { G1_REG_DEC_CTRL7, 18, 0x3f }, { G1_REG_DEC_CTRL7, 12, 0x3f },
+ { G1_REG_DEC_CTRL7, 6, 0x3f }, { G1_REG_DEC_CTRL7, 0, 0x3f },
+};
+
+/* Precision filter tap regs */
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][4] = {
+ {
+ { G1_REG_PRED_FLT, 22, 0x3ff },
+ { G1_REG_PRED_FLT, 12, 0x3ff },
+ { G1_REG_PRED_FLT, 2, 0x3ff },
+ { G1_REG_REF_PIC(4), 22, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(4), 12, 0x3ff },
+ { G1_REG_REF_PIC(4), 2, 0x3ff },
+ { G1_REG_REF_PIC(5), 22, 0x3ff },
+ { G1_REG_REF_PIC(5), 12, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(5), 2, 0x3ff },
+ { G1_REG_REF_PIC(6), 22, 0x3ff },
+ { G1_REG_REF_PIC(6), 12, 0x3ff },
+ { G1_REG_REF_PIC(6), 2, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(7), 22, 0x3ff },
+ { G1_REG_REF_PIC(7), 12, 0x3ff },
+ { G1_REG_REF_PIC(7), 2, 0x3ff },
+ { G1_REG_LT_REF, 22, 0x3ff },
+ },
+ {
+ { G1_REG_LT_REF, 12, 0x3ff },
+ { G1_REG_LT_REF, 2, 0x3ff },
+ { G1_REG_VALID_REF, 22, 0x3ff },
+ { G1_REG_VALID_REF, 12, 0x3ff },
+ },
+ {
+ { G1_REG_VALID_REF, 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 2, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(1), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 22, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(2), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 12, 0x3ff },
+ },
+};
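+
+/*
+ * Each row above holds the four middle taps of one 6-tap filter; cfg_tap()
+ * fills them from hantro_vp8_dec_mc_filter[][1..4]. Only rows 2, 4 and 6 of
+ * that table have non-zero outer taps, which is why cfg_tap() packs just
+ * those into G1_REG_BD_REF_PIC(3).
+ */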
+
+/*
+ * Set loop filters
+ */
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ const struct v4l2_vp8_loopfilter_header *lf = &hdr->lf_header;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = G1_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= G1_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(0));
+
+ if (lf->flags & V4L2_VP8_LF_HEADER_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+/*
+ * Set quantization parameters
+ */
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_quantization_header *q = &hdr->quant_header;
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+/*
+ * set control partition and DCT partition regs
+ *
+ * VP8 frame stream data layout:
+ *
+ * first_part_size partition_sizes[0]
+ * ^ ^
+ * src_dma | |
+ * ^ +--------+------+ +-----+-----+
+ * | | control part | | |
+ * +--------+----------------+------------------+-----------+-----+-----------+
+ * | tag 3B | extra 7B | hdr | mb_data | DCT sz | DCT part0 | ... | DCT partn |
+ * +--------+-----------------------------------+-----------+-----+-----------+
+ * | | | |
+ * v +----+---+ v
+ * mb_start | src_dma_end
+ * v
+ * DCT size part
+ * (num_dct-1)*3B
+ * Note:
+ * 1. only key frames have the extra 7 bytes
+ * 2. all offsets are based on src_dma
+ * 3. number of DCT parts is 1, 2, 4 or 8
+ * 4. the addresses set to the VPU must be 64-bit aligned
+ */
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ u32 first_part_offset = VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ struct hantro_reg reg;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ /*
+ * Calculate control partition mb data info
+ * @first_part_header_bits: bit offset of mb data from the first
+ * part start position
+ * @mb_offset_bits: bit offset of mb data from the src_dma
+ * base address
+ * @mb_offset_bytes: byte offset of mb data from the src_dma
+ * base address
+ * @mb_start_bits: bit offset of mb data from its 64-bit
+ * aligned base address
+ */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
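+
+ /*
+ * Worked example with hypothetical numbers: for a key frame with
+ * first_part_header_bits == 100, first_part_offset is 10, so
+ * mb_offset_bits = 10 * 8 + 100 + 8 = 188, mb_offset_bytes = 23,
+ * the aligned base below is src_dma + 16 (23 & ~7) and
+ * mb_start_bits = 188 - 16 * 8 = 60.
+ */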
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK))
+ + src_dma, G1_REG_ADDR_REF(13));
+
+ /* Macroblock data start bits */
+ reg.base = G1_REG_DEC_CTRL2;
+ reg.mask = 0x3f;
+ reg.shift = 18;
+ hantro_reg_write(vpu, &reg, mb_start_bits);
+
+ /* Macroblock aligned data length */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0x3fffff;
+ reg.shift = 0;
+ hantro_reg_write(vpu, &reg, mb_size + 1);
+
+ /*
+ * Calculate DCT partition info
+ * @dct_size_part_size: size of the area holding the DCT partition
+ * sizes; every DCT partition except the last one
+ * has a 3-byte size field
+ * @dct_part_offset: byte offset of the DCT parts from the src_dma base
+ * @dct_part_total_len: total size of all DCT parts
+ */
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0xf;
+ reg.shift = 24;
+ hantro_reg_write(vpu, &reg, hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ vdpu_write_relaxed(vpu,
+ G1_REG_DEC_CTRL3_STREAM_LEN(dct_part_total_len),
+ G1_REG_DEC_CTRL3);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
+
+/*
+ * prediction filter taps
+ * normal 6-tap filters
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_reg reg;
+ u32 val = 0;
+ int i, j;
+
+ reg.base = G1_REG_BD_REF_PIC(3);
+ reg.mask = 0xf;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
+ for (i = 0; i < 8; i++) {
+ val = (hantro_vp8_dec_mc_filter[i][0] << 2) |
+ hantro_vp8_dec_mc_filter[i][5];
+
+ for (j = 0; j < 4; j++)
+ hantro_reg_write(vpu, &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j + 1]);
+
+ switch (i) {
+ case 2:
+ reg.shift = 8;
+ break;
+ case 4:
+ reg.shift = 4;
+ break;
+ case 6:
+ reg.shift = 0;
+ break;
+ default:
+ continue;
+ }
+
+ hantro_reg_write(vpu, &reg, val);
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ dma_addr_t ref;
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+
+ ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
+
+ ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ WARN_ON(!ref && hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
+
+ ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ WARN_ON(!ref && hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(5));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ G1_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = G1_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED) {
+ reg |= G1_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP)
+ reg |= G1_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
+
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+}
+
+void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame_header *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
+ if (WARN_ON(!hdr))
+ return;
+
+ /* Reset the segment map buffer on key frames. */
+ if (VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ reg = G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E |
+ G1_REG_CONFIG_DEC_IN_ENDIAN |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
+
+ reg = G1_REG_DEC_CTRL0_DEC_MODE(10);
+ if (!VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
+ if (!(hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF))
+ reg |= G1_REG_DEC_CTRL0_SKIP_MODE;
+ if (hdr->lf_header.level == 0)
+ reg |= G1_REG_DEC_CTRL0_FILTERING_DIS;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Frame dimensions */
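+ /*
+ * PIC_MB_WIDTH/HEIGHT take the low bits of the macroblock counts;
+ * the _EXT fields below carry the remaining high bits (width bits
+ * 9 and up, height bits 8 and up) for very large frames.
+ */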
+ mb_width = VP8_MB_WIDTH(width);
+ mb_height = VP8_MB_HEIGHT(height);
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
+ G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
+ G1_REG_DEC_CTRL1_PIC_MB_H_EXT(mb_height >> 8);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Boolean decoder */
+ reg = G1_REG_DEC_CTRL2_BOOLEAN_RANGE(hdr->coder_state.range)
+ | G1_REG_DEC_CTRL2_BOOLEAN_VALUE(hdr->coder_state.value);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ reg = 0;
+ if (hdr->version != 3)
+ reg |= G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= G1_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+ cfg_ref(ctx, hdr);
+ cfg_buffers(ctx, hdr);
+
+ hantro_finish_run(ctx);
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+}
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index 0c1e3043dc7e..ecd34a7db190 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -84,8 +84,10 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
struct hantro_jpeg_ctx jpeg_ctx;
u32 reg;
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -119,7 +121,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
- /* Kick the watchdog and start encoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ hantro_finish_run(ctx);
+
vepu_write(vpu, reg, H1_REG_ENC_CTRL);
}
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
new file mode 100644
index 000000000000..0d758e0c0f99
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+/* CABAC and POC buffer sizes are in u32 units; the scaling list size is in bytes. */
+#define CABAC_INIT_BUFFER_SIZE (460 * 2)
+#define POC_BUFFER_SIZE 34
+#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
+
+#define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+
+/* Data structure describing auxiliary buffer format. */
+struct hantro_h264_dec_priv_tbl {
+ u32 cabac_table[CABAC_INIT_BUFFER_SIZE];
+ u32 poc[POC_BUFFER_SIZE];
+ u8 scaling_list[SCALING_LIST_SIZE];
+};
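+
+/*
+ * The table is roughly 4.2 KiB: 3680 bytes of CABAC contexts (460 * 2 u32
+ * words), 136 bytes of POCs (34 u32 words) and 480 bytes of scaling lists.
+ */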
+
+/*
+ * Constant CABAC table.
+ * From drivers/media/platform/rk3288-vpu/rk3288_vpu_hw_h264d.c
+ * in https://chromium.googlesource.com/chromiumos/third_party/kernel,
+ * chromeos-3.14 branch.
+ */
+static const u32 h264_cabac_table[] = {
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07330000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x000b0137,
+ 0x0045ef7f, 0xf3660052, 0xf94aeb6b, 0xe57fe17f, 0xe87fee5f, 0xe57feb72,
+ 0xe27fef7b, 0xf473f07a, 0xf573f43f, 0xfe44f154, 0xf368fd46, 0xf85df65a,
+ 0xe27fff4a, 0xfa61f95b, 0xec7ffc38, 0xfb52f94c, 0xea7df95d, 0xf557fd4d,
+ 0xfb47fc3f, 0xfc44f454, 0xf93ef941, 0x083d0538, 0xfe420140, 0x003dfe4e,
+ 0x01320734, 0x0a23002c, 0x0b26012d, 0x002e052c, 0x1f110133, 0x07321c13,
+ 0x10210e3e, 0xf36cf164, 0xf365f35b, 0xf45ef658, 0xf054f656, 0xf953f357,
+ 0xed5e0146, 0x0048fb4a, 0x123bf866, 0xf164005f, 0xfc4b0248, 0xf54bfd47,
+ 0x0f2ef345, 0x003e0041, 0x1525f148, 0x09391036, 0x003e0c48, 0x18000f09,
+ 0x08190d12, 0x0f090d13, 0x0a250c12, 0x061d1421, 0x0f1e042d, 0x013a003e,
+ 0x073d0c26, 0x0b2d0f27, 0x0b2a0d2c, 0x102d0c29, 0x0a311e22, 0x122a0a37,
+ 0x1133112e, 0x00591aed, 0x16ef1aef, 0x1ee71cec, 0x21e925e5, 0x21e928e4,
+ 0x26ef21f5, 0x28f129fa, 0x26012911, 0x1efa1b03, 0x1a1625f0, 0x23fc26f8,
+ 0x26fd2503, 0x26052a00, 0x23102716, 0x0e301b25, 0x153c0c44, 0x0261fd47,
+ 0xfa2afb32, 0xfd36fe3e, 0x003a013f, 0xfe48ff4a, 0xf75bfb43, 0xfb1bfd27,
+ 0xfe2c002e, 0xf040f844, 0xf64efa4d, 0xf656f45c, 0xf137f63c, 0xfa3efc41,
+ 0xf449f84c, 0xf950f758, 0xef6ef561, 0xec54f54f, 0xfa49fc4a, 0xf356f360,
+ 0xf561ed75, 0xf84efb21, 0xfc30fe35, 0xfd3ef347, 0xf64ff456, 0xf35af261,
+ 0x0000fa5d, 0xfa54f84f, 0x0042ff47, 0x003efe3c, 0xfe3bfb4b, 0xfd3efc3a,
+ 0xf742ff4f, 0x00470344, 0x0a2cf93e, 0x0f240e28, 0x101b0c1d, 0x012c1424,
+ 0x1220052a, 0x01300a3e, 0x112e0940, 0xf468f561, 0xf060f958, 0xf855f955,
+ 0xf755f358, 0x0442fd4d, 0xfd4cfa4c, 0x0a3aff4c, 0xff53f963, 0xf25f025f,
+ 0x004cfb4a, 0x0046f54b, 0x01440041, 0xf249033e, 0x043eff44, 0xf34b0b37,
+ 0x05400c46, 0x0f060613, 0x07100c0e, 0x120d0d0b, 0x0d0f0f10, 0x0c170d17,
+ 0x0f140e1a, 0x0e2c1128, 0x112f1811, 0x15151916, 0x1f1b161d, 0x13230e32,
+ 0x0a39073f, 0xfe4dfc52, 0xfd5e0945, 0xf46d24dd, 0x24de20e6, 0x25e22ce0,
+ 0x22ee22f1, 0x28f121f9, 0x23fb2100, 0x2602210d, 0x17230d3a, 0x1dfd1a00,
+ 0x161e1ff9, 0x23f122fd, 0x220324ff, 0x2205200b, 0x2305220c, 0x270b1e1d,
+ 0x221a1d27, 0x13421f15, 0x1f1f1932, 0xef78ec70, 0xee72f555, 0xf15cf259,
+ 0xe647f151, 0xf2500044, 0xf246e838, 0xe944e832, 0xf54a17f3, 0x1af328f1,
+ 0x31f22c03, 0x2d062c22, 0x21361352, 0xfd4bff17, 0x0122012b, 0x0036fe37,
+ 0x003d0140, 0x0044f75c, 0xf26af361, 0xf15af45a, 0xee58f649, 0xf74ff256,
+ 0xf649f646, 0xf645fb42, 0xf740fb3a, 0x023b15f6, 0x18f51cf8, 0x1cff1d03,
+ 0x1d092314, 0x1d240e43, 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968,
+ 0xfa35ff36, 0x07331721, 0x17021500, 0x01090031, 0xdb760539, 0xf34ef541,
+ 0x013e0c31, 0xfc491132, 0x1240092b, 0x1d001a43, 0x105a0968, 0xd27fec68,
+ 0x0143f34e, 0xf541013e, 0xfa56ef5f, 0xfa3d092d, 0xfd45fa51, 0xf5600637,
+ 0x0743fb56, 0x0258003a, 0xfd4cf65e, 0x05360445, 0xfd510058, 0xf943fb4a,
+ 0xfc4afb50, 0xf948013a, 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948,
+ 0x0d29033e, 0x002dfc4e, 0xfd60e57e, 0xe462e765, 0xe943e452, 0xec5ef053,
+ 0xea6eeb5b, 0xee66f35d, 0xe37ff95c, 0xfb59f960, 0xf36cfd2e, 0xff41ff39,
+ 0xf75dfd4a, 0xf75cf857, 0xe97e0536, 0x063c063b, 0x0645ff30, 0x0044fc45,
+ 0xf858fe55, 0xfa4eff4b, 0xf94d0236, 0x0532fd44, 0x0132062a, 0xfc51013f,
+ 0xfc460043, 0x0239fe4c, 0x0b230440, 0x013d0b23, 0x12190c18, 0x0d1d0d24,
+ 0xf65df949, 0xfe490d2e, 0x0931f964, 0x09350235, 0x0535fe3d, 0x00380038,
+ 0xf33ffb3c, 0xff3e0439, 0xfa450439, 0x0e270433, 0x0d440340, 0x013d093f,
+ 0x07321027, 0x052c0434, 0x0b30fb3c, 0xff3b003b, 0x1621052c, 0x0e2bff4e,
+ 0x003c0945, 0x0b1c0228, 0x032c0031, 0x002e022c, 0x0233002f, 0x0427023e,
+ 0x062e0036, 0x0336023a, 0x043f0633, 0x06390735, 0x06340637, 0x0b2d0e24,
+ 0x0835ff52, 0x0737fd4e, 0x0f2e161f, 0xff541907, 0x1ef91c03, 0x1c042000,
+ 0x22ff1e06, 0x1e062009, 0x1f131a1b, 0x1a1e2514, 0x1c221146, 0x0143053b,
+ 0x0943101e, 0x12201223, 0x161d181f, 0x1726122b, 0x14290b3f, 0x093b0940,
+ 0xff5efe59, 0xf76cfa4c, 0xfe2c002d, 0x0034fd40, 0xfe3bfc46, 0xfc4bf852,
+ 0xef66f74d, 0x0318002a, 0x00300037, 0xfa3bf947, 0xf453f557, 0xe277013a,
+ 0xfd1dff24, 0x0126022b, 0xfa37003a, 0x0040fd4a, 0xf65a0046, 0xfc1d051f,
+ 0x072a013b, 0xfe3afd48, 0xfd51f561, 0x003a0805, 0x0a0e0e12, 0x0d1b0228,
+ 0x003afd46, 0xfa4ff855, 0x0000f36a, 0xf06af657, 0xeb72ee6e, 0xf262ea6e,
+ 0xeb6aee67, 0xeb6be96c, 0xe670f660, 0xf45ffb5b, 0xf75dea5e, 0xfb560943,
+ 0xfc50f655, 0xff46073c, 0x093a053d, 0x0c320f32, 0x12311136, 0x0a29072e,
+ 0xff330731, 0x08340929, 0x062f0237, 0x0d290a2c, 0x06320535, 0x0d31043f,
+ 0x0640fe45, 0xfe3b0646, 0x0a2c091f, 0x0c2b0335, 0x0e220a26, 0xfd340d28,
+ 0x1120072c, 0x07260d32, 0x0a391a2b, 0x0e0b0b0e, 0x090b120b, 0x150917fe,
+ 0x20f120f1, 0x22eb27e9, 0x2adf29e1, 0x2ee426f4, 0x151d2de8, 0x35d330e6,
+ 0x41d52bed, 0x27f61e09, 0x121a141b, 0x0039f252, 0xfb4bed61, 0xdd7d1b00,
+ 0x1c001ffc, 0x1b062208, 0x1e0a1816, 0x21131620, 0x1a1f1529, 0x1a2c172f,
+ 0x10410e47, 0x083c063f, 0x11411518, 0x17141a17, 0x1b201c17, 0x1c181728,
+ 0x18201c1d, 0x172a1339, 0x1635163d, 0x0b560c28, 0x0b330e3b, 0xfc4ff947,
+ 0xfb45f746, 0xf842f644, 0xed49f445, 0xf046f143, 0xec3eed46, 0xf042ea41,
+ 0xec3f09fe, 0x1af721f7, 0x27f929fe, 0x2d033109, 0x2d1b243b, 0xfa42f923,
+ 0xf92af82d, 0xfb30f438, 0xfa3cfb3e, 0xf842f84c, 0xfb55fa51, 0xf64df951,
+ 0xef50ee49, 0xfc4af653, 0xf747f743, 0xff3df842, 0xf242003b, 0x023b15f3,
+ 0x21f227f9, 0x2efe3302, 0x3c063d11, 0x37222a3e, 0x14f10236, 0x034a14f1,
+ 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331619, 0x22001000, 0xfe090429,
+ 0xe3760241, 0xfa47f34f, 0x05340932, 0xfd460a36, 0x1a221316, 0x28003902,
+ 0x29241a45, 0xd37ff165, 0xfc4cfa47, 0xf34f0534, 0x0645f35a, 0x0034082b,
+ 0xfe45fb52, 0xf660023b, 0x024bfd57, 0xfd640138, 0xfd4afa55, 0x003bfd51,
+ 0xf956fb5f, 0xff42ff4d, 0x0146fe56, 0xfb48003d, 0x0029003f, 0x003f003f,
+ 0xf7530456, 0x0061f948, 0x0d29033e, 0x0d0f0733, 0x0250d97f, 0xee5bef60,
+ 0xe651dd62, 0xe866e961, 0xe577e863, 0xeb6eee66, 0xdc7f0050, 0xfb59f95e,
+ 0xfc5c0027, 0x0041f154, 0xdd7ffe49, 0xf468f75b, 0xe17f0337, 0x07380737,
+ 0x083dfd35, 0x0044f94a, 0xf758f367, 0xf35bf759, 0xf25cf84c, 0xf457e96e,
+ 0xe869f64e, 0xec70ef63, 0xb27fba7f, 0xce7fd27f, 0xfc42fb4e, 0xfc47f848,
+ 0x023bff37, 0xf946fa4b, 0xf859de77, 0xfd4b2014, 0x1e16d47f, 0x0036fb3d,
+ 0x003aff3c, 0xfd3df843, 0xe754f24a, 0xfb410534, 0x0239003d, 0xf745f546,
+ 0x1237fc47, 0x003a073d, 0x09291219, 0x0920052b, 0x092f002c, 0x0033022e,
+ 0x1326fc42, 0x0f260c2a, 0x09220059, 0x042d0a1c, 0x0a1f21f5, 0x34d5120f,
+ 0x1c0023ea, 0x26e72200, 0x27ee20f4, 0x66a20000, 0x38f121fc, 0x1d0a25fb,
+ 0x33e327f7, 0x34de45c6, 0x43c12cfb, 0x200737e3, 0x20010000, 0x1b2421e7,
+ 0x22e224e4, 0x26e426e5, 0x22ee23f0, 0x22f220f8, 0x25fa2300, 0x1e0a1c12,
+ 0x1a191d29, 0x004b0248, 0x084d0e23, 0x121f1123, 0x151e112d, 0x142a122d,
+ 0x1b1a1036, 0x07421038, 0x0b490a43, 0xf674e970, 0xf147f93d, 0x0035fb42,
+ 0xf54df750, 0xf754f657, 0xde7feb65, 0xfd27fb35, 0xf93df54b, 0xf14def5b,
+ 0xe76be76f, 0xe47af54c, 0xf62cf634, 0xf639f73a, 0xf048f945, 0xfc45fb4a,
+ 0xf7560242, 0xf7220120, 0x0b1f0534, 0xfe37fe43, 0x0049f859, 0x03340704,
+ 0x0a081108, 0x10130325, 0xff3dfb49, 0xff46fc4e, 0x0000eb7e, 0xe97cec6e,
+ 0xe67ee77c, 0xef69e579, 0xe575ef66, 0xe675e574, 0xdf7af65f, 0xf264f85f,
+ 0xef6fe472, 0xfa59fe50, 0xfc52f755, 0xf851ff48, 0x05400143, 0x09380045,
+ 0x01450745, 0xf945fa43, 0xf04dfe40, 0x023dfa43, 0xfd400239, 0xfd41fd42,
+ 0x003e0933, 0xff42fe47, 0xfe4bff46, 0xf7480e3c, 0x1025002f, 0x12230b25,
+ 0x0c290a29, 0x02300c29, 0x0d29003b, 0x03321328, 0x03421232, 0x13fa12fa,
+ 0x0e001af4, 0x1ff021e7, 0x21ea25e4, 0x27e22ae2, 0x2fd62ddc, 0x31de29ef,
+ 0x200945b9, 0x3fc142c0, 0x4db636d9, 0x34dd29f6, 0x240028ff, 0x1e0e1c1a,
+ 0x17250c37, 0x0b4125df, 0x27dc28db, 0x26e22edf, 0x2ae228e8, 0x31e326f4,
+ 0x28f626fd, 0x2efb1f14, 0x1d1e192c, 0x0c300b31, 0x1a2d1616, 0x17161b15,
+ 0x21141a1c, 0x1e181b22, 0x122a1927, 0x12320c46, 0x15360e47, 0x0b531920,
+ 0x15311536, 0xfb55fa51, 0xf64df951, 0xef50ee49, 0xfc4af653, 0xf747f743,
+ 0xff3df842, 0xf242003b, 0x023b11f6, 0x20f32af7, 0x31fb3500, 0x4003440a,
+ 0x421b2f39, 0xfb470018, 0xff24fe2a, 0xfe34f739, 0xfa3ffc41, 0xfc43f952,
+ 0xfd51fd4c, 0xf948fa4e, 0xf448f244, 0xfd46fa4c, 0xfb42fb3e, 0x0039fc3d,
+ 0xf73c0136, 0x023a11f6, 0x20f32af7, 0x31fb3500, 0x4003440a, 0x421b2f39,
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331d10,
+ 0x19000e00, 0xf633fd3e, 0xe5631a10, 0xfc55e866, 0x05390639, 0xef490e39,
+ 0x1428140a, 0x1d003600, 0x252a0c61, 0xe07fea75, 0xfe4afc55, 0xe8660539,
+ 0xfa5df258, 0xfa2c0437, 0xf559f167, 0xeb741339, 0x143a0454, 0x0660013f,
+ 0xfb55f36a, 0x053f064b, 0xfd5aff65, 0x0337fc4f, 0xfe4bf461, 0xf932013c,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x0722f758,
+ 0xec7fdc7f, 0xef5bf25f, 0xe754e756, 0xf459ef5b, 0xe17ff24c, 0xee67f35a,
+ 0xdb7f0b50, 0x054c0254, 0x054efa37, 0x043df253, 0xdb7ffb4f, 0xf568f55b,
+ 0xe27f0041, 0xfe4f0048, 0xfc5cfa38, 0x0344f847, 0xf362fc56, 0xf458fb52,
+ 0xfd48fc43, 0xf848f059, 0xf745ff3b, 0x05420439, 0xfc47fe47, 0x023aff4a,
+ 0xfc2cff45, 0x003ef933, 0xfc2ffa2a, 0xfd29fa35, 0x084cf74e, 0xf5530934,
+ 0x0043fb5a, 0x0143f148, 0xfb4bf850, 0xeb53eb40, 0xf31fe740, 0xe35e094b,
+ 0x113ff84a, 0xfb23fe1b, 0x0d5b0341, 0xf945084d, 0xf642033e, 0xfd44ec51,
+ 0x001e0107, 0xfd17eb4a, 0x1042e97c, 0x11252cee, 0x32deea7f, 0x0427002a,
+ 0x07220b1d, 0x081f0625, 0x072a0328, 0x08210d2b, 0x0d24042f, 0x0337023a,
+ 0x063c082c, 0x0b2c0e2a, 0x07300438, 0x04340d25, 0x0931133a, 0x0a300c2d,
+ 0x00451421, 0x083f23ee, 0x21e71cfd, 0x180a1b00, 0x22f234d4, 0x27e81311,
+ 0x1f19241d, 0x1821220f, 0x1e141649, 0x1422131f, 0x1b2c1310, 0x0f240f24,
+ 0x151c1915, 0x1e141f0c, 0x1b10182a, 0x005d0e38, 0x0f391a26, 0xe87fe873,
+ 0xea52f73e, 0x0035003b, 0xf255f359, 0xf35ef55c, 0xe37feb64, 0xf239f443,
+ 0xf547f64d, 0xeb55f058, 0xe968f162, 0xdb7ff652, 0xf830f83d, 0xf842f946,
+ 0xf24bf64f, 0xf753f45c, 0xee6cfc4f, 0xea45f04b, 0xfe3a013a, 0xf34ef753,
+ 0xfc51f363, 0xf351fa26, 0xf33efa3a, 0xfe3bf049, 0xf64cf356, 0xf753f657,
+ 0x0000ea7f, 0xe77fe778, 0xe57fed72, 0xe975e776, 0xe675e871, 0xe476e178,
+ 0xdb7cf65e, 0xf166f663, 0xf36ace7f, 0xfb5c1139, 0xfb56f35e, 0xf45bfe4d,
+ 0x0047ff49, 0x0440f951, 0x05400f39, 0x01430044, 0xf6430144, 0x004d0240,
+ 0x0044fb4e, 0x0737053b, 0x02410e36, 0x0f2c053c, 0x0246fe4c, 0xee560c46,
+ 0x0540f446, 0x0b370538, 0x00450241, 0xfa4a0536, 0x0736fa4c, 0xf552fe4d,
+ 0xfe4d192a, 0x11f310f7, 0x11f41beb, 0x25e229d8, 0x2ad730d1, 0x27e02ed8,
+ 0x34cd2ed7, 0x34d92bed, 0x200b3dc9, 0x38d23ece, 0x51bd2dec, 0x23fe1c0f,
+ 0x22012701, 0x1e111426, 0x122d0f36, 0x004f24f0, 0x25f225ef, 0x2001220f,
+ 0x1d0f1819, 0x22161f10, 0x23121f1c, 0x2129241c, 0x1b2f153e, 0x121f131a,
+ 0x24181817, 0x1b10181e, 0x1f1d1629, 0x162a103c, 0x0f340e3c, 0x034ef07b,
+ 0x15351638, 0x193d1521, 0x1332113d, 0xfd4ef84a, 0xf748f648, 0xee4bf447,
+ 0xf53ffb46, 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc,
+ 0x21ff2107, 0x1f0c2517, 0x1f261440, 0xf747f925, 0xf82cf531, 0xf638f43b,
+ 0xf83ff743, 0xfa44f64f, 0xfd4ef84a, 0xf748f648, 0xee4bf447, 0xf53ffb46,
+ 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc, 0x21ff2107,
+ 0x1f0c2517, 0x1f261440
+};
+
+/*
+ * NOTE: The scaling lists are in zig-zag order; apply the inverse scanning
+ * process to get the values in matrix order. In addition, the hardware
+ * requires the bytes to be swapped within each group of 4 bytes. Both
+ * arrays below include both transformations.
+ */
+static const u32 zig_zag_4x4[] = {
+ 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
+};
+
+static const u32 zig_zag_8x8[] = {
+ 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
+ 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
+ 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
+ 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
+};
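+
+/*
+ * Equivalently, each table is the inverse zig-zag scan with the resulting
+ * raster index XOR-ed with 3, which performs the byte swap within each
+ * 32-bit word; e.g. the first 4x4 zig-zag coefficient (raster index 0)
+ * ends up at byte 3.
+ */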
+
+static void
+reorder_scaling_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
+ const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
+ const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
+ const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
+ const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ u8 *dst = tbl->scaling_list;
+ const u8 *src;
+ int i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
+ BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
+ BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
+ num_list_4x4 * list_len_4x4 +
+ num_list_8x8 * list_len_8x8);
+
+ src = &scaling->scaling_list_4x4[0][0];
+ for (i = 0; i < num_list_4x4; ++i) {
+ for (j = 0; j < list_len_4x4; ++j)
+ dst[zig_zag_4x4[j]] = src[j];
+ src += list_len_4x4;
+ dst += list_len_4x4;
+ }
+
+ src = &scaling->scaling_list_8x8[0][0];
+ for (i = 0; i < num_list_8x8; ++i) {
+ for (j = 0; j < list_len_8x8; ++j)
+ dst[zig_zag_8x8[j]] = src[j];
+ src += list_len_8x8;
+ dst += list_len_8x8;
+ }
+}
+
+static void prepare_table(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ int i;
+
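+ /*
+ * poc[] layout: two entries (top and bottom field order counts) per
+ * DPB slot for slots 0..15, then the current frame's counts at
+ * indices 32 and 33, which is why POC_BUFFER_SIZE is 16 * 2 + 2 = 34.
+ */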
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; ++i) {
+ tbl->poc[i * 2] = dpb[i].top_field_order_cnt;
+ tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;
+ }
+
+ tbl->poc[32] = dec_param->top_field_order_cnt;
+ tbl->poc[33] = dec_param->bottom_field_order_cnt;
+
+ reorder_scaling_list(ctx);
+}
+
+struct hantro_h264_reflist_builder {
+ const struct v4l2_h264_dpb_entry *dpb;
+ s32 pocs[HANTRO_H264_DPB_SIZE];
+ u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
+ s32 curpoc;
+ u8 num_valid;
+};
+
+static s32 get_poc(enum v4l2_field field, s32 top_field_order_cnt,
+ s32 bottom_field_order_cnt)
+{
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ return top_field_order_cnt;
+ case V4L2_FIELD_BOTTOM:
+ return bottom_field_order_cnt;
+ default:
+ break;
+ }
+
+ return min(top_field_order_cnt, bottom_field_order_cnt);
+}
+
+static void
+init_reflist_builder(struct hantro_ctx *ctx,
+ struct hantro_h264_reflist_builder *b)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_param;
+ struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ unsigned int i;
+
+ dec_param = ctx->h264_dec.ctrls.decode;
+
+ memset(b, 0, sizeof(*b));
+ b->dpb = dpb;
+ b->curpoc = get_poc(buf->field, dec_param->top_field_order_cnt,
+ dec_param->bottom_field_order_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++) {
+ int buf_idx;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ buf_idx = vb2_find_timestamp(cap_q, dpb[i].reference_ts, 0);
+ if (buf_idx < 0)
+ continue;
+
+ buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
+ b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
+ dpb[i].bottom_field_order_cnt);
+ b->unordered_reflist[b->num_valid] = i;
+ b->num_valid++;
+ }
+
+ for (i = b->num_valid; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
+ b->unordered_reflist[i] = i;
+}
+
+static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
+{
+ const struct hantro_h264_reflist_builder *builder = data;
+ const struct v4l2_h264_dpb_entry *a, *b;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+ a = &builder->dpb[idxa];
+ b = &builder->dpb[idxb];
+
+ if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
+ (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
+ /* Short term pics first. */
+ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ return -1;
+ else
+ return 1;
+ }
+
+ /*
+ * Short term pics in descending pic num order, long term ones in
+ * ascending order.
+ */
+ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ return b->frame_num - a->frame_num;
+
+ return a->pic_num - b->pic_num;
+}
+
+static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
+{
+ const struct hantro_h264_reflist_builder *builder = data;
+ const struct v4l2_h264_dpb_entry *a, *b;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+ a = &builder->dpb[idxa];
+ b = &builder->dpb[idxb];
+
+ if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
+ (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
+ /* Short term pics first. */
+ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending pic num order. */
+ if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ return a->pic_num - b->pic_num;
+
+ poca = builder->pocs[idxa];
+ pocb = builder->pocs[idxb];
+
+ /*
+ * Short term pics with POC < cur POC first in POC descending order
+ * followed by short term pics with POC > cur POC in POC ascending
+ * order.
+ */
+ if ((poca < builder->curpoc) != (pocb < builder->curpoc))
+ return POC_CMP(poca, pocb);
+ else if (poca < builder->curpoc)
+ return POC_CMP(pocb, poca);
+
+ return POC_CMP(poca, pocb);
+}
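+
+/*
+ * Example: with curpoc == 8 and short term POCs {2, 4, 6, 10, 12}, the B0
+ * ordering is 6, 4, 2, 10, 12 (descending below curpoc, then ascending
+ * above it), while the B1 ordering built below is 10, 12, 6, 4, 2.
+ */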
+
+static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
+{
+ const struct hantro_h264_reflist_builder *builder = data;
+ const struct v4l2_h264_dpb_entry *a, *b;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = *((u8 *)ptra);
+ idxb = *((u8 *)ptrb);
+ a = &builder->dpb[idxa];
+ b = &builder->dpb[idxb];
+
+ if ((a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) !=
+ (b->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) {
+ /* Short term pics first. */
+ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending pic num order. */
+ if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ return a->pic_num - b->pic_num;
+
+ poca = builder->pocs[idxa];
+ pocb = builder->pocs[idxb];
+
+ /*
+ * Short term pics with POC > cur POC first in POC ascending order
+ * followed by short term pics with POC < cur POC in POC descending
+ * order.
+ */
+ if ((poca < builder->curpoc) != (pocb < builder->curpoc))
+ return POC_CMP(pocb, poca);
+ else if (poca < builder->curpoc)
+ return POC_CMP(pocb, poca);
+
+ return POC_CMP(poca, pocb);
+}
+
+static void
+build_p_ref_list(const struct hantro_h264_reflist_builder *builder,
+ u8 *reflist)
+{
+ memcpy(reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist));
+ sort_r(reflist, builder->num_valid, sizeof(*reflist),
+ p_ref_list_cmp, NULL, builder);
+}
+
+static void
+build_b_ref_lists(const struct hantro_h264_reflist_builder *builder,
+ u8 *b0_reflist, u8 *b1_reflist)
+{
+ memcpy(b0_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist));
+ sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
+ b0_ref_list_cmp, NULL, builder);
+
+ memcpy(b1_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist));
+ sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
+ b1_ref_list_cmp, NULL, builder);
+
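+ /*
+ * Per the H.264 reference list construction rules for B slices, if
+ * list 1 has more than one entry and is identical to list 0, its
+ * first two entries are swapped.
+ */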
+ if (builder->num_valid > 1 &&
+ !memcmp(b1_reflist, b0_reflist, builder->num_valid))
+ swap(b1_reflist[0], b1_reflist[1]);
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->top_field_order_cnt == b->top_field_order_cnt &&
+ a->bottom_field_order_cnt == b->bottom_field_order_cnt;
+}
+
+static void update_dpb(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_param;
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ dec_param = ctx->h264_dec.ctrls.decode;
+
+ /* Disable all entries by default. */
+ for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
+ ctx->h264_dec.dpb[i].flags &= ~V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
+
+ /* Try to match new DPB entries with existing ones by their POCs. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ /*
+ * To cut off some comparisons, iterate only on target DPB
+ * entries which are not used yet.
+ */
+ for_each_clear_bit(j, used, ARRAY_SIZE(ctx->h264_dec.dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ if (cdpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE ||
+ !dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(ctx->h264_dec.dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+ * Both arrays are the same size, so there is no way we can run
+ * out of space in the target array, unless something is buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(ctx->h264_dec.dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(ctx->h264_dec.dpb)))
+ return;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
+
+struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
+{
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ struct vb2_buffer *buf;
+ int buf_idx = -1;
+
+ if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ buf_idx = vb2_find_timestamp(cap_q,
+ dpb[dpb_idx].reference_ts, 0);
+
+ if (buf_idx >= 0) {
+ buf = vb2_get_buffer(cap_q, buf_idx);
+ } else {
+ struct vb2_v4l2_buffer *dst_buf;
+
+ /*
+ * If a DPB entry is unused or invalid, the address of the current
+ * destination buffer is returned.
+ */
+ dst_buf = hantro_get_dst_buf(ctx);
+ buf = &dst_buf->vb2_buf;
+ }
+
+ return buf;
+}
+
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_h264_dec_hw_ctx *h264_ctx = &ctx->h264_dec;
+ struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
+ struct hantro_h264_reflist_builder reflist_builder;
+
+ hantro_prepare_run(ctx);
+
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
+ ctrls->decode =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode))
+ return -EINVAL;
+
+ ctrls->slices =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
+ if (WARN_ON(!ctrls->slices))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ /* Update the DPB with new refs. */
+ update_dpb(ctx);
+
+ /* Prepare data in memory. */
+ prepare_table(ctx);
+
+ /* Build the P/B{0,1} ref lists. */
+ init_reflist_builder(ctx, &reflist_builder);
+ build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
+ build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
+ h264_ctx->reflists.b1);
+ return 0;
+}
+
+void hantro_h264_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+
+ dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
+}
+
+int hantro_h264_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+ struct hantro_h264_dec_priv_tbl *tbl;
+ struct v4l2_pix_format_mplane pix_mp;
+
+ priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
+ GFP_KERNEL);
+ if (!priv->cpu)
+ return -ENOMEM;
+
+ priv->size = sizeof(*tbl);
+ tbl = priv->cpu;
+ memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
+
+ v4l2_fill_pixfmt_mp(&pix_mp, ctx->dst_fmt.pixelformat,
+ ctx->dst_fmt.width, ctx->dst_fmt.height);
+ h264_dec->pic_size = pix_mp.plane_fmt[0].sizeimage;
+
+ return 0;
+}
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 3c361c2e9b88..2fab655bf098 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -11,9 +11,13 @@
#include <linux/interrupt.h>
#include <linux/v4l2-controls.h>
+#include <media/h264-ctrls.h>
#include <media/mpeg2-ctrls.h>
+#include <media/vp8-ctrls.h>
#include <media/videobuf2-core.h>
+#define DEC_8190_ALIGN_MASK 0x07U
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -39,6 +43,54 @@ struct hantro_jpeg_enc_hw_ctx {
struct hantro_aux_buf bounce_buffer;
};
+/* Max. number of entries in the DPB (HW limitation). */
+#define HANTRO_H264_DPB_SIZE 16
+
+/**
+ * struct hantro_h264_dec_ctrls
+ * @decode: Decode params
+ * @scaling: Scaling info
+ * @slices: Slice params
+ * @sps: SPS info
+ * @pps: PPS info
+ */
+struct hantro_h264_dec_ctrls {
+ const struct v4l2_ctrl_h264_decode_params *decode;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling;
+ const struct v4l2_ctrl_h264_slice_params *slices;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+};
+
+/**
+ * struct hantro_h264_dec_reflists
+ * @p: P reflist
+ * @b0: B0 reflist
+ * @b1: B1 reflist
+ */
+struct hantro_h264_dec_reflists {
+ u8 p[HANTRO_H264_DPB_SIZE];
+ u8 b0[HANTRO_H264_DPB_SIZE];
+ u8 b1[HANTRO_H264_DPB_SIZE];
+};
+
+/**
+ * struct hantro_h264_dec_hw_ctx
+ * @priv: Private auxiliary buffer for hardware.
+ * @dpb: DPB
+ * @reflists: P/B0/B1 reflists
+ * @ctrls: V4L2 controls attached to a run
+ * @pic_size: Size in bytes of the decoded picture; needed to
+ * locate the motion vectors appended to it.
+ */
+struct hantro_h264_dec_hw_ctx {
+ struct hantro_aux_buf priv;
+ struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
+ struct hantro_h264_dec_reflists reflists;
+ struct hantro_h264_dec_ctrls ctrls;
+ size_t pic_size;
+};
+
/**
* struct hantro_mpeg2_dec_hw_ctx
* @qtable: Quantization table
@@ -48,6 +100,16 @@ struct hantro_mpeg2_dec_hw_ctx {
};
/**
+ * struct hantro_vp8_dec_hw_ctx
+ * @segment_map: Segment map buffer.
+ * @prob_tbl: Probability table buffer.
+ */
+struct hantro_vp8_dec_hw_ctx {
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf prob_tbl;
+};
+
+/**
* struct hantro_codec_ops - codec mode specific operations
*
* @init: If needed, can be used for initialization.
@@ -82,16 +144,27 @@ extern const struct hantro_variant rk3399_vpu_variant;
extern const struct hantro_variant rk3328_vpu_variant;
extern const struct hantro_variant rk3288_vpu_variant;
+extern const u32 hantro_vp8_dec_mc_filter[8][6];
+
void hantro_watchdog(struct work_struct *work);
void hantro_run(struct hantro_ctx *ctx);
void hantro_irq_done(struct hantro_dev *vpu, unsigned int bytesused,
enum vb2_buffer_state result);
+void hantro_prepare_run(struct hantro_ctx *ctx);
+void hantro_finish_run(struct hantro_ctx *ctx);
void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
+struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
+void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_h264_dec_init(struct hantro_ctx *ctx);
+void hantro_h264_dec_exit(struct hantro_ctx *ctx);
+
void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
@@ -99,4 +172,11 @@ void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
+void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx);
+void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx);
+int hantro_vp8_dec_init(struct hantro_ctx *ctx);
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx);
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr);
+
#endif /* HANTRO_HW_H_ */
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 68f45ee66821..3dae52abb96c 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -239,6 +239,15 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
/* Fill remaining fields */
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
+ /*
+ * The H264 decoder needs extra space on the output buffers
+ * to store motion vectors. This is needed for reference
+ * frames.
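+ * The extra area amounts to 128 bytes per 16x16 macroblock, e.g.
+ * roughly 1 MiB (120 * 68 * 128 bytes) for a 1920x1080 capture buffer.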
+ */
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+ pix_mp->plane_fmt[0].sizeimage +=
+ 128 * DIV_ROUND_UP(pix_mp->width, 16) *
+ DIV_ROUND_UP(pix_mp->height, 16);
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -344,6 +353,8 @@ hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
break;
case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ case V4L2_PIX_FMT_H264_SLICE:
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
break;
default:
diff --git a/drivers/staging/media/hantro/hantro_vp8.c b/drivers/staging/media/hantro/hantro_vp8.c
new file mode 100644
index 000000000000..0e02d147b189
--- /dev/null
+++ b/drivers/staging/media/hantro/hantro_vp8.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+/*
+ * Probability table, packed in the layout expected by the hardware.
+ */
+struct vp8_prob_tbl_packed {
+ u8 prob_mb_skip_false;
+ u8 prob_intra;
+ u8 prob_ref_last;
+ u8 prob_ref_golden;
+ u8 prob_segment[3];
+ u8 padding0;
+
+ u8 prob_luma_16x16_pred_mode[4];
+ u8 prob_chroma_pred_mode[3];
+ u8 padding1;
+
+ /* mv prob */
+ u8 prob_mv_context[2][19];
+ u8 padding2[2];
+
+ /* coeff probs */
+ u8 prob_coeffs[4][8][3][11];
+ u8 padding3[96];
+};
+
+/*
+ * filter taps taken to 7-bit precision,
+ * reference RFC6386#Page-16, filters[8][6]
+ */
+const u32 hantro_vp8_dec_mc_filter[8][6] = {
+ { 0, 0, 128, 0, 0, 0 },
+ { 0, -6, 123, 12, -1, 0 },
+ { 2, -11, 108, 36, -8, 1 },
+ { 0, -9, 93, 50, -6, 0 },
+ { 3, -16, 77, 77, -16, 3 },
+ { 0, -6, 50, 93, -9, 0 },
+ { 1, -8, 36, 108, -11, 2 },
+ { 0, -1, 12, 123, -6, 0 }
+};
+
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_entropy_header *entropy = &hdr->entropy_header;
+ u32 i, j, k;
+ u8 *dst;
+
+ /* first probs */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+
+ dst[0] = hdr->prob_skip_false;
+ dst[1] = hdr->prob_intra;
+ dst[2] = hdr->prob_last;
+ dst[3] = hdr->prob_gf;
+ dst[4] = hdr->segment_header.segment_probs[0];
+ dst[5] = hdr->segment_header.segment_probs[1];
+ dst[6] = hdr->segment_header.segment_probs[2];
+ dst[7] = 0;
+
+ dst += 8;
+ dst[0] = entropy->y_mode_probs[0];
+ dst[1] = entropy->y_mode_probs[1];
+ dst[2] = entropy->y_mode_probs[2];
+ dst[3] = entropy->y_mode_probs[3];
+ dst[4] = entropy->uv_mode_probs[0];
+ dst[5] = entropy->uv_mode_probs[1];
+ dst[6] = entropy->uv_mode_probs[2];
+ dst[7] = 0; /* unused */
+
+ /* mv probs */
+ dst += 8;
+ dst[0] = entropy->mv_probs[0][0]; /* is short */
+ dst[1] = entropy->mv_probs[1][0];
+ dst[2] = entropy->mv_probs[0][1]; /* sign */
+ dst[3] = entropy->mv_probs[1][1];
+ dst[4] = entropy->mv_probs[0][8 + 9];
+ dst[5] = entropy->mv_probs[0][9 + 9];
+ dst[6] = entropy->mv_probs[1][8 + 9];
+ dst[7] = entropy->mv_probs[1][9 + 9];
+ dst += 8;
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 8; j += 4) {
+ dst[0] = entropy->mv_probs[i][j + 9 + 0];
+ dst[1] = entropy->mv_probs[i][j + 9 + 1];
+ dst[2] = entropy->mv_probs[i][j + 9 + 2];
+ dst[3] = entropy->mv_probs[i][j + 9 + 3];
+ dst += 4;
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ dst[0] = entropy->mv_probs[i][0 + 2];
+ dst[1] = entropy->mv_probs[i][1 + 2];
+ dst[2] = entropy->mv_probs[i][2 + 2];
+ dst[3] = entropy->mv_probs[i][3 + 2];
+ dst[4] = entropy->mv_probs[i][4 + 2];
+ dst[5] = entropy->mv_probs[i][5 + 2];
+ dst[6] = entropy->mv_probs[i][6 + 2];
+ dst[7] = 0; /* unused */
+ dst += 8;
+ }
+
+ /* coeff probs (header part) */
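+ /* Coefficients 0-3 of each context, at byte offset 56 (8 * 7). */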
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 7);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][0];
+ dst[1] = entropy->coeff_probs[i][j][k][1];
+ dst[2] = entropy->coeff_probs[i][j][k][2];
+ dst[3] = entropy->coeff_probs[i][j][k][3];
+ dst += 4;
+ }
+ }
+ }
+
+ /* coeff probs (footer part) */
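+ /* Coefficients 4-10 of each context, at byte offset 440 (8 * 55). */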
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 55);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][4];
+ dst[1] = entropy->coeff_probs[i][j][k][5];
+ dst[2] = entropy->coeff_probs[i][j][k][6];
+ dst[3] = entropy->coeff_probs[i][j][k][7];
+ dst[4] = entropy->coeff_probs[i][j][k][8];
+ dst[5] = entropy->coeff_probs[i][j][k][9];
+ dst[6] = entropy->coeff_probs[i][j][k][10];
+ dst[7] = 0; /* unused */
+ dst += 8;
+ }
+ }
+ }
+}
+
+int hantro_vp8_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_aux_buf *aux_buf;
+ unsigned int mb_width, mb_height;
+ size_t segment_map_size;
+ int ret;
+
+ /* segment map table size calculation */
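+ /* The map packs 4 macroblocks per byte and is padded to a 64-byte multiple. */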
+ mb_width = DIV_ROUND_UP(ctx->dst_fmt.width, 16);
+ mb_height = DIV_ROUND_UP(ctx->dst_fmt.height, 16);
+ segment_map_size = round_up(DIV_ROUND_UP(mb_width * mb_height, 4), 64);
+
+ /*
+ * The segment map DMA buffer must be allocated at context init time,
+ * and its contents must start out zeroed.
+ */
+ aux_buf = &ctx->vp8_dec.segment_map;
+ aux_buf->size = segment_map_size;
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu)
+ return -ENOMEM;
+
+ /*
+ * Allocate the probability table buffer. The packed table is
+ * 1208 bytes in total, so a single 4K page is more than enough.
+ */
+ aux_buf = &ctx->vp8_dec.prob_tbl;
+ aux_buf->size = sizeof(struct vp8_prob_tbl_packed);
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu) {
+ ret = -ENOMEM;
+ goto err_free_seg_map;
+ }
+
+ return 0;
+
+err_free_seg_map:
+ dma_free_coherent(vpu->dev, ctx->vp8_dec.segment_map.size,
+ ctx->vp8_dec.segment_map.cpu,
+ ctx->vp8_dec.segment_map.dma);
+
+ return ret;
+}
+
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_vp8_dec_hw_ctx *vp8_dec = &ctx->vp8_dec;
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev, vp8_dec->segment_map.size,
+ vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
+ dma_free_coherent(vpu->dev, vp8_dec->prob_tbl.size,
+ vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
+}
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index bcacc4f51093..6bfcc47d1e58 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -62,6 +62,19 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.codec_mode = HANTRO_MODE_NONE,
},
{
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = H264_MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = H264_MB_DIM,
+ },
+ },
+ {
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
@@ -74,6 +87,19 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.step_height = MPEG2_MB_DIM,
},
},
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = VP8_MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = VP8_MB_DIM,
+ },
+ },
};
static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
@@ -149,12 +175,24 @@ static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
.init = hantro_jpeg_enc_init,
.exit = hantro_jpeg_enc_exit,
},
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = rk3288_vpu_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = rk3288_vpu_dec_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = rk3288_vpu_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
};
/*
@@ -177,7 +215,8 @@ const struct hantro_variant rk3288_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rk3288_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
- .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3288_vpu_codec_ops,
.irqs = rk3288_irqs,
.num_irqs = ARRAY_SIZE(rk3288_irqs),
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
index 5718f8063542..14d14bc6b12b 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -73,6 +73,19 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.step_height = MPEG2_MB_DIM,
},
},
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = 48,
+ .max_width = 3840,
+ .step_width = VP8_MB_DIM,
+ .min_height = 48,
+ .max_height = 2160,
+ .step_height = VP8_MB_DIM,
+ },
+ },
};
static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
@@ -154,6 +167,12 @@ static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = rk3399_vpu_vp8_dec_run,
+ .reset = rk3399_vpu_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
};
/*
@@ -176,7 +195,8 @@ const struct hantro_variant rk3399_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rk3399_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
- .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
.irqs = rk3399_irqs,
.num_irqs = ARRAY_SIZE(rk3399_irqs),
@@ -184,3 +204,20 @@ const struct hantro_variant rk3399_vpu_variant = {
.clk_names = rk3399_clk_names,
.num_clocks = ARRAY_SIZE(rk3399_clk_names)
};
+
+static const struct hantro_irq rk3328_irqs[] = {
+ { "vdpu", rk3399_vdpu_irq },
+};
+
+const struct hantro_variant rk3328_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rk3328_irqs,
+ .num_irqs = ARRAY_SIZE(rk3328_irqs),
+ .init = rk3399_vpu_hw_init,
+ .clk_names = rk3399_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3399_clk_names),
+};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index ae66354d2d93..06162f569b5e 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -113,14 +113,12 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_jpeg_ctx jpeg_ctx;
- struct media_request *src_req;
u32 reg;
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
- src_req = src_buf->vb2_buf.req_obj.req;
- v4l2_ctrl_request_setup(src_req, &ctx->ctrl_handler);
+ hantro_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
@@ -157,9 +155,7 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
- v4l2_ctrl_request_complete(src_req, &ctx->ctrl_handler);
-
/* Kick the watchdog and start encoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ hantro_finish_run(ctx);
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
}
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index 8685bddfbcab..e7ba5c0441cc 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -169,12 +169,10 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
const struct v4l2_mpeg2_picture *picture;
u32 reg;
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
- /* Apply request controls if any */
- v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
- &ctx->ctrl_handler);
+ hantro_prepare_run(ctx);
slice_params = hantro_get_ctrl(ctx,
V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
@@ -254,12 +252,8 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
&dst_buf->vb2_buf,
sequence, picture, slice_params);
- /* Controls no longer in-use, we can complete them */
- v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
- &ctx->ctrl_handler);
-
/* Kick the watchdog and start decoding */
- schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ hantro_finish_run(ctx);
reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
vdpu_write(vpu, reg, VDPU_SWREG(57));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
new file mode 100644
index 000000000000..f17e32620b08
--- /dev/null
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec VP8 decode driver
+ *
+ * Copyright (C) 2014 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include <media/vp8-ctrls.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+#define VDPU_REG_DEC_CTRL0 0x0c8
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) \
+ (0x230 + ((((i) < 5) ? (i) : ((i) + 1)) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT6 0x278
+
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { VDPU_REG_ADDR_STR, 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(0), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(1), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(2), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(3), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(4), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(5), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(6), 0, 0xffffffff },
+};
+
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { VDPU_REG_FILTER_LEVEL, 18, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 12, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 6, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { VDPU_REG_FILTER_MB_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { VDPU_REG_FILTER_REF_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { VDPU_REG_VP8_QUANTER0, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER0, 0, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 0, 0x7ff },
+};
+
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { VDPU_REG_VP8_QUANTER0, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER0, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER2, 27, 0x1f },
+};
+
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { VDPU_REG_VP8_CTRL0, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 20, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 24, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 18, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 12, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 6, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][6] = {
+ {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT1, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 12, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 10, 0x3 },
+ { VDPU_REG_PRED_FLT2, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 8, 0x3},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT4, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 6, 0x3 },
+ { VDPU_REG_PRED_FLT5, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 4, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT6, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 2, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 2, 0x3 },
+ { VDPU_REG_PRED_FLT8, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 0, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT9, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 12, 0x3ff },
+ { 0, 0, 0},
+ },
+};
+
+static const struct hantro_reg vp8_dec_mb_start_bit = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 18,
+ .mask = 0x3f
+};
+
+static const struct hantro_reg vp8_dec_mb_aligned_data_len = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 0,
+ .mask = 0x3fffff
+};
+
+static const struct hantro_reg vp8_dec_num_dct_partitions = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 24,
+ .mask = 0xf
+};
+
+static const struct hantro_reg vp8_dec_stream_len = {
+ .base = VDPU_REG_STREAM_LEN,
+ .shift = 0,
+ .mask = 0xffffff
+};
+
+static const struct hantro_reg vp8_dec_mb_width = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 23,
+ .mask = 0x1ff
+};
+
+static const struct hantro_reg vp8_dec_mb_height = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 11,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_mb_width_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 3,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_mb_height_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 0,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_bool_range = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 0,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_bool_value = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 8,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_filter_disable = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 8,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_skip_mode = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 9,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_start_dec = {
+ .base = VDPU_REG_EN_FLAGS,
+ .shift = 0,
+ .mask = 1
+};
+
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ const struct v4l2_vp8_loopfilter_header *lf = &hdr->lf_header;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
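+ /*
+ * Loop filter level: a single global value when segmentation is
+ * disabled, otherwise per-segment values, given either as deltas
+ * against the global level or as absolute levels.
+ */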
+ if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = VDPU_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= VDPU_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_FILTER_MB_ADJ);
+
+ if (lf->flags & V4L2_VP8_LF_HEADER_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_quantization_header *q = &hdr->quant_header;
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ u32 first_part_offset = VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ /*
+ * Calculate control partition mb data info
+ * @first_part_header_bits: bit offset of the mb data from the
+ * start of the first partition
+ * @mb_offset_bits: bit offset of the mb data from the src_dma
+ * base address
+ * @mb_offset_bytes: byte offset of the mb data from the src_dma
+ * base address
+ * @mb_start_bits: bit offset of the mb data from its 64-bit
+ * aligned base address
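+ *
+ * DEC_8190_ALIGN_MASK is 0x07: the hardware is given a 64-bit aligned
+ * base address, with the remaining bit offset programmed separately.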
+ */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) +
+ src_dma, VDPU_REG_VP8_ADDR_CTRL_PART);
+ hantro_reg_write(vpu, &vp8_dec_mb_start_bit, mb_start_bits);
+ hantro_reg_write(vpu, &vp8_dec_mb_aligned_data_len, mb_size);
+
+ /*
+ * Calculate DCT partition info
+ * @dct_size_part_size: size of the area holding the DCT partition
+ * sizes; every DCT partition except the last stores
+ * its size in 3 bytes
+ * @dct_part_offset: bytes offset of DCT parts from src_dma base addr
+ * @dct_part_total_len: total size of all DCT parts
+ */
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ hantro_reg_write(vpu, &vp8_dec_num_dct_partitions,
+ hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ hantro_reg_write(vpu, &vp8_dec_stream_len, dct_part_total_len);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
+
+/*
+ * prediction filter taps
+ * normal 6-tap filters
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int i, j;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 6; j++) {
+ if (vp8_dec_pred_bc_tap[i][j].base != 0)
+ hantro_reg_write(vpu,
+ &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j]);
+ }
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ struct vb2_queue *cap_q;
+ dma_addr_t ref;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vb2_dst = hantro_get_dst_buf(ctx);
+
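+ /*
+ * If a reference frame cannot be found, fall back to the current
+ * destination buffer so the hardware always sees a valid address.
+ */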
+ ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
+
+ ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ WARN_ON(!ref && hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
+
+ ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ WARN_ON(!ref && hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT)
+ ref |= VDPU_REG_VP8_AREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(3));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame_header *hdr)
+{
+ const struct v4l2_vp8_segment_header *seg = &hdr->segment_header;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ VDPU_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = VDPU_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED) {
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP)
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_SEGMENT_VAL);
+
+ /* set output frame buffer address */
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, dst_dma, VDPU_REG_ADDR_DST);
+}
+
+void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame_header *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER);
+ if (WARN_ON(!hdr))
+ return;
+
+ /* Reset the segment_map buffer on keyframes */
+ if (VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ /*
+ * Extensive testing shows that the hardware does not properly
+ * clear the internal state from a previous decoding run. This
+ * causes corruption in decoded frames for multi-instance use cases.
+ * A soft reset before programming the registers has been found
+ * to resolve those problems.
+ */
+ ctx->codec_ops->reset(ctx);
+
+ reg = VDPU_REG_CONFIG_DEC_TIMEOUT_E
+ | VDPU_REG_CONFIG_DEC_CLK_GATE_E;
+ if (!VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= VDPU_REG_DEC_CTRL0_PIC_INTER_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_EN_FLAGS);
+
+ reg = VDPU_REG_CONFIG_DEC_STRENDIAN_E
+ | VDPU_REG_CONFIG_DEC_INSWAP32_E
+ | VDPU_REG_CONFIG_DEC_STRSWAP32_E
+ | VDPU_REG_CONFIG_DEC_OUTSWAP32_E
+ | VDPU_REG_CONFIG_DEC_IN_ENDIAN
+ | VDPU_REG_CONFIG_DEC_OUT_ENDIAN;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DATA_ENDIAN);
+
+ reg = VDPU_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_AXI_CTRL);
+
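+ /* Decode mode 10 selects VP8 decoding on this core. */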
+ reg = VDPU_REG_DEC_CTRL0_DEC_MODE(10);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DEC_FORMAT);
+
+ if (!(hdr->flags & V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF))
+ hantro_reg_write(vpu, &vp8_dec_skip_mode, 1);
+ if (hdr->lf_header.level == 0)
+ hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
+
+ /* Frame dimensions */
+ mb_width = VP8_MB_WIDTH(width);
+ mb_height = VP8_MB_HEIGHT(height);
+
+ hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
+ hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
+ hantro_reg_write(vpu, &vp8_dec_mb_width_ext, mb_width >> 9);
+ hantro_reg_write(vpu, &vp8_dec_mb_height_ext, mb_height >> 8);
+
+ /* Boolean decoder */
+ hantro_reg_write(vpu, &vp8_dec_bool_range, hdr->coder_state.range);
+ hantro_reg_write(vpu, &vp8_dec_bool_value, hdr->coder_state.value);
+
+ reg = vdpu_read(vpu, VDPU_REG_VP8_DCT_START_BIT);
+ if (hdr->version != 3)
+ reg |= VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= VDPU_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_DCT_START_BIT);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+ cfg_ref(ctx, hdr);
+ cfg_buffers(ctx, hdr);
+
+ hantro_finish_run(ctx);
+
+ hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
+}
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index 4c726345dc25..8f1ae50a4abd 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_IMX_MEDIA
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
+ select V4L2_MEM2MEM_DEV
help
Say yes here to enable support for video4linux media controller
driver for the i.MX5/6 SOC.
@@ -22,11 +23,11 @@ config VIDEO_IMX_CSI
A video4linux camera sensor interface driver for i.MX5/6.
config VIDEO_IMX7_CSI
- tristate "i.MX7 Camera Sensor Interface driver"
+ tristate "i.MX6UL/L / i.MX7 Camera Sensor Interface driver"
depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
default y
help
Enable support for video4linux camera sensor interface driver for
- i.MX7.
+ i.MX6UL/L or i.MX7.
endmenu
endif
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index aa6c4b4ad37e..9bd9e873ba7c 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
- imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o
+ imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o \
+ imx-media-csc-scaler.o
imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
imx-media-of.o imx-media-utils.o
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
new file mode 100644
index 000000000000..2b635ebf62d6
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
+ *
+ * Copyright (C) 2011 Pengutronix, Sascha Hauer
+ * Copyright (C) 2018 Pengutronix, Philipp Zabel
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <video/imx-ipu-v3.h>
+#include <video/imx-ipu-image-convert.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx-media.h"
+
+#define fh_to_ctx(__fh) container_of(__fh, struct ipu_csc_scaler_ctx, fh)
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct ipu_csc_scaler_priv {
+ struct imx_media_video_dev vdev;
+
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+
+ struct imx_media_dev *md;
+
+ struct mutex mutex; /* mem2mem device mutex */
+};
+
+#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)
+
+/* Per-queue, driver-specific private data */
+struct ipu_csc_scaler_q_data {
+ struct v4l2_pix_format cur_fmt;
+ struct v4l2_rect rect;
+};
+
+struct ipu_csc_scaler_ctx {
+ struct ipu_csc_scaler_priv *priv;
+
+ struct v4l2_fh fh;
+ struct ipu_csc_scaler_q_data q_data[2];
+ struct ipu_image_convert_ctx *icc;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotate;
+ bool hflip;
+ bool vflip;
+ enum ipu_rotate_mode rot_mode;
+ unsigned int sequence;
+};
+
+static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[V4L2_M2M_SRC];
+ else
+ return &ctx->q_data[V4L2_M2M_DST];
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+static void job_abort(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+
+ if (ctx->icc)
+ ipu_image_convert_abort(ctx->icc);
+}
+
+static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+
+ src_buf->sequence = ctx->sequence++;
+ dst_buf->sequence = src_buf->sequence;
+
+ v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+ kfree(run);
+}
+
+static void device_run(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct ipu_image_convert_run *run;
+ int ret;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ run = kzalloc(sizeof(*run), GFP_KERNEL);
+ if (!run)
+ goto err;
+
+ run->ctx = ctx->icc;
+ run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ ret = ipu_image_convert_queue(run);
+ if (ret < 0) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
+ "%s: failed to queue: %d\n", __func__, ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * Video ioctls
+ */
+static int ipu_csc_scaler_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "imx-media-csc-scaler", sizeof(cap->driver));
+ strscpy(cap->card, "imx-media-csc-scaler", sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:imx-media-csc-scaler",
+ sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ u32 fourcc;
+ int ret;
+
+ ret = imx_media_enum_format(&fourcc, f->index, CS_SEL_ANY);
+ if (ret)
+ return ret;
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix = q_data->cur_fmt;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
+ struct ipu_image test_in, test_out;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct ipu_csc_scaler_q_data *q_data_in =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ test_out.pix = f->fmt.pix;
+ test_in.pix = q_data_in->cur_fmt;
+ } else {
+ struct ipu_csc_scaler_q_data *q_data_out =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ test_in.pix = f->fmt.pix;
+ test_out.pix = q_data_out->cur_fmt;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ test_out.pix : test_in.pix;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
+ f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
+ f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
+ f->fmt.pix.quantization = q_data->cur_fmt.quantization;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_q_data *q_data;
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: queue busy\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+
+ ret = ipu_csc_scaler_try_fmt(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ q_data->cur_fmt.width = f->fmt.pix.width;
+ q_data->cur_fmt.height = f->fmt.pix.height;
+ q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
+ q_data->cur_fmt.field = f->fmt.pix.field;
+ q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
+ q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
+
+ /* Reset cropping/composing rectangle */
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = q_data->cur_fmt.width;
+ q_data->rect.height = q_data->cur_fmt.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* Set colorimetry on the output queue */
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ /* Propagate colorimetry to the capture queue */
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ }
+
+ /*
+ * TODO: Setting colorimetry on the capture queue is currently not
+ * supported by the V4L2 API
+ */
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->target == V4L2_SEL_TGT_CROP ||
+ s->target == V4L2_SEL_TGT_COMPOSE) {
+ s->r = q_data->rect;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->cur_fmt.width;
+ s->r.height = q_data->cur_fmt.height;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+
+ /*
+ * The input frame width to the IC must be a multiple of 8 pixels.
+ * When resizing, the frame width must be a multiple of the burst
+ * size - 8 or 16 pixels - as defined by the CB#_BURST_16 parameter.
+ */
+ if (s->flags & V4L2_SEL_FLAG_GE)
+ s->r.width = round_up(s->r.width, 8);
+ if (s->flags & V4L2_SEL_FLAG_LE)
+ s->r.width = round_down(s->r.width, 8);
+ s->r.width = clamp_t(unsigned int, s->r.width, 8,
+ round_down(q_data->cur_fmt.width, 8));
+ s->r.height = clamp_t(unsigned int, s->r.height, 1,
+ q_data->cur_fmt.height);
+ s->r.left = clamp_t(unsigned int, s->r.left, 0,
+ q_data->cur_fmt.width - s->r.width);
+ s->r.top = clamp_t(unsigned int, s->r.top, 0,
+ q_data->cur_fmt.height - s->r.height);
+
+ /* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
+ q_data->rect = s->r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
+ .vidioc_querycap = ipu_csc_scaler_querycap,
+
+ .vidioc_enum_fmt_vid_cap = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_cap = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_cap = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_cap = ipu_csc_scaler_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_out = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_out = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_out = ipu_csc_scaler_s_fmt,
+
+ .vidioc_g_selection = ipu_csc_scaler_g_selection,
+ .vidioc_s_selection = ipu_csc_scaler_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ size = q_data->cur_fmt.sizeimage;
+
+ *nbuffers = count;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
+ count, size);
+
+ return 0;
+}
+
+static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned long size;
+
+ dev_dbg(ctx->priv->dev, "type: %d\n", vq->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(ctx->priv->dev, "%s: field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->cur_fmt.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_dbg(ctx->priv->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void ipu_image_from_q_data(struct ipu_image *im,
+ struct ipu_csc_scaler_q_data *q_data)
+{
+ struct v4l2_pix_format *fmt = &q_data->cur_fmt;
+
+ im->pix = *fmt;
+ if (fmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ /* Assumes a non-RGB format; the driver defaults are YUV420. */
+ if (fmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ im->pix.quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt->colorspace,
+ im->pix.ycbcr_enc);
+ im->rect = q_data->rect;
+}
+
+static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct ipu_soc *ipu = priv->md->ipu[0];
+ struct ipu_csc_scaler_q_data *q_data;
+ struct vb2_queue *other_q;
+ struct ipu_image in, out;
+
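+ /*
+ * The image converter context is only prepared once both queues are
+ * streaming; until then the formats may still change, so just return.
+ */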
+ other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!vb2_is_streaming(other_q))
+ return 0;
+
+ if (ctx->icc) {
+ v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
+ ipu_image_convert_unprepare(ctx->icc);
+ }
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ipu_image_from_q_data(&in, q_data);
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ipu_image_from_q_data(&out, q_data);
+
+ ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
+ ctx->rot_mode,
+ ipu_ic_pp_complete, ctx);
+ if (IS_ERR(ctx->icc)) {
+ struct vb2_v4l2_buffer *buf;
+ int ret = PTR_ERR(ctx->icc);
+
+ ctx->icc = NULL;
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
+ __func__, ret);
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *buf;
+
+ if (ctx->icc) {
+ ipu_image_convert_unprepare(ctx->icc);
+ ctx->icc = NULL;
+ }
+
+ ctx->sequence = 0;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops ipu_csc_scaler_qops = {
+ .queue_setup = ipu_csc_scaler_queue_setup,
+ .buf_prepare = ipu_csc_scaler_buf_prepare,
+ .buf_queue = ipu_csc_scaler_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = ipu_csc_scaler_start_streaming,
+ .stop_streaming = ipu_csc_scaler_stop_streaming,
+};
+
+static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct ipu_csc_scaler_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &ipu_csc_scaler_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->priv->mutex;
+ src_vq->dev = ctx->priv->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &ipu_csc_scaler_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->priv->mutex;
+ dst_vq->dev = ctx->priv->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
+ struct ipu_csc_scaler_ctx,
+ ctrl_hdlr);
+ enum ipu_rotate_mode rot_mode;
+ int rotate;
+ bool hflip, vflip;
+ int ret = 0;
+
+ rotate = ctx->rotate;
+ hflip = ctx->hflip;
+ vflip = ctx->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ rotate = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
+ if (ret)
+ return ret;
+
+ if (rot_mode != ctx->rot_mode) {
+ struct v4l2_pix_format *in_fmt, *out_fmt;
+ struct ipu_image test_in, test_out;
+
+ in_fmt = &ctx->q_data[V4L2_M2M_SRC].cur_fmt;
+ out_fmt = &ctx->q_data[V4L2_M2M_DST].cur_fmt;
+
+ test_in.pix = *in_fmt;
+ test_out.pix = *out_fmt;
+
+ if (ipu_rot_mode_is_irt(rot_mode) !=
+ ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* Switch width & height to keep aspect ratio intact */
+ test_out.pix.width = out_fmt->height;
+ test_out.pix.height = out_fmt->width;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ /* Check if output format needs to be changed */
+ if (test_in.pix.width != in_fmt->width ||
+ test_in.pix.height != in_fmt->height ||
+ test_in.pix.bytesperline != in_fmt->bytesperline ||
+ test_in.pix.sizeimage != in_fmt->sizeimage) {
+ struct vb2_queue *out_q;
+
+ out_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_busy(out_q))
+ return -EBUSY;
+ }
+
+ /* Check if capture format needs to be changed */
+ if (test_out.pix.width != out_fmt->width ||
+ test_out.pix.height != out_fmt->height ||
+ test_out.pix.bytesperline != out_fmt->bytesperline ||
+ test_out.pix.sizeimage != out_fmt->sizeimage) {
+ struct vb2_queue *cap_q;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(cap_q))
+ return -EBUSY;
+ }
+
+ *in_fmt = test_in.pix;
+ *out_fmt = test_out.pix;
+
+ ctx->rot_mode = rot_mode;
+ ctx->rotate = rotate;
+ ctx->hflip = hflip;
+ ctx->vflip = vflip;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
+ .s_ctrl = ipu_csc_scaler_s_ctrl,
+};
+
+static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ if (hdlr->error) {
+ v4l2_ctrl_handler_free(hdlr);
+ return hdlr->error;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+}
+
+#define DEFAULT_WIDTH 720
+#define DEFAULT_HEIGHT 576
+static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
+ .cur_fmt = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = DEFAULT_WIDTH,
+ .sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ },
+ .rect = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ },
+};
+
+/*
+ * File operations
+ */
+static int ipu_csc_scaler_open(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->rot_mode = IPU_ROTATE_NONE;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->priv = priv;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
+ &ipu_csc_scaler_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctx;
+ }
+
+ ret = ipu_csc_scaler_init_controls(ctx);
+ if (ret)
+ goto err_ctrls;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;
+
+ ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
+ ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;
+
+ dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+ ctx->fh.m2m_ctx);
+
+ return 0;
+
+err_ctrls:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int ipu_csc_scaler_release(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations ipu_csc_scaler_fops = {
+ .owner = THIS_MODULE,
+ .open = ipu_csc_scaler_open,
+ .release = ipu_csc_scaler_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static void ipu_csc_scaler_video_device_release(struct video_device *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = video_get_drvdata(vdev);
+
+ v4l2_m2m_release(priv->m2m_dev);
+ video_device_release(vdev);
+ kfree(priv);
+}
+
+static const struct video_device ipu_csc_scaler_videodev_template = {
+ .name = "ipu_ic_pp csc/scaler",
+ .fops = &ipu_csc_scaler_fops,
+ .ioctl_ops = &ipu_csc_scaler_ioctl_ops,
+ .minor = -1,
+ .release = ipu_csc_scaler_video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = vdev->vfd;
+ int ret;
+
+ vfd->v4l2_dev = &priv->md->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ return 0;
+}
+
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ mutex_lock(&priv->mutex);
+
+ video_unregister_device(vfd);
+
+ mutex_unlock(&priv->mutex);
+}
+
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *md)
+{
+ struct ipu_csc_scaler_priv *priv;
+ struct video_device *vfd;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->md = md;
+ priv->dev = md->md.dev;
+
+ mutex_init(&priv->mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto err_vfd;
+ }
+
+ *vfd = ipu_csc_scaler_videodev_template;
+ vfd->lock = &priv->mutex;
+ priv->vdev.vfd = vfd;
+
+ INIT_LIST_HEAD(&priv->vdev.list);
+
+ video_set_drvdata(vfd, priv);
+
+ priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(priv->m2m_dev)) {
+ ret = PTR_ERR(priv->m2m_dev);
+ v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
+ ret);
+ goto err_m2m;
+ }
+
+ return &priv->vdev;
+
+err_m2m:
+ video_set_drvdata(vfd, NULL);
+err_vfd:
+ kfree(priv);
+ return ERR_PTR(ret);
+}
+
+MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 6ac371f6e971..2c3c2adca683 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -38,9 +38,34 @@ static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
}
/* async subdev complete notifier */
+static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ /* call the imx5/6/7 common probe completion handler */
+ ret = imx_media_probe_complete(notifier);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ if (IS_ERR(imxmd->m2m_vdev)) {
+ ret = PTR_ERR(imxmd->m2m_vdev);
+ goto unlock;
+ }
+
+ ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+/* async subdev complete notifier */
static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
.bound = imx_media_subdev_bound,
- .complete = imx_media_probe_complete,
+ .complete = imx6_media_probe_complete,
};
static int imx_media_probe(struct platform_device *pdev)
@@ -85,6 +110,7 @@ static int imx_media_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&imxmd->notifier);
imx_media_unregister_ipu_internal_subdevs(imxmd);
v4l2_async_notifier_cleanup(&imxmd->notifier);
+ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
index cb1e4cdd5079..d4237e1a4241 100644
--- a/drivers/staging/media/imx/imx-media-internal-sd.c
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -210,6 +210,10 @@ int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
mutex_lock(&imxmd->mutex);
+ /* record this IPU */
+ if (!imxmd->ipu[ipu_id])
+ imxmd->ipu[ipu_id] = ipu;
+
/* register the synchronous subdevs */
for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
intsd = &int_subdev[i];
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 9088c4b720a3..4cc6a7462ae2 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -841,7 +841,7 @@ find_pipeline_entity(struct media_entity *start, u32 grp_id,
if (sd->grp_id & grp_id)
return &sd->entity;
} else if (buftype && is_media_entity_v4l2_video_device(start)) {
- vfd = media_entity_to_video_device(pad->entity);
+ vfd = media_entity_to_video_device(start);
if (buftype == vfd->queue->type)
return &vfd->entity;
}
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 4d124a86b358..11861191324a 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -136,9 +136,15 @@ struct imx_media_dev {
/* master video device list */
struct list_head vdev_list;
+ /* IPUs this media driver controls, valid after subdevs bound */
+ struct ipu_soc *ipu[2];
+
/* for async subdev registration */
struct v4l2_async_notifier notifier;
+ /* IC scaler/CSC mem2mem video device */
+ struct imx_media_video_dev *m2m_vdev;
+
/* the IPU internal subdevs registered synchronously */
struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
};
@@ -269,6 +275,12 @@ struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
+/* imx-media-csc-scaler.c */
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
+
/* subdev group ids */
#define IMX_MEDIA_GRP_ID_CSI2 BIT(8)
#define IMX_MEDIA_GRP_ID_CSI BIT(9)
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index f29e28df36ed..bfa4b254c4e4 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -243,7 +243,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
}
/* Waits for low-power LP-11 state on data and clock lanes. */
-static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
{
u32 mask, reg;
int ret;
@@ -254,11 +254,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
(reg & mask) == mask, 0, 500000);
if (ret) {
- v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
- return ret;
+ v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
+ v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
}
-
- return 0;
}
/* Wait for active clock on the clock lane. */
@@ -316,9 +314,7 @@ static int csi2_start(struct csi2_dev *csi2)
csi2_enable(csi2, true);
/* Step 5 */
- ret = csi2_dphy_wait_stopstate(csi2);
- if (ret)
- goto err_assert_reset;
+ csi2_dphy_wait_stopstate(csi2);
/* Step 6 */
ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 500b4c08d967..bfd6b5fbf484 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * V4L2 Capture CSI Subdev for Freescale i.MX7 SOC
+ * V4L2 Capture CSI Subdev for Freescale i.MX6UL/L / i.MX7 SOC
*
* Copyright (c) 2019 Linaro Ltd
*
@@ -765,6 +765,7 @@ static int imx7_csi_configure(struct imx7_csi *csi)
struct v4l2_pix_format *out_pix = &vdev->fmt.fmt.pix;
__u32 in_code = csi->format_mbus[IMX7_CSI_PAD_SINK].code;
u32 cr1, cr18;
+ int width = out_pix->width;
if (out_pix->field == V4L2_FIELD_INTERLACED) {
imx7_csi_deinterlace_enable(csi, true);
@@ -774,15 +775,27 @@ static int imx7_csi_configure(struct imx7_csi *csi)
imx7_csi_buf_stride_set(csi, 0);
}
- imx7_csi_set_imagpara(csi, out_pix->width, out_pix->height);
+ cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ if (!csi->is_csi2) {
+ if (out_pix->pixelformat == V4L2_PIX_FMT_UYVY ||
+ out_pix->pixelformat == V4L2_PIX_FMT_YUYV)
+ width *= 2;
+
+ imx7_csi_set_imagpara(csi, width, out_pix->height);
+
+ cr18 |= (BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+ BIT_BASEADDR_CHG_ERR_EN);
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
- if (!csi->is_csi2)
return 0;
+ }
+
+ imx7_csi_set_imagpara(csi, width, out_pix->height);
cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
cr1 &= ~BIT_GCLK_MODE;
- cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
cr18 &= BIT_MIPI_DATA_FORMAT_MASK;
cr18 |= BIT_DATA_FROM_MIPI;
@@ -817,11 +830,9 @@ static void imx7_csi_enable(struct imx7_csi *csi)
{
imx7_csi_sw_reset(csi);
- if (csi->is_csi2) {
- imx7_csi_dmareq_rff_enable(csi);
- imx7_csi_hw_enable_irq(csi);
- imx7_csi_hw_enable(csi);
- }
+ imx7_csi_dmareq_rff_enable(csi);
+ imx7_csi_hw_enable_irq(csi);
+ imx7_csi_hw_enable(csi);
}
static void imx7_csi_disable(struct imx7_csi *csi)
@@ -1194,10 +1205,8 @@ static int imx7_csi_probe(struct platform_device *pdev)
}
csi->irq = platform_get_irq(pdev, 0);
- if (csi->irq < 0) {
- dev_err(dev, "Missing platform resources data\n");
+ if (csi->irq < 0)
return csi->irq;
- }
csi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->regbase))
@@ -1302,6 +1311,7 @@ static int imx7_csi_remove(struct platform_device *pdev)
static const struct of_device_id imx7_csi_of_match[] = {
{ .compatible = "fsl,imx7-csi" },
+ { .compatible = "fsl,imx6ul-csi" },
{ },
};
MODULE_DEVICE_TABLE(of, imx7_csi_of_match);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index d1cdf011c8f1..73d8354e618c 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -975,10 +975,8 @@ static int mipi_csis_probe(struct platform_device *pdev)
return PTR_ERR(state->regs);
state->irq = platform_get_irq(pdev, 0);
- if (state->irq < 0) {
- dev_err(dev, "Failed to get irq\n");
+ if (state->irq < 0)
return state->irq;
- }
ret = mipi_csis_clk_get(state);
if (ret < 0)
diff --git a/drivers/staging/media/ipu3/ipu3-tables.h b/drivers/staging/media/ipu3/ipu3-tables.h
index a1bf3286f380..9f719c48b432 100644
--- a/drivers/staging/media/ipu3/ipu3-tables.h
+++ b/drivers/staging/media/ipu3/ipu3-tables.h
@@ -4,6 +4,8 @@
#ifndef __IPU3_TABLES_H
#define __IPU3_TABLES_H
+#include <linux/bitops.h>
+
#include "ipu3-abi.h"
#define IMGU_BDS_GRANULARITY 32 /* Downscaling granularity */
@@ -12,7 +14,7 @@
#define IMGU_SCALER_DOWNSCALE_4TAPS_LEN 128
#define IMGU_SCALER_DOWNSCALE_2TAPS_LEN 64
-#define IMGU_SCALER_FP ((u32)1 << 31) /* 1.0 in fixed point */
+#define IMGU_SCALER_FP BIT(31) /* 1.0 in fixed point */
#define IMGU_XNR3_VMEM_LUT_LEN 16
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
index a7372395a101..06a61f31ca50 100644
--- a/drivers/staging/media/ipu3/ipu3.c
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -778,8 +778,7 @@ out:
static int __maybe_unused imgu_resume(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+ struct imgu_device *imgu = dev_get_drvdata(dev);
int r = 0;
unsigned int pipe;
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
index 3a21a8cec799..95102a4bdc62 100644
--- a/drivers/staging/media/meson/vdec/esparser.c
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -301,10 +301,8 @@ int esparser_init(struct platform_device *pdev, struct amvdec_core *core)
int irq;
irq = platform_get_irq_byname(pdev, "esparser");
- if (irq < 0) {
- dev_err(dev, "Failed getting ESPARSER IRQ from dtb\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, esparser_isr, IRQF_SHARED,
"esparserirq", core);
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index c8be1db532ab..1a966cb2f3a6 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1276,7 +1276,6 @@ static int iss_probe(struct platform_device *pdev)
/* Interrupt */
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
- dev_err(iss->dev, "No IRQ resource\n");
ret = -ENODEV;
goto error_iss;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c307707480f7..54144dc9f509 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -31,61 +31,61 @@
static struct iss_format_info formats[] = {
{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_GREY, 8, "Greyscale 8 bpp", },
+ V4L2_PIX_FMT_GREY, 8, },
{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_Y10, 10, "Greyscale 10 bpp", },
+ V4L2_PIX_FMT_Y10, 10, },
{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
- V4L2_PIX_FMT_Y12, 12, "Greyscale 12 bpp", },
+ V4L2_PIX_FMT_Y12, 12, },
{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR8, 8, "BGGR Bayer 8 bpp", },
+ V4L2_PIX_FMT_SBGGR8, 8, },
{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG8, 8, "GBRG Bayer 8 bpp", },
+ V4L2_PIX_FMT_SGBRG8, 8, },
{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG8, 8, "GRBG Bayer 8 bpp", },
+ V4L2_PIX_FMT_SGRBG8, 8, },
{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB8, 8, "RGGB Bayer 8 bpp", },
+ V4L2_PIX_FMT_SRGGB8, 8, },
{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
MEDIA_BUS_FMT_SGRBG10_1X10, 0,
- V4L2_PIX_FMT_SGRBG10DPCM8, 8, "GRBG Bayer 10 bpp DPCM8", },
+ V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR10, 10, "BGGR Bayer 10 bpp", },
+ V4L2_PIX_FMT_SBGGR10, 10, },
{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG10, 10, "GBRG Bayer 10 bpp", },
+ V4L2_PIX_FMT_SGBRG10, 10, },
{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG10, 10, "GRBG Bayer 10 bpp", },
+ V4L2_PIX_FMT_SGRBG10, 10, },
{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB10, 10, "RGGB Bayer 10 bpp", },
+ V4L2_PIX_FMT_SRGGB10, 10, },
{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
- V4L2_PIX_FMT_SBGGR12, 12, "BGGR Bayer 12 bpp", },
+ V4L2_PIX_FMT_SBGGR12, 12, },
{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
- V4L2_PIX_FMT_SGBRG12, 12, "GBRG Bayer 12 bpp", },
+ V4L2_PIX_FMT_SGBRG12, 12, },
{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
- V4L2_PIX_FMT_SGRBG12, 12, "GRBG Bayer 12 bpp", },
+ V4L2_PIX_FMT_SGRBG12, 12, },
{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
- V4L2_PIX_FMT_SRGGB12, 12, "RGGB Bayer 12 bpp", },
+ V4L2_PIX_FMT_SRGGB12, 12, },
{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_UYVY8_1X16, 0,
- V4L2_PIX_FMT_UYVY, 16, "YUV 4:2:2 (UYVY)", },
+ V4L2_PIX_FMT_UYVY, 16, },
{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV8_1X16, 0,
- V4L2_PIX_FMT_YUYV, 16, "YUV 4:2:2 (YUYV)", },
+ V4L2_PIX_FMT_YUYV, 16, },
{ MEDIA_BUS_FMT_YUYV8_1_5X8, MEDIA_BUS_FMT_YUYV8_1_5X8,
MEDIA_BUS_FMT_YUYV8_1_5X8, 0,
- V4L2_PIX_FMT_NV12, 8, "YUV 4:2:0 (NV12)", },
+ V4L2_PIX_FMT_NV12, 8, },
};
const struct iss_format_info *
@@ -563,8 +563,6 @@ iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
if (index == 0) {
f->pixelformat = info->pixelformat;
- strscpy(f->description, info->description,
- sizeof(f->description));
return 0;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index f22489edb562..8b3dd92021e1 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -36,7 +36,6 @@ struct v4l2_pix_format;
* shifted to be 8 bits per pixel. =0 if format is not shiftable.
* @pixelformat: V4L2 pixel format FCC identifier
* @bpp: Bits per pixel
- * @description: Human-readable format description
*/
struct iss_format_info {
u32 code;
@@ -45,7 +44,6 @@ struct iss_format_info {
u32 flavor;
u32 pixelformat;
unsigned int bpp;
- const char *description;
};
enum iss_pipeline_stream_state {
diff --git a/drivers/staging/media/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c
index a6232dcd59bc..7b9448e3c9ba 100644
--- a/drivers/staging/media/soc_camera/soc_camera.c
+++ b/drivers/staging/media/soc_camera/soc_camera.c
@@ -869,8 +869,6 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
format = icd->user_formats[f->index].host_fmt;
- if (format->name)
- strscpy(f->description, format->name, sizeof(f->description));
f->pixelformat = format->fourcc;
return 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 370937edfc14..2d3ea8b74dfd 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -29,47 +29,72 @@
static const struct cedrus_control cedrus_controls[] = {
{
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
- .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ },
.codec = CEDRUS_CODEC_MPEG2,
.required = true,
},
{
- .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
- .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ },
.codec = CEDRUS_CODEC_MPEG2,
.required = false,
},
{
- .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
- .elem_size = sizeof(struct v4l2_ctrl_h264_decode_params),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
+ },
.codec = CEDRUS_CODEC_H264,
.required = true,
},
{
- .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
- .elem_size = sizeof(struct v4l2_ctrl_h264_slice_params),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+ },
.codec = CEDRUS_CODEC_H264,
.required = true,
},
{
- .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
- .elem_size = sizeof(struct v4l2_ctrl_h264_sps),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
+ },
.codec = CEDRUS_CODEC_H264,
.required = true,
},
{
- .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
- .elem_size = sizeof(struct v4l2_ctrl_h264_pps),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PPS,
+ },
.codec = CEDRUS_CODEC_H264,
.required = true,
},
{
- .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
- .elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix),
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
+ },
.codec = CEDRUS_CODEC_H264,
.required = true,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
+ .max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H264,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
+ .max = V4L2_MPEG_VIDEO_H264_START_CODE_NONE,
+ .def = V4L2_MPEG_VIDEO_H264_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H264,
+ .required = false,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
@@ -106,12 +131,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
return -ENOMEM;
for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
- struct v4l2_ctrl_config cfg = {};
-
- cfg.elem_size = cedrus_controls[i].elem_size;
- cfg.id = cedrus_controls[i].id;
-
- ctrl = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ ctrl = v4l2_ctrl_new_custom(hdl, &cedrus_controls[i].cfg,
+ NULL);
if (hdl->error) {
v4l2_err(&dev->v4l2_dev,
"Failed to create new custom control\n");
@@ -178,7 +199,7 @@ static int cedrus_request_validate(struct media_request *req)
continue;
ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
- cedrus_controls[i].id);
+ cedrus_controls[i].cfg.id);
if (!ctrl_test) {
v4l2_info(&ctx->dev->v4l2_dev,
"Missing required codec control\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 3f476d0fd981..2f017a651848 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -49,8 +49,7 @@ enum cedrus_h264_pic_type {
};
struct cedrus_control {
- u32 id;
- u32 elem_size;
+ struct v4l2_ctrl_config cfg;
enum cedrus_codec codec;
unsigned char required:1;
};
@@ -100,8 +99,6 @@ struct cedrus_ctx {
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl **ctrls;
- struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME];
-
union {
struct {
void *mv_col_buf;
@@ -187,7 +184,7 @@ static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
if (index < 0)
return 0;
- buf = ctx->dst_bufs[index];
+ buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index bdad87eb9d79..56ca4c9ad01c 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -46,7 +46,7 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
break;
- case V4L2_PIX_FMT_H264_SLICE_RAW:
+ case V4L2_PIX_FMT_H264_SLICE:
run.h264.decode_params = cedrus_find_control_data(ctx,
V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
run.h264.pps = cedrus_find_control_data(ctx,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index a30bb283f69f..d6a782703c9b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -118,7 +118,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
if (buf_idx < 0)
continue;
- cedrus_buf = vb2_to_cedrus_buffer(ctx->dst_bufs[buf_idx]);
+ cedrus_buf = vb2_to_cedrus_buffer(cap_q->bufs[buf_idx]);
position = cedrus_buf->codec.h264.position;
used_dpbs |= BIT(position);
@@ -193,7 +193,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
if (buf_idx < 0)
continue;
- ref_buf = to_vb2_v4l2_buffer(ctx->dst_bufs[buf_idx]);
+ ref_buf = to_vb2_v4l2_buffer(cap_q->bufs[buf_idx]);
cedrus_buf = vb2_v4l2_to_cedrus_buffer(ref_buf);
position = cedrus_buf->codec.h264.position;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index c34aec7c6e40..a942cd9bed57 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -79,9 +79,6 @@ void cedrus_dst_format_set(struct cedrus_dev *dev,
reg = VE_PRIMARY_OUT_FMT_NV12;
cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
- reg = VE_CHROMA_BUF_LEN_SDRT(chroma_size / 2);
- cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
-
reg = chroma_size / 2;
cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
@@ -160,11 +157,8 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
dev->capabilities = variant->capabilities;
irq_dec = platform_get_irq(dev->pdev, 0);
- if (irq_dec <= 0) {
- dev_err(dev->dev, "Failed to get IRQ\n");
-
+ if (irq_dec <= 0)
return irq_dec;
- }
ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
0, dev_name(dev->dev), dev);
if (ret) {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 3e9931416e45..ddd29788d685 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -110,7 +110,7 @@
#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(0, 7))
+#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index e2b530b1a956..eeee3efd247b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -38,7 +38,7 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
- .pixelformat = V4L2_PIX_FMT_H264_SLICE_RAW,
+ .pixelformat = V4L2_PIX_FMT_H264_SLICE,
.directions = CEDRUS_DECODE_SRC,
},
{
@@ -104,7 +104,7 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
- case V4L2_PIX_FMT_H264_SLICE_RAW:
+ case V4L2_PIX_FMT_H264_SLICE:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
@@ -411,26 +411,6 @@ static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
}
}
-static int cedrus_buf_init(struct vb2_buffer *vb)
-{
- struct vb2_queue *vq = vb->vb2_queue;
- struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
-
- if (!V4L2_TYPE_IS_OUTPUT(vq->type))
- ctx->dst_bufs[vb->index] = vb;
-
- return 0;
-}
-
-static void cedrus_buf_cleanup(struct vb2_buffer *vb)
-{
- struct vb2_queue *vq = vb->vb2_queue;
- struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
-
- if (!V4L2_TYPE_IS_OUTPUT(vq->type))
- ctx->dst_bufs[vb->index] = NULL;
-}
-
static int cedrus_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -469,7 +449,7 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_MPEG2;
break;
- case V4L2_PIX_FMT_H264_SLICE_RAW:
+ case V4L2_PIX_FMT_H264_SLICE:
ctx->current_codec = CEDRUS_CODEC_H264;
break;
@@ -517,8 +497,6 @@ static void cedrus_buf_request_complete(struct vb2_buffer *vb)
static struct vb2_ops cedrus_qops = {
.queue_setup = cedrus_queue_setup,
.buf_prepare = cedrus_buf_prepare,
- .buf_init = cedrus_buf_init,
- .buf_cleanup = cedrus_buf_cleanup,
.buf_queue = cedrus_buf_queue,
.buf_out_validate = cedrus_buf_out_validate,
.buf_request_complete = cedrus_buf_request_complete,
diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig
index 2e7f644ae591..ba49ea50b8c0 100644
--- a/drivers/staging/media/tegra-vde/Kconfig
+++ b/drivers/staging/media/tegra-vde/Kconfig
@@ -3,7 +3,7 @@ config TEGRA_VDE
tristate "NVIDIA Tegra Video Decoder Engine driver"
depends on ARCH_TEGRA || COMPILE_TEST
select DMA_SHARED_BUFFER
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA if (IOMMU_SUPPORT || COMPILE_TEST)
select SRAM
help
Say Y here to enable support for the NVIDIA Tegra video decoder
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index d0cc0b746107..724d098aeef0 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -463,10 +463,8 @@ static int comp_probe(struct most_interface *iface, int channel_id,
spin_lock_init(&c->unlink);
INIT_KFIFO(c->fifo);
retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
- if (retval) {
- pr_info("failed to alloc channel kfifo");
+ if (retval)
goto err_del_cdev_and_free_channel;
- }
init_waitqueue_head(&c->wq);
mutex_init(&c->io_mutex);
spin_lock_irqsave(&ch_list_lock, cl_flags);
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index b9841adb7181..8e9a0b67c6ed 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -303,7 +303,8 @@ static ssize_t set_datatype_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
- return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
+ return snprintf(buf, PAGE_SIZE, "%s",
+ ch_data_type[i].name);
}
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
@@ -721,6 +722,7 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
return link_channel_to_component(c, comp, link_name, comp_param);
}
+
/**
* remove_link_store - store function for remove_link attribute
* @drv: device driver
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 31fbc1a75b06..64c979155a49 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -129,25 +129,6 @@ bool dim2_sysfs_get_state_cb(void)
}
/**
- * dimcb_io_read - callback from HAL to read an I/O register
- * @ptr32: register address
- */
-u32 dimcb_io_read(u32 __iomem *ptr32)
-{
- return readl(ptr32);
-}
-
-/**
- * dimcb_io_write - callback from HAL to write value to an I/O register
- * @ptr32: register address
- * @value: value to write
- */
-void dimcb_io_write(u32 __iomem *ptr32, u32 value)
-{
- writel(value, ptr32);
-}
-
-/**
* dimcb_on_error - callback from HAL to report miscommunication between
* HDM and HAL
* @error_id: Error ID
@@ -797,7 +778,6 @@ static int dim2_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, AHB0_INT_IDX);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get ahb0_int irq: %d\n", irq);
ret = irq;
goto err_shutdown_dim;
}
@@ -811,7 +791,6 @@ static int dim2_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, MLB_INT_IDX);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to get mlb_int irq: %d\n", irq);
ret = irq;
goto err_shutdown_dim;
}
diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c
index 699e02f83bd4..39e17a7d2f24 100644
--- a/drivers/staging/most/dim2/hal.c
+++ b/drivers/staging/most/dim2/hal.c
@@ -13,6 +13,7 @@
#include "reg.h"
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/io.h>
/*
* Size factor for isochronous DBR buffer.
@@ -143,13 +144,13 @@ static void free_dbr(int offs, int size)
static void dim2_transfer_madr(u32 val)
{
- dimcb_io_write(&g.dim2->MADR, val);
+ writel(val, &g.dim2->MADR);
/* wait for transfer completion */
- while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
+ while ((readl(&g.dim2->MCTL) & 1) != 1)
continue;
- dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
}
static void dim2_clear_dbr(u16 addr, u16 size)
@@ -159,8 +160,8 @@ static void dim2_clear_dbr(u16 addr, u16 size)
u16 const end_addr = addr + size;
u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
- dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
- dimcb_io_write(&g.dim2->MDAT0, 0);
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
+ writel(0, &g.dim2->MDAT0);
for (; addr < end_addr; addr++)
dim2_transfer_madr(cmd | addr);
@@ -170,28 +171,28 @@ static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
{
dim2_transfer_madr(ctr_addr);
- return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
+ return readl((&g.dim2->MDAT0) + mdat_idx);
}
static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
{
enum { MADR_WNR_BIT = 31 };
- dimcb_io_write(&g.dim2->MCTL, 0); /* clear transfer complete */
+ writel(0, &g.dim2->MCTL); /* clear transfer complete */
if (mask[0] != 0)
- dimcb_io_write(&g.dim2->MDAT0, value[0]);
+ writel(value[0], &g.dim2->MDAT0);
if (mask[1] != 0)
- dimcb_io_write(&g.dim2->MDAT1, value[1]);
+ writel(value[1], &g.dim2->MDAT1);
if (mask[2] != 0)
- dimcb_io_write(&g.dim2->MDAT2, value[2]);
+ writel(value[2], &g.dim2->MDAT2);
if (mask[3] != 0)
- dimcb_io_write(&g.dim2->MDAT3, value[3]);
+ writel(value[3], &g.dim2->MDAT3);
- dimcb_io_write(&g.dim2->MDWE0, mask[0]);
- dimcb_io_write(&g.dim2->MDWE1, mask[1]);
- dimcb_io_write(&g.dim2->MDWE2, mask[2]);
- dimcb_io_write(&g.dim2->MDWE3, mask[3]);
+ writel(mask[0], &g.dim2->MDWE0);
+ writel(mask[1], &g.dim2->MDWE1);
+ writel(mask[2], &g.dim2->MDWE2);
+ writel(mask[3], &g.dim2->MDWE3);
dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
}
@@ -356,15 +357,13 @@ static void dim2_configure_channel(
dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
- dimcb_io_write(&g.dim2->ACMR0,
- dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
+ writel(readl(&g.dim2->ACMR0) | bit_mask(ch_addr), &g.dim2->ACMR0);
}
static void dim2_clear_channel(u8 ch_addr)
{
/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
- dimcb_io_write(&g.dim2->ACMR0,
- dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
+ writel(readl(&g.dim2->ACMR0) & ~bit_mask(ch_addr), &g.dim2->ACMR0);
dim2_clear_cat(AHB_CAT, ch_addr);
dim2_clear_adt(ch_addr);
@@ -373,7 +372,7 @@ static void dim2_clear_channel(u8 ch_addr)
dim2_clear_cdt(ch_addr);
/* clear channel status bit */
- dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+ writel(bit_mask(ch_addr), &g.dim2->ACSR0);
}
/* -------------------------------------------------------------------------- */
@@ -471,7 +470,7 @@ static inline bool check_bytes_per_frame(u32 bytes_per_frame)
return true;
}
-static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
{
u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
@@ -517,20 +516,20 @@ static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
static void dim2_cleanup(void)
{
/* disable MediaLB */
- dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
+ writel(false << MLBC0_MLBEN_BIT, &g.dim2->MLBC0);
dim2_clear_ctram();
/* disable mlb_int interrupt */
- dimcb_io_write(&g.dim2->MIEN, 0);
+ writel(0, &g.dim2->MIEN);
/* clear status for all dma channels */
- dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
- dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);
+ writel(0xFFFFFFFF, &g.dim2->ACSR0);
+ writel(0xFFFFFFFF, &g.dim2->ACSR1);
/* mask interrupts for all channels */
- dimcb_io_write(&g.dim2->ACMR0, 0);
- dimcb_io_write(&g.dim2->ACMR1, 0);
+ writel(0, &g.dim2->ACMR0);
+ writel(0, &g.dim2->ACMR1);
}
static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
@@ -538,23 +537,22 @@ static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
dim2_cleanup();
/* configure and enable MediaLB */
- dimcb_io_write(&g.dim2->MLBC0,
- enable_6pin << MLBC0_MLBPEN_BIT |
- mlb_clock << MLBC0_MLBCLK_SHIFT |
- g.fcnt << MLBC0_FCNT_SHIFT |
- true << MLBC0_MLBEN_BIT);
+ writel(enable_6pin << MLBC0_MLBPEN_BIT |
+ mlb_clock << MLBC0_MLBCLK_SHIFT |
+ g.fcnt << MLBC0_FCNT_SHIFT |
+ true << MLBC0_MLBEN_BIT,
+ &g.dim2->MLBC0);
/* activate all HBI channels */
- dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
- dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);
+ writel(0xFFFFFFFF, &g.dim2->HCMR0);
+ writel(0xFFFFFFFF, &g.dim2->HCMR1);
/* enable HBI */
- dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
+ writel(bit_mask(HCTL_EN_BIT), &g.dim2->HCTL);
/* configure DMA */
- dimcb_io_write(&g.dim2->ACTL,
- ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
- true << ACTL_SCE_BIT);
+ writel(ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+ true << ACTL_SCE_BIT, &g.dim2->ACTL);
}
static bool dim2_is_mlb_locked(void)
@@ -562,12 +560,12 @@ static bool dim2_is_mlb_locked(void)
u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
bit_mask(MLBC1_LOCKERR_BIT);
- u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
+ u32 const c1 = readl(&g.dim2->MLBC1);
u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
- dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
- return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
- (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
+ writel(c1 & nda_mask, &g.dim2->MLBC1);
+ return (readl(&g.dim2->MLBC1) & mask1) == 0 &&
+ (readl(&g.dim2->MLBC0) & mask0) != 0;
}
/* -------------------------------------------------------------------------- */
@@ -590,7 +588,7 @@ static inline bool service_channel(u8 ch_addr, u8 idx)
dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
/* clear channel status bit */
- dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+ writel(bit_mask(ch_addr), &g.dim2->ACSR0);
return true;
}
@@ -652,7 +650,7 @@ static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
- buf_size != norm_ctrl_async_buffer_size(buf_size))
+ buf_size != dim_norm_ctrl_async_buffer_size(buf_size))
return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
"Bad control/async buffer size");
@@ -776,13 +774,8 @@ static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
void dim_service_mlb_int_irq(void)
{
- dimcb_io_write(&g.dim2->MS0, 0);
- dimcb_io_write(&g.dim2->MS1, 0);
-}
-
-u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
-{
- return norm_ctrl_async_buffer_size(buf_size);
+ writel(0, &g.dim2->MS0);
+ writel(0, &g.dim2->MS1);
}
/**
@@ -829,7 +822,7 @@ u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
if (is_tx && !g.atx_dbr.ch_addr) {
g.atx_dbr.ch_addr = ch->addr;
dbrcnt_init(ch->addr, ch->dbr_size);
- dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
+ writel(bit_mask(20), &g.dim2->MIEN);
}
return ret;
@@ -896,7 +889,7 @@ u8 dim_destroy_channel(struct dim_channel *ch)
return DIM_ERR_DRIVER_NOT_INITIALIZED;
if (ch->addr == g.atx_dbr.ch_addr) {
- dimcb_io_write(&g.dim2->MIEN, 0);
+ writel(0, &g.dim2->MIEN);
g.atx_dbr.ch_addr = 0;
}
diff --git a/drivers/staging/most/dim2/hal.h b/drivers/staging/most/dim2/hal.h
index fca6c22de8a6..20531449acab 100644
--- a/drivers/staging/most/dim2/hal.h
+++ b/drivers/staging/most/dim2/hal.h
@@ -97,10 +97,6 @@ bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
-u32 dimcb_io_read(u32 __iomem *ptr32);
-
-void dimcb_io_write(u32 __iomem *ptr32, u32 value);
-
void dimcb_on_error(u8 error_id, const char *error_message);
#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index aababdf2be12..26a31854c636 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -69,7 +69,7 @@ struct net_dev_context {
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
-static struct spinlock list_lock; /* list_head, ch->linked = false, dev_hold */
+static DEFINE_SPINLOCK(list_lock); /* list_head, ch->linked = false, dev_hold */
static struct core_component comp;
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
@@ -509,7 +509,6 @@ static int __init most_net_init(void)
{
int err;
- spin_lock_init(&list_lock);
mutex_init(&probe_disc_mt);
err = most_register_component(&comp);
if (err)
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 342f390d68b3..79817061fcfa 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -802,8 +802,11 @@ static int __init audio_init(void)
if (ret)
pr_err("Failed to register %s\n", comp.name);
ret = most_register_configfs_subsys(&comp);
- if (ret)
+ if (ret) {
pr_err("Failed to register %s configfs subsys\n", comp.name);
+ most_deregister_component(&comp);
+ }
+
return ret;
}
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 6f6e98ab0550..250af9fb704d 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -54,7 +54,7 @@ struct comp_fh {
};
static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
-static struct spinlock list_lock;
+static DEFINE_SPINLOCK(list_lock);
static inline bool data_ready(struct most_video_dev *mdev)
{
@@ -538,7 +538,6 @@ static int __init comp_init(void)
{
int err;
- spin_lock_init(&list_lock);
err = most_register_component(&comp);
if (err)
return err;
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index 60db06768c8a..d964642d95a3 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -675,10 +675,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ if (irq < 0)
return -EINVAL;
- }
ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
0, dev_name(&pdev->dev), hsdma);
if (ret) {
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 89fa813142ab..6b98827da57f 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -400,6 +400,7 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
err = of_pci_get_devfn(child);
if (err < 0) {
+ of_node_put(child);
dev_err(dev, "failed to parse devfn: %d\n", err);
return err;
}
@@ -407,8 +408,10 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
@@ -614,17 +617,12 @@ static int mt7621_pcie_request_resources(struct mt7621_pcie *pcie,
struct list_head *res)
{
struct device *dev = pcie->dev;
- int err;
pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
pci_add_resource(res, &pcie->busn);
- err = devm_request_pci_bus_resources(dev, res);
- if (err < 0)
- return err;
-
- return 0;
+ return devm_request_pci_bus_resources(dev, res);
}
static int mt7621_pcie_register_host(struct pci_host_bridge *host,
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index 9b52d44abef1..d0f06790d38f 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -358,12 +358,15 @@ static int rt2880_pinmux_probe(struct platform_device *pdev)
gpiobase = of_get_property(np, "ralink,gpio-base", NULL);
if (!ngpio || !gpiobase) {
dev_err(&pdev->dev, "failed to load chip info\n");
+ of_node_put(np);
return -EINVAL;
}
range = devm_kzalloc(p->dev, sizeof(*range), GFP_KERNEL);
- if (!range)
+ if (!range) {
+ of_node_put(np);
return -ENOMEM;
+ }
range->name = "pio";
range->npins = __be32_to_cpu(*ngpio);
range->base = __be32_to_cpu(*gpiobase);
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 08027a36e0bc..360ec0407740 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -767,7 +767,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct nvec_chip *nvec;
struct nvec_msg *msg;
- struct resource *res;
void __iomem *base;
char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
@@ -790,16 +789,13 @@ static int tegra_nvec_probe(struct platform_device *pdev)
return -ENODEV;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
nvec->irq = platform_get_irq(pdev, 0);
- if (nvec->irq < 0) {
- dev_err(dev, "no irq resource?\n");
+ if (nvec->irq < 0)
return -ENODEV;
- }
i2c_clk = devm_clk_get(dev, "div-clk");
if (IS_ERR(i2c_clk)) {
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index cd2b777073c4..a5321cc692c5 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -3512,7 +3512,7 @@ static const struct hc_driver octeon_hc_driver = {
.product_desc = "Octeon Host Controller",
.hcd_priv_size = sizeof(struct octeon_hcd),
.irq = octeon_usb_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
.start = octeon_usb_start,
.stop = octeon_usb_stop,
.urb_enqueue = octeon_usb_urb_enqueue,
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8847a11c212f..33762f2e9a44 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -794,7 +794,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
- strcpy(dev->name, "pow%d");
+ strscpy(dev->name, "pow%d", sizeof(dev->name));
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
@@ -866,39 +866,39 @@ static int cvm_oct_probe(struct platform_device *pdev)
case CVMX_HELPER_INTERFACE_MODE_NPI:
dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "npi%d");
+ strscpy(dev->name, "npi%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
- strcpy(dev->name, "xaui%d");
+ strscpy(dev->name, "xaui%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
dev->netdev_ops = &cvm_oct_npi_netdev_ops;
- strcpy(dev->name, "loop%d");
+ strscpy(dev->name, "loop%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
dev->netdev_ops = &cvm_oct_spi_netdev_ops;
- strcpy(dev->name, "spi%d");
+ strscpy(dev->name, "spi%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_GMII:
priv->phy_mode = PHY_INTERFACE_MODE_GMII;
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
- strcpy(dev->name, "eth%d");
+ strscpy(dev->name, "eth%d", sizeof(dev->name));
cvm_set_rgmii_delay(priv, interface,
port_index);
break;
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index fe09efbc7f77..d8296f2ae872 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -8,10 +8,6 @@ TODO:
internals, but isn't properly integrated, is not the correct solution.
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- - convert all uses of the old GPIO API from <linux/gpio.h> to the
- GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
- lines from device tree, ACPI or board files, board files should
- use <linux/gpio/machine.h>
- allow simultaneous XO-1 and XO-1.5 support
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
index 21cffdb86ecf..4a0d34b4ad37 100644
--- a/drivers/staging/pi433/Documentation/pi433.txt
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -14,7 +14,7 @@ until something gets received terminates the read request.
The driver supports on the fly reloading of the hardware fifo of the rf
chip, thus enabling for much longer telegrams than the hardware fifo size.
-Discription of driver operation
+Description of driver operation
===============================
a) transmission
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 5854551d0a52..900424db9b97 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -826,10 +826,8 @@ static int gdma_dma_probe(struct platform_device *pdev)
tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ if (irq < 0)
return -EINVAL;
- }
ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
0, dev_name(&pdev->dev), dma_dev);
if (ret) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index a24b40761af2..815dfee11968 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -1200,7 +1200,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_indicate_connect(padapter);
} else {
- pwlan = _rtw_alloc_network(pmlmepriv);
+ pwlan = rtw_alloc_network(pmlmepriv);
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 51c3dd6d7ffb..02c476f45b33 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -108,7 +108,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* 1. Read the first byte to check if efuse is empty!!! */
/* */
/* */
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
if (rtemp8 != 0xFF) {
efuse_utilized++;
eFuse_Addr++;
@@ -124,10 +124,10 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* Check PG header for section num. */
if ((rtemp8 & 0x1F) == 0x0F) { /* extended header */
u1temp = (rtemp8 & 0xE0) >> 5;
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
if ((rtemp8 & 0x0F) == 0x0F) {
eFuse_Addr++;
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
eFuse_Addr++;
@@ -147,13 +147,13 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (!(wren & 0x01)) {
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] = (rtemp8 & 0xff);
if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
break;
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00);
@@ -165,7 +165,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
}
}
/* Read next PG header */
- rtemp8 = *(phymap+eFuse_Addr);
+ rtemp8 = *(phymap + eFuse_Addr);
if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
efuse_utilized++;
@@ -178,8 +178,8 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* */
for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
- efuseTbl[(i*8)+(j*2)] = (eFuseWord[i][j] & 0xff);
- efuseTbl[(i*8)+((j*2)+1)] = ((eFuseWord[i][j] >> 8) & 0xff);
+ efuseTbl[(i * 8) + (j * 2)] = (eFuseWord[i][j] & 0xff);
+ efuseTbl[(i * 8) + ((j * 2) + 1)] = ((eFuseWord[i][j] >> 8) & 0xff);
}
}
@@ -187,7 +187,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
/* 4. Copy from Efuse map to output pointer memory!!! */
/* */
for (i = 0; i < _size_byte; i++)
- pbuf[i] = efuseTbl[_offset+i];
+ pbuf[i] = efuseTbl[_offset + i];
/* */
/* 5. Calculate Efuse utilization. */
@@ -218,16 +218,16 @@ static void efuse_read_phymap_from_txpktbuf(
u8 *pos = content;
if (bcnhead < 0) /* if not valid */
- bcnhead = usb_read8(adapter, REG_TDECTRL+1);
+ bcnhead = usb_read8(adapter, REG_TDECTRL + 1);
DBG_88E("%s bcnhead:%d\n", __func__, bcnhead);
usb_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
- dbg_addr = bcnhead*128/8; /* 8-bytes addressing */
+ dbg_addr = bcnhead * 128 / 8; /* 8-bytes addressing */
while (1) {
- usb_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr+i);
+ usb_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr + i);
usb_write8(adapter, REG_TXPKTBUF_DBG, 0);
start = jiffies;
@@ -246,34 +246,34 @@ static void efuse_read_phymap_from_txpktbuf(
u16 aaa;
lenc[0] = usb_read8(adapter, REG_PKTBUF_DBG_DATA_L);
- lenc[1] = usb_read8(adapter, REG_PKTBUF_DBG_DATA_L+1);
+ lenc[1] = usb_read8(adapter, REG_PKTBUF_DBG_DATA_L + 1);
aaabak = le16_to_cpup((__le16 *)lenc);
lenbak = le16_to_cpu(*((__le16 *)lenc));
aaa = le16_to_cpup((__le16 *)&lo32);
len = le16_to_cpu(*((__le16 *)&lo32));
- limit = min_t(u16, len-2, limit);
+ limit = min_t(u16, len - 2, limit);
DBG_88E("%s len:%u, lenbak:%u, aaa:%u, aaabak:%u\n", __func__, len, lenbak, aaa, aaabak);
- memcpy(pos, ((u8 *)&lo32)+2, (limit >= count+2) ? 2 : limit-count);
- count += (limit >= count+2) ? 2 : limit-count;
- pos = content+count;
+ memcpy(pos, ((u8 *)&lo32) + 2, (limit >= count + 2) ? 2 : limit - count);
+ count += (limit >= count + 2) ? 2 : limit - count;
+ pos = content + count;
} else {
- memcpy(pos, ((u8 *)&lo32), (limit >= count+4) ? 4 : limit-count);
- count += (limit >= count+4) ? 4 : limit-count;
- pos = content+count;
+ memcpy(pos, ((u8 *)&lo32), (limit >= count + 4) ? 4 : limit - count);
+ count += (limit >= count + 4) ? 4 : limit - count;
+ pos = content + count;
}
- if (limit > count && len-2 > count) {
- memcpy(pos, (u8 *)&hi32, (limit >= count+4) ? 4 : limit-count);
- count += (limit >= count+4) ? 4 : limit-count;
- pos = content+count;
+ if (limit > count && len - 2 > count) {
+ memcpy(pos, (u8 *)&hi32, (limit >= count + 4) ? 4 : limit - count);
+ count += (limit >= count + 4) ? 4 : limit - count;
+ pos = content + count;
}
- if (limit <= count || len-2 <= count)
+ if (limit <= count || len - 2 <= count)
break;
i++;
}
@@ -288,7 +288,7 @@ static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset
u8 physical_map[512];
u16 size = 512;
- usb_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ usb_write8(padapter, REG_TDECTRL + 1, txpktbuf_bndy);
memset(physical_map, 0xFF, 512);
usb_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
status = iol_execute(padapter, CMD_READ_EFUSE_MAP);
@@ -323,7 +323,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e
efuse_OneByteWrite(pAdapter, start_addr++, data[1]);
efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[0]);
- efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[1]);
+ efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[1]);
if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
badworden &= (~BIT(0));
}
@@ -333,7 +333,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e
efuse_OneByteWrite(pAdapter, start_addr++, data[3]);
efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[2]);
- efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[3]);
+ efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[3]);
if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
badworden &= (~BIT(1));
}
@@ -343,7 +343,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e
efuse_OneByteWrite(pAdapter, start_addr++, data[5]);
efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[4]);
- efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[5]);
+ efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[5]);
if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
badworden &= (~BIT(2));
}
@@ -353,7 +353,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e
efuse_OneByteWrite(pAdapter, start_addr++, data[7]);
efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[6]);
- efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[7]);
+ efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[7]);
if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
badworden &= (~BIT(3));
}
@@ -371,7 +371,7 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
while (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) &&
AVAILABLE_EFUSE_ADDR(efuse_addr)) {
if (efuse_data != 0xFF) {
- if ((efuse_data&0x1F) == 0x0F) { /* extended header */
+ if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
hoffset = efuse_data;
efuse_addr++;
efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data);
@@ -383,12 +383,12 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
hworden = efuse_data & 0x0F;
}
} else {
- hoffset = (efuse_data>>4) & 0x0F;
+ hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
}
word_cnts = Efuse_CalculateWordCnts(hworden);
/* read next header */
- efuse_addr = efuse_addr + (word_cnts*2)+1;
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
} else {
break;
}
@@ -439,15 +439,15 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
continue;
}
} else {
- hoffset = (efuse_data>>4) & 0x0F;
+ hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
}
word_cnts = Efuse_CalculateWordCnts(hworden);
bDataEmpty = true;
if (hoffset == offset) {
- for (tmpidx = 0; tmpidx < word_cnts*2; tmpidx++) {
- if (efuse_OneByteRead(pAdapter, efuse_addr+1+tmpidx, &efuse_data)) {
+ for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
+ if (efuse_OneByteRead(pAdapter, efuse_addr + 1 + tmpidx, &efuse_data)) {
tmpdata[tmpidx] = efuse_data;
if (efuse_data != 0xff)
bDataEmpty = false;
@@ -456,11 +456,11 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
if (!bDataEmpty) {
ReadState = PG_STATE_DATA;
} else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts*2)+1;
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
ReadState = PG_STATE_HEADER;
}
} else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts*2)+1;
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
ReadState = PG_STATE_HEADER;
}
} else {
@@ -469,7 +469,7 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
} else if (ReadState & PG_STATE_DATA) {
/* Data section Read ------------- */
efuse_WordEnableDataRead(hworden, tmpdata, data);
- efuse_addr = efuse_addr + (word_cnts*2)+1;
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
ReadState = PG_STATE_HEADER;
}
}
@@ -491,7 +491,7 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata)) {
/* check if data exist */
- badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pFixPkt->word_en, originaldata);
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pFixPkt->word_en, originaldata);
if (badworden != 0xf) { /* write fail */
PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata);
@@ -501,10 +501,10 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
else
efuse_addr = Efuse_GetCurrentSize(pAdapter);
} else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
}
} else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
}
*pAddr = efuse_addr;
return true;
@@ -601,7 +601,7 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
} else {
struct pgpkt fixPkt;
- fixPkt.offset = (tmp_header>>4) & 0x0F;
+ fixPkt.offset = (tmp_header >> 4) & 0x0F;
fixPkt.word_en = tmp_header & 0x0F;
fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr))
@@ -619,7 +619,7 @@ static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u
u32 PgWriteSuccess = 0;
badworden = 0x0f;
- badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pTargetPkt->word_en, pTargetPkt->data);
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data);
if (badworden == 0x0F) {
/* write ok */
return true;
@@ -681,8 +681,8 @@ static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts,
bool ret = false;
u8 i, efuse_data;
- for (i = 0; i < (word_cnts*2); i++) {
- if (efuse_OneByteRead(pAdapter, (startAddr+i), &efuse_data) && (efuse_data != 0xFF))
+ for (i = 0; i < (word_cnts * 2); i++) {
+ if (efuse_OneByteRead(pAdapter, (startAddr + i), &efuse_data) && (efuse_data != 0xFF))
ret = true;
}
return ret;
@@ -721,7 +721,7 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
}
} else {
cur_header = efuse_data;
- curPkt.offset = (cur_header>>4) & 0x0F;
+ curPkt.offset = (cur_header >> 4) & 0x0F;
curPkt.word_en = cur_header & 0x0F;
}
@@ -729,10 +729,10 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
/* if same header is found but no data followed */
/* write some part of data followed by the header. */
if ((curPkt.offset == pTargetPkt->offset) &&
- (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr+1)) &&
+ (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr + 1)) &&
wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
/* Here to write partial data */
- badworden = Efuse_WordEnableDataWrite(pAdapter, startAddr+1, matched_wden, pTargetPkt->data);
+ badworden = Efuse_WordEnableDataWrite(pAdapter, startAddr + 1, matched_wden, pTargetPkt->data);
if (badworden != 0x0F) {
u32 PgWriteSuccess = 0;
/* if write fail on some words, write these bad words again */
@@ -746,13 +746,13 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
}
/* partial write ok, update the target packet for later use */
for (i = 0; i < 4; i++) {
- if ((matched_wden & (0x1<<i)) == 0) /* this word has been written */
- pTargetPkt->word_en |= (0x1<<i); /* disable the word */
+ if ((matched_wden & (0x1 << i)) == 0) /* this word has been written */
+ pTargetPkt->word_en |= (0x1 << i); /* disable the word */
}
pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
}
/* read from next header */
- startAddr = startAddr + (curPkt.word_cnts*2) + 1;
+ startAddr = startAddr + (curPkt.word_cnts * 2) + 1;
} else {
/* not used header, 0xff */
*pAddr = startAddr;
@@ -763,20 +763,9 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
return ret;
}
-static bool
-hal_EfusePgCheckAvailableAddr(
- struct adapter *pAdapter,
- u8 efuseType
- )
-{
- if (Efuse_GetCurrentSize(pAdapter) >= EFUSE_MAP_LEN_88E)
- return false;
- return true;
-}
-
static void hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData, struct pgpkt *pTargetPkt)
{
- memset((void *)pTargetPkt->data, 0xFF, sizeof(u8)*8);
+ memset((void *)pTargetPkt->data, 0xFF, sizeof(u8) * 8);
pTargetPkt->offset = offset;
pTargetPkt->word_en = word_en;
efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
@@ -789,7 +778,7 @@ bool Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pD
u16 startAddr = 0;
u8 efuseType = EFUSE_WIFI;
- if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType))
+ if (Efuse_GetCurrentSize(pAdapter) >= EFUSE_MAP_LEN_88E)
return false;
hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
@@ -826,13 +815,13 @@ u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data)
u8 tmpidx = 0;
u8 result;
- usb_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr & 0xff));
- usb_write8(pAdapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
- (usb_read8(pAdapter, EFUSE_CTRL+2) & 0xFC));
+ usb_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
+ usb_write8(pAdapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) |
+ (usb_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC));
- usb_write8(pAdapter, EFUSE_CTRL+3, 0x72);/* read cmd */
+ usb_write8(pAdapter, EFUSE_CTRL + 3, 0x72);/* read cmd */
- while (!(0x80 & usb_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ while (!(0x80 & usb_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
tmpidx++;
if (tmpidx < 100) {
*data = usb_read8(pAdapter, EFUSE_CTRL);
@@ -849,15 +838,15 @@ u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data)
u8 tmpidx = 0;
u8 result;
- usb_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr&0xff));
- usb_write8(pAdapter, EFUSE_CTRL+2,
- (usb_read8(pAdapter, EFUSE_CTRL+2) & 0xFC) |
- (u8)((addr>>8) & 0x03));
+ usb_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
+ usb_write8(pAdapter, EFUSE_CTRL + 2,
+ (usb_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC) |
+ (u8)((addr >> 8) & 0x03));
usb_write8(pAdapter, EFUSE_CTRL, data);/* data */
- usb_write8(pAdapter, EFUSE_CTRL+3, 0xF2);/* write cmd */
+ usb_write8(pAdapter, EFUSE_CTRL + 3, 0xF2);/* write cmd */
- while ((0x80 & usb_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ while ((0x80 & usb_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
tmpidx++;
if (tmpidx < 100)
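
Note: the rtw_efuse.c hunks above are operator-spacing cleanup; the underlying access pattern is unchanged. The 10-bit efuse address is split across EFUSE_CTRL+1 and the low two bits of EFUSE_CTRL+2, a command byte (0x72 for read, 0xF2 for write) is written to EFUSE_CTRL+3, and bit 7 of that register is polled with a bounded retry count. A minimal standalone sketch of the read sequence follows, with reg_read8()/reg_write8() as hypothetical stand-ins for the driver's usb_read8()/usb_write8() and an illustrative EFUSE_CTRL offset:

	#include <stdbool.h>
	#include <stdint.h>

	#define EFUSE_CTRL 0x30			/* illustrative offset only */

	static uint8_t fake_regs[0x100];	/* stand-in register file */

	static uint8_t reg_read8(uint16_t addr)
	{
		return fake_regs[addr & 0xff];
	}

	static void reg_write8(uint16_t addr, uint8_t val)
	{
		fake_regs[addr & 0xff] = val;
	}

	/* One-byte efuse read: program the address, issue the read command,
	 * then poll the ready bit (bit 7 of EFUSE_CTRL + 3) at most 100 times. */
	static bool efuse_read_byte(uint16_t addr, uint8_t *data)
	{
		unsigned int tries = 0;

		reg_write8(EFUSE_CTRL + 1, (uint8_t)(addr & 0xff));
		reg_write8(EFUSE_CTRL + 2, ((uint8_t)((addr >> 8) & 0x03)) |
			   (reg_read8(EFUSE_CTRL + 2) & 0xFC));
		reg_write8(EFUSE_CTRL + 3, 0x72);	/* read command */

		while (!(reg_read8(EFUSE_CTRL + 3) & 0x80) && tries < 100)
			tries++;

		if (tries >= 100)
			return false;

		*data = reg_read8(EFUSE_CTRL);
		return true;
	}
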
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 28b3cdd10397..cc1b5438c04c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -59,7 +59,7 @@ static u8 WIFI_OFDMRATES[] = {
int rtw_get_bit_value_from_ieee_value(u8 val)
{
- unsigned char dot11_rate_table[] = {
+ static const unsigned char dot11_rate_table[] = {
2, 4, 11, 22, 12, 18, 24, 36, 48,
72, 96, 108, 0}; /* last element must be zero!! */
int i = 0;
@@ -275,7 +275,7 @@ unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit)
uint len;
u16 val16;
__le16 le_tmp;
- unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
+ static const unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
u8 *pbuf = pie;
int limit_new = limit;
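
Note: the rtw_ieee80211.c hunks above only change storage class. Making the small lookup tables static const keeps a single read-only copy in .rodata instead of re-initialising an automatic array on every call. A self-contained sketch of the same idea follows; the function name and the (1 << index) return convention are illustrative assumptions, not the driver's exact code:

	/* Map an IEEE rate value to a bit position via a read-only table.
	 * Table contents are taken from the hunk above; returning (1 << i)
	 * for the matching index is an assumption made for illustration. */
	static int ieee_rate_to_bit(unsigned char val)
	{
		static const unsigned char dot11_rate_table[] = {
			2, 4, 11, 22, 12, 18, 24, 36, 48,
			72, 96, 108, 0	/* last element must be zero */
		};
		int i;

		for (i = 0; dot11_rate_table[i] != 0; i++) {
			if (dot11_rate_table[i] == val)
				return 1 << i;
		}
		return 0;
	}
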
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index d2f7a88e992e..1ec3b237212e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -104,7 +104,7 @@ void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
}
}
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
/* _queue *free_queue) */
{
struct wlan_network *pnetwork;
@@ -119,7 +119,7 @@ struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
list_del_init(&pnetwork->list);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
- ("_rtw_alloc_network: ptr=%p\n", &pnetwork->list));
+ ("rtw_alloc_network: ptr=%p\n", &pnetwork->list));
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
@@ -272,11 +272,6 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
return ie + 8;
}
-static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
-{
- return _rtw_alloc_network(pmlmepriv);
-}
-
int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
{
int ret = true;
@@ -827,7 +822,7 @@ void rtw_indicate_disconnect(struct adapter *padapter)
inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
{
- rtw_os_indicate_scan_done(padapter, aborted);
+ indicate_wx_scan_complete_event(padapter);
}
static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 6f3c03201f64..18dc9fc1c04a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -4854,7 +4854,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
}
rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
- /* Set_NETYPE0_MSR(padapter, type); */
+ /* Set_MSR(padapter, type); */
return H2C_SUCCESS;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 9caf7041ad60..d4278361e002 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -145,8 +145,8 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
{
- list_del_init(&(precvframe->list));
- list_add_tail(&(precvframe->list), get_list_head(queue));
+ list_del_init(&precvframe->list);
+ list_add_tail(&precvframe->list, get_list_head(queue));
return _SUCCESS;
}
@@ -219,7 +219,7 @@ static int recvframe_chkmic(struct adapter *adapter,
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
@@ -1373,11 +1373,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
/* append to first fragment frame's tail (if privacy frame, pull the ICV) */
skb_trim(prframe->pkt, prframe->pkt->len - prframe->attrib.icv_len);
- /* memcpy */
- memcpy(skb_tail_pointer(prframe->pkt), pnfhdr->pkt->data,
- pnfhdr->pkt->len);
-
- skb_put(prframe->pkt, pnfhdr->pkt->len);
+ skb_put_data(prframe->pkt, pnfhdr->pkt->data, pnfhdr->pkt->len);
prframe->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = plist->next;
@@ -1500,7 +1496,7 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
struct rx_pkt_attrib *pattrib;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
- struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
nr_subframes = 0;
pattrib = &prframe->attrib;
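
Note: in the rtw_recv.c defrag hunk above, the open-coded memcpy() into the skb tail followed by skb_put() is replaced by skb_put_data(), which extends the skb and copies the payload in one call with a single length argument. A minimal kernel-style illustration (the helper name is ours, not the driver's):

	#include <linux/skbuff.h>

	/* Append a fragment's payload to the tail of the reassembly skb.
	 * The commented-out lines show the older open-coded equivalent. */
	static void append_frag(struct sk_buff *head, const struct sk_buff *frag)
	{
		/* memcpy(skb_tail_pointer(head), frag->data, frag->len);
		 * skb_put(head, frag->len);
		 */
		skb_put_data(head, frag->data, frag->len);
	}
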
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 2f90f60f1681..435c0fbec54a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -87,29 +87,28 @@ static u8 crc32_reverseBit(u8 data)
static void crc32_init(void)
{
- if (bcrc32initialized == 1) {
+ int i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
+ if (bcrc32initialized == 1)
return;
- } else {
- int i, j;
- u32 c;
- u8 *p = (u8 *)&c, *p1;
- u8 k;
-
- c = 0x12340000;
-
- for (i = 0; i < 256; ++i) {
- k = crc32_reverseBit((u8)i);
- for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
- p1 = (u8 *)&crc32_table[i];
-
- p1[0] = crc32_reverseBit(p[3]);
- p1[1] = crc32_reverseBit(p[2]);
- p1[2] = crc32_reverseBit(p[1]);
- p1[3] = crc32_reverseBit(p[0]);
- }
- bcrc32initialized = 1;
+
+ c = 0x12340000;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ p1 = (u8 *)&crc32_table[i];
+
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
}
+ bcrc32initialized = 1;
}
static __le32 getcrc32(u8 *buf, int len)
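
Note: the crc32_init() change above is pure de-indentation (early return instead of a large else branch); the table construction is untouched: it bit-reverses the index, clocks the normal MSB-first polynomial eight times, then bit-reverses the output bytes. For comparison, the usual way to build a reflected CRC-32 table is to iterate LSB-first with the reversed polynomial; assuming the driver's CRC32_POLY is the standard 0x04C11DB7, the sketch below should yield the same table:

	#include <stdint.h>

	/* Reflected CRC-32 table built directly with the reversed polynomial
	 * 0xEDB88320 (the bit-reversal of 0x04C11DB7). */
	static uint32_t crc32_table[256];

	static void crc32_table_init(void)
	{
		uint32_t c;
		int i, j;

		for (i = 0; i < 256; i++) {
			c = (uint32_t)i;
			for (j = 0; j < 8; j++)
				c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : (c >> 1);
			crc32_table[i] = c;
		}
	}
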
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 7bfc5b7c2757..c985b1468d41 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -270,14 +270,9 @@ void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
}
-static void Set_NETYPE0_MSR(struct adapter *padapter, u8 type)
-{
- rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
-}
-
void Set_MSR(struct adapter *padapter, u8 type)
{
- Set_NETYPE0_MSR(padapter, type);
+ rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
}
inline u8 rtw_get_oper_ch(struct adapter *adapter)
@@ -1179,15 +1174,10 @@ void Update_RA_Entry(struct adapter *padapter, u32 mac_id)
rtw_hal_update_ra_mask(padapter, mac_id, 0);
}
-static void enable_rate_adaptive(struct adapter *padapter, u32 mac_id)
-{
- Update_RA_Entry(padapter, mac_id);
-}
-
void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
{
/* rate adaptive */
- enable_rate_adaptive(padapter, psta->mac_id);
+ Update_RA_Entry(padapter, psta->mac_id);
}
/* Update RRSR and Rate for USERATE */
@@ -1476,8 +1466,3 @@ void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
{
rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
}
-
-void beacon_timing_control(struct adapter *padapter)
-{
- rtw_hal_bcn_related_reg_setting(padapter);
-}
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 11e0bb9c67d7..51882858fcf0 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -653,7 +653,7 @@ static bool config_parafile(struct adapter *adapt)
bool rtl88eu_phy_bb_config(struct adapter *adapt)
{
- int rtstatus = true;
+ bool rtstatus;
u32 regval;
u8 crystal_cap;
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 02aeb12c9870..47b1bf5a6143 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -218,11 +218,11 @@ static bool rtl88e_phy_config_rf_with_headerfile(struct adapter *adapt)
return true;
}
-static bool rf6052_conf_para(struct adapter *adapt)
+bool rtl88eu_phy_rf_config(struct adapter *adapt)
{
struct hal_data_8188e *hal_data = adapt->HalData;
u32 u4val = 0;
- bool rtstatus = true;
+ bool rtstatus;
struct bb_reg_def *pphyreg;
pphyreg = &hal_data->PHYRegDef[RF90_PATH_A];
@@ -246,13 +246,3 @@ static bool rf6052_conf_para(struct adapter *adapt)
return rtstatus;
}
-
-static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
-{
- return rf6052_conf_para(adapt);
-}
-
-bool rtl88eu_phy_rf_config(struct adapter *adapt)
-{
- return rtl88e_phy_rf6052_config(adapt);
-}
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index ac5552050752..16a57b31a439 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -183,14 +183,14 @@ static void _InitTxBufferBoundary(struct adapter *Adapter, u8 txpktbuf_bndy)
usb_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
usb_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
usb_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
- usb_write8(Adapter, REG_TDECTRL+1, txpktbuf_bndy);
+ usb_write8(Adapter, REG_TDECTRL + 1, txpktbuf_bndy);
}
static void _InitPageBoundary(struct adapter *Adapter)
{
/* RX Page Boundary */
/* */
- u16 rxff_bndy = MAX_RX_DMA_BUFFER_SIZE_88E-1;
+ u16 rxff_bndy = MAX_RX_DMA_BUFFER_SIZE_88E - 1;
usb_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
}
@@ -504,7 +504,7 @@ static void usb_AggSettingRxUpdate(struct adapter *Adapter)
switch (haldata->UsbRxAggMode) {
case USB_RX_AGG_DMA:
usb_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
- usb_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, haldata->UsbRxAggPageTimeout);
+ usb_write8(Adapter, REG_RXDMA_AGG_PG_TH + 1, haldata->UsbRxAggPageTimeout);
break;
case USB_RX_AGG_USB:
usb_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
@@ -512,7 +512,7 @@ static void usb_AggSettingRxUpdate(struct adapter *Adapter)
break;
case USB_RX_AGG_MIX:
usb_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
- usb_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, (haldata->UsbRxAggPageTimeout & 0x1F));/* 0x280[12:8] */
+ usb_write8(Adapter, REG_RXDMA_AGG_PG_TH + 1, (haldata->UsbRxAggPageTimeout & 0x1F));/* 0x280[12:8] */
usb_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
usb_write8(Adapter, REG_USB_AGG_TO, haldata->UsbRxAggBlockTimeout);
break;
@@ -569,9 +569,9 @@ static void _InitBeaconParameters(struct adapter *Adapter)
haldata->RegBcnCtrlVal = usb_read8(Adapter, REG_BCN_CTRL);
haldata->RegTxPause = usb_read8(Adapter, REG_TXPAUSE);
- haldata->RegFwHwTxQCtrl = usb_read8(Adapter, REG_FWHW_TXQ_CTRL+2);
- haldata->RegReg542 = usb_read8(Adapter, REG_TBTT_PROHIBIT+2);
- haldata->RegCR_1 = usb_read8(Adapter, REG_CR+1);
+ haldata->RegFwHwTxQCtrl = usb_read8(Adapter, REG_FWHW_TXQ_CTRL + 2);
+ haldata->RegReg542 = usb_read8(Adapter, REG_TBTT_PROHIBIT + 2);
+ haldata->RegCR_1 = usb_read8(Adapter, REG_CR + 1);
}
static void _BeaconFunctionEnable(struct adapter *Adapter,
@@ -579,7 +579,7 @@ static void _BeaconFunctionEnable(struct adapter *Adapter,
{
usb_write8(Adapter, REG_BCN_CTRL, (BIT(4) | BIT(3) | BIT(1)));
- usb_write8(Adapter, REG_RD_CTRL+1, 0x6F);
+ usb_write8(Adapter, REG_RD_CTRL + 1, 0x6F);
}
/* Set CCK and OFDM Block "ON" */
@@ -633,7 +633,7 @@ enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
DBG_88E("pwrdown, 0x5c(BIT(7))=%02x\n", val8);
rfpowerstate = (val8 & BIT(7)) ? rf_off : rf_on;
} else { /* rf on/off */
- usb_write8(adapt, REG_MAC_PINMUX_CFG, usb_read8(adapt, REG_MAC_PINMUX_CFG)&~(BIT(3)));
+ usb_write8(adapt, REG_MAC_PINMUX_CFG, usb_read8(adapt, REG_MAC_PINMUX_CFG) & ~(BIT(3)));
val8 = usb_read8(adapt, REG_GPIO_IO_SEL);
DBG_88E("GPIO_IN=%02x\n", val8);
rfpowerstate = (val8 & BIT(3)) ? rf_on : rf_off;
@@ -770,7 +770,7 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
value8 = usb_read8(Adapter, REG_TX_RPT_CTRL);
usb_write8(Adapter, REG_TX_RPT_CTRL, (value8 | BIT(1) | BIT(0)));
/* Set MAX RPT MACID */
- usb_write8(Adapter, REG_TX_RPT_CTRL+1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
+ usb_write8(Adapter, REG_TX_RPT_CTRL + 1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
/* Tx RPT Timer. Unit: 32us */
usb_write16(Adapter, REG_TX_RPT_TIME, 0xCdf0);
@@ -827,10 +827,10 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
pwrctrlpriv->rf_pwrstate = rf_on;
/* enable Tx report. */
- usb_write8(Adapter, REG_FWHW_TXQ_CTRL+1, 0x0F);
+ usb_write8(Adapter, REG_FWHW_TXQ_CTRL + 1, 0x0F);
/* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
- usb_write8(Adapter, REG_EARLY_MODE_CONTROL+3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
+ usb_write8(Adapter, REG_EARLY_MODE_CONTROL + 3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
/* tynli_test_tx_report. */
usb_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
@@ -880,7 +880,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
val8 = usb_read8(Adapter, REG_TX_RPT_CTRL);
- usb_write8(Adapter, REG_TX_RPT_CTRL, val8&(~BIT(1)));
+ usb_write8(Adapter, REG_TX_RPT_CTRL, val8 & (~BIT(1)));
/* stop rx */
usb_write8(Adapter, REG_CR, 0x0);
@@ -894,9 +894,9 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
val8 = usb_read8(Adapter, REG_MCUFWDL);
if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */
/* Reset MCU 0x2[10]=0. */
- val8 = usb_read8(Adapter, REG_SYS_FUNC_EN+1);
+ val8 = usb_read8(Adapter, REG_SYS_FUNC_EN + 1);
val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
- usb_write8(Adapter, REG_SYS_FUNC_EN+1, val8);
+ usb_write8(Adapter, REG_SYS_FUNC_EN + 1, val8);
}
/* reset MCU ready status */
@@ -905,17 +905,17 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
/* YJ,add,111212 */
/* Disable 32k */
val8 = usb_read8(Adapter, REG_32K_CTRL);
- usb_write8(Adapter, REG_32K_CTRL, val8&(~BIT(0)));
+ usb_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0)));
/* Card disable power action flow */
rtl88eu_pwrseqcmdparsing(Adapter, PWR_CUT_ALL_MSK,
Rtl8188E_NIC_DISABLE_FLOW);
/* Reset MCU IO Wrapper */
- val8 = usb_read8(Adapter, REG_RSV_CTRL+1);
- usb_write8(Adapter, REG_RSV_CTRL+1, (val8&(~BIT(3))));
- val8 = usb_read8(Adapter, REG_RSV_CTRL+1);
- usb_write8(Adapter, REG_RSV_CTRL+1, val8 | BIT(3));
+ val8 = usb_read8(Adapter, REG_RSV_CTRL + 1);
+ usb_write8(Adapter, REG_RSV_CTRL + 1, (val8 & (~BIT(3))));
+ val8 = usb_read8(Adapter, REG_RSV_CTRL + 1);
+ usb_write8(Adapter, REG_RSV_CTRL + 1, val8 | BIT(3));
/* YJ,test add, 111207. For Power Consumption. */
val8 = usb_read8(Adapter, GPIO_IN);
@@ -923,9 +923,9 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
usb_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */
val8 = usb_read8(Adapter, REG_GPIO_IO_SEL);
- usb_write8(Adapter, REG_GPIO_IO_SEL, (val8<<4));
- val8 = usb_read8(Adapter, REG_GPIO_IO_SEL+1);
- usb_write8(Adapter, REG_GPIO_IO_SEL+1, val8|0x0F);/* Reg0x43 */
+ usb_write8(Adapter, REG_GPIO_IO_SEL, (val8 << 4));
+ val8 = usb_read8(Adapter, REG_GPIO_IO_SEL + 1);
+ usb_write8(Adapter, REG_GPIO_IO_SEL + 1, val8 | 0x0F);/* Reg0x43 */
usb_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808);/* set LNA ,TRSW,EX_PA Pin to output mode */
Adapter->HalData->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
@@ -1103,11 +1103,11 @@ static void ResumeTxBeacon(struct adapter *adapt)
/* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
/* which should be read from register to a global variable. */
- usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) | BIT(6));
+ usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) | BIT(6));
haldata->RegFwHwTxQCtrl |= BIT(6);
- usb_write8(adapt, REG_TBTT_PROHIBIT+1, 0xff);
+ usb_write8(adapt, REG_TBTT_PROHIBIT + 1, 0xff);
haldata->RegReg542 |= BIT(0);
- usb_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+ usb_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
}
static void StopTxBeacon(struct adapter *adapt)
@@ -1117,11 +1117,11 @@ static void StopTxBeacon(struct adapter *adapt)
/* 2010.03.01. Marked by tynli. No need to call workitem beacause we record the value */
/* which should be read from register to a global variable. */
- usb_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) & (~BIT(6)));
+ usb_write8(adapt, REG_FWHW_TXQ_CTRL + 2, (haldata->RegFwHwTxQCtrl) & (~BIT(6)));
haldata->RegFwHwTxQCtrl &= (~BIT(6));
- usb_write8(adapt, REG_TBTT_PROHIBIT+1, 0x64);
+ usb_write8(adapt, REG_TBTT_PROHIBIT + 1, 0x64);
haldata->RegReg542 &= ~(BIT(0));
- usb_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+ usb_write8(adapt, REG_TBTT_PROHIBIT + 2, haldata->RegReg542);
/* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
}
@@ -1135,7 +1135,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4));
/* set net_type */
- val8 = usb_read8(Adapter, MSR)&0x0c;
+ val8 = usb_read8(Adapter, MSR) & 0x0c;
val8 |= mode;
usb_write8(Adapter, MSR, val8);
@@ -1176,7 +1176,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
/* enable BCN0 Function for if1 */
/* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
- usb_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP|EN_BCN_FUNCTION | BIT(1)));
+ usb_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP | EN_BCN_FUNCTION | BIT(1)));
/* dis BCN1 ATIM WND if if2 is station */
usb_write8(Adapter, REG_BCN_CTRL_1, usb_read8(Adapter, REG_BCN_CTRL_1) | BIT(0));
@@ -1191,7 +1191,7 @@ static void hw_var_set_macaddr(struct adapter *Adapter, u8 variable, u8 *val)
reg_macid = REG_MACID;
for (idx = 0; idx < 6; idx++)
- usb_write8(Adapter, (reg_macid+idx), val[idx]);
+ usb_write8(Adapter, (reg_macid + idx), val[idx]);
}
static void hw_var_set_bssid(struct adapter *Adapter, u8 variable, u8 *val)
@@ -1202,7 +1202,7 @@ static void hw_var_set_bssid(struct adapter *Adapter, u8 variable, u8 *val)
reg_bssid = REG_BSSID;
for (idx = 0; idx < 6; idx++)
- usb_write8(Adapter, (reg_bssid+idx), val[idx]);
+ usb_write8(Adapter, (reg_bssid + idx), val[idx]);
}
static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
@@ -1214,7 +1214,7 @@ static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
if (*((u8 *)val))
usb_write8(Adapter, bcn_ctrl_reg, (EN_BCN_FUNCTION | EN_TXBCN_RPT));
else
- usb_write8(Adapter, bcn_ctrl_reg, usb_read8(Adapter, bcn_ctrl_reg)&(~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
+ usb_write8(Adapter, bcn_ctrl_reg, usb_read8(Adapter, bcn_ctrl_reg) & (~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
}
void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
@@ -1228,7 +1228,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
{
u8 val8;
- val8 = usb_read8(Adapter, MSR)&0x0c;
+ val8 = usb_read8(Adapter, MSR) & 0x0c;
val8 |= *((u8 *)val);
usb_write8(Adapter, MSR, val8);
}
@@ -1274,8 +1274,8 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
BrateCfg |= 0x01; /* default enable 1M ACK rate */
/* Set RRSR rate table. */
usb_write8(Adapter, REG_RRSR, BrateCfg & 0xff);
- usb_write8(Adapter, REG_RRSR+1, (BrateCfg >> 8) & 0xff);
- usb_write8(Adapter, REG_RRSR+2, usb_read8(Adapter, REG_RRSR+2)&0xf0);
+ usb_write8(Adapter, REG_RRSR + 1, (BrateCfg >> 8) & 0xff);
+ usb_write8(Adapter, REG_RRSR + 2, usb_read8(Adapter, REG_RRSR + 2) & 0xf0);
/* Set RTS initial rate */
while (BrateCfg > 0x1) {
@@ -1298,27 +1298,27 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
+ tsf = pmlmeext->TSFValue - do_div(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval * 1024)) - 1024; /* us */
- if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
StopTxBeacon(Adapter);
/* disable related TSF function */
- usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL)&(~BIT(3)));
+ usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) & (~BIT(3)));
usb_write32(Adapter, REG_TSFTR, tsf);
- usb_write32(Adapter, REG_TSFTR+4, tsf>>32);
+ usb_write32(Adapter, REG_TSFTR + 4, tsf >> 32);
/* enable related TSF function */
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(3));
- if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE))
ResumeTxBeacon(Adapter);
}
break;
case HW_VAR_CHECK_BSSID:
if (*((u8 *)val)) {
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
} else {
u32 val32;
@@ -1357,19 +1357,19 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if ((is_client_associated_to_ap(Adapter)) ||
- ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
+ ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE)) {
/* enable to rx data frame */
usb_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
/* enable update TSF */
- usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
- } else if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
+ } else if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
usb_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
/* enable update TSF */
- usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
}
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
+ usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR) | RCR_CBSSID_BCN);
}
break;
case HW_VAR_MLME_JOIN:
@@ -1382,7 +1382,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
/* enable to rx data frame.Accept all data frame */
usb_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
- usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ usb_write32(Adapter, REG_RCR, usb_read32(Adapter, REG_RCR) | RCR_CBSSID_DATA | RCR_CBSSID_BCN);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
RetryLimit = (haldata->CustomerID == RT_CID_CCX) ? 7 : 48;
@@ -1394,9 +1394,9 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
} else if (type == 2) {
/* sta add event call back */
/* enable update TSF */
- usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) & (~BIT(4)));
- if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE))
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))
RetryLimit = 0x7;
}
usb_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
@@ -1432,21 +1432,21 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_RESP_SIFS:
/* RESP_SIFS for CCK */
usb_write8(Adapter, REG_R2T_SIFS, val[0]); /* SIFS_T2T_CCK (0x08) */
- usb_write8(Adapter, REG_R2T_SIFS+1, val[1]); /* SIFS_R2T_CCK(0x08) */
+ usb_write8(Adapter, REG_R2T_SIFS + 1, val[1]); /* SIFS_R2T_CCK(0x08) */
/* RESP_SIFS for OFDM */
usb_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
- usb_write8(Adapter, REG_T2T_SIFS+1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
+ usb_write8(Adapter, REG_T2T_SIFS + 1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
break;
case HW_VAR_ACK_PREAMBLE:
{
u8 regTmp;
u8 bShortPreamble = *((bool *)val);
/* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
- regTmp = (haldata->nCur40MhzPrimeSC)<<5;
+ regTmp = (haldata->nCur40MhzPrimeSC) << 5;
if (bShortPreamble)
regTmp |= 0x80;
- usb_write8(Adapter, REG_RRSR+2, regTmp);
+ usb_write8(Adapter, REG_RRSR + 2, regTmp);
}
break;
case HW_VAR_SEC_CFG:
@@ -1480,12 +1480,13 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
/* filled id in CAM config 2 byte */
if (i == 0)
- ulContent |= (ucIndex & 0x03) | ((u16)(ulEncAlgo)<<2);
+ ulContent |= (ucIndex & 0x03) | ((u16)(ulEncAlgo) << 2);
else
ulContent = 0;
/* polling bit, and No Write enable, and address */
- ulCommand = CAM_CONTENT_COUNT*ucIndex+i;
- ulCommand = ulCommand | CAM_POLLINIG|CAM_WRITE;
+ ulCommand = CAM_CONTENT_COUNT * ucIndex + i;
+ ulCommand = ulCommand | CAM_POLLINIG |
+ CAM_WRITE;
/* write content 0 is equall to mark invalid */
usb_write32(Adapter, WCAMI, ulContent); /* delay_ms(40); */
usb_write32(Adapter, RWCAM, ulCommand); /* delay_ms(40); */
@@ -1589,13 +1590,13 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
FactorToSet = 0xf;
for (index = 0; index < 4; index++) {
- if ((pRegToSet[index] & 0xf0) > (FactorToSet<<4))
- pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet<<4);
+ if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4))
+ pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4);
if ((pRegToSet[index] & 0x0f) > FactorToSet)
pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
- usb_write8(Adapter, (REG_AGGLEN_LMT+index), pRegToSet[index]);
+ usb_write8(Adapter, (REG_AGGLEN_LMT + index), pRegToSet[index]);
}
}
}
@@ -1681,9 +1682,9 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
if (!pwrpriv->bkeepfwalive) {
/* RX DMA stop */
- usb_write32(Adapter, REG_RXPKT_NUM, (usb_read32(Adapter, REG_RXPKT_NUM)|RW_RELEASE_EN));
+ usb_write32(Adapter, REG_RXPKT_NUM, (usb_read32(Adapter, REG_RXPKT_NUM) | RW_RELEASE_EN));
do {
- if (!(usb_read32(Adapter, REG_RXPKT_NUM)&RXDMA_IDLE))
+ if (!(usb_read32(Adapter, REG_RXPKT_NUM) & RXDMA_IDLE))
break;
} while (trycnt--);
if (trycnt == 0)
@@ -1706,8 +1707,8 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
{
u8 maxMacid = *val;
- DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid+1);
- usb_write8(Adapter, REG_TX_RPT_CTRL+1, maxMacid+1);
+ DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid + 1);
+ usb_write8(Adapter, REG_TX_RPT_CTRL + 1, maxMacid + 1);
}
break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
@@ -1715,7 +1716,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
break;
case HW_VAR_BCN_VALID:
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
- usb_write8(Adapter, REG_TDECTRL+2, usb_read8(Adapter, REG_TDECTRL+2) | BIT(0));
+ usb_write8(Adapter, REG_TDECTRL + 2, usb_read8(Adapter, REG_TDECTRL + 2) | BIT(0));
break;
default:
break;
@@ -1733,7 +1734,7 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
break;
case HW_VAR_BCN_VALID:
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
- val[0] = (BIT(0) & usb_read8(Adapter, REG_TDECTRL+2)) ? true : false;
+ val[0] = (BIT(0) & usb_read8(Adapter, REG_TDECTRL + 2)) ? true : false;
break;
case HW_VAR_FWLPS_RF_ON:
{
@@ -1764,7 +1765,7 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
*val = Adapter->HalData->bMacPwrCtrlOn;
break;
case HW_VAR_CHK_HI_QUEUE_EMPTY:
- *val = ((usb_read32(Adapter, REG_HGQ_INFORMATION)&0x0000ff00) == 0) ? true : false;
+ *val = ((usb_read32(Adapter, REG_HGQ_INFORMATION) & 0x0000ff00) == 0) ? true : false;
break;
default:
break;
@@ -1888,7 +1889,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
psta = pmlmeinfo->FW_sta_info[mac_id].psta;
- if (psta == NULL)
+ if (!psta)
return;
switch (mac_id) {
case 0:/* for infra mode */
@@ -1925,7 +1926,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
mask &= rate_bitmap;
- init_rate = get_highest_rate_idx(mask)&0x3f;
+ init_rate = get_highest_rate_idx(mask) & 0x3f;
ODM_RA_UpdateRateInfo_8188E(odmpriv, mac_id, raid, mask, shortGIrate);
@@ -1934,7 +1935,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
psta->init_rate = init_rate;
}
-void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
+void beacon_timing_control(struct adapter *adapt)
{
u32 value32;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
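
Note: most of the usb_halinit.c hunks above are operator-spacing cleanup; the one semantic point worth flagging is the switch from rtw_modular64() to do_div() in the HW_VAR_CORRECT_TSF path. do_div() divides its 64-bit first argument in place and evaluates to the remainder, so pmlmeext->TSFValue ends up holding the quotient as a side effect. A side-effect-free way to express the same alignment, shown purely as an illustration using the kernel's div_u64_rem():

	#include <linux/math64.h>
	#include <linux/types.h>

	/* Align a TSF timestamp (in microseconds) down to the previous beacon
	 * boundary and back off one further TU (1024 us), as done in the
	 * HW_VAR_CORRECT_TSF hunk above.  Illustrative helper, not driver code. */
	static u64 align_tsf_to_beacon(u64 tsf_us, u32 beacon_interval_tu)
	{
		u32 rem;

		div_u64_rem(tsf_us, beacon_interval_tu * 1024, &rem);

		return tsf_us - rem - 1024;
	}
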
diff --git a/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h b/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h
index 53afcea21c96..bd915a1f2511 100644
--- a/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h
+++ b/drivers/staging/rtl8188eu/include/hal8188e_phy_reg.h
@@ -16,55 +16,10 @@
/* 5. Other definition for BB/RF R/W */
/* */
-
-/* */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 1. Page1(0x100) */
-/* */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
-/* 2. Page2(0x200) */
-/* The following two definition are only used for USB interface. */
-#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB r/w cmd address. */
-#define RF_BB_CMD_DATA 0x02c4 /* RF/BB r/w cmd data. */
-
/* 3. Page8(0x800) */
#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting */
-
-#define rFPGA0_TxInfo 0x804 /* Status report?? */
-#define rFPGA0_PSDFunction 0x808
-
#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
-#define rFPGA0_RFTiming1 0x810 /* Useless now */
-#define rFPGA0_RFTiming2 0x814
-
#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
#define rFPGA0_XA_HSSIParameter2 0x824
#define rFPGA0_XB_HSSIParameter1 0x828
@@ -73,9 +28,6 @@
#define rFPGA0_XA_LSSIParameter 0x840
#define rFPGA0_XB_LSSIParameter 0x844
-#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
-#define rFPGA0_RFSleepUpParameter 0x854
-
#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
#define rFPGA0_XCD_SwitchControl 0x85c
@@ -86,181 +38,63 @@
#define rFPGA0_XCD_RFInterfaceSW 0x874
#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
-#define rFPGA0_XCD_RFParameter 0x87c
-
-/* Crystal cap setting RF-R/W protection for parameter4?? */
-#define rFPGA0_AnalogParameter1 0x880
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888
-/* enable ad/da clock1 for dual-phy */
-#define rFPGA0_AdDaClockEn 0x888
-#define rFPGA0_AnalogParameter4 0x88c
#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */
#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-#define rFPGA0_PSDReport 0x8b4 /* Useless now */
-/* Transceiver A HSPI Readback */
#define TransceiverA_HSPI_Readback 0x8b8
-/* Transceiver B HSPI Readback */
#define TransceiverB_HSPI_Readback 0x8bc
-/* Useless now RF Interface Readback Value */
#define rFPGA0_XAB_RFInterfaceRB 0x8e0
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
/* 4. Page9(0x900) */
/* RF mode & OFDM TxSC RF BW Setting?? */
#define rFPGA1_RFMOD 0x900
-#define rFPGA1_TxBlock 0x904 /* Useless now */
-#define rFPGA1_DebugSelect 0x908 /* Useless now */
-#define rFPGA1_TxInfo 0x90c /* Useless now Status report */
-
/* 5. PageA(0xA00) */
/* Set Control channel to upper or lower - required only for 40MHz */
#define rCCK0_System 0xa00
-/* Disable init gain now Select RX path by RSSI */
-#define rCCK0_AFESetting 0xa04
-/* Disable init gain now Init gain */
-#define rCCK0_CCA 0xa08
-
-/* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold,
- * RX LNA Threshold useless now. Not the same as 90 series
- */
-#define rCCK0_RxAGC1 0xa0c
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-
-#define rCCK0_RxHP 0xa14
-
-/* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter1 0xa18
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
-
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
-
/* */
/* PageB(0xB00) */
/* */
-#define rPdp_AntA 0xb00
-#define rPdp_AntA_4 0xb04
-#define rConfig_Pmpd_AntA 0xb28
#define rConfig_AntA 0xb68
#define rConfig_AntB 0xb6c
-#define rPdp_AntB 0xb70
-#define rPdp_AntB_4 0xb74
-#define rConfig_Pmpd_AntB 0xb98
-#define rAPK 0xbd8
/* */
/* 6. PageC(0xC00) */
/* */
-#define rOFDM0_LSTF 0xc00
-
#define rOFDM0_TRxPathEnable 0xc04
#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
/* RxIQ DC offset, Rx digital filter, DC notch filter */
#define rOFDM0_XARxAFE 0xc10
#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
#define rOFDM0_XBRxAFE 0xc18
#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-
-#define rOFDM0_RxDetector1 0xc30 /*PD,BW & SBD DM tune init gain*/
-#define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync. */
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
#define rOFDM0_XAAGCCore2 0xc54
#define rOFDM0_XBAGCCore1 0xc58
#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
#define rOFDM0_XATxAFE 0xc84
#define rOFDM0_XBTxIQImbalance 0xc88
#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
#define rOFDM0_XDTxAFE 0xc9c
#define rOFDM0_RxIQExtAnta 0xca0
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-
/* */
/* 7. PageD(0xD00) */
/* */
#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-#define rOFDM1_CFO 0xd08 /* No setting now */
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-
-#define rOFDM_ShortCFOAB 0xdac /* No setting now */
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
-
/* */
/* 8. PageE(0xE00) */
@@ -292,10 +126,6 @@
#define rRx_IQK 0xe44
#define rIQK_AGC_Pts 0xe48
#define rIQK_AGC_Rsp 0xe4c
-#define rTx_IQK_Tone_B 0xe50
-#define rRx_IQK_Tone_B 0xe54
-#define rTx_IQK_PI_B 0xe58
-#define rRx_IQK_PI_B 0xe5c
#define rIQK_AGC_Cont 0xe60
#define rBlue_Tooth 0xe6c
@@ -311,17 +141,13 @@
#define rTx_Power_Before_IQK_A 0xe94
#define rTx_Power_After_IQK_A 0xe9c
-#define rRx_Power_Before_IQK_A 0xea0
#define rRx_Power_Before_IQK_A_2 0xea4
-#define rRx_Power_After_IQK_A 0xea8
#define rRx_Power_After_IQK_A_2 0xeac
#define rTx_Power_Before_IQK_B 0xeb4
#define rTx_Power_After_IQK_B 0xebc
-#define rRx_Power_Before_IQK_B 0xec0
#define rRx_Power_Before_IQK_B_2 0xec4
-#define rRx_Power_After_IQK_B 0xec8
#define rRx_Power_After_IQK_B_2 0xecc
#define rRx_OFDM 0xed0
@@ -332,751 +158,44 @@
#define rPMPD_ANAEN 0xeec
/* */
-/* 7. RF Register 0x00-0x2E (RF 8256) */
-/* RF-0222D 0x00-3F */
-/* */
-/* Zebra1 */
-#define rZebra1_HSSIEnable 0x0 /* Useless now */
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-#define rZebra1_Channel 0x7 /* RF channel switch */
-
-/* endif */
-#define rZebra1_TxGain 0x8 /* Useless now */
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
-
-/* Zebra4 */
-#define rGlobalCtrl 0 /* Useless now */
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11 /* Useless now */
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
-
-/* */
/* RL6052 Register definition */
/* */
#define RF_AC 0x00 /* */
-
-#define RF_IQADJ_G1 0x01 /* */
-#define RF_IQADJ_G2 0x02 /* */
-
-#define RF_POW_TRSW 0x05 /* */
-
-#define RF_GAIN_RX 0x06 /* */
-#define RF_GAIN_TX 0x07 /* */
-
-#define RF_TXM_IDAC 0x08 /* */
-#define RF_IPA_G 0x09 /* */
-#define RF_TXBIAS_G 0x0A
-#define RF_TXPA_AG 0x0B
-#define RF_IPA_A 0x0C /* */
-#define RF_TXBIAS_A 0x0D
-#define RF_BS_PA_APSET_G9_G11 0x0E
-#define RF_BS_IQGEN 0x0F /* */
-
-#define RF_MODE1 0x10 /* */
-#define RF_MODE2 0x11 /* */
-
-#define RF_RX_AGC_HP 0x12 /* */
-#define RF_TX_AGC 0x13 /* */
-#define RF_BIAS 0x14 /* */
-#define RF_IPA 0x15 /* */
-#define RF_TXBIAS 0x16
-#define RF_POW_ABILITY 0x17 /* */
#define RF_CHNLBW 0x18 /* RF channel and BW switch */
-#define RF_TOP 0x19 /* */
-
-#define RF_RX_G1 0x1A /* */
-#define RF_RX_G2 0x1B /* */
-
-#define RF_RX_BB2 0x1C /* */
-#define RF_RX_BB1 0x1D /* */
-
-#define RF_RCK1 0x1E /* */
-#define RF_RCK2 0x1F /* */
-
-#define RF_TX_G1 0x20 /* */
-#define RF_TX_G2 0x21 /* */
-#define RF_TX_G3 0x22 /* */
-
-#define RF_TX_BB1 0x23 /* */
-
-#define RF_T_METER_92D 0x42 /* */
#define RF_T_METER_88E 0x42 /* */
-#define RF_T_METER 0x24 /* */
-
-#define RF_SYN_G1 0x25 /* RF TX Power control */
-#define RF_SYN_G2 0x26 /* RF TX Power control */
-#define RF_SYN_G3 0x27 /* RF TX Power control */
-#define RF_SYN_G4 0x28 /* RF TX Power control */
-#define RF_SYN_G5 0x29 /* RF TX Power control */
-#define RF_SYN_G6 0x2A /* RF TX Power control */
-#define RF_SYN_G7 0x2B /* RF TX Power control */
-#define RF_SYN_G8 0x2C /* RF TX Power control */
-
#define RF_RCK_OS 0x30 /* RF TX PA control */
#define RF_TXPA_G1 0x31 /* RF TX PA control */
#define RF_TXPA_G2 0x32 /* RF TX PA control */
-#define RF_TXPA_G3 0x33 /* RF TX PA control */
-#define RF_TX_BIAS_A 0x35
-#define RF_TX_BIAS_D 0x36
-#define RF_LOBF_9 0x38
-#define RF_RXRF_A3 0x3C /* */
-#define RF_TRSW 0x3F
-
-#define RF_TXRF_A2 0x41
-#define RF_TXPA_G4 0x46
-#define RF_TXPA_A4 0x4B
-#define RF_0x52 0x52
#define RF_WE_LUT 0xEF
-
/* */
/* Bit Mask */
/* */
-/* 1. Page1(0x100) */
-#define bBBResetB 0x100 /* Useless now? */
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-#define IS_BB_REG_OFFSET_92S(_Offset) \
- ((_Offset >= 0x800) && (_Offset <= 0xfff))
/* 2. Page8(0x800) */
#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
#define bCCKEn 0x1000000
#define bOFDMEn 0x2000000
-#define bOFDMRxADCPhase 0x10000 /* Useless now */
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-
-#define bAntennaSelect 0x0300
-
-#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-
-#define bPAStart 0xf0000000 /* Useless now */
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* change gain at continue Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-
-/* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
-#define b3WireDataLength 0x800
-#define b3WireAddressLength 0x400
-
-#define b3WireRFPowerDown 0x1 /* Useless now */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf
-
-#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
-
-#define bRFSI_TRSW 0x20 /* Useless now */
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-
#define bLSSIReadAddress 0x7f800000 /* T65 RF */
-
#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-
#define bLSSIReadBackData 0xfffff /* T65 RF */
-#define bLSSIReadOKFlag 0x1000 /* Useless now */
-#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-
-/* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
-#define bADClkPhase 0x4000000
-
-#define b80MClkDelay 0x18000000 /* Useless */
-#define bAFEWatchDogEnable 0x20000000
-
-/* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
-#define bXtalCap01 0xc0000000
-#define bXtalCap23 0x3
-#define bXtalCap92x 0x0f000000
-#define bXtalCap 0x0f000000
-
-#define bIntDifClkEnable 0x400 /* Useless */
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-#define bCCKRxAGCFormat 0x200
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* 3. Page9(0x900) */
-#define bOFDMTxSC 0x30000000 /* Useless */
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and HWord, LWord */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
-
-/* 4. PageA(0xA00) */
-#define bCCKBBMode 0x3 /* Useless */
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-
#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 */
-#define bCCKScramble 0x8 /* Useless */
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 /* CCK Rx Iinital gain polarity */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* 5. PageC(0xC00) */
-#define bNumOfSTF 0x3 /* Useless */
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* threshold for high power */
-#define bRSSI_Gen 0x7f000000 /* threshold for ant diversity */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-#define bDAFormat 0x40000
-#define bTxChEmuEnable 0x01000000
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-#define bExtLNAGain 0x7c00
-
-/* 6. PageE(0xE00) */
-#define bSTBCEn 0x4 /* Useless */
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf /* Useless */
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3 /* Useless */
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1 /* Useless */
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
-
-/* Rx Pseduo noise */
-#define bRxPesudoNoiseOn 0x20000000 /* Useless */
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
-
-/* 7. RF Register */
-/* Zebra1 */
-#define bZebra1_HSSIEnable 0x8 /* Useless */
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
-#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100 /* Useless */
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
-
-/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc /* Useless */
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-
/* */
/* Other Definition */
/* */
-/* byte endable for sb_write */
-#define bByte0 0x1 /* Useless */
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
/* for PutRegsetting & GetRegSetting BitMask */
#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
#define bMaskDWord 0xffffffff
#define bMask12Bits 0xfff
-#define bMaskH4Bits 0xf0000000
#define bMaskOFDM_D 0xffc00000
-#define bMaskCCK 0x3f3f3f3f
/* for PutRFRegsetting & GetRFRegSetting BitMask */
#define bRFRegOffsetMask 0xfffff
-#define bEnable 0x1 /* Useless */
-#define bDisable 0x0
-
-#define LeftAntenna 0x0 /* Useless */
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms Useless */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0 /* Useless */
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff /* Useless */
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-#define bPMACControl 0x0 /* Useless */
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define PathA 0x0 /* Useless */
-#define PathB 0x1
-#define PathC 0x2
-#define PathD 0x3
-
-/*--------------------------Define Parameters-------------------------------*/
-
-
#endif
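
Note: the masks that survive this cleanup (bMaskByte0/bMaskByte1/bMaskByte3, bMaskDWord, bMask12Bits, bRFRegOffsetMask) are the ones actually passed as the BitMask argument of the register accessors such as rtw_hal_read_rfreg() declared in hal_intf.h below. A minimal sketch of how such a masked field is typically extracted follows; read_masked_field() is a hypothetical helper for illustration only, not part of the driver.

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical illustration: pull the field selected by a mask such as
 * bMaskByte1 (0xff00) out of a raw 32-bit register value.  The __ffs()
 * shift mirrors what masked PHY/RF query helpers in this driver family do.
 */
static u32 read_masked_field(u32 reg_val, u32 bitmask)
{
	u32 shift = bitmask ? __ffs(bitmask) : 0;	/* position of lowest set bit */

	return (reg_val & bitmask) >> shift;
}

/* e.g. u32 byte1 = read_masked_field(raw, bMaskByte1); */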
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 8b65fcba1967..516a89647003 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -199,7 +199,7 @@ void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level);
void rtw_hal_clone_data(struct adapter *dst_adapt,
struct adapter *src_adapt);
-void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+void beacon_timing_control(struct adapter *padapter);
u32 rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
u32 RegAddr, u32 BitMask);
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index eda16c06336a..8e919441c2aa 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -13,7 +13,6 @@
void rtw_init_mlme_timer(struct adapter *padapter);
void rtw_os_indicate_disconnect(struct adapter *adapter);
void rtw_os_indicate_connect(struct adapter *adapter);
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
void rtw_reset_securitypriv(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index cfe5698fbbb1..c0114ad79788 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -80,8 +80,6 @@ void rtw_free_netdev(struct net_device *netdev);
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
-u64 rtw_modular64(u64 x, u64 y);
-
/* Macros for handling unaligned memory accesses */
#define RTW_GET_BE24(a) ((((u32)(a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 9abb7c320192..010f0c42368a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -333,7 +333,7 @@ void rtw_dynamic_check_timer_handlder(struct timer_list *t);
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
int rtw_if_up(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 327f7d1bc20c..d70780c8fd62 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -535,7 +535,6 @@ void report_del_sta_event(struct adapter *padapter,
void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
int cam_idx);
-void beacon_timing_control(struct adapter *padapter);
u8 set_tx_beacon_cmd(struct adapter *padapter);
unsigned int setup_beacon_frame(struct adapter *padapter,
unsigned char *beacon_frame);
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index e660bd4d91ef..321b2c46479c 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -27,11 +27,6 @@ void rtw_os_indicate_connect(struct adapter *adapter)
netif_carrier_on(adapter->pnetdev);
}
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
-{
- indicate_wx_scan_complete_event(padapter);
-}
-
static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
void rtw_reset_securitypriv(struct adapter *adapter)
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 105f3f21bdea..69d4b1d66b6f 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -59,11 +59,6 @@ RETURN:
return;
}
-u64 rtw_modular64(u64 x, u64 y)
-{
- return do_div(x, y);
-}
-
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
*buf_len = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index eedf2cd831d1..aaab0d577453 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -122,8 +122,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
precvframe->pkt = pkt_copy;
skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
- memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
- skb_put(precvframe->pkt, skb_len);
+ skb_put_data(pkt_copy, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
} else {
DBG_88E("%s: alloc_skb fail , drop frag frame\n",
__func__);
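
For reference, skb_put_data() is exactly the memcpy()-plus-skb_put() pattern folded into one helper, so the hunk above is a pure simplification with no behavioural change. A rough sketch of the idiom (the function and its arguments are illustrative, not from the driver):

#include <linux/skbuff.h>

/* Sketch only: copy a received payload into a freshly allocated skb.
 * skb_put_data() extends the skb data area and copies in one step, which
 * is what the removed memcpy() + skb_put() pair did by hand.
 */
static void copy_payload(struct sk_buff *skb, const void *payload,
			 unsigned int len)
{
	if (skb_tailroom(skb) >= len)
		skb_put_data(skb, payload, len);
}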
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 11528d17bb3c..1007eea6c8fc 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -15,6 +15,7 @@ config RTLLIB_CRYPTO_CCMP
tristate "Support for rtllib CCMP crypto"
depends on RTLLIB
select CRYPTO_AES
+ select CRYPTO_CCM
default y
help
CCMP crypto driver for rtllib.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 1b7e3fda7905..20e494186c9e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -618,7 +618,7 @@ static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev,
static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- bool bHighpowerstate, viviflag = false;
+ bool viviflag = false;
struct dcmd_txcmd tx_cmd;
u8 powerlevelOFDM24G;
int i = 0, j = 0, k = 0;
@@ -632,7 +632,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
- bHighpowerstate = priv->bDynamicTxHighPower;
powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24);
RF_Type = priv->rf_type;
@@ -1901,7 +1900,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
u8 cck_default_Rx = 0x2;
u8 cck_optional_Rx = 0x3;
long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
- u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0;
+ u8 cck_rx_ver2_max_index = 0;
u8 cck_rx_ver2_sec_index = 0;
u8 cur_rf_rssi;
long cur_cck_pwdb;
@@ -1984,7 +1983,6 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
if (rf_num == 1) {
cck_rx_ver2_max_index = i;
- cck_rx_ver2_min_index = i;
cck_rx_ver2_sec_index = i;
tmp_cck_max_pwdb = cur_cck_pwdb;
tmp_cck_min_pwdb = cur_cck_pwdb;
@@ -1997,7 +1995,6 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
tmp_cck_sec_pwdb = cur_cck_pwdb;
tmp_cck_min_pwdb = cur_cck_pwdb;
cck_rx_ver2_sec_index = i;
- cck_rx_ver2_min_index = i;
}
} else {
if (cur_cck_pwdb > tmp_cck_max_pwdb) {
@@ -2027,13 +2024,10 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
(cur_cck_pwdb > tmp_cck_min_pwdb)) {
;
} else if (cur_cck_pwdb == tmp_cck_min_pwdb) {
- if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb) {
+ if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
- }
} else if (cur_cck_pwdb < tmp_cck_min_pwdb) {
tmp_cck_min_pwdb = cur_cck_pwdb;
- cck_rx_ver2_min_index = i;
}
}
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 2dd57e88276e..328f410daa03 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -479,7 +479,6 @@ enum wireless_mode {
#define P80211_OUI_LEN 3
struct rtllib_snap_hdr {
-
u8 dsap; /* always 0xAA */
u8 ssap; /* always 0xAA */
u8 ctrl; /* always 0x03 */
@@ -1940,7 +1939,7 @@ int rtllib_encrypt_fragment(
int hdr_len);
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
-void rtllib_txb_free(struct rtllib_txb *);
+void rtllib_txb_free(struct rtllib_txb *txb);
/* rtllib_rx.c */
int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
@@ -2132,7 +2131,7 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
return escaped;
}
- snprintf(escaped, sizeof(escaped), "%*pEn", essid_len, essid);
+ snprintf(escaped, sizeof(escaped), "%*pE", essid_len, essid);
return escaped;
}
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 2581ed6d14fa..0cbf4a1a326b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -17,6 +17,7 @@
#include "rtllib.h"
#include <linux/crypto.h>
+#include <crypto/aead.h>
#include <linux/scatterlist.h>
@@ -39,20 +40,13 @@ struct rtllib_ccmp_data {
int key_idx;
- struct crypto_tfm *tfm;
+ struct crypto_aead *tfm;
/* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
- tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
- u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+ u8 tx_aad[2 * AES_BLOCK_LEN];
+ u8 rx_aad[2 * AES_BLOCK_LEN];
};
-static void rtllib_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
-{
- crypto_cipher_encrypt_one((void *)tfm, ct, pt);
-}
-
static void *rtllib_ccmp_init(int key_idx)
{
struct rtllib_ccmp_data *priv;
@@ -62,7 +56,7 @@ static void *rtllib_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
+ priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
pr_debug("Could not allocate crypto API aes\n");
priv->tfm = NULL;
@@ -73,7 +67,7 @@ static void *rtllib_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
- crypto_free_cipher((void *)priv->tfm);
+ crypto_free_aead(priv->tfm);
kfree(priv);
}
@@ -86,31 +80,18 @@ static void rtllib_ccmp_deinit(void *priv)
struct rtllib_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
- crypto_free_cipher((void *)_priv->tfm);
+ crypto_free_aead(_priv->tfm);
kfree(priv);
}
-static inline void xor_block(u8 *b, u8 *a, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- b[i] ^= a[i];
-}
-
-
-
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct rtllib_hdr_4addr *hdr,
- u8 *pn, size_t dlen, u8 *b0, u8 *auth,
- u8 *s0)
+static int ccmp_init_iv_and_aad(struct rtllib_hdr_4addr *hdr,
+ u8 *pn, u8 *iv, u8 *aad)
{
u8 *pos, qc = 0;
size_t aad_len;
u16 fc;
int a4_included, qc_included;
- u8 aad[2 * AES_BLOCK_LEN];
fc = le16_to_cpu(hdr->frame_ctl);
a4_included = ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) ==
@@ -128,18 +109,19 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc = *pos & 0x0f;
aad_len += 2;
}
- /* CCM Initial Block:
- * Flag (Include authentication header, M=3 (8-octet MIC),
- * L=1 (2-octet Dlen))
- * Nonce: 0x00 | A2 | PN
- * Dlen
+ /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+ * mode authentication are not allowed to collide, yet both are derived
+ * from the same vector. We only set L := 1 here to indicate that the
+ * data size can be represented in (L+1) bytes. The CCM layer will take
+ * care of storing the data length in the top (L+1) bytes and setting
+ * and clearing the other bits as is required to derive the two IVs.
*/
- b0[0] = 0x59;
- b0[1] = qc;
- memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
- memcpy(b0 + 8, pn, CCMP_PN_LEN);
- b0[14] = (dlen >> 8) & 0xff;
- b0[15] = dlen & 0xff;
+ iv[0] = 0x1;
+
+ /* Nonce: QC | A2 | PN */
+ iv[1] = qc;
+ memcpy(iv + 2, hdr->addr2, ETH_ALEN);
+ memcpy(iv + 8, pn, CCMP_PN_LEN);
/* AAD:
* FC with bits 4..6 and 11..13 masked to zero; 14 is always one
@@ -149,31 +131,21 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
* QC (if present)
*/
pos = (u8 *) hdr;
- aad[0] = 0; /* aad_len >> 8 */
- aad[1] = aad_len & 0xff;
- aad[2] = pos[0] & 0x8f;
- aad[3] = pos[1] & 0xc7;
- memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+ aad[0] = pos[0] & 0x8f;
+ aad[1] = pos[1] & 0xc7;
+ memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
pos = (u8 *) &hdr->seq_ctl;
- aad[22] = pos[0] & 0x0f;
- aad[23] = 0; /* all bits masked */
- memset(aad + 24, 0, 8);
+ aad[20] = pos[0] & 0x0f;
+ aad[21] = 0; /* all bits masked */
+ memset(aad + 22, 0, 8);
if (a4_included)
- memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+ memcpy(aad + 22, hdr->addr4, ETH_ALEN);
if (qc_included) {
- aad[a4_included ? 30 : 24] = qc;
+ aad[a4_included ? 28 : 22] = qc;
/* rest of QC masked */
}
- /* Start with the first block and AAD */
- rtllib_ccmp_aes_encrypt(tfm, b0, auth);
- xor_block(auth, aad, AES_BLOCK_LEN);
- rtllib_ccmp_aes_encrypt(tfm, auth, auth);
- xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
- rtllib_ccmp_aes_encrypt(tfm, auth, auth);
- b0[0] &= 0x07;
- b0[14] = b0[15] = 0;
- rtllib_ccmp_aes_encrypt(tfm, b0, s0);
+ return aad_len;
}
@@ -181,7 +153,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct rtllib_ccmp_data *key = priv;
- int data_len, i;
+ int i;
u8 *pos;
struct rtllib_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
@@ -191,7 +163,6 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- data_len = skb->len - hdr_len;
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
@@ -213,40 +184,37 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
-
hdr = (struct rtllib_hdr_4addr *) skb->data;
if (!tcb_desc->bHwSec) {
- int blocks, last, len;
- u8 *mic;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
-
- mic = skb_put(skb, CCMP_MIC_LEN);
-
- ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len,
- b0, b, s0);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Authentication */
- xor_block(b, pos, len);
- rtllib_ccmp_aes_encrypt(key->tfm, b, b);
- /* Encryption, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- rtllib_ccmp_aes_encrypt(key->tfm, b0, e);
- xor_block(pos, e, len);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->tx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ int data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
+
+ skb_put(skb, CCMP_MIC_LEN);
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
+ data_len + CCMP_MIC_LEN);
- for (i = 0; i < CCMP_MIC_LEN; i++)
- mic[i] = b[i] ^ s0[i];
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_encrypt(req);
+ aead_request_free(req);
+
+ return ret;
}
+
return 0;
}
@@ -302,35 +270,31 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -4;
}
if (!tcb_desc->bHwSec) {
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN -
- CCMP_MIC_LEN;
- u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
- u8 *b0 = key->rx_b0;
- u8 *b = key->rx_b;
- u8 *a = key->rx_a;
- int i, blocks, last, len;
-
-
- ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
- xor_block(mic, b, CCMP_MIC_LEN);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Decrypt, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- rtllib_ccmp_aes_encrypt(key->tfm, b0, b);
- xor_block(pos, b, len);
- /* Authentication */
- xor_block(a, pos, len);
- rtllib_ccmp_aes_encrypt(key->tfm, a, a);
- pos += len;
- }
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->rx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], pos, data_len);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_decrypt(req);
+ aead_request_free(req);
- if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+ if (ret) {
if (net_ratelimit()) {
pr_debug("CCMP: decrypt failed: STA= %pM\n",
hdr->addr2);
@@ -354,7 +318,7 @@ static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
struct rtllib_ccmp_data *data = priv;
int keyidx;
- struct crypto_tfm *tfm = data->tfm;
+ struct crypto_aead *tfm = data->tfm;
keyidx = data->key_idx;
memset(data, 0, sizeof(*data));
@@ -371,7 +335,9 @@ static int rtllib_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
data->rx_pn[4] = seq[1];
data->rx_pn[5] = seq[0];
}
- crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
+ if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) ||
+ crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN))
+ return -1;
} else if (len == 0) {
data->key_set = 0;
} else {
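
The hunks above replace the open-coded CBC-MAC/CTR loops with the kernel's "ccm(aes)" AEAD transform. Stripped of the 802.11 header parsing, the per-packet encrypt flow is roughly the sketch below; the buffer names (aad, data, iv) are placeholders standing in for the driver's fields, not new API.

	struct scatterlist sg[2];
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(tfm, GFP_ATOMIC);	/* tfm from crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC) */
	if (!req)
		return -ENOMEM;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], aad, aad_len);		/* associated data: masked FC, addresses, seq_ctl, QC */
	sg_set_buf(&sg[1], data, data_len + CCMP_MIC_LEN); /* payload followed by room for the 8-byte MIC */

	aead_request_set_callback(req, 0, NULL, NULL);	/* synchronous use, no completion callback */
	aead_request_set_ad(req, aad_len);
	aead_request_set_crypt(req, sg, sg, data_len, iv); /* iv[0] = 1 encodes a 2-octet length field */

	ret = crypto_aead_encrypt(req);			/* writes ciphertext plus MIC in place */
	aead_request_free(req);

Decryption is symmetrical, except the crypt length covers payload plus MIC and crypto_aead_decrypt() returns an error (-EBADMSG) when the tag check fails, which is why the explicit memcmp() against a recomputed MIC disappears.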
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 0c19ac2bc3bf..0bae0a0a4cbe 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1300,7 +1300,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
struct rx_ts_record *pTS = NULL;
u16 fc, sc, SeqNum = 0;
u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
- u8 *payload;
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
u8 bssid[ETH_ALEN] = {0};
@@ -1412,7 +1411,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Parse rx data frame (For AMSDU) */
/* skb: hdr + (possible reassembled) full plaintext payload */
- payload = skb->data + hdrlen;
rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
if (!rxb)
goto rx_dropped;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index e29e8d6f4611..f2f7529e7c80 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1382,15 +1382,10 @@ rtllib_association_req(struct rtllib_network *beacon,
ieee->assocreq_ies = NULL;
ies = &(hdr->info_element[0].id);
ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
- ieee->assocreq_ies = kmalloc(ieee->assocreq_ies_len, GFP_ATOMIC);
- if (ieee->assocreq_ies)
- memcpy(ieee->assocreq_ies, ies, ieee->assocreq_ies_len);
- else {
- netdev_info(ieee->dev,
- "%s()Warning: can't alloc memory for assocreq_ies\n",
- __func__);
+ ieee->assocreq_ies = kmemdup(ies, ieee->assocreq_ies_len, GFP_ATOMIC);
+ if (!ieee->assocreq_ies)
ieee->assocreq_ies_len = 0;
- }
+
return skb;
}
@@ -2259,17 +2254,12 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->assocresp_ies = NULL;
ies = &(assoc_resp->info_element[0].id);
ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
- ieee->assocresp_ies = kmalloc(ieee->assocresp_ies_len,
+ ieee->assocresp_ies = kmemdup(ies,
+ ieee->assocresp_ies_len,
GFP_ATOMIC);
- if (ieee->assocresp_ies)
- memcpy(ieee->assocresp_ies, ies,
- ieee->assocresp_ies_len);
- else {
- netdev_info(ieee->dev,
- "%s()Warning: can't alloc memory for assocresp_ies\n",
- __func__);
+ if (!ieee->assocresp_ies)
ieee->assocresp_ies_len = 0;
- }
+
rtllib_associate_complete(ieee);
} else {
/* aid could not been allocated */
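
kmemdup() is kmalloc() followed by memcpy(), so the only behavioural change in the two hunks above is that the "can't alloc memory" netdev_info() message goes away. A minimal sketch of the pattern, with illustrative names only:

#include <linux/slab.h>
#include <linux/types.h>

/* Duplicate a variable-length IE blob; the length is zeroed when the copy
 * fails so later users always see a consistent (NULL, 0) pair.
 */
static u8 *dup_ies(const u8 *ies, size_t *len)
{
	u8 *copy = kmemdup(ies, *len, GFP_ATOMIC);

	if (!copy)
		*len = 0;
	return copy;
}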
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 22c2165e8b1c..1edca5c304fb 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -6,3 +6,5 @@ config RTL8192U
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CCM
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 130ddfe9868f..bc642076b96f 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -12,7 +12,7 @@ void rtl8192u_dot11d_init(struct ieee80211_device *ieee)
dot11d_info->state = DOT11D_STATE_NONE;
dot11d_info->country_ie_len = 0;
memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
RESET_CIE_WATCHDOG(ieee);
netdev_info(ieee->dev, "rtl8192u_dot11d_init()\n");
@@ -25,8 +25,8 @@ void dot11d_reset(struct ieee80211_device *ieee)
u32 i;
struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
/* Clear old channel map */
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
/* Set new channel map */
for (i = 1; i <= 11; i++)
(dot11d_info->channel_map)[i] = 1;
@@ -56,8 +56,8 @@ void dot11d_update_country_ie(struct ieee80211_device *dev, u8 *pTaddr,
u8 i, j, NumTriples, MaxChnlNum;
struct chnl_txpower_triple *pTriple;
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER+1);
- memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER+1);
+ memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
+ memset(dot11d_info->max_tx_pwr_dbm_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
MaxChnlNum = 0;
NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
pTriple = (struct chnl_txpower_triple *)(pCoutryIe + 3);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index d36963469015..9576b647f6b1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -169,9 +169,9 @@ struct cb_desc {
#define MGN_MCS14 0x8e
#define MGN_MCS15 0x8f
-#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A || \
+#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A || \
priv->ieee80211->current_network.mode == IEEE_N_24G || \
- priv->ieee80211->current_network.mode == IEEE_N_5G) ? \
+ priv->ieee80211->current_network.mode == IEEE_N_5G) ? \
16 : 10)
#define MGMT_QUEUE_NUM 5
@@ -387,7 +387,7 @@ struct ieee_param {
#define IEEE80211_STYPE_ACK 0x00D0
#define IEEE80211_STYPE_CFEND 0x00E0
#define IEEE80211_STYPE_CFENDACK 0x00F0
-#define IEEE80211_STYPE_BLOCKACK 0x0094
+#define IEEE80211_STYPE_BLOCKACK 0x0094
/* data */
#define IEEE80211_STYPE_DATA 0x0000
@@ -452,23 +452,23 @@ do { if (ieee80211_debug_level & (level)) \
printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
//wb added to debug out data buf
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
-#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do { if ((ieee80211_debug_level & (level)) == (level)) \
- { \
- int i; \
- u8 *pdata = (u8 *) data; \
- printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for (i = 0; i < (int)(datalen); i++) \
- { \
- printk("%2x ", pdata[i]); \
- if ((i + 1) % 16 == 0) printk("\n"); \
- } \
- printk("\n"); \
- } \
+#define IEEE80211_DEBUG_DATA(level, data, datalen) \
+ do { if ((ieee80211_debug_level & (level)) == (level)) \
+ { \
+ int i; \
+ u8 *pdata = (u8 *)data; \
+ printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
+ for (i = 0; i < (int)(datalen); i++) { \
+ printk("%2x ", pdata[i]); \
+ if ((i + 1) % 16 == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
} while (0)
#else
#define IEEE80211_DEBUG (level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA (level, data, datalen) do {} while(0)
+#define IEEE80211_DEBUG_DATA (level, data, datalen) do {} while (0)
#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -1649,10 +1649,8 @@ struct ieee80211_device {
struct list_head Rx_TS_Pending_List;
struct list_head Rx_TS_Unused_List;
struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
-//#ifdef TO_DO_LIST
struct rx_reorder_entry RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
-//#endif
// Qos related. Added by Annie, 2005-11-01.
// PSTA_QOS pStaQos;
u8 ForcedPriority; // Force per-packet priority 1~7. (default: 0, not to force it.)
@@ -2015,8 +2013,8 @@ struct ieee80211_device {
#define IEEE_A (1<<0)
#define IEEE_B (1<<1)
#define IEEE_G (1<<2)
-#define IEEE_N_24G (1<<4)
-#define IEEE_N_5G (1<<5)
+#define IEEE_N_24G (1<<4)
+#define IEEE_N_5G (1<<5)
#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Generate a 802.11 header */
@@ -2426,7 +2424,7 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
return escaped;
}
- snprintf(escaped, sizeof(escaped), "%*pEn", essid_len, essid);
+ snprintf(escaped, sizeof(escaped), "%*pE", essid_len, essid);
return escaped;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 36987fccac5d..01012dddcd73 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -176,7 +176,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
}
-static void *ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
+static void *ieee80211_crypt_null_init(int keyidx) { return (void *)1; }
static void ieee80211_crypt_null_deinit(void *priv) {}
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index d7188b3f3190..c241cf484023 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -19,6 +19,7 @@
#include "ieee80211.h"
#include <linux/crypto.h>
+#include <crypto/aead.h>
#include <linux/scatterlist.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -44,20 +45,13 @@ struct ieee80211_ccmp_data {
int key_idx;
- struct crypto_tfm *tfm;
+ struct crypto_aead *tfm;
/* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
- tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
- u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+ u8 tx_aad[2 * AES_BLOCK_LEN];
+ u8 rx_aad[2 * AES_BLOCK_LEN];
};
-static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
- const u8 pt[16], u8 ct[16])
-{
- crypto_cipher_encrypt_one((void *)tfm, ct, pt);
-}
-
static void *ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
@@ -67,7 +61,7 @@ static void *ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
+ priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
pr_debug("ieee80211_crypt_ccmp: could not allocate crypto API aes\n");
priv->tfm = NULL;
@@ -79,7 +73,7 @@ static void *ieee80211_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
- crypto_free_cipher((void *)priv->tfm);
+ crypto_free_aead(priv->tfm);
kfree(priv);
}
@@ -91,28 +85,17 @@ static void ieee80211_ccmp_deinit(void *priv)
struct ieee80211_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
- crypto_free_cipher((void *)_priv->tfm);
+ crypto_free_aead(_priv->tfm);
kfree(priv);
}
-static inline void xor_block(u8 *b, u8 *a, size_t len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- b[i] ^= a[i];
-}
-
-static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct rtl_80211_hdr_4addr *hdr,
- u8 *pn, size_t dlen, u8 *b0, u8 *auth,
- u8 *s0)
+static int ccmp_init_iv_and_aad(struct rtl_80211_hdr_4addr *hdr,
+ u8 *pn, u8 *iv, u8 *aad)
{
u8 *pos, qc = 0;
size_t aad_len;
u16 fc;
int a4_included, qc_included;
- u8 aad[2 * AES_BLOCK_LEN];
fc = le16_to_cpu(hdr->frame_ctl);
a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
@@ -133,18 +116,20 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
qc = *pos & 0x0f;
aad_len += 2;
}
- /* CCM Initial Block:
- * Flag (Include authentication header, M=3 (8-octet MIC),
- * L=1 (2-octet Dlen))
- * Nonce: 0x00 | A2 | PN
- * Dlen
+
+ /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+ * mode authentication are not allowed to collide, yet both are derived
+ * from the same vector. We only set L := 1 here to indicate that the
+ * data size can be represented in (L+1) bytes. The CCM layer will take
+ * care of storing the data length in the top (L+1) bytes and setting
+ * and clearing the other bits as is required to derive the two IVs.
*/
- b0[0] = 0x59;
- b0[1] = qc;
- memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
- memcpy(b0 + 8, pn, CCMP_PN_LEN);
- b0[14] = (dlen >> 8) & 0xff;
- b0[15] = dlen & 0xff;
+ iv[0] = 0x1;
+
+ /* Nonce: QC | A2 | PN */
+ iv[1] = qc;
+ memcpy(iv + 2, hdr->addr2, ETH_ALEN);
+ memcpy(iv + 8, pn, CCMP_PN_LEN);
/* AAD:
* FC with bits 4..6 and 11..13 masked to zero; 14 is always one
@@ -154,38 +139,27 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
* QC (if present)
*/
pos = (u8 *)hdr;
- aad[0] = 0; /* aad_len >> 8 */
- aad[1] = aad_len & 0xff;
- aad[2] = pos[0] & 0x8f;
- aad[3] = pos[1] & 0xc7;
- memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+ aad[0] = pos[0] & 0x8f;
+ aad[1] = pos[1] & 0xc7;
+ memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
pos = (u8 *)&hdr->seq_ctl;
- aad[22] = pos[0] & 0x0f;
- aad[23] = 0; /* all bits masked */
- memset(aad + 24, 0, 8);
+ aad[20] = pos[0] & 0x0f;
+ aad[21] = 0; /* all bits masked */
+ memset(aad + 22, 0, 8);
if (a4_included)
- memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+ memcpy(aad + 22, hdr->addr4, ETH_ALEN);
if (qc_included) {
- aad[a4_included ? 30 : 24] = qc;
+ aad[a4_included ? 28 : 22] = qc;
/* rest of QC masked */
}
- /* Start with the first block and AAD */
- ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
- xor_block(auth, aad, AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
- ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
- b0[0] &= 0x07;
- b0[14] = 0;
- b0[15] = 0;
- ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
+ return aad_len;
}
static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_ccmp_data *key = priv;
- int data_len, i;
+ int i;
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
@@ -195,7 +169,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- data_len = skb->len - hdr_len;
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
@@ -220,36 +193,34 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
- int blocks, last, len;
- u8 *mic;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
-
- /* mic is moved to here by john */
- mic = skb_put(skb, CCMP_MIC_LEN);
-
- ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Authentication */
- xor_block(b, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
- /* Encryption, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
- xor_block(pos, e, len);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->tx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
- for (i = 0; i < CCMP_MIC_LEN; i++)
- mic[i] = b[i] ^ s0[i];
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad);
+
+ skb_put(skb, CCMP_MIC_LEN);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN,
+ data_len + CCMP_MIC_LEN);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_encrypt(req);
+ aead_request_free(req);
+
+ return ret;
}
return 0;
}
@@ -309,33 +280,31 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -4;
}
if (!tcb_desc->bHwSec) {
- size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
- u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
- u8 *b0 = key->rx_b0;
- u8 *b = key->rx_b;
- u8 *a = key->rx_a;
- int i, blocks, last, len;
-
- ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
- xor_block(mic, b, CCMP_MIC_LEN);
-
- blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last = data_len % AES_BLOCK_LEN;
-
- for (i = 1; i <= blocks; i++) {
- len = (i == blocks && last) ? last : AES_BLOCK_LEN;
- /* Decrypt, with counter */
- b0[14] = (i >> 8) & 0xff;
- b0[15] = i & 0xff;
- ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
- xor_block(pos, b, len);
- /* Authentication */
- xor_block(a, pos, len);
- ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
- pos += len;
- }
+ struct aead_request *req;
+ struct scatterlist sg[2];
+ u8 *aad = key->rx_aad;
+ u8 iv[AES_BLOCK_LEN];
+ int aad_len, ret;
+ size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN;
+
+ req = aead_request_alloc(key->tfm, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad);
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], aad, aad_len);
+ sg_set_buf(&sg[1], pos, data_len);
+
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, aad_len);
+ aead_request_set_crypt(req, sg, sg, data_len, iv);
+
+ ret = crypto_aead_decrypt(req);
+ aead_request_free(req);
- if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+ if (ret) {
if (net_ratelimit()) {
netdev_dbg(skb->dev, "CCMP: decrypt failed: STA=%pM\n",
hdr->addr2);
@@ -358,12 +327,11 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_ccmp_data *data = priv;
int keyidx;
- struct crypto_tfm *tfm = data->tfm;
+ struct crypto_aead *tfm = data->tfm;
keyidx = data->key_idx;
memset(data, 0, sizeof(*data));
data->key_idx = keyidx;
- data->tfm = tfm;
if (len == CCMP_TK_LEN) {
memcpy(data->key, key, CCMP_TK_LEN);
data->key_set = 1;
@@ -375,7 +343,9 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
data->rx_pn[4] = seq[1];
data->rx_pn[5] = seq[0];
}
- crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
+ if (crypto_aead_setauthsize(tfm, CCMP_MIC_LEN) ||
+ crypto_aead_setkey(tfm, data->key, CCMP_TK_LEN))
+ return -1;
} else if (len == 0) {
data->key_set = 0;
} else {
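
The set_key path now has to program the AEAD transform instead of a bare AES cipher; for 802.11 CCMP that means an 8-byte tag (CCMP_MIC_LEN) and a 16-byte temporal key (CCMP_TK_LEN). A hedged sketch of that step, pulled out of the driver structs (the helper name is invented for illustration):

#include <crypto/aead.h>
#include <linux/err.h>

/* Sketch: allocate a synchronous ccm(aes) transform and program it for
 * CCMP (128-bit TK, 64-bit MIC).  Error handling trimmed to the two calls
 * the patch adds.
 */
static struct crypto_aead *ccmp_alloc_and_key(const u8 *tk)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	if (crypto_aead_setauthsize(tfm, 8) ||		/* CCMP_MIC_LEN */
	    crypto_aead_setkey(tfm, tk, 16)) {		/* CCMP_TK_LEN  */
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}

Passing CRYPTO_ALG_ASYNC as the mask restricts the allocation to synchronous implementations, which matches the blocking crypto_aead_encrypt()/crypto_aead_decrypt() calls used in the data path.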
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 0927b2b15151..6f4710171151 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -160,7 +160,7 @@ static inline u16 Hi16(u32 val)
static inline u16 Mk16(u8 hi, u8 lo)
{
- return lo | (((u16) hi) << 8);
+ return lo | (((u16)hi) << 8);
}
static const u16 Sbox[256] = {
@@ -238,7 +238,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
* Make temporary area overlap WEP seed so that the final copy can be
* avoided on little endian hosts.
*/
- u16 *PPK = (u16 *) &WEPSeed[4];
+ u16 *PPK = (u16 *)&WEPSeed[4];
/* Step 1 - make copy of TTAK and bring in TSC */
PPK[0] = TTAK[0];
@@ -299,7 +299,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
skb->len < hdr_len)
return -1;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tcb_desc->bHwSec) {
if (!tkey->tx_phase1_done) {
@@ -343,7 +343,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len+4);
+ sg_init_one(&sg, pos, len + 4);
skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -383,7 +383,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & BIT(5))) {
@@ -435,7 +435,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen+4);
+ sg_init_one(&sg, pos, plen + 4);
skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -523,7 +523,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
struct rtl_80211_hdr_4addr *hdr11;
- hdr11 = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr11 = (struct rtl_80211_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_TODS:
@@ -556,7 +556,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
printk(KERN_DEBUG "Invalid packet for Michael MIC add "
@@ -599,7 +599,7 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+ wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
@@ -609,7 +609,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
u8 mic[8];
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (!tkey->key_set)
return -1;
@@ -626,7 +626,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtl_80211_hdr_4addr *hdr;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 805493a0870d..26482c3dcd1c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len+4);
+ sg_init_one(&sg, pos, len + 4);
skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -192,7 +192,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen+4);
+ sg_init_one(&sg, pos, plen + 4);
skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 0a3e478fccd6..5c33bcb0db2e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -103,17 +103,17 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
u8 tid;
if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else {
- tid = 0;
+ tid = 0;
}
if (frag == 0) {
@@ -124,7 +124,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
2 /* alignment */ +
8 /* WEP */ +
ETH_ALEN /* WDS */ +
- (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
+ (IEEE80211_QOS_HAS_SEQ(fc) ? 2 : 0) /* QOS Control */);
if (!skb)
return NULL;
@@ -145,7 +145,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
} else {
/* received a fragment of a frame for which the head fragment
* should have already been received */
- entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2,
+ entry = ieee80211_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
hdr->addr1);
if (entry) {
entry->last_frag = frag;
@@ -169,18 +169,18 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
struct rtl_80211_hdr_4addrqos *hdr_4addrqos;
u8 tid;
- if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else {
- tid = 0;
+ tid = 0;
}
entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
@@ -216,10 +216,10 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
- ieee80211_rx_mgt(ieee,(struct rtl_80211_hdr_4addr *)skb->data,rx_stats);
+ ieee80211_rx_mgt(ieee, (struct rtl_80211_hdr_4addr *)skb->data, rx_stats);
/* if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) */
- if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))/* use ADDR1 to perform address matching for Management frames */
- {
+ if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) {
+ /* use ADDR1 to perform address matching for Management frames */
dev_kfree_skb_any(skb);
return 0;
}
@@ -281,11 +281,11 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+static unsigned char rfc1042_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+static unsigned char bridge_tunnel_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
/* Called by ieee80211_rx_frame_decrypt */
@@ -300,7 +300,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
if (skb->len < 24)
return 0;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
@@ -339,12 +339,11 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
if (!crypt || !crypt->ops->decrypt_mpdu)
return 0;
- if (ieee->hwsec_active)
- {
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
+ if (ieee->hwsec_active) {
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
if (ieee->tkip_countermeasures &&
@@ -386,13 +385,12 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
if (!crypt || !crypt->ops->decrypt_msdu)
return 0;
- if (ieee->hwsec_active)
- {
- struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
+ if (ieee->hwsec_active) {
+ struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -410,7 +408,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *s
/* this function is stolen from ipw2200 driver*/
-#define IEEE_PACKET_RETRY_TIME (5*HZ)
+#define IEEE_PACKET_RETRY_TIME (5 * HZ)
static int is_duplicate_packet(struct ieee80211_device *ieee,
struct rtl_80211_hdr_4addr *header)
{
@@ -426,18 +424,18 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
//TO2DS and QoS
- if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
- hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
- tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
- } else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
- hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
- tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
- tid = UP2AC(tid);
- tid++;
+ if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
+ hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header;
+ tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
+ } else if (IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
+ hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header;
+ tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
+ tid = UP2AC(tid);
+ tid++;
} else { // no QoS
- tid = 0;
+ tid = 0;
}
switch (ieee->iw_mode) {
@@ -507,8 +505,7 @@ drop:
static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *pReorderEntry)
{
struct list_head *pList = &pTS->rx_pending_pkt_list;
- while(pList->next != &pTS->rx_pending_pkt_list)
- {
+ while (pList->next != &pTS->rx_pending_pkt_list) {
if (SN_LESS(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
pList = pList->next;
else if (SN_EQUAL(pReorderEntry->SeqNum, list_entry(pList->next, struct rx_reorder_entry, List)->SeqNum))
@@ -524,17 +521,16 @@ static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *p
return true;
}
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8 index)
+void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray, u8 index)
{
- u8 i = 0 , j=0;
+ u8 i = 0, j = 0;
u16 ethertype;
// if(index > 1)
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__func__,index);
- for(j = 0; j<index; j++)
- {
+ for (j = 0; j < index; j++) {
//added by amy for reorder
struct ieee80211_rxb *prxb = prxbIndicateArray[j];
- for(i = 0; i<prxb->nr_subframes; i++) {
+ for (i = 0; i < prxb->nr_subframes; i++) {
struct sk_buff *sub_skb = prxb->subframes[i];
/* convert hdr + possible LLC headers into Ethernet header */
@@ -585,7 +581,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
u16 WinEnd = (pTS->rx_indicate_seq + WinSize - 1) % 4096;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->rx_indicate_seq is %d, WinSize is %d\n",__func__,SeqNum,pTS->rx_indicate_seq,WinSize);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Seq is %d,pTS->rx_indicate_seq is %d, WinSize is %d\n", __func__, SeqNum, pTS->rx_indicate_seq, WinSize);
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
sizeof(struct ieee80211_rxb *),
@@ -599,12 +595,12 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
/* Drop out the packet which SeqNum is smaller than WinStart */
if (SN_LESS(SeqNum, pTS->rx_indicate_seq)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
pTS->rx_indicate_seq, SeqNum);
pHTInfo->RxReorderDropCounter++;
{
int i;
- for(i =0; i < prxb->nr_subframes; i++) {
+ for (i = 0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
@@ -620,16 +616,16 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
* 1. Incoming SeqNum is equal to WinStart =>Window shift 1
* 2. Incoming SeqNum is larger than the WinEnd => Window shift N
*/
- if(SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
+ if (SN_EQUAL(SeqNum, pTS->rx_indicate_seq)) {
pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
bMatchWinStart = true;
- } else if(SN_LESS(WinEnd, SeqNum)) {
- if(SeqNum >= (WinSize - 1)) {
- pTS->rx_indicate_seq = SeqNum + 1 -WinSize;
+ } else if (SN_LESS(WinEnd, SeqNum)) {
+ if (SeqNum >= (WinSize - 1)) {
+ pTS->rx_indicate_seq = SeqNum + 1 - WinSize;
} else {
pTS->rx_indicate_seq = 4095 - (WinSize - (SeqNum + 1)) + 1;
}
- IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
}
/*
@@ -641,7 +637,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
* 1. All packets with SeqNum smaller than WinStart => Indicate
* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
*/
- if(bMatchWinStart) {
+ if (bMatchWinStart) {
/* Current packet is going to be indicated.*/
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
pTS->rx_indicate_seq, SeqNum);
@@ -651,7 +647,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
} else {
/* Current packet is going to be inserted into pending list.*/
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to ordered list\n",__func__);
- if(!list_empty(&ieee->RxReorder_Unused_List)) {
+ if (!list_empty(&ieee->RxReorder_Unused_List)) {
pReorderEntry = list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, List);
list_del_init(&pReorderEntry->List);
@@ -660,13 +656,13 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
pReorderEntry->prxb = prxb;
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum);
- if(!AddReorderEntry(pTS, pReorderEntry)) {
+ if (!AddReorderEntry(pTS, pReorderEntry)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
__func__, pTS->rx_indicate_seq, SeqNum);
- list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
+ list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
{
int i;
- for(i =0; i < prxb->nr_subframes; i++) {
+ for (i = 0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
@@ -674,10 +670,9 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,
- "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
+ "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
}
- }
- else {
+ } else {
/*
* Packets are dropped if there is not enough reorder entries.
* This part shall be modified!! We can just indicate all the
@@ -686,7 +681,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
{
int i;
- for(i =0; i < prxb->nr_subframes; i++) {
+ for (i = 0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
@@ -696,12 +691,11 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
}
/* Check if there is any packet need indicate.*/
- while(!list_empty(&pTS->rx_pending_pkt_list)) {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__);
+ while (!list_empty(&pTS->rx_pending_pkt_list)) {
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): start RREORDER indicate\n", __func__);
pReorderEntry = list_entry(pTS->rx_pending_pkt_list.prev, struct rx_reorder_entry, List);
if (SN_LESS(pReorderEntry->SeqNum, pTS->rx_indicate_seq) ||
- SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
- {
+ SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq)) {
/* This protect buffer from overflow. */
if (index >= REORDER_WIN_SIZE) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
@@ -711,15 +705,15 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
list_del_init(&pReorderEntry->List);
- if(SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
+ if (SN_EQUAL(pReorderEntry->SeqNum, pTS->rx_indicate_seq))
pTS->rx_indicate_seq = (pTS->rx_indicate_seq + 1) % 4096;
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->rx_indicate_seq, SeqNum);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n", pTS->rx_indicate_seq, SeqNum);
prxbIndicateArray[index] = pReorderEntry->prxb;
// printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum);
index++;
- list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
+ list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
break;
@@ -727,13 +721,13 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
}
/* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/
- if (index>0) {
+ if (index > 0) {
// Cancel previous pending timer.
// del_timer_sync(&pTS->rx_pkt_pending_timer);
pTS->rx_timeout_indicate_seq = 0xffff;
// Indicate packets
- if(index>REORDER_WIN_SIZE){
+ if (index > REORDER_WIN_SIZE) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
kfree(prxbIndicateArray);
return;
@@ -743,9 +737,9 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
if (bPktInBuf && pTS->rx_timeout_indicate_seq == 0xffff) {
// Set new pending timer.
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __func__);
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): SET rx timeout timer\n", __func__);
pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
- if(timer_pending(&pTS->rx_pkt_pending_timer))
+ if (timer_pending(&pTS->rx_pkt_pending_timer))
del_timer_sync(&pTS->rx_pkt_pending_timer);
pTS->rx_pkt_pending_timer.expires = jiffies +
msecs_to_jiffies(pHTInfo->RxReorderPendingTime);
@@ -762,12 +756,12 @@ static u8 parse_subframe(struct sk_buff *skb,
struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
- u16 LLCOffset= sizeof(struct rtl_80211_hdr_3addr);
+ u16 LLCOffset = sizeof(struct rtl_80211_hdr_3addr);
u16 ChkLength;
bool bIsAggregateFrame = false;
u16 nSubframe_Length;
u8 nPadding_Length = 0;
- u16 SeqNum=0;
+ u16 SeqNum = 0;
struct sk_buff *sub_skb;
/* just for debug purpose */
@@ -793,7 +787,7 @@ static u8 parse_subframe(struct sk_buff *skb,
skb_pull(skb, LLCOffset);
- if(!bIsAggregateFrame) {
+ if (!bIsAggregateFrame) {
rxb->nr_subframes = 1;
#ifdef JOHN_NOCPY
rxb->subframes[0] = skb;
@@ -801,26 +795,26 @@ static u8 parse_subframe(struct sk_buff *skb,
rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
#endif
- memcpy(rxb->src,src,ETH_ALEN);
- memcpy(rxb->dst,dst,ETH_ALEN);
+ memcpy(rxb->src, src, ETH_ALEN);
+ memcpy(rxb->dst, dst, ETH_ALEN);
//IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len);
return 1;
} else {
rxb->nr_subframes = 0;
- memcpy(rxb->src,src,ETH_ALEN);
- memcpy(rxb->dst,dst,ETH_ALEN);
- while(skb->len > ETHERNET_HEADER_SIZE) {
+ memcpy(rxb->src, src, ETH_ALEN);
+ memcpy(rxb->dst, dst, ETH_ALEN);
+ while (skb->len > ETHERNET_HEADER_SIZE) {
/* Offset 12 denote 2 mac address */
nSubframe_Length = *((u16 *)(skb->data + 12));
//==m==>change the length order
- nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8);
+ nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8);
- if (skb->len<(ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
__func__, rxb->nr_subframes);
- printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__func__, nSubframe_Length);
- printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length);
- printk("The Packet SeqNum is %d\n",SeqNum);
+ printk("%s: A-MSDU parse error!! Subframe Length: %d\n", __func__, nSubframe_Length);
+ printk("nRemain_Length is %d and nSubframe_Length is : %d\n", skb->len, nSubframe_Length);
+ printk("The Packet SeqNum is %d\n", SeqNum);
return 0;
}
@@ -923,9 +917,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
- if (HTCCheck(ieee, skb->data))
- {
- if(net_ratelimit())
+ if (HTCCheck(ieee, skb->data)) {
+ if (net_ratelimit())
printk("find HTCControl\n");
hdrlen += 4;
rx_stats->bContainHTC = true;
@@ -972,7 +965,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
* stations that do not support WEP key mapping). */
if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
- (void) hostap_handle_sta_crypto(local, hdr, &crypt,
+ (void)hostap_handle_sta_crypto(local, hdr, &crypt,
&sta);
#endif
@@ -998,39 +991,32 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
goto rx_dropped;
// if QoS enabled, should check the sequence for each of the AC
- if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
+ if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
if (is_duplicate_packet(ieee, hdr))
- goto rx_dropped;
+ goto rx_dropped;
- }
- else
- {
+ } else {
struct rx_ts_record *pRxTS = NULL;
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__func__, tid);
- if(GetTs(
+ if (GetTs(
ieee,
- (struct ts_common_info **) &pRxTS,
+ (struct ts_common_info **)&pRxTS,
hdr->addr2,
Frame_QoSTID((u8 *)(skb->data)),
RX_DIR,
- true))
- {
+ true)) {
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->rx_last_frag_num is %d,frag is %d,pRxTS->rx_last_seq_num is %d,seq is %d\n",__func__,pRxTS->rx_last_frag_num,frag,pRxTS->rx_last_seq_num,WLAN_GET_SEQ_SEQ(sc));
- if ((fc & (1<<11)) &&
+ if ((fc & (1 << 11)) &&
(frag == pRxTS->rx_last_frag_num) &&
(WLAN_GET_SEQ_SEQ(sc) == pRxTS->rx_last_seq_num)) {
goto rx_dropped;
- }
- else
- {
+ } else {
pRxTS->rx_last_frag_num = frag;
pRxTS->rx_last_seq_num = WLAN_GET_SEQ_SEQ(sc);
}
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n",__func__);
+ } else {
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n", __func__);
goto rx_dropped;
}
}
@@ -1126,14 +1112,13 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
- {
+ (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
printk("decrypt frame error\n");
goto rx_dropped;
}
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
/* skb: hdr + (possibly fragmented) plaintext payload */
// PR: FIXME: hostap has additional conditions in the "if" below:
@@ -1185,15 +1170,14 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* this was the last fragment and the frame will be
* delivered, so remove skb from fragment cache */
skb = frag_skb;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
ieee80211_frag_cache_invalidate(ieee, hdr);
}
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
- {
+ ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
printk("==>decrypt msdu error\n");
goto rx_dropped;
}
@@ -1202,7 +1186,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee->LinkDetectInfo.NumRecvDataInPeriod++;
ieee->LinkDetectInfo.NumRxOkInPeriod++;
- hdr = (struct rtl_80211_hdr_4addr *) skb->data;
+ hdr = (struct rtl_80211_hdr_4addr *)skb->data;
if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
if (/*ieee->ieee802_1x &&*/
ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
@@ -1227,10 +1211,10 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
#ifdef CONFIG_IEEE80211_DEBUG
if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
- struct eapol *eap = (struct eapol *)(skb->data +
- 24);
- IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
- eap_get_type(eap->type));
+ struct eapol *eap = (struct eapol *)(skb->data +
+ 24);
+ IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+ eap_get_type(eap->type));
}
#endif
@@ -1250,13 +1234,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
*/
//added by amy for reorder
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
- && !is_multicast_ether_addr(hdr->addr1))
- {
+ && !is_multicast_ether_addr(hdr->addr1)) {
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee,(struct ts_common_info **) &pTS,hdr->addr2,TID,RX_DIR,true);
- if (TID !=0 && TID !=3)
- {
+ GetTs(ieee, (struct ts_common_info **)&pTS, hdr->addr2, TID, RX_DIR, true);
+ if (TID != 0 && TID != 3) {
ieee->bis_any_nonbepkts = true;
}
}
@@ -1270,7 +1252,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* qos data packets & reserved bit is 1 */
if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
/* only to free rxb, and not submit the packets to upper layer */
- for(i =0; i < rxb->nr_subframes; i++) {
+ for (i = 0; i < rxb->nr_subframes; i++) {
dev_kfree_skb(rxb->subframes[i]);
}
kfree(rxb);
@@ -1281,7 +1263,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
//added by amy for reorder
if (!ieee->pHTInfo->bCurRxReorderEnable || !pTS) {
//added by amy for reorder
- for(i = 0; i<rxb->nr_subframes; i++) {
+ for (i = 0; i < rxb->nr_subframes; i++) {
struct sk_buff *sub_skb = rxb->subframes[i];
if (sub_skb) {
@@ -1324,10 +1306,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
kfree(rxb);
rxb = NULL;
- }
- else
- {
- IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__func__);
+ } else {
+ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n", __func__);
RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
}
#ifndef JOHN_NOCPY
@@ -1407,10 +1387,9 @@ static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
/*
* Parse a QoS information element
*/
-static int ieee80211_read_qos_info_element(struct
- ieee80211_qos_information_element
- *element_info, struct ieee80211_info_element
- *info_element)
+static int ieee80211_read_qos_info_element(
+ struct ieee80211_qos_information_element *element_info,
+ struct ieee80211_info_element *info_element)
{
int ret = 0;
u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
@@ -1438,11 +1417,9 @@ static int ieee80211_read_qos_info_element(struct
/*
* Write QoS parameters from the ac parameters.
*/
-static int ieee80211_qos_convert_ac_to_parameters(struct
- ieee80211_qos_parameter_info
- *param_elm, struct
- ieee80211_qos_parameters
- *qos_param)
+static int ieee80211_qos_convert_ac_to_parameters(
+ struct ieee80211_qos_parameter_info *param_elm,
+ struct ieee80211_qos_parameters *qos_param)
{
int i;
struct ieee80211_qos_ac_parameter *ac_params;
@@ -1455,12 +1432,12 @@ static int ieee80211_qos_convert_ac_to_parameters(struct
aci = (ac_params->aci_aifsn & 0x60) >> 5;
- if(aci >= QOS_QUEUE_NUM)
+ if (aci >= QOS_QUEUE_NUM)
continue;
qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
- qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
+ qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2 : qos_param->aifs[aci];
qos_param->cw_min[aci] =
cpu_to_le16(ac_params->ecw_min_max & 0x0F);
@@ -1561,15 +1538,12 @@ static inline void ieee80211_extract_country_ie(
u8 *addr2
)
{
- if (IS_DOT11D_ENABLE(ieee))
- {
- if (info_element->len!= 0)
- {
+ if (IS_DOT11D_ENABLE(ieee)) {
+ if (info_element->len != 0) {
memcpy(network->CountryIeBuf, info_element->data, info_element->len);
network->CountryIeLen = info_element->len;
- if (!IS_COUNTRY_IE_VALID(ieee))
- {
+ if (!IS_COUNTRY_IE_VALID(ieee)) {
dot11d_update_country_ie(ieee, addr2, info_element->len, info_element->data);
}
}
@@ -1579,8 +1553,7 @@ static inline void ieee80211_extract_country_ie(
// some AP (e.g. Cisco 1242) don't include country IE in their
// probe response frame.
//
- if (IS_EQUAL_CIE_SRC(ieee, addr2) )
- {
+ if (IS_EQUAL_CIE_SRC(ieee, addr2)) {
UPDATE_CIE_WATCHDOG(ieee);
}
}
@@ -1595,9 +1568,9 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
{
u8 i;
short offset;
- u16 tmp_htcap_len=0;
- u16 tmp_htinfo_len=0;
- u16 ht_realtek_agg_len=0;
+ u16 tmp_htcap_len = 0;
+ u16 tmp_htinfo_len = 0;
+ u16 ht_realtek_agg_len = 0;
u8 ht_realtek_agg_buf[MAX_IE_LEN];
// u16 broadcom_len = 0;
#ifdef CONFIG_IEEE80211_DEBUG
@@ -1628,7 +1601,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
}
network->ssid_len = min(info_element->len,
- (u8) IW_ESSID_MAX_SIZE);
+ (u8)IW_ESSID_MAX_SIZE);
memcpy(network->ssid, info_element->data, network->ssid_len);
if (network->ssid_len < IW_ESSID_MAX_SIZE)
memset(network->ssid + network->ssid_len, 0,
@@ -1707,14 +1680,14 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
break;
case MFIE_TYPE_TIM:
- if(info_element->len < 4)
+ if (info_element->len < 4)
break;
network->tim.tim_count = info_element->data[0];
network->tim.tim_period = info_element->data[1];
network->dtim_period = info_element->data[1];
- if(ieee->state != IEEE80211_LINKED)
+ if (ieee->state != IEEE80211_LINKED)
break;
network->last_dtim_sta_time[0] = stats->mac_time[0];
@@ -1722,22 +1695,22 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
network->dtim_data = IEEE80211_DTIM_VALID;
- if(info_element->data[0] != 0)
+ if (info_element->data[0] != 0)
break;
- if(info_element->data[2] & 1)
+ if (info_element->data[2] & 1)
network->dtim_data |= IEEE80211_DTIM_MBCAST;
- offset = (info_element->data[2] >> 1)*2;
+ offset = (info_element->data[2] >> 1) * 2;
- if(ieee->assoc_id < 8*offset ||
- ieee->assoc_id > 8*(offset + info_element->len -3))
+ if (ieee->assoc_id < 8 * offset ||
+ ieee->assoc_id > 8 * (offset + info_element->len - 3))
break;
offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
- if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
+ if (info_element->data[3 + offset] & (1 << (ieee->assoc_id % 8)))
network->dtim_data |= IEEE80211_DTIM_UCAST;
//IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
@@ -1790,66 +1763,66 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
#endif
//for HTcap and HTinfo parameters
- if(tmp_htcap_len == 0){
- if(info_element->len >= 4 &&
+ if (tmp_htcap_len == 0) {
+ if (info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x90 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x033){
- tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htcap_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
- sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
- }
+ tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htcap_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
+ sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
+ memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
+ }
}
- if(tmp_htcap_len != 0)
+ if (tmp_htcap_len != 0)
network->bssht.bdSupportHT = true;
else
network->bssht.bdSupportHT = false;
}
- if(tmp_htinfo_len == 0){
- if(info_element->len >= 4 &&
+ if (tmp_htinfo_len == 0) {
+ if (info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x90 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x034){
- tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htinfo_len != 0){
- network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- if(tmp_htinfo_len){
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
- sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
- }
-
+ tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htinfo_len != 0) {
+ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
+ if (tmp_htinfo_len) {
+ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
+ sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
+ memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
}
+ }
+
}
}
- if(ieee->aggregation){
- if(network->bssht.bdSupportHT){
- if(info_element->len >= 4 &&
+ if (ieee->aggregation) {
+ if (network->bssht.bdSupportHT) {
+ if (info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0xe0 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x02){
- ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN);
- memcpy(ht_realtek_agg_buf,info_element->data,info_element->len);
+ ht_realtek_agg_len = min(info_element->len, (u8)MAX_IE_LEN);
+ memcpy(ht_realtek_agg_buf, info_element->data, info_element->len);
}
- if(ht_realtek_agg_len >= 5){
+ if (ht_realtek_agg_len >= 5) {
network->bssht.bdRT2RTAggregation = true;
- if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
- network->bssht.bdRT2RTLongSlotTime = true;
+ if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
+ network->bssht.bdRT2RTLongSlotTime = true;
}
}
@@ -1870,78 +1843,63 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[1] == 0x10 &&
info_element->data[2] == 0x18)){
- network->broadcom_cap_exist = true;
+ network->broadcom_cap_exist = true;
}
}
- if(info_element->len >= 3 &&
+ if (info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x0c &&
- info_element->data[2] == 0x43)
- {
+ info_element->data[2] == 0x43) {
network->ralink_cap_exist = true;
- }
- else
+ } else
network->ralink_cap_exist = false;
//added by amy for atheros AP
- if((info_element->len >= 3 &&
+ if ((info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x03 &&
info_element->data[2] == 0x7f) ||
(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x13 &&
- info_element->data[2] == 0x74))
- {
- printk("========>%s(): athros AP is exist\n",__func__);
+ info_element->data[2] == 0x74)) {
+ printk("========>%s(): athros AP is exist\n", __func__);
network->atheros_cap_exist = true;
- }
- else
+ } else
network->atheros_cap_exist = false;
- if(info_element->len >= 3 &&
+ if (info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
- info_element->data[2] == 0x96)
- {
+ info_element->data[2] == 0x96) {
network->cisco_cap_exist = true;
- }
- else
+ } else
network->cisco_cap_exist = false;
//added by amy for LEAP of cisco
if (info_element->len > 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x01)
- {
- if(info_element->len == 6)
- {
+ info_element->data[3] == 0x01) {
+ if (info_element->len == 6) {
memcpy(network->CcxRmState, &info_element[4], 2);
- if(network->CcxRmState[0] != 0)
- {
+ if (network->CcxRmState[0] != 0)
network->bCcxRmEnable = true;
- }
else
network->bCcxRmEnable = false;
//
// CCXv4 Table 59-1 MBSSID Masks.
//
network->MBssidMask = network->CcxRmState[1] & 0x07;
- if(network->MBssidMask != 0)
- {
+ if (network->MBssidMask != 0) {
network->bMBssidValid = true;
network->MBssidMask = 0xff << (network->MBssidMask);
ether_addr_copy(network->MBssid, network->bssid);
network->MBssid[5] &= network->MBssidMask;
- }
- else
- {
+ } else {
network->bMBssidValid = false;
}
- }
- else
- {
+ } else {
network->bCcxRmEnable = false;
}
}
@@ -1949,15 +1907,11 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
- info_element->data[3] == 0x03)
- {
- if(info_element->len == 5)
- {
+ info_element->data[3] == 0x03) {
+ if (info_element->len == 5) {
network->bWithCcxVerNum = true;
network->BssCcxVerNumber = info_element->data[4];
- }
- else
- {
+ } else {
network->bWithCcxVerNum = false;
network->BssCcxVerNumber = 0;
}
@@ -1977,19 +1931,18 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
case MFIE_TYPE_HT_CAP:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
info_element->len);
- tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htcap_len != 0){
+ tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htcap_len != 0) {
network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
- network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
- sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
- memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
+ network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? \
+ sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
+ memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen);
//If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
// windows driver will update WMM parameters each beacon received once connected
// Linux driver is a bit different.
network->bssht.bdSupportHT = true;
- }
- else
+ } else
network->bssht.bdSupportHT = false;
break;
@@ -1997,37 +1950,31 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
case MFIE_TYPE_HT_INFO:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
info_element->len);
- tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
- if(tmp_htinfo_len){
+ tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
+ if (tmp_htinfo_len) {
network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
- network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
- sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
- memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
+ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? \
+ sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
+ memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen);
}
break;
case MFIE_TYPE_AIRONET:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
info_element->len);
- if(info_element->len >IE_CISCO_FLAG_POSITION)
- {
+ if (info_element->len > IE_CISCO_FLAG_POSITION) {
network->bWithAironetIE = true;
// CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23):
// "A Cisco access point advertises support for CKIP in beacon and probe response packets,
// by adding an Aironet element and setting one or both of the CKIP negotiation bits."
- if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) ||
- (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) )
- {
+ if ((info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_MIC) ||
+ (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK)) {
network->bCkipSupported = true;
- }
- else
- {
+ } else {
network->bCkipSupported = false;
}
- }
- else
- {
+ } else {
network->bWithAironetIE = false;
network->bCkipSupported = false;
}
@@ -2057,13 +2004,10 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
data[info_element->len];
}
- if(!network->atheros_cap_exist && !network->broadcom_cap_exist &&
- !network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation)
- {
+ if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
+ !network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation) {
network->unknown_cap_exist = true;
- }
- else
- {
+ } else {
network->unknown_cap_exist = false;
}
return 0;
@@ -2076,44 +2020,25 @@ static inline u8 ieee80211_SignalStrengthTranslate(
u8 RetSS;
// Step 1. Scale mapping.
- if(CurrSS >= 71 && CurrSS <= 100)
- {
+ if (CurrSS >= 71 && CurrSS <= 100) {
RetSS = 90 + ((CurrSS - 70) / 3);
- }
- else if(CurrSS >= 41 && CurrSS <= 70)
- {
+ } else if (CurrSS >= 41 && CurrSS <= 70) {
RetSS = 78 + ((CurrSS - 40) / 3);
- }
- else if(CurrSS >= 31 && CurrSS <= 40)
- {
+ } else if (CurrSS >= 31 && CurrSS <= 40) {
RetSS = 66 + (CurrSS - 30);
- }
- else if(CurrSS >= 21 && CurrSS <= 30)
- {
+ } else if (CurrSS >= 21 && CurrSS <= 30) {
RetSS = 54 + (CurrSS - 20);
- }
- else if(CurrSS >= 5 && CurrSS <= 20)
- {
+ } else if (CurrSS >= 5 && CurrSS <= 20) {
RetSS = 42 + (((CurrSS - 5) * 2) / 3);
- }
- else if(CurrSS == 4)
- {
+ } else if (CurrSS == 4) {
RetSS = 36;
- }
- else if(CurrSS == 3)
- {
+ } else if (CurrSS == 3) {
RetSS = 27;
- }
- else if(CurrSS == 2)
- {
+ } else if (CurrSS == 2) {
RetSS = 18;
- }
- else if(CurrSS == 1)
- {
+ } else if (CurrSS == 1) {
RetSS = 9;
- }
- else
- {
+ } else {
RetSS = CurrSS;
}
//RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));
@@ -2193,7 +2118,7 @@ static inline int ieee80211_network_init(
network->rsn_ie_len = 0;
if (ieee80211_parse_info_param
- (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats))
+ (ieee, beacon->info_element, stats->len - sizeof(*beacon), network, stats))
return 1;
network->mode = 0;
@@ -2215,10 +2140,10 @@ static inline int ieee80211_network_init(
return 1;
}
- if(network->bssht.bdSupportHT){
- if(network->mode == IEEE_A)
+ if (network->bssht.bdSupportHT) {
+ if (network->mode == IEEE_A)
network->mode = IEEE_N_5G;
- else if(network->mode & (IEEE_G | IEEE_B))
+ else if (network->mode & (IEEE_G | IEEE_B))
network->mode = IEEE_N_24G;
}
if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
@@ -2226,7 +2151,7 @@ static inline int ieee80211_network_init(
stats->signal = 30 + (stats->SignalStrength * 70) / 100;
//stats->signal = ieee80211_SignalStrengthTranslate(stats->signal);
- stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25;
+ stats->noise = ieee80211_translate_todbm((u8)(100 - stats->signal)) - 25;
memcpy(&network->stats, stats, sizeof(network->stats));
@@ -2264,8 +2189,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->rates_len = src->rates_len;
memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
dst->rates_ex_len = src->rates_ex_len;
- if (src->ssid_len > 0)
- {
+ if (src->ssid_len > 0) {
memset(dst->ssid, 0, dst->ssid_len);
dst->ssid_len = src->ssid_len;
memcpy(dst->ssid, src->ssid, src->ssid_len);
@@ -2274,8 +2198,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->flags = src->flags;
dst->time_stamp[0] = src->time_stamp[0];
dst->time_stamp[1] = src->time_stamp[1];
- if (src->flags & NETWORK_HAS_ERP_VALUE)
- {
+ if (src->flags & NETWORK_HAS_ERP_VALUE) {
dst->erp_value = src->erp_value;
dst->berp_info_valid = src->berp_info_valid = true;
}
@@ -2290,10 +2213,10 @@ static inline void update_network(struct ieee80211_network *dst,
dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
- dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen;
- memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen);
- dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen;
- memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen);
+ dst->bssht.bdHTCapLen = src->bssht.bdHTCapLen;
+ memcpy(dst->bssht.bdHTCapBuf, src->bssht.bdHTCapBuf, src->bssht.bdHTCapLen);
+ dst->bssht.bdHTInfoLen = src->bssht.bdHTInfoLen;
+ memcpy(dst->bssht.bdHTInfoBuf, src->bssht.bdHTInfoBuf, src->bssht.bdHTInfoLen);
dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
dst->broadcom_cap_exist = src->broadcom_cap_exist;
@@ -2312,7 +2235,7 @@ static inline void update_network(struct ieee80211_network *dst,
qos_active = dst->qos_data.active;
//old_param = dst->qos_data.old_param_count;
old_param = dst->qos_data.param_count;
- if(dst->flags & NETWORK_HAS_QOS_MASK)
+ if (dst->flags & NETWORK_HAS_QOS_MASK)
memcpy(&dst->qos_data, &src->qos_data,
sizeof(struct ieee80211_qos_data));
else {
@@ -2322,7 +2245,7 @@ static inline void update_network(struct ieee80211_network *dst,
if (dst->qos_data.supported == 1) {
dst->QoS_Enable = 1;
- if(dst->ssid_len)
+ if (dst->ssid_len)
IEEE80211_DEBUG_QOS
("QoS the network %s is QoS supported\n",
dst->ssid);
@@ -2335,11 +2258,11 @@ static inline void update_network(struct ieee80211_network *dst,
/* dst->last_associate is not overwritten */
dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame.
- if (src->wmm_param[0].aci_aifsn|| \
- src->wmm_param[1].aci_aifsn|| \
- src->wmm_param[2].aci_aifsn|| \
+ if (src->wmm_param[0].aci_aifsn || \
+ src->wmm_param[1].aci_aifsn || \
+ src->wmm_param[2].aci_aifsn || \
src->wmm_param[3].aci_aifsn) {
- memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
+ memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
}
//dst->QoS_Enable = src->QoS_Enable;
#ifdef THOMAS_TURBO
@@ -2429,46 +2352,34 @@ static inline void ieee80211_process_probe_response(
if (!is_legal_channel(ieee, network->channel))
goto out;
- if (ieee->bGlobalDomain)
- {
- if (fc == IEEE80211_STYPE_PROBE_RESP)
- {
- // Case 1: Country code
- if(IS_COUNTRY_IE_VALID(ieee) )
- {
+ if (ieee->bGlobalDomain) {
+ if (fc == IEEE80211_STYPE_PROBE_RESP) {
+ if (IS_COUNTRY_IE_VALID(ieee)) {
+ // Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel);
goto out;
}
- }
- // Case 2: No any country code.
- else
- {
+ } else {
+ // Case 2: No country code.
// Filter over channel ch12~14
- if (network->channel > 11)
- {
+ if (network->channel > 11) {
printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel);
goto out;
}
}
- }
- else
- {
- // Case 1: Country code
- if(IS_COUNTRY_IE_VALID(ieee) )
- {
+ } else {
+ if (IS_COUNTRY_IE_VALID(ieee)) {
+ // Case 1: Country code
if (!is_legal_channel(ieee, network->channel)) {
- printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network->channel);
+ printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network->channel);
goto out;
}
- }
- // Case 2: No any country code.
- else
- {
+ } else {
+ // Case 2: No country code.
// Filter over channel ch12~14
- if (network->channel > 14)
- {
- printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network->channel);
+ if (network->channel > 14) {
+ printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network->channel);
goto out;
}
}
@@ -2490,19 +2401,17 @@ static inline void ieee80211_process_probe_response(
if (is_same_network(&ieee->current_network, network, ieee)) {
update_network(&ieee->current_network, network);
if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
- && ieee->current_network.berp_info_valid){
- if(ieee->current_network.erp_value& ERP_UseProtection)
- ieee->current_network.buseprotection = true;
- else
- ieee->current_network.buseprotection = false;
+ && ieee->current_network.berp_info_valid){
+ if (ieee->current_network.erp_value & ERP_UseProtection)
+ ieee->current_network.buseprotection = true;
+ else
+ ieee->current_network.buseprotection = false;
}
- if(is_beacon(beacon->header.frame_ctl))
- {
- if(ieee->state == IEEE80211_LINKED)
+ if (is_beacon(beacon->header.frame_ctl)) {
+ if (ieee->state == IEEE80211_LINKED)
ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
- }
- else //hidden AP
- network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
+ } else //hidden AP
+ network->flags = (~NETWORK_EMPTY_ESSID & network->flags) | (NETWORK_EMPTY_ESSID & ieee->current_network.flags);
}
list_for_each_entry(target, &ieee->network_list, list) {
@@ -2543,8 +2452,8 @@ static inline void ieee80211_process_probe_response(
#endif
memcpy(target, network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
- if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
- ieee80211_softmac_new_net(ieee,network);
+ if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
+ ieee80211_softmac_new_net(ieee, network);
} else {
IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
escape_essid(target->ssid,
@@ -2559,26 +2468,26 @@ static inline void ieee80211_process_probe_response(
*/
renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
//YJ,add,080819,for hidden ap
- if(is_beacon(beacon->header.frame_ctl) == 0)
- network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & target->flags);
+ if (is_beacon(beacon->header.frame_ctl) == 0)
+ network->flags = (~NETWORK_EMPTY_ESSID & network->flags) | (NETWORK_EMPTY_ESSID & target->flags);
//if(strncmp(network->ssid, "linksys-c",9) == 0)
// printk("====>2 network->ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network->ssid, network->flags, target->ssid, target->flags);
- if(((network->flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
+ if (((network->flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
&& (((network->ssid_len > 0) && (strncmp(target->ssid, network->ssid, network->ssid_len)))\
- ||((ieee->current_network.ssid_len == network->ssid_len) && (strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0) && (ieee->state == IEEE80211_NOLINK))))
+ || ((ieee->current_network.ssid_len == network->ssid_len) && (strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0) && (ieee->state == IEEE80211_NOLINK))))
renew = 1;
//YJ,add,080819,for hidden ap,end
update_network(target, network);
- if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
- ieee80211_softmac_new_net(ieee,network);
+ if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
+ ieee80211_softmac_new_net(ieee, network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
if (is_beacon(beacon->header.frame_ctl) && is_same_network(&ieee->current_network, network, ieee) && \
(ieee->state == IEEE80211_LINKED)) {
if (ieee->handle_beacon)
- ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network);
+ ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network);
}
out:
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index e0da0900a4f7..33a6af7aad22 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -743,7 +743,6 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
- crypt = ieee->crypt[ieee->tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 4a8d16a45fc5..b1baaa18b129 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -42,8 +42,8 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
/* if setting by freq convert to channel */
if (fwrq->e == 1) {
- if ((fwrq->m >= (int) 2.412e8 &&
- fwrq->m <= (int) 2.487e8)) {
+ if ((fwrq->m >= (int)2.412e8 &&
+ fwrq->m <= (int)2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
@@ -92,7 +92,7 @@ int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
if (ieee->current_network.channel == 0)
return -1;
/* NM 0.7.0 will not accept channel any more. */
- fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000;
+ fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel - 1] * 100000;
fwrq->e = 1;
/* fwrq->m = ieee->current_network.channel; */
/* fwrq->e = 0; */
@@ -220,7 +220,7 @@ int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
u32 target_rate = wrqu->bitrate.value;
- ieee->rate = target_rate/100000;
+ ieee->rate = target_rate / 100000;
/* FIXME: we might want to limit rate also in management protocols. */
return 0;
}
@@ -415,9 +415,9 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
if (wrqu->essid.flags && wrqu->essid.length) {
/* first flush current network.ssid */
- len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length-1) : IW_ESSID_MAX_SIZE;
- strncpy(ieee->current_network.ssid, extra, len+1);
- ieee->current_network.ssid_len = len+1;
+ len = ((wrqu->essid.length - 1) < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length - 1) : IW_ESSID_MAX_SIZE;
+ strncpy(ieee->current_network.ssid, extra, len + 1);
+ ieee->current_network.ssid_len = len + 1;
ieee->ssid_set = 1;
} else {
ieee->ssid_set = 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index fc6eb97801e1..f0b6b8372f91 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -214,7 +214,8 @@ int ieee80211_encrypt_fragment(
}
-void ieee80211_txb_free(struct ieee80211_txb *txb) {
+void ieee80211_txb_free(struct ieee80211_txb *txb)
+{
//int i;
if (unlikely(!txb))
return;
@@ -293,7 +294,7 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
struct tx_ts_record *pTxTs = NULL;
struct rtl_80211_hdr_1addr *hdr = (struct rtl_80211_hdr_1addr *)skb->data;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (!IsQoSDataFrame(skb->data))
return;
@@ -301,13 +302,6 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
if (is_multicast_ether_addr(hdr->addr1))
return;
//check packet and mode later
-#ifdef TO_DO_LIST
- if (pTcb->PacketLength >= 4096)
- return;
- // For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
- if (!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
- return;
-#endif
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
return;
}
@@ -333,8 +327,7 @@ static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
}
}
FORCED_AGG_SETTING:
- switch (pHTInfo->ForcedAMPDUMode )
- {
+ switch (pHTInfo->ForcedAMPDUMode) {
case HT_AGG_AUTO:
break;
@@ -372,7 +365,7 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
tcb_desc->bUseShortGI = false;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (pHTInfo->bForcedShortGI) {
@@ -380,9 +373,9 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
return;
}
- if ((pHTInfo->bCurBW40MHz==true) && pHTInfo->bCurShortGI40MHz)
+ if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if ((pHTInfo->bCurBW40MHz==false) && pHTInfo->bCurShortGI20MHz)
+ else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
@@ -393,16 +386,16 @@ static void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee,
tcb_desc->bPacketBW = false;
- if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
+ if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
return;
- if ((tcb_desc->data_rate & 0x80)==0) // If using legacy rate, it shall use 20MHz channel.
+ if ((tcb_desc->data_rate & 0x80) == 0) // If using legacy rate, it shall use 20MHz channel.
return;
//BandWidthAutoSwitch is for auto switch to 20 or 40 in long distance
- if(pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
tcb_desc->bPacketBW = true;
return;
}
@@ -418,25 +411,21 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
tcb_desc->RTSSC = 0; // 20MHz: Don't care; 40MHz: Duplicate.
tcb_desc->bRTSBW = false; // RTS frame bandwidth is always 20MHz
- if(tcb_desc->bBroadcast || tcb_desc->bMulticast)//only unicast frame will use rts/cts
+ if (tcb_desc->bBroadcast || tcb_desc->bMulticast) //only unicast frame will use rts/cts
return;
- if (is_broadcast_ether_addr(skb->data+16)) //check addr3 as infrastructure add3 is DA.
+ if (is_broadcast_ether_addr(skb->data + 16)) //check addr3 as infrastructure add3 is DA.
return;
- if (ieee->mode < IEEE_N_24G) //b, g mode
- {
+ if (ieee->mode < IEEE_N_24G) /* b, g mode */ {
// (1) RTS_Threshold is compared to the MPDU, not MSDU.
// (2) If there are more than one frag in this MSDU, only the first frag uses protection frame.
// Other fragments are protected by previous fragment.
// So we only need to check the length of first fragment.
- if (skb->len > ieee->rts)
- {
+ if (skb->len > ieee->rts) {
tcb_desc->bRTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
- }
- else if (ieee->current_network.buseprotection)
- {
+ } else if (ieee->current_network.buseprotection) {
// Use CTS-to-SELF in protection mode.
tcb_desc->bRTSEnable = true;
tcb_desc->bCTSEnable = true;
@@ -444,43 +433,35 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
}
//otherwise return;
return;
- }
- else
- {// 11n High throughput case.
+ } else { // 11n High throughput case.
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- while (true)
- {
+ while (true) {
//check ERP protection
- if (ieee->current_network.buseprotection)
- {// CTS-to-SELF
+ if (ieee->current_network.buseprotection) {// CTS-to-SELF
tcb_desc->bRTSEnable = true;
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
break;
}
//check HT op mode
- if(pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT)
- {
+ if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
u8 HTOpMode = pHTInfo->CurrentOpMode;
- if((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
- (!pHTInfo->bCurBW40MHz && HTOpMode == 3) )
- {
+ if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
+ (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
tcb_desc->bRTSEnable = true;
break;
}
}
//check rts
- if (skb->len > ieee->rts)
- {
+ if (skb->len > ieee->rts) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
tcb_desc->bRTSEnable = true;
break;
}
//to do list: check MIMO power save condition.
//check AMPDU aggregation for TXOP
- if(tcb_desc->bAMPDUEnable)
- {
+ if (tcb_desc->bAMPDUEnable) {
tcb_desc->rts_rate = MGN_24M; // Rate is 24Mbps.
// According to 8190 design, firmware sends CF-End only if RTS/CTS is enabled. However, it degrads
// throughput around 10M, so we disable of this mechanism. 2007.08.03 by Emily
@@ -488,8 +469,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
break;
}
//check IOT action
- if(pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF)
- {
+ if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
@@ -508,7 +488,7 @@ static void ieee80211_query_protectionmode(struct ieee80211_device *ieee,
if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
tcb_desc->bUseShortPreamble = true;
if (ieee->mode == IW_MODE_MASTER)
- goto NO_PROTECTION;
+ goto NO_PROTECTION;
return;
NO_PROTECTION:
tcb_desc->bRTSEnable = false;
@@ -522,27 +502,12 @@ NO_PROTECTION:
static void ieee80211_txrate_selectmode(struct ieee80211_device *ieee,
struct cb_desc *tcb_desc)
{
-#ifdef TO_DO_LIST
- if (!IsDataFrame(pFrame)) {
- pTcb->bTxDisableRateFallBack = true;
- pTcb->bTxUseDriverAssingedRate = true;
- pTcb->RATRIndex = 7;
- return;
- }
-
- if (pMgntInfo->ForcedDataRate!= 0) {
- pTcb->bTxDisableRateFallBack = true;
- pTcb->bTxUseDriverAssingedRate = true;
- return;
- }
-#endif
if (ieee->bTxDisableRateFallBack)
tcb_desc->bTxDisableRateFallBack = true;
if (ieee->bTxUseDriverAssingedRate)
tcb_desc->bTxUseDriverAssingedRate = true;
- if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
- {
+ if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) {
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
tcb_desc->RATRIndex = 0;
}
@@ -553,11 +518,9 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
{
if (is_multicast_ether_addr(dst))
return;
- if (IsQoSDataFrame(skb->data)) //we deal qos data only
- {
+ if (IsQoSDataFrame(skb->data)) /* we handle QoS data only */ {
struct tx_ts_record *pTS = NULL;
- if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true))
- {
+ if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) {
return;
}
pTS->tx_cur_seq = (pTS->tx_cur_seq + 1) % 4096;
@@ -592,7 +555,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* If there is no driver handler to take the TXB, dont' bother
* creating it...
*/
- if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
+ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
printk(KERN_WARNING "%s: No xmit handler.\n",
ieee->dev->name);
@@ -631,7 +594,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Save source and destination addresses */
memcpy(&dest, skb->data, ETH_ALEN);
- memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
+ memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
/* Advance the SKB to the start of the payload */
skb_pull(skb, sizeof(struct ethhdr));
@@ -646,7 +609,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
fc = IEEE80211_FTYPE_DATA;
//if(ieee->current_network.QoS_Enable)
- if(qos_actived)
+ if (qos_actived)
fc |= IEEE80211_STYPE_QOS_DATA;
else
fc |= IEEE80211_STYPE_DATA;
@@ -740,7 +703,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nr_frags; i++) {
skb_frag = txb->fragments[i];
tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
- if(qos_actived){
+ if (qos_actived) {
skb_frag->priority = skb->priority;//UP2AC(skb->priority);
tcb_desc->queue_index = UP2AC(skb->priority);
} else {
@@ -749,15 +712,13 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_reserve(skb_frag, ieee->tx_headroom);
- if (encrypt){
+ if (encrypt) {
if (ieee->hwsec_active)
tcb_desc->bHwSec = 1;
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
- }
- else
- {
+ } else {
tcb_desc->bHwSec = 0;
}
frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
@@ -775,12 +736,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = bytes_last_frag;
}
//if(ieee->current_network.QoS_Enable)
- if(qos_actived)
- {
+ if (qos_actived) {
// add 1 only indicate to corresponding seq number control 2006/7/12
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
+ frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority) + 1] << 4 | i);
} else {
- frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
+ frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
}
/* Put a SNAP header on the first fragment */
@@ -806,17 +766,16 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
skb_put(skb_frag, 4);
}
- if(qos_actived)
- {
- if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
- ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
- else
- ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
+ if (qos_actived) {
+ if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
+ else
+ ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
} else {
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
+ if (ieee->seq_ctrl[0] == 0xFFF)
+ ieee->seq_ctrl[0] = 0;
+ else
+ ieee->seq_ctrl[0]++;
}
} else {
if (unlikely(skb->len < sizeof(struct rtl_80211_hdr_3addr))) {
@@ -826,7 +785,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
- if(!txb){
+ if (!txb) {
printk(KERN_WARNING "%s: Could not allocate TXB\n",
ieee->dev->name);
goto failed;
@@ -839,8 +798,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
success:
//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
- if (txb)
- {
+ if (txb) {
struct cb_desc *tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->bTxEnableFwCalcDur = 1;
if (is_multicast_ether_addr(header.addr1))
@@ -862,9 +820,9 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&ieee->lock, flags);
dev_kfree_skb_any(skb);
if (txb) {
- if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
+ if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
ieee80211_softmac_xmit(txb, ieee);
- }else{
+ } else {
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += __le16_to_cpu(txb->payload_size);
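An aside on the seq_ctl hunks just above: the 16-bit 802.11 Sequence Control field carries a 12-bit sequence number in its high bits and a 4-bit fragment index in its low bits, which is what the "seq_ctrl[...] << 4 | i" expressions assemble before cpu_to_le16(). A minimal standalone sketch of that packing, with illustrative names rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

/* Pack a 12-bit sequence number and a 4-bit fragment index into the
 * 16-bit 802.11 Sequence Control field (host byte order here; the
 * driver converts with cpu_to_le16() before transmission).
 */
static uint16_t pack_seq_ctl(uint16_t seq, uint8_t frag)
{
	return (uint16_t)(((seq & 0x0fff) << 4) | (frag & 0x0f));
}

int main(void)
{
	/* A sequence number of 0xFFF wraps back to 0, mirroring the
	 * driver's seq_ctrl wrap check above.
	 */
	uint16_t seq = 0x0FFF;

	printf("seq_ctl=0x%04x\n", pack_seq_ctl(seq, 2));
	seq = (seq == 0x0FFF) ? 0 : seq + 1;
	printf("next seq=%u\n", seq);
	return 0;
}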
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index be08cd1d37a7..9dd5c04181ea 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -70,10 +70,10 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
- for(i=0; i<ARRAY_SIZE(ieee80211_modes); i++) {
+ for (i = 0; i < ARRAY_SIZE(ieee80211_modes); i++) {
if (network->mode & BIT(i)) {
- sprintf(pname,ieee80211_modes[i].mode_string,ieee80211_modes[i].mode_size);
- pname +=ieee80211_modes[i].mode_size;
+ sprintf(pname, ieee80211_modes[i].mode_string, ieee80211_modes[i].mode_size);
+ pname += ieee80211_modes[i].mode_size;
}
}
*pname = '\0';
@@ -130,8 +130,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
max_rate = rate;
}
- if (network->mode >= IEEE_N_24G)//add N rate here;
- {
+ if (network->mode >= IEEE_N_24G) /* add N rate here */ {
struct ht_capability_ele *ht_cap = NULL;
bool is40M = false, isShortGI = false;
u8 max_mcs = 0;
@@ -139,13 +138,13 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[4];
else
ht_cap = (struct ht_capability_ele *)&network->bssht.bdHTCapBuf[0];
- is40M = (ht_cap->ChlWidth)?1:0;
- isShortGI = (ht_cap->ChlWidth)?
- ((ht_cap->ShortGI40Mhz)?1:0):
- ((ht_cap->ShortGI20Mhz)?1:0);
+ is40M = (ht_cap->ChlWidth) ? 1 : 0;
+ isShortGI = (ht_cap->ChlWidth) ?
+ ((ht_cap->ShortGI40Mhz) ? 1 : 0) :
+ ((ht_cap->ShortGI20Mhz) ? 1 : 0);
max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS, MCS_FILTER_ALL);
- rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs&0x7f];
+ rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
if (rate > max_rate)
max_rate = rate;
}
@@ -178,7 +177,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
if (ieee->wpa_enabled && network->wpa_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
@@ -219,7 +218,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
- start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+ start = iwe_stream_add_point(info, start, stop, &iwe, custom);
return start;
}
@@ -243,7 +242,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
list_for_each_entry(network, &ieee->network_list, list) {
i++;
- if((stop-ev)<200) {
+ if ((stop - ev) < 200) {
err = -E2BIG;
break;
}
@@ -454,7 +453,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("GET_ENCODE\n");
- if(ieee->iw_mode == IW_MODE_MONITOR)
+ if (ieee->iw_mode == IW_MODE_MONITOR)
return -1;
key = erq->flags & IW_ENCODE_INDEX;
@@ -571,7 +570,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ret = -EINVAL;
goto done;
}
- printk("alg name:%s\n",alg);
+ printk("alg name:%s\n", alg);
ops = try_then_request_module(ieee80211_get_crypto_ops(alg), module);
if (!ops) {
@@ -688,7 +687,7 @@ int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
- if (strcmp(crypt->ops->name, "WEP") == 0 )
+ if (strcmp(crypt->ops->name, "WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
else if (strcmp(crypt->ops->name, "TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
@@ -712,7 +711,7 @@ int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
case IW_MLME_DISASSOC:
@@ -765,7 +764,7 @@ int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
break;
case IW_AUTH_WPA_ENABLED:
- ieee->wpa_enabled = (data->value)?1:0;
+ ieee->wpa_enabled = (data->value) ? 1 : 0;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
@@ -785,14 +784,14 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
u8 *buf;
- if (len>MAX_WPA_IE_LEN || (len && !ie)) {
- // printk("return error out, len:%d\n", len);
- return -EINVAL;
+ if (len > MAX_WPA_IE_LEN || (len && !ie)) {
+ //printk("return error out, len:%d\n", len);
+ return -EINVAL;
}
if (len) {
- if (len != ie[1]+2) {
+ if (len != ie[1] + 2) {
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
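For context on the "len != ie[1] + 2" check in the ieee80211_wx_set_gen_ie() hunk above: an 802.11 information element is one ID byte, one length byte, then the payload, so a well-formed buffer is exactly ie[1] + 2 bytes long, and a zero length is allowed when the IE is being cleared. A hedged standalone sketch of that validation (the helper name is hypothetical):

#include <stddef.h>
#include <stdbool.h>

/* Validate a single 802.11 information element buffer:
 * ie[0] = element ID, ie[1] = payload length, ie[2..] = payload.
 */
static bool ie_len_ok(const unsigned char *ie, size_t len)
{
	if (len == 0)
		return true;	/* clearing the stored IE is allowed */
	if (!ie || len < 2)
		return false;
	return len == (size_t)ie[1] + 2;
}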
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 53869b3c985c..379a2ccf4d9f 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -162,7 +162,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, s
tag += 2;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
return skb;
//return NULL;
}
@@ -229,7 +229,7 @@ static struct sk_buff *ieee80211_DELBA(
put_unaligned_le16(ReasonCode, tag);
tag += 2;
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
if (net_ratelimit())
IEEE80211_DEBUG(IEEE80211_DL_TRACE | IEEE80211_DL_BA,
"<=====%s()\n", __func__);
@@ -331,9 +331,9 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
return -1;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
- req = (struct rtl_80211_hdr_3addr *) skb->data;
+ req = (struct rtl_80211_hdr_3addr *)skb->data;
tag = (u8 *)req;
dst = &req->addr2[0];
tag += sizeof(struct rtl_80211_hdr_3addr);
@@ -556,7 +556,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb)
return -1;
}
- IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
+ IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_BA, skb->data, skb->len);
delba = (struct rtl_80211_hdr_3addr *)skb->data;
dst = &delba->addr2[0];
pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
@@ -643,7 +643,7 @@ TsInitDelBA(struct ieee80211_device *ieee, struct ts_common_info *pTsCommonInfo,
ieee80211_send_DELBA(
ieee,
pTsCommonInfo->addr,
- (pTxTs->tx_admitted_ba_record.valid)?(&pTxTs->tx_admitted_ba_record):(&pTxTs->tx_pending_ba_record),
+ (pTxTs->tx_admitted_ba_record.valid) ? (&pTxTs->tx_admitted_ba_record) : (&pTxTs->tx_pending_ba_record),
TxRxSelect,
DELBA_REASON_END_BA);
} else if (TxRxSelect == RX_DIR) {
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index b7769bca9740..79346a00af09 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -253,10 +253,10 @@ extern u8 MCS_FILTER_1SS[16];
/* 2007/07/12 MH We only define legacy and HT wireless mode now. */
#define LEGACY_WIRELESS_MODE IEEE_MODE_MASK
-#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
- ((WirelessMode & (LEGACY_WIRELESS_MODE)) != 0) ?\
- (LegacyRate) :\
- (PICK_RATE(LegacyRate, HTRate))
+#define CURRENT_RATE(WirelessMode, LegacyRate, HTRate) \
+ ((WirelessMode & (LEGACY_WIRELESS_MODE)) != 0) ? \
+ (LegacyRate) : \
+ (PICK_RATE(LegacyRate, HTRate))
// MCS Bw 40 {1~7, 12~15,32}
#define RATE_ADPT_1SS_MASK 0xFF
@@ -270,11 +270,10 @@ typedef enum _HT_AGGRE_SIZE {
HT_AGG_SIZE_16K = 1,
HT_AGG_SIZE_32K = 2,
HT_AGG_SIZE_64K = 3,
-}HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
+} HT_AGGRE_SIZE_E, *PHT_AGGRE_SIZE_E;
/* Indicate different AP vendor for IOT issue */
-typedef enum _HT_IOT_PEER
-{
+typedef enum _HT_IOT_PEER {
HT_IOT_PEER_UNKNOWN = 0,
HT_IOT_PEER_REALTEK = 1,
HT_IOT_PEER_BROADCOM = 2,
@@ -282,7 +281,7 @@ typedef enum _HT_IOT_PEER
HT_IOT_PEER_ATHEROS = 4,
HT_IOT_PEER_CISCO = 5,
HT_IOT_PEER_MAX = 6
-}HT_IOT_PEER_E, *PHTIOT_PEER_E;
+} HT_IOT_PEER_E, *PHTIOT_PEER_E;
/*
* IOT Action for different AP
@@ -298,6 +297,6 @@ typedef enum _HT_IOT_ACTION {
HT_IOT_ACT_CDD_FSYNC = 0x00000080,
HT_IOT_ACT_PURE_N_MODE = 0x00000100,
HT_IOT_ACT_FORCED_CTS2SELF = 0x00000200,
-}HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
+} HT_IOT_ACTION_E, *PHT_IOT_ACTION_E;
#endif //_RTL819XU_HTTYPE_H_
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index c73a8058cf87..dba3f2db9f48 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -93,10 +93,6 @@ void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
-#ifdef TO_DO_LIST
- // 8190 only. Assign duration operation mode to firmware
- pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
-#endif
/*
* 8190 only, Realtek proprietary aggregation mode
* Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 59d179ae7ad2..5cee1031a27c 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -105,7 +105,7 @@ static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
eth_zero_addr(pTsCommonInfo->addr);
memset(&pTsCommonInfo->t_spec, 0, sizeof(struct tspec_body));
- memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas)*TCLAS_NUM);
+ memset(&pTsCommonInfo->t_class, 0, sizeof(union qos_tclas) * TCLAS_NUM);
pTsCommonInfo->t_clas_proc = 0;
pTsCommonInfo->t_clas_num = 0;
}
@@ -180,14 +180,12 @@ void TSInitialize(struct ieee80211_device *ieee)
}
// Initialize unused Rx Reorder List.
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
-//#ifdef TO_DO_LIST
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List);
- if (count == (REORDER_ENTRY_NUM-1))
+ if (count == (REORDER_ENTRY_NUM - 1))
break;
- pRxReorderEntry = &ieee->RxReorderEntry[count+1];
+ pRxReorderEntry = &ieee->RxReorderEntry[count + 1];
}
-//#endif
}
static void AdmitTS(struct ieee80211_device *ieee,
@@ -259,7 +257,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct ieee80211_device *ieee,
}
if (&pRet->list != psearch_list)
- return pRet ;
+ return pRet;
else
return NULL;
}
@@ -367,8 +365,8 @@ bool GetTs(
(&ieee->Rx_TS_Admit_List);
enum direction_value Dir = (ieee->iw_mode == IW_MODE_MASTER) ?
- ((TxRxSelect == TX_DIR)?DIR_DOWN:DIR_UP) :
- ((TxRxSelect == TX_DIR)?DIR_UP:DIR_DOWN);
+ ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
+ ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next, struct ts_common_info, list);
@@ -417,7 +415,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
TsInitDelBA(ieee, pTs, TxRxSelect);
if (TxRxSelect == RX_DIR) {
-//#ifdef TO_DO_LIST
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
if (timer_pending(&pRxTS->rx_pkt_pending_timer))
@@ -445,7 +442,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, struct ts_common_info *
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
-//#endif
} else {
struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
del_timer_sync(&pTxTS->ts_add_ba_timer);
@@ -530,7 +526,7 @@ void TsStartAddBaProcess(struct ieee80211_device *ieee, struct tx_ts_record *pTx
jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
} else {
IEEE80211_DEBUG(IEEE80211_DL_BA, "%s: Immediately Start ADDBA now!!\n", __func__);
- mod_timer(&pTxTS->ts_add_ba_timer, jiffies+10); //set 10 ticks
+ mod_timer(&pTxTS->ts_add_ba_timer, jiffies + 10); //set 10 ticks
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __func__);
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index de83daa0c9ed..2527cea60e3e 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -39,7 +39,6 @@ static void eprom_cs(struct net_device *dev, short bit)
udelay(EPROM_DELAY);
}
-
static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
@@ -58,7 +57,6 @@ static void eprom_ck_cycle(struct net_device *dev)
udelay(EPROM_DELAY);
}
-
static void eprom_w(struct net_device *dev, short bit)
{
u8 cmdreg;
@@ -76,7 +74,6 @@ static void eprom_w(struct net_device *dev, short bit)
udelay(EPROM_DELAY);
}
-
static short eprom_r(struct net_device *dev)
{
u8 bit;
@@ -94,7 +91,6 @@ static short eprom_r(struct net_device *dev)
return 0;
}
-
static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
{
int i;
@@ -105,7 +101,6 @@ static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
}
}
-
int eprom_read(struct net_device *dev, u32 addr)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -119,7 +114,7 @@ int eprom_read(struct net_device *dev, u32 addr)
ret = 0;
/* enable EPROM programming */
write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -162,7 +157,7 @@ int eprom_read(struct net_device *dev, u32 addr)
if (err < 0)
return err;
- ret |= err<<(15-i);
+ ret |= err << (15 - i);
}
eprom_cs(dev, 0);
@@ -170,6 +165,6 @@ int eprom_read(struct net_device *dev, u32 addr)
/* disable EPROM programming */
write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
+ (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT));
return ret;
}
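The "ret |= err << (15 - i)" loop in the eprom_read() hunk above assembles a 16-bit EEPROM word one serially clocked bit at a time, most significant bit first. A minimal sketch of the same accumulation with a stubbed bit-read callback standing in for eprom_r() (names are illustrative, not the driver's API):

#include <stdint.h>

/* Assemble a 16-bit word from 16 serially clocked-in bits, MSB first,
 * the way the 93Cx6 read loop does; read_bit() stands in for eprom_r().
 */
static uint16_t read_word_msb_first(int (*read_bit)(void *ctx), void *ctx)
{
	uint16_t word = 0;
	int i;

	for (i = 0; i < 16; i++)
		word |= (uint16_t)((read_bit(ctx) & 1) << (15 - i));
	return word;
}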
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 92de92a3325a..b169460b9f26 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -42,9 +42,9 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
- if (priv->card_8192_version == VERSION_819XU_A
- || priv->card_8192_version
- == VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
+ if (priv->card_8192_version == VERSION_819XU_A ||
+ priv->card_8192_version ==
+ VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
(enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
@@ -79,10 +79,10 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
default:
RT_TRACE(COMP_ERR, "phy_set_rf8256_bandwidth(): unknown Bandwidth: %#X\n", Bandwidth);
break;
-
}
}
}
+
/*--------------------------------------------------------------------------
* Overview: Interface to config 8256
* Input: struct net_device* dev
@@ -101,6 +101,7 @@ void phy_rf8256_config(struct net_device *dev)
/* Config BB and RF */
phy_rf8256_config_para_file(dev);
}
+
/*--------------------------------------------------------------------------
* Overview: Interface to config 8256
* Input: struct net_device* dev
@@ -137,12 +138,12 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
break;
case RF90_PATH_B:
case RF90_PATH_D:
- u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16);
break;
}
/*----Set RF_ENV enable----*/
- rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
/*----Set RF_ENV output high----*/
rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
@@ -151,7 +152,7 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 */
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? */
- rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e) eRFPath, 0x0, bMask12Bits, 0xbf);
+ rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path_e)eRFPath, 0x0, bMask12Bits, 0xbf);
/* Check RF block (for FPGA platform only)----
* TODO: this function should be removed on ASIC , Emily 2007.2.2
@@ -207,7 +208,7 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
break;
case RF90_PATH_B:
case RF90_PATH_D:
- rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
break;
}
@@ -215,7 +216,6 @@ static void phy_rf8256_config_para_file(struct net_device *dev)
RT_TRACE(COMP_ERR, "phy_rf8256_config_para_file():Radio[%d] Fail!!", eRFPath);
goto phy_RF8256_Config_ParaFile_Fail;
}
-
}
RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
@@ -225,11 +225,11 @@ phy_RF8256_Config_ParaFile_Fail:
RT_TRACE(COMP_ERR, "PHY Initialization failed\n");
}
-
void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel)
{
u32 TxAGC = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
+
TxAGC = powerlevel;
if (priv->bDynamicTxLowPower) {
@@ -244,7 +244,6 @@ void phy_set_rf8256_cck_tx_power(struct net_device *dev, u8 powerlevel)
rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
}
-
void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -255,16 +254,16 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
u8 byte0, byte1, byte2, byte3;
powerBase0 = powerlevel + priv->TxPowerDiff; /* OFDM rates */
- powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0;
powerBase1 = powerlevel; /* MCS rates */
- powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
+ powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
for (index = 0; index < 6; index++) {
- writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index < 2)?powerBase0:powerBase1);
+ writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index < 2) ? powerBase0 : powerBase1);
byte0 = (u8)(writeVal & 0x7f);
- byte1 = (u8)((writeVal & 0x7f00)>>8);
- byte2 = (u8)((writeVal & 0x7f0000)>>16);
- byte3 = (u8)((writeVal & 0x7f000000)>>24);
+ byte1 = (u8)((writeVal & 0x7f00) >> 8);
+ byte2 = (u8)((writeVal & 0x7f0000) >> 16);
+ byte3 = (u8)((writeVal & 0x7f000000) >> 24);
if (byte0 > 0x24)
/* Max power index = 0x24 */
@@ -278,7 +277,7 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
/* for tx power track */
if (index == 3) {
- writeVal_tmp = (byte3<<24) | (byte2<<16) | (byte1<<8) | byte0;
+ writeVal_tmp = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
priv->Pwr_Track = writeVal_tmp;
}
@@ -288,10 +287,9 @@ void phy_set_rf8256_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
*/
writeVal = 0x03030303;
} else {
- writeVal = (byte3<<24) | (byte2<<16) | (byte1<<8) | byte0;
- }
- rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
+ writeVal = (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0;
+ }
+ rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
}
return;
-
}
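On the OFDM power hunks above: the per-rate register value is treated as four 7-bit power indices that are extracted with shifts and masks, clamped (the hunk shows the 0x24 maximum for byte0, and the same limit appears to apply to the other bytes), and repacked into one 32-bit word. A small standalone sketch of that split/clamp/repack step, illustrative only and assuming the same 0x24 limit for all four bytes:

#include <stdint.h>

/* Split a 32-bit per-rate value into four 7-bit power indices, clamp
 * each to the 0x24 maximum, and repack, following the byte0..byte3
 * handling shown in the hunk above.
 */
static uint32_t clamp_tx_power_word(uint32_t val)
{
	uint8_t b[4];
	int i;

	for (i = 0; i < 4; i++) {
		b[i] = (val >> (8 * i)) & 0x7f;
		if (b[i] > 0x24)
			b[i] = 0x24;	/* max power index */
	}
	return (uint32_t)b[3] << 24 | (uint32_t)b[2] << 16 |
	       (uint32_t)b[1] << 8 | b[0];
}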
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index fe1f279ca368..2821411878ce 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2076,14 +2076,6 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
wireless_mode = WIRELESS_MODE_B;
}
}
-#ifdef TO_DO_LIST
- /* TODO: this function doesn't work well at this time,
- * we should wait for FPGA
- */
- ActUpdateChannelAccessSetting(
- pAdapter, pHalData->CurrentWirelessMode,
- &pAdapter->MgntInfo.Info8185.ChannelAccessSetting);
-#endif
priv->ieee80211->mode = wireless_mode;
if (wireless_mode == WIRELESS_MODE_N_24G ||
@@ -2096,7 +2088,7 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
}
/* init priv variables here. only non_zero value should be initialized here. */
-static void rtl8192_init_priv_variable(struct net_device *dev)
+static int rtl8192_init_priv_variable(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i;
@@ -2159,12 +2151,6 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
priv->ieee80211->InitialGainHandler = InitialGain819xUsb;
priv->card_type = USB;
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest) {
- pHalData->ShortRetryLimit = 7;
- pHalData->LongRetryLimit = 7;
- }
-#endif
priv->ShortRetryLimit = 0x30;
priv->LongRetryLimit = 0x30;
priv->EarlyRxThreshold = 7;
@@ -2180,34 +2166,6 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
* TRUE: SW provides them
*/
(false ? TCR_SAT : 0);
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest)
- pHalData->ReceiveConfig =
- pHalData->CSMethod |
- /* accept management/data */
- RCR_AMF | RCR_ADF |
- /* accept control frame for SW
- * AP needs PS-poll
- */
- RCR_ACF |
- /* accept BC/MC/UC */
- RCR_AB | RCR_AM | RCR_APM |
- /* accept ICV/CRC error
- * packet
- */
- RCR_AICV | RCR_ACRC32 |
- /* Max DMA Burst Size per Tx
- * DMA Burst, 7: unlimited.
- */
- ((u32)7 << RCR_MXDMA_OFFSET) |
- /* Rx FIFO Threshold,
- * 7: No Rx threshold.
- */
- (pHalData->EarlyRxThreshold << RCR_FIFO_OFFSET) |
- (pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt : 0);
- else
-
-#endif
priv->ReceiveConfig =
/* accept management/data */
RCR_AMF | RCR_ADF |
@@ -2223,6 +2181,8 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
priv->AcmControl = 0;
priv->pFirmware = kzalloc(sizeof(rt_firmware), GFP_KERNEL);
+ if (!priv->pFirmware)
+ return -ENOMEM;
/* rx related queue */
skb_queue_head_init(&priv->rx_queue);
@@ -2236,6 +2196,8 @@ static void rtl8192_init_priv_variable(struct net_device *dev)
for (i = 0; i < MAX_QUEUE_SIZE; i++)
skb_queue_head_init(&priv->ieee80211->skb_drv_aggQ[i]);
priv->rf_set_chan = rtl8192_phy_SwChnl;
+
+ return 0;
}
/* init lock here */
@@ -2605,7 +2567,10 @@ static short rtl8192_init(struct net_device *dev)
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
#endif
- rtl8192_init_priv_variable(dev);
+ err = rtl8192_init_priv_variable(dev);
+ if (err)
+ return err;
+
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(dev);
rtl8192_get_eeprom_size(dev);
@@ -2658,19 +2623,10 @@ static void rtl8192_hwconfig(struct net_device *dev)
regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_AUTO:
-#ifdef TO_DO_LIST
- if (Adapter->bInHctTest) {
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- } else
-#endif
- {
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- }
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_24G:
/* It support CCK rate by default. CCK rate will be filtered
@@ -2841,48 +2797,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
}
RT_TRACE(COMP_INIT, "%s():after firmware download\n", __func__);
-#ifdef TO_DO_LIST
- if (Adapter->ResetProgress == RESET_TYPE_NORESET) {
- if (pMgntInfo->RegRfOff) { /* User disable RF via registry. */
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
- MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
- /* Those actions will be discard in MgntActSet_RF_State
- * because of the same state
- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
- PHY_SetRFReg(Adapter,
- (enum rf90_radio_path_e)eRFPath,
- 0x4, 0xC00, 0x0);
- } else if (pMgntInfo->RfOffReason > RF_CHANGE_BY_PS) {
- /* H/W or S/W RF OFF before sleep. */
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): Turn off RF for RfOffReason(%d) ----------\n",
- pMgntInfo->RfOffReason));
- MgntActSet_RF_State(Adapter,
- eRfOff,
- pMgntInfo->RfOffReason);
- } else {
- pHalData->eRFPowerState = eRfOn;
- pMgntInfo->RfOffReason = 0;
- RT_TRACE((COMP_INIT | COMP_RF), DBG_LOUD,
- ("InitializeAdapter819xUsb(): RF is on ----------\n"));
- }
- } else {
- if (pHalData->eRFPowerState == eRfOff) {
- MgntActSet_RF_State(Adapter,
- eRfOff,
- pMgntInfo->RfOffReason);
- /* Those actions will be discard in MgntActSet_RF_State
- * because of the same state
- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
- PHY_SetRFReg(Adapter,
- (enum rf90_radio_path_e)eRFPath,
- 0x4, 0xC00, 0x0);
- }
- }
-#endif
/* config RF. */
if (priv->ResetProgress == RESET_TYPE_NORESET) {
rtl8192_phy_RFConfig(dev);
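On the rtl8192_init_priv_variable() change above: once the helper can fail (the kzalloc() of the firmware descriptor), it returns 0 or -ENOMEM and rtl8192_init() propagates the error instead of ignoring it. A minimal userspace sketch of that pattern, with placeholder names and calloc() standing in for kzalloc():

#include <errno.h>
#include <stdlib.h>

struct priv { void *fw; };

/* Return 0 on success or a negative errno, as the reworked init
 * helper now does, so the caller can simply propagate the result.
 */
static int init_priv(struct priv *p)
{
	p->fw = calloc(1, 64);	/* stands in for kzalloc() */
	if (!p->fw)
		return -ENOMEM;
	return 0;
}

static int driver_init(struct priv *p)
{
	int err = init_priv(p);

	if (err)
		return err;	/* bubble the error up unchanged */
	/* ... continue with lock/task/eeprom setup ... */
	return 0;
}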
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index ade14ef05730..c23e43b095d9 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -1334,7 +1334,7 @@ static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
return;
}
/*DbgPrint("Schedule TxPowerTrackingWorkItem\n");*/
- queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ queue_delayed_work(priv->priv_wq, &priv->txpower_tracking_wq, 0);
TM_Trigger = 0;
}
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 153d4ee0ec07..dd81d210bd49 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -231,7 +231,7 @@ bool init_firmware(struct net_device *dev)
rst_opt = OPT_FIRMWARE_RESET;
starting_state = FW_INIT_STEP2_DATA;
} else {
- RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n");
+ RT_TRACE(COMP_FIRMWARE, "PlatformInitFirmware: undefined firmware state\n");
}
/*
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 5f04afe53d69..c04d8eca0cfb 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -516,16 +516,6 @@ static void rtl8192_phyConfigBB(struct net_device *dev,
{
u32 i;
-#ifdef TO_DO_LIST
- u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL;
-
- if (Adapter->bInHctTest) {
- PHY_REGArrayLen = PHY_REGArrayLengthDTM;
- AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
- Rtl8190PHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
- Rtl8190AGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
- }
-#endif
if (ConfigType == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < PHY_REG_1T2RArrayLength; i += 2) {
rtl8192_setBBreg(dev, Rtl8192UsbPHY_REG_1T2RArray[i],
@@ -1059,10 +1049,6 @@ static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
switch (priv->rf_chip) {
case RF_8225:
-#ifdef TO_DO_LIST
- PHY_SetRF8225CckTxPower(Adapter, powerlevel);
- PHY_SetRF8225OfdmTxPower(Adapter, powerlevelOFDM24G);
-#endif
break;
case RF_8256:
@@ -1160,48 +1146,6 @@ bool rtl8192_SetRFPowerState(struct net_device *dev,
RT_TRACE(COMP_ERR, "Not support rf_chip(%x)\n", priv->rf_chip);
break;
}
-#ifdef TO_DO_LIST
- if (bResult) {
- /* Update current RF state variable. */
- pHalData->eRFPowerState = eRFPowerState;
- switch (pHalData->RFChipID) {
- case RF_8256:
- switch (pHalData->eRFPowerState) {
- case eRfOff:
- /* If Rf off reason is from IPS,
- * LED should blink with no link
- */
- if (pMgntInfo->RfOffReason == RF_CHANGE_BY_IPS)
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
- else
- /* Turn off LED if RF is not ON. */
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_POWER_OFF);
- break;
-
- case eRfOn:
- /* Turn on RF we are still linked, which might
- * happen when we quickly turn off and on HW RF.
- */
- if (pMgntInfo->bMediaConnect)
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_LINK);
- else
- /* Turn off LED if RF is not ON. */
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
- break;
-
- default:
- break;
- }
- break;
-
- default:
- RT_TRACE(COMP_RF, DBG_LOUD, "%s(): Unknown RF type\n",
- __func__);
- break;
- }
-
- }
-#endif
priv->SetRFPowerStateInProgress = false;
return bResult;
@@ -1628,9 +1572,6 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
/* <3> Set RF related register */
switch (priv->rf_chip) {
case RF_8225:
-#ifdef TO_DO_LIST
- PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW);
-#endif
break;
case RF_8256:
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index b554cf8bd679..0c3ae8495afb 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -258,7 +258,7 @@ void r8712_stop_drv_timers(struct _adapter *padapter)
del_timer_sync(&padapter->mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer);
}
-static u8 init_default_value(struct _adapter *padapter)
+static void init_default_value(struct _adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -292,37 +292,41 @@ static u8 init_default_value(struct _adapter *padapter)
r8712_init_registrypriv_dev_network(padapter);
r8712_update_registrypriv_dev_network(padapter);
/*misc.*/
- return _SUCCESS;
}
-u8 r8712_init_drv_sw(struct _adapter *padapter)
+int r8712_init_drv_sw(struct _adapter *padapter)
{
- if (r8712_init_cmd_priv(&padapter->cmdpriv))
- return _FAIL;
+ int ret;
+
+ ret = r8712_init_cmd_priv(&padapter->cmdpriv);
+ if (ret)
+ return ret;
padapter->cmdpriv.padapter = padapter;
- if (r8712_init_evt_priv(&padapter->evtpriv))
- return _FAIL;
- if (r8712_init_mlme_priv(padapter) == _FAIL)
- return _FAIL;
+ ret = r8712_init_evt_priv(&padapter->evtpriv);
+ if (ret)
+ return ret;
+ ret = r8712_init_mlme_priv(padapter);
+ if (ret)
+ return ret;
_r8712_init_xmit_priv(&padapter->xmitpriv, padapter);
_r8712_init_recv_priv(&padapter->recvpriv, padapter);
memset((unsigned char *)&padapter->securitypriv, 0,
sizeof(struct security_priv));
timer_setup(&padapter->securitypriv.tkip_timer,
r8712_use_tkipkey_handler, 0);
- if (_r8712_init_sta_priv(&padapter->stapriv))
- return _FAIL;
+ ret = _r8712_init_sta_priv(&padapter->stapriv);
+ if (ret)
+ return ret;
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
r8712_init_pwrctrl_priv(padapter);
mp871xinit(padapter);
- if (init_default_value(padapter) != _SUCCESS)
- return _FAIL;
+ init_default_value(padapter);
r8712_InitSwLeds(padapter);
- return _SUCCESS;
+ return ret;
}
-u8 r8712_free_drv_sw(struct _adapter *padapter)
+void r8712_free_drv_sw(struct _adapter *padapter)
{
struct net_device *pnetdev = padapter->pnetdev;
@@ -337,7 +341,6 @@ u8 r8712_free_drv_sw(struct _adapter *padapter)
mp871xdeinit(padapter);
if (pnetdev)
free_netdev(pnetdev);
- return _SUCCESS;
}
static void enable_video_mode(struct _adapter *padapter, int cbw40_value)
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 84c4c8580f9a..215fca4abb3a 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -29,24 +29,23 @@
/*init os related resource in struct recv_priv*/
/*alloc os related resource in union recv_frame*/
-int r8712_os_recv_resource_alloc(struct _adapter *padapter,
- union recv_frame *precvframe)
+void r8712_os_recv_resource_alloc(struct _adapter *padapter,
+ union recv_frame *precvframe)
{
precvframe->u.hdr.pkt_newalloc = NULL;
precvframe->u.hdr.pkt = NULL;
- return _SUCCESS;
}
/*alloc os related resource in struct recv_buf*/
int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
struct recv_buf *precvbuf)
{
- int res = _SUCCESS;
+ int res = 0;
precvbuf->irp_pending = false;
precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
if (!precvbuf->purb)
- res = _FAIL;
+ res = -ENOMEM;
precvbuf->pskb = NULL;
precvbuf->pallocated_buf = NULL;
precvbuf->pbuf = NULL;
@@ -60,8 +59,8 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
}
/*free os related resource in struct recv_buf*/
-int r8712_os_recvbuf_resource_free(struct _adapter *padapter,
- struct recv_buf *precvbuf)
+void r8712_os_recvbuf_resource_free(struct _adapter *padapter,
+ struct recv_buf *precvbuf)
{
if (precvbuf->pskb)
dev_kfree_skb_any(precvbuf->pskb);
@@ -69,7 +68,6 @@ int r8712_os_recvbuf_resource_free(struct _adapter *padapter,
usb_kill_urb(precvbuf->purb);
usb_free_urb(precvbuf->purb);
}
- return _SUCCESS;
}
void r8712_handle_tkip_mic_err(struct _adapter *adapter, u8 bgroup)
@@ -115,8 +113,8 @@ void r8712_recv_indicatepkt(struct _adapter *adapter,
skb->protocol = eth_type_trans(skb, adapter->pnetdev);
netif_rx(skb);
recvframe->u.hdr.pkt = NULL; /* pointers to NULL before
- * r8712_free_recvframe()
- */
+ * r8712_free_recvframe()
+ */
r8712_free_recvframe(recvframe, free_recv_queue);
return;
_recv_indicatepkt_drop:
diff --git a/drivers/staging/rtl8712/recv_osdep.h b/drivers/staging/rtl8712/recv_osdep.h
index dcd3b484c793..d8c1fa74f544 100644
--- a/drivers/staging/rtl8712/recv_osdep.h
+++ b/drivers/staging/rtl8712/recv_osdep.h
@@ -18,22 +18,22 @@
#include "drv_types.h"
#include <linux/skbuff.h>
-sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
+void _r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter);
void _r8712_free_recv_priv(struct recv_priv *precvpriv);
-s32 r8712_recv_entry(union recv_frame *precv_frame);
+void r8712_recv_entry(union recv_frame *precv_frame);
void r8712_recv_indicatepkt(struct _adapter *adapter,
union recv_frame *precv_frame);
void r8712_handle_tkip_mic_err(struct _adapter *padapter, u8 bgroup);
-int r8712_init_recv_priv(struct recv_priv *precvpriv,
- struct _adapter *padapter);
+void r8712_init_recv_priv(struct recv_priv *precvpriv,
+ struct _adapter *padapter);
void r8712_free_recv_priv(struct recv_priv *precvpriv);
-int r8712_os_recv_resource_alloc(struct _adapter *padapter,
- union recv_frame *precvframe);
+void r8712_os_recv_resource_alloc(struct _adapter *padapter,
+ union recv_frame *precvframe);
int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
struct recv_buf *precvbuf);
-int r8712_os_recvbuf_resource_free(struct _adapter *padapter,
- struct recv_buf *precvbuf);
+void r8712_os_recvbuf_resource_free(struct _adapter *padapter,
+ struct recv_buf *precvbuf);
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
#endif
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 6a72a4ad176a..ff3cb09c57a6 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -263,11 +263,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
return pcmd_r; /* if returning pcmd_r == NULL, pcmd must be free. */
}
-static u8 check_cmd_fifo(struct _adapter *padapter, uint sz)
-{
- return _SUCCESS;
-}
-
u8 r8712_fw_cmd(struct _adapter *pAdapter, u32 cmd)
{
int pollingcnts = 50;
@@ -311,7 +306,7 @@ int r8712_cmd_thread(void *context)
break;
if (padapter->driver_stopped || padapter->surprise_removed)
break;
- if (r8712_register_cmd_alive(padapter) != _SUCCESS)
+ if (r8712_register_cmd_alive(padapter))
continue;
_next:
pcmd = r8712_dequeue_cmd(&pcmdpriv->cmd_queue);
@@ -359,13 +354,6 @@ _next:
(pcmdpriv->cmd_seq << 24));
pcmdbuf += 2; /* 8 bytes alignment */
memcpy((u8 *)pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
- while (check_cmd_fifo(padapter, wr_sz) == _FAIL) {
- if (padapter->driver_stopped ||
- padapter->surprise_removed)
- break;
- msleep(100);
- continue;
- }
if (blnPending)
wr_sz += 8; /* Append 8 bytes */
r8712_write_mem(padapter, RTL8712_DMA_H2CCMD, wr_sz,
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 82ddc0c3ecd4..9901815604f4 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -35,11 +35,11 @@ static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
static void recv_tasklet(void *priv);
-int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
+void r8712_init_recv_priv(struct recv_priv *precvpriv,
+ struct _adapter *padapter)
{
int i;
struct recv_buf *precvbuf;
- int res = _SUCCESS;
addr_t tmpaddr = 0;
int alignment = 0;
struct sk_buff *pskb = NULL;
@@ -49,15 +49,14 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
precvpriv->pallocated_recv_buf =
kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC);
if (!precvpriv->pallocated_recv_buf)
- return _FAIL;
+ return;
precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
((addr_t)(precvpriv->pallocated_recv_buf) & 3);
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
INIT_LIST_HEAD(&precvbuf->list);
spin_lock_init(&precvbuf->recvbuf_lock);
- res = r8712_os_recvbuf_resource_alloc(padapter, precvbuf);
- if (res == _FAIL)
+ if (r8712_os_recvbuf_resource_alloc(padapter, precvbuf))
break;
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
@@ -83,7 +82,6 @@ int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter)
}
pskb = NULL;
}
- return res;
}
void r8712_free_recv_priv(struct recv_priv *precvpriv)
@@ -107,7 +105,7 @@ void r8712_free_recv_priv(struct recv_priv *precvpriv)
skb_queue_len(&precvpriv->free_recv_skb_queue));
}
-int r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
+void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
{
precvbuf->transfer_len = 0;
precvbuf->len = 0;
@@ -118,10 +116,9 @@ int r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
precvbuf->ptail = precvbuf->pbuf;
precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ;
}
- return _SUCCESS;
}
-int r8712_free_recvframe(union recv_frame *precvframe,
+void r8712_free_recvframe(union recv_frame *precvframe,
struct __queue *pfree_recv_queue)
{
unsigned long irqL;
@@ -140,7 +137,6 @@ int r8712_free_recvframe(union recv_frame *precvframe,
precvpriv->free_recvframe_cnt++;
}
spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
- return _SUCCESS;
}
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
@@ -323,7 +319,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
return prtnframe;
}
-static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
+static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
{
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
@@ -421,7 +417,6 @@ static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
exit:
prframe->u.hdr.len = 0;
r8712_free_recvframe(prframe, pfree_recv_queue);
- return _SUCCESS;
}
void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
@@ -511,7 +506,6 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
union recv_frame *prframe;
struct rx_pkt_attrib *pattrib;
int bPktInBuf = false;
- struct recv_priv *precvpriv = &padapter->recvpriv;
struct __queue *ppending_recvframe_queue =
&preorder_ctrl->pending_recvframe_queue;
@@ -548,10 +542,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
prframe);
}
} else if (pattrib->amsdu == 1) {
- if (amsdu_to_msdu(padapter, prframe) !=
- _SUCCESS)
- r8712_free_recvframe(prframe,
- &precvpriv->free_recv_queue);
+ amsdu_to_msdu(padapter, prframe);
}
/* Update local variables. */
bPktInBuf = false;
@@ -579,9 +570,9 @@ static int recv_indicatepkt_reorder(struct _adapter *padapter,
if (!padapter->driver_stopped &&
!padapter->surprise_removed) {
r8712_recv_indicatepkt(padapter, prframe);
- return _SUCCESS;
+ return 0;
} else {
- return _FAIL;
+ return -EINVAL;
}
}
}
@@ -603,8 +594,7 @@ static int recv_indicatepkt_reorder(struct _adapter *padapter,
* 2. All packets with SeqNum larger than or equal to
* WinStart => Buffer it.
*/
- if (r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, false) ==
- true) {
+ if (r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
mod_timer(&preorder_ctrl->reordering_ctrl_timer,
jiffies + msecs_to_jiffies(REORDER_WAIT_TIME));
spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
@@ -612,10 +602,10 @@ static int recv_indicatepkt_reorder(struct _adapter *padapter,
spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
del_timer(&preorder_ctrl->reordering_ctrl_timer);
}
- return _SUCCESS;
+ return 0;
_err_exit:
spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql);
- return _FAIL;
+ return -ENOMEM;
}
void r8712_reordering_ctrl_timeout_handler(void *pcontext)
@@ -641,7 +631,7 @@ static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
if (phtpriv->ht_option == 1) { /*B/G/N Mode*/
- if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
+ if (recv_indicatepkt_reorder(padapter, prframe)) {
/* including perform A-MPDU Rx Ordering Buffer Control*/
if (!padapter->driver_stopped &&
!padapter->surprise_removed)
@@ -649,8 +639,8 @@ static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
}
} else { /*B/G mode*/
retval = r8712_wlanhdr_to_ethhdr(prframe);
- if (retval != _SUCCESS)
- return retval;
+ if (retval)
+ return _FAIL;
if (!padapter->driver_stopped && !padapter->surprise_removed) {
/* indicate this recv_frame */
r8712_recv_indicatepkt(padapter, prframe);
@@ -991,7 +981,7 @@ _exit_recv_func:
return retval;
}
-static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
+static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
{
u8 *pbuf, shift_sz = 0;
u8 frag, mf;
@@ -1018,7 +1008,7 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
/* In this case, it means the MAX_RECVBUF_SZ is too small to
* get the data from 8712u.
*/
- return _FAIL;
+ return;
}
do {
prxstat = (struct recv_stat *)pbuf;
@@ -1031,13 +1021,13 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
drvinfo_sz <<= 3;
if (pkt_len <= 0)
- goto _exit_recvbuf2recvframe;
+ return;
/* Qos data, wireless lan header length is 26 */
if ((le32_to_cpu(prxstat->rxdw0) >> 23) & 0x01)
shift_sz = 2;
precvframe = r8712_alloc_recvframe(pfree_recv_queue);
if (!precvframe)
- goto _exit_recvbuf2recvframe;
+ return;
INIT_LIST_HEAD(&precvframe->u.hdr.list);
precvframe->u.hdr.precvbuf = NULL; /*can't access the precvbuf*/
precvframe->u.hdr.len = 0;
@@ -1068,7 +1058,7 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
} else {
precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
if (!precvframe->u.hdr.pkt)
- return _FAIL;
+ return;
precvframe->u.hdr.rx_head = pbuf;
precvframe->u.hdr.rx_data = pbuf;
precvframe->u.hdr.rx_tail = pbuf;
@@ -1088,8 +1078,6 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
precvframe = NULL;
pkt_copy = NULL;
} while ((transfer_len > 0) && pkt_cnt > 0);
-_exit_recvbuf2recvframe:
- return _SUCCESS;
}
static void recv_tasklet(void *priv)
diff --git a/drivers/staging/rtl8712/rtl8712_recv.h b/drivers/staging/rtl8712/rtl8712_recv.h
index 6954c5bfbcaf..3e385b2242d8 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.h
+++ b/drivers/staging/rtl8712/rtl8712_recv.h
@@ -136,7 +136,7 @@ union recv_frame {
} u;
};
-int r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf);
+void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf);
void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf);
s32 r8712_signal_scale_mapping(s32 cur_sig);
void r8712_reordering_ctrl_timeout_handler(void *pcontext);
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 307b0e292976..c247f92207f5 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -246,7 +246,7 @@ void r8712_do_queue_select(struct _adapter *padapter,
}
#ifdef CONFIG_R8712_TX_AGGR
-u8 r8712_construct_txaggr_cmd_desc(struct xmit_buf *pxmitbuf)
+void r8712_construct_txaggr_cmd_desc(struct xmit_buf *pxmitbuf)
{
struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf;
@@ -260,11 +260,9 @@ u8 r8712_construct_txaggr_cmd_desc(struct xmit_buf *pxmitbuf)
/* dw1 */
ptx_desc->txdw1 |= cpu_to_le32((0x13 << QSEL_SHT) & 0x00001f00);
-
- return _SUCCESS;
}
-u8 r8712_construct_txaggr_cmd_hdr(struct xmit_buf *pxmitbuf)
+void r8712_construct_txaggr_cmd_hdr(struct xmit_buf *pxmitbuf)
{
struct xmit_frame *pxmitframe = (struct xmit_frame *)
pxmitbuf->priv_data;
@@ -278,12 +276,10 @@ u8 r8712_construct_txaggr_cmd_hdr(struct xmit_buf *pxmitbuf)
pcmd_hdr->cmd_dw0 = cpu_to_le32((GEN_CMD_CODE(_AMSDU_TO_AMPDU) << 16) |
(pcmdpriv->cmd_seq << 24));
pcmdpriv->cmd_seq++;
-
- return _SUCCESS;
}
-u8 r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe)
+void r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
{
struct _adapter *padapter = pxmitframe->padapter;
struct tx_desc *ptx_desc = (struct tx_desc *)pxmitbuf->pbuf;
@@ -319,13 +315,11 @@ u8 r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
((ptx_desc->txdw0 & 0x0000ffff) +
((TXDESC_SIZE + last_txcmdsz + padding_sz) &
0x0000ffff)));
-
- return _SUCCESS;
}
-u8 r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe)
+void r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
{
/* linux complete context doesn't need to protect */
pxmitframe->pxmitbuf = pxmitbuf;
@@ -336,10 +330,8 @@ u8 r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
/*RTL8712_DMA_H2CCMD */
r8712_construct_txaggr_cmd_desc(pxmitbuf);
r8712_construct_txaggr_cmd_hdr(pxmitbuf);
- if (r8712_append_mpdu_unit(pxmitbuf, pxmitframe) == _SUCCESS)
- pxmitbuf->aggr_nr = 1;
-
- return _SUCCESS;
+ r8712_append_mpdu_unit(pxmitbuf, pxmitframe);
+ pxmitbuf->aggr_nr = 1;
}
u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf,
@@ -351,18 +343,17 @@ u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf,
/* buffer addr assoc */
pxmitframe->buf_addr = pxmitbuf->pbuf + TXDESC_SIZE +
(((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff);
- if (r8712_append_mpdu_unit(pxmitbuf, pxmitframe) == _SUCCESS) {
- r8712_free_xmitframe_ex(&pxmitframe->padapter->xmitpriv,
- pxmitframe);
- pxmitbuf->aggr_nr++;
- }
+ r8712_append_mpdu_unit(pxmitbuf, pxmitframe);
+ r8712_free_xmitframe_ex(&pxmitframe->padapter->xmitpriv,
+ pxmitframe);
+ pxmitbuf->aggr_nr++;
return TXDESC_SIZE +
(((struct tx_desc *)pxmitbuf->pbuf)->txdw0 & 0x0000ffff);
}
-u8 r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe)
+void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe)
{
struct _adapter *padapter = pxmitframe->padapter;
struct dvobj_priv *pdvobj = &padapter->dvobjpriv;
@@ -399,8 +390,6 @@ u8 r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
}
r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD,
total_length + TXDESC_SIZE, (u8 *)pxmitframe);
-
- return _SUCCESS;
}
#endif
@@ -737,20 +726,19 @@ static void dump_xframe(struct _adapter *padapter,
}
}
-int r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe)
+void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe)
{
- int res = _SUCCESS;
+ int res;
res = r8712_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
pxmitframe->pkt = NULL;
if (res == _SUCCESS)
dump_xframe(padapter, pxmitframe);
- return res;
}
int r8712_xmit_enqueue(struct _adapter *padapter, struct xmit_frame *pxmitframe)
{
- if (r8712_xmit_classifier(padapter, pxmitframe) == _FAIL) {
+ if (r8712_xmit_classifier(padapter, pxmitframe)) {
pxmitframe->pkt = NULL;
return _FAIL;
}
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.h b/drivers/staging/rtl8712/rtl8712_xmit.h
index 9be8fb70c92e..0b56bd3ac4d0 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.h
+++ b/drivers/staging/rtl8712/rtl8712_xmit.h
@@ -102,10 +102,10 @@ void r8712_do_queue_select(struct _adapter *padapter,
struct pkt_attrib *pattrib);
#ifdef CONFIG_R8712_TX_AGGR
-u8 r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe);
-u8 r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe);
+void r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe);
+void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
+ struct xmit_frame *pxmitframe);
#endif
#endif
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 28941423b7ed..c20dd5a6bbd1 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -11,8 +11,8 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
-#ifndef _IO_H_
-#define _IO_H_
+#ifndef _RTL871X_IO_H_
+#define _RTL871X_IO_H_
#include "osdep_service.h"
#include "osdep_intf.h"
@@ -234,5 +234,4 @@ void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
uint r8712_alloc_io_queue(struct _adapter *adapter);
void r8712_free_io_queue(struct _adapter *adapter);
-#endif /*_RTL8711_IO_H_*/
-
+#endif /*_RTL871X_IO_H_*/
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index b08b9a191a34..944336e0d2e2 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -419,8 +419,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
pwep->KeyIndex |= 0x80000000;
memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
if (param->u.crypt.set_tx) {
- if (r8712_set_802_11_add_wep(padapter, pwep) ==
- (u8)_FAIL)
+ if (r8712_set_802_11_add_wep(padapter, pwep))
ret = -EOPNOTSUPP;
} else {
/* don't update "psecuritypriv->PrivacyAlgrthm" and
@@ -1585,7 +1584,7 @@ static int r8711_wx_set_enc(struct net_device *dev,
}
wep.KeyIndex |= 0x80000000; /* transmit key */
memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
- if (r8712_set_802_11_add_wep(padapter, &wep) == _FAIL)
+ if (r8712_set_802_11_add_wep(padapter, &wep))
return -EOPNOTSUPP;
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index f3c0a9348f56..6cdc6f1a6bc6 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -320,22 +320,22 @@ u8 r8712_set_802_11_authentication_mode(struct _adapter *padapter,
psecuritypriv->ndisauthtype = authmode;
if (psecuritypriv->ndisauthtype > 3)
psecuritypriv->AuthAlgrthm = 2; /* 802.1x */
- if (r8712_set_auth(padapter, psecuritypriv) == _SUCCESS)
- ret = true;
- else
+ if (r8712_set_auth(padapter, psecuritypriv))
ret = false;
+ else
+ ret = true;
return ret;
}
-u8 r8712_set_802_11_add_wep(struct _adapter *padapter,
- struct NDIS_802_11_WEP *wep)
+int r8712_set_802_11_add_wep(struct _adapter *padapter,
+ struct NDIS_802_11_WEP *wep)
{
sint keyid;
struct security_priv *psecuritypriv = &padapter->securitypriv;
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= WEP_KEYS)
- return false;
+ return -EINVAL;
switch (wep->KeyLength) {
case 5:
psecuritypriv->PrivacyAlgrthm = _WEP40_;
@@ -351,7 +351,5 @@ u8 r8712_set_802_11_add_wep(struct _adapter *padapter,
wep->KeyLength);
psecuritypriv->DefKeylen[keyid] = wep->KeyLength;
psecuritypriv->PrivacyKeyIndex = keyid;
- if (r8712_set_key(padapter, psecuritypriv, keyid) == _FAIL)
- return false;
- return _SUCCESS;
+ return r8712_set_key(padapter, psecuritypriv, keyid);
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.h b/drivers/staging/rtl8712/rtl871x_ioctl_set.h
index 8b1085aea962..e2de820f61d9 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.h
@@ -28,8 +28,8 @@ u8 r8712_set_802_11_authentication_mode(struct _adapter *pdapter,
u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid);
-u8 r8712_set_802_11_add_wep(struct _adapter *padapter,
- struct NDIS_802_11_WEP *wep);
+int r8712_set_802_11_add_wep(struct _adapter *padapter,
+ struct NDIS_802_11_WEP *wep);
u8 r8712_set_802_11_disassociate(struct _adapter *padapter);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 0cc879a4d43f..cabdb3549a5a 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -53,7 +53,7 @@ int r8712_init_mlme_priv(struct _adapter *padapter)
pbuf = kmalloc_array(MAX_BSS_CNT, sizeof(struct wlan_network),
GFP_ATOMIC);
if (!pbuf)
- return _FAIL;
+ return -ENOMEM;
pmlmepriv->free_bss_buf = pbuf;
pnetwork = (struct wlan_network *)pbuf;
for (i = 0; i < MAX_BSS_CNT; i++) {
@@ -67,7 +67,7 @@ int r8712_init_mlme_priv(struct _adapter *padapter)
pmlmepriv->sitesurveyctrl.traffic_busy = false;
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
r8712_init_mlme_timer(padapter);
- return _SUCCESS;
+ return 0;
}
struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv)
@@ -1144,8 +1144,8 @@ ask_for_joinbss:
return r8712_joinbss_cmd(adapter, pnetwork);
}
-sint r8712_set_auth(struct _adapter *adapter,
- struct security_priv *psecuritypriv)
+int r8712_set_auth(struct _adapter *adapter,
+ struct security_priv *psecuritypriv)
{
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
struct cmd_obj *pcmd;
@@ -1153,12 +1153,12 @@ sint r8712_set_auth(struct _adapter *adapter,
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd)
- return _FAIL;
+ return -ENOMEM;
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
if (!psetauthparm) {
kfree(pcmd);
- return _FAIL;
+ return -ENOMEM;
}
psetauthparm->mode = (u8)psecuritypriv->AuthAlgrthm;
pcmd->cmdcode = _SetAuth_CMD_;
@@ -1168,25 +1168,25 @@ sint r8712_set_auth(struct _adapter *adapter,
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
r8712_enqueue_cmd(pcmdpriv, pcmd);
- return _SUCCESS;
+ return 0;
}
-sint r8712_set_key(struct _adapter *adapter,
- struct security_priv *psecuritypriv,
- sint keyid)
+int r8712_set_key(struct _adapter *adapter,
+ struct security_priv *psecuritypriv,
+ sint keyid)
{
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
struct cmd_obj *pcmd;
struct setkey_parm *psetkeyparm;
u8 keylen;
- sint ret = _SUCCESS;
+ int ret;
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd)
- return _FAIL;
+ return -ENOMEM;
psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
if (!psetkeyparm) {
- ret = _FAIL;
+ ret = -ENOMEM;
goto err_free_cmd;
}
if (psecuritypriv->AuthAlgrthm == 2) { /* 802.1X */
@@ -1211,7 +1211,7 @@ sint r8712_set_key(struct _adapter *adapter,
break;
case _TKIP_:
if (keyid < 1 || keyid > 2) {
- ret = _FAIL;
+ ret = -EINVAL;
goto err_free_parm;
}
keylen = 16;
@@ -1221,7 +1221,7 @@ sint r8712_set_key(struct _adapter *adapter,
break;
case _AES_:
if (keyid < 1 || keyid > 2) {
- ret = _FAIL;
+ ret = -EINVAL;
goto err_free_parm;
}
keylen = 16;
@@ -1230,7 +1230,7 @@ sint r8712_set_key(struct _adapter *adapter,
psetkeyparm->grpkey = 1;
break;
default:
- ret = _FAIL;
+ ret = -EINVAL;
goto err_free_parm;
}
pcmd->cmdcode = _SetKey_CMD_;
@@ -1240,7 +1240,7 @@ sint r8712_set_key(struct _adapter *adapter,
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
r8712_enqueue_cmd(pcmdpriv, pcmd);
- return ret;
+ return 0;
err_free_parm:
kfree(psetkeyparm);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index a160107e9801..46effb469fd4 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -173,10 +173,10 @@ void r8712_free_network_queue(struct _adapter *adapter);
int r8712_init_mlme_priv(struct _adapter *adapter);
void r8712_free_mlme_priv(struct mlme_priv *pmlmepriv);
int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv);
-sint r8712_set_key(struct _adapter *adapter,
- struct security_priv *psecuritypriv, sint keyid);
-sint r8712_set_auth(struct _adapter *adapter,
- struct security_priv *psecuritypriv);
+int r8712_set_key(struct _adapter *adapter,
+ struct security_priv *psecuritypriv, sint keyid);
+int r8712_set_auth(struct _adapter *adapter,
+ struct security_priv *psecuritypriv);
uint r8712_get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss);
void r8712_generate_random_ibss(u8 *pibss);
u8 *r8712_get_capability_from_ie(u8 *ie);
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index edd3da05fc06..1a39a96b726f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -35,7 +35,7 @@ static void _init_mp_priv_(struct mp_priv *pmp_priv)
static int init_mp_priv(struct mp_priv *pmp_priv)
{
- int i, res;
+ int i;
struct mp_xmit_frame *pmp_xmitframe;
_init_mp_priv_(pmp_priv);
@@ -45,8 +45,7 @@ static int init_mp_priv(struct mp_priv *pmp_priv)
sizeof(struct mp_xmit_frame) + 4,
GFP_ATOMIC);
if (!pmp_priv->pallocated_mp_xmitframe_buf) {
- res = _FAIL;
- goto _exit_init_mp_priv;
+ return -ENOMEM;
}
pmp_priv->pmp_xmtframe_buf = pmp_priv->pallocated_mp_xmitframe_buf +
4 -
@@ -62,9 +61,7 @@ static int init_mp_priv(struct mp_priv *pmp_priv)
pmp_xmitframe++;
}
pmp_priv->free_mp_xmitframe_cnt = NR_MP_XMITFRAME;
- res = _SUCCESS;
-_exit_init_mp_priv:
- return res;
+ return 0;
}
static int free_mp_priv(struct mp_priv *pmp_priv)
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index 2beafc7742b3..23cff43437e2 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -184,19 +184,19 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
* will raise the cpwm to be greater than or equal to P2.
* Calling Context: Passive
* Return Value:
- * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
- * _FAIL: r8712_cmd_thread can not do anything.
+ * 0: r8712_cmd_thread can issue cmds to firmware afterwards.
+ * -EINVAL: r8712_cmd_thread can not do anything.
*/
-sint r8712_register_cmd_alive(struct _adapter *padapter)
+int r8712_register_cmd_alive(struct _adapter *padapter)
{
- uint res = _SUCCESS;
+ int res = 0;
struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
mutex_lock(&pwrctrl->mutex_lock);
register_task_alive(pwrctrl, CMD_ALIVE);
if (pwrctrl->cpwm < PS_STATE_S2) {
r8712_set_rpwm(padapter, PS_STATE_S3);
- res = _FAIL;
+ res = -EINVAL;
}
mutex_unlock(&pwrctrl->mutex_lock);
return res;
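Per the updated comment, r8712_register_cmd_alive() now returns 0 when the firmware is already awake and -EINVAL when it had to raise rpwm first. A hedged sketch of the caller side (the retry handling is illustrative only):
/* illustrative caller: only issue firmware commands once registration succeeds */
if (r8712_register_cmd_alive(padapter)) {
	/* cpwm < PS_STATE_S2: rpwm was raised; wait for the cpwm
	 * interrupt and retry instead of issuing commands now */
	return;
}
/* ... issue commands to the firmware ... */
r8712_unregister_cmd_alive(padapter);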
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index 11b5034f203d..dd5a79f90b1a 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -104,7 +104,7 @@ struct pwrctrl_priv {
};
void r8712_init_pwrctrl_priv(struct _adapter *adapter);
-sint r8712_register_cmd_alive(struct _adapter *padapter);
+int r8712_register_cmd_alive(struct _adapter *padapter);
void r8712_unregister_cmd_alive(struct _adapter *padapter);
void r8712_cpwm_int_hdl(struct _adapter *padapter,
struct reportpwrstate_parm *preportpwrstate);
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 5298fe603437..e5092b6da4bd 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -48,7 +48,7 @@ void _r8712_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
_init_queue(&psta_recvpriv->defrag_q);
}
-sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
+void _r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter)
{
sint i;
@@ -64,7 +64,7 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
sizeof(union recv_frame) + RXFRAME_ALIGN_SZ,
GFP_ATOMIC);
if (precvpriv->pallocated_frame_buf == NULL)
- return _FAIL;
+ return;
kmemleak_not_leak(precvpriv->pallocated_frame_buf);
precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
RXFRAME_ALIGN_SZ -
@@ -80,7 +80,7 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
precvframe++;
}
precvpriv->rx_pending_cnt = 1;
- return r8712_init_recv_priv(precvpriv, padapter);
+ r8712_init_recv_priv(precvpriv, padapter);
}
void _r8712_free_recv_priv(struct recv_priv *precvpriv)
@@ -245,8 +245,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
if (auth_alg == 2) {
/* get ether_type */
ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
- memcpy(&ether_type, ptr, 2);
- be16_to_cpus(&ether_type);
+ ether_type = get_unaligned_be16(ptr);
if ((psta != NULL) && (psta->ieee8021x_blocked)) {
/* blocked
@@ -586,7 +585,7 @@ sint r8712_validate_recv_frame(struct _adapter *adapter,
return retval;
}
-sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
+int r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
{
/*remove the wlanhdr and add the eth_hdr*/
sint rmv_len;
@@ -629,14 +628,14 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + 2) - 24);
if (!ptr)
- return _FAIL;
+ return -ENOMEM;
memcpy(ptr, get_rxmem(precvframe), 24);
ptr += 24;
} else {
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
if (!ptr)
- return _FAIL;
+ return -ENOMEM;
}
memcpy(ptr, pattrib->dst, ETH_ALEN);
@@ -646,10 +645,10 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
memcpy(ptr + 12, &be_tmp, 2);
}
- return _SUCCESS;
+ return 0;
}
-s32 r8712_recv_entry(union recv_frame *precvframe)
+void r8712_recv_entry(union recv_frame *precvframe)
{
struct _adapter *padapter;
struct recv_priv *precvpriv;
@@ -667,9 +666,8 @@ s32 r8712_recv_entry(union recv_frame *precvframe)
precvpriv->rx_pkts++;
precvpriv->rx_bytes += (uint)(precvframe->u.hdr.rx_tail -
precvframe->u.hdr.rx_data);
- return ret;
+ return;
_recv_entry_drop:
precvpriv->rx_drop++;
padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
- return ret;
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index f87b2ff5de1c..0146a774e19d 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -128,7 +128,7 @@ struct sta_recv_priv {
/* get a free recv_frame from pfree_recv_queue */
union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue);
-int r8712_free_recvframe(union recv_frame *precvframe,
+void r8712_free_recvframe(union recv_frame *precvframe,
struct __queue *pfree_recv_queue);
void r8712_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
diff --git a/drivers/staging/rtl8712/rtl871x_rf.h b/drivers/staging/rtl8712/rtl871x_rf.h
index cc54453cd424..7d98921a48fa 100644
--- a/drivers/staging/rtl8712/rtl871x_rf.h
+++ b/drivers/staging/rtl8712/rtl871x_rf.h
@@ -52,5 +52,4 @@ enum {
RTL8712_RFC_2T2R = 0x22
};
-#endif /*_RTL8711_RF_H_*/
-
+#endif /*__RTL871X_RF_H_*/
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 693008bba83e..73e3d5ef3af2 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -636,7 +636,7 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
}
/* The hlen doesn't include the IV */
-u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
+void r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
u16 pnl;
u32 pnh;
@@ -670,7 +670,7 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
prwskey = &psecuritypriv->XGrpKey[
((idx >> 6) & 0x3) - 1].skey[0];
if (!psecuritypriv->binstallGrpkey)
- return _FAIL;
+ return;
} else {
prwskey = &stainfo->x_UncstKey.skey[0];
}
@@ -686,16 +686,8 @@ u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
arcfour_encrypt(&mycontext, payload, payload, length);
*((__le32 *)crc) = cpu_to_le32(getcrc32(payload,
length - 4));
- if (crc[3] != payload[length - 1] ||
- crc[2] != payload[length - 2] ||
- crc[1] != payload[length - 3] ||
- crc[0] != payload[length - 4])
- return _FAIL;
- } else {
- return _FAIL;
}
}
- return _SUCCESS;
}
/* 3 =====AES related===== */
@@ -1019,8 +1011,8 @@ static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
out[i] = ina[i] ^ inb[i];
}
-static sint aes_cipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
+static void aes_cipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
{
uint qc_exists, a4_exists, i, j, payload_remainder;
uint num_blocks, payload_index;
@@ -1140,7 +1132,6 @@ static sint aes_cipher(u8 *key, uint hdrlen,
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
pframe[payload_index++] = chain_buffer[j];
- return _SUCCESS;
}
u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
@@ -1193,8 +1184,8 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
return res;
}
-static sint aes_decipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
+static void aes_decipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
uint qc_exists, a4_exists, i, j, payload_remainder;
@@ -1348,10 +1339,9 @@ static sint aes_decipher(u8 *key, uint hdrlen,
for (j = 0; j < 8; j++)
message[payload_index++] = chain_buffer[j];
/* compare the mic */
- return _SUCCESS;
}
-u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
+void r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
{ /* exclude ICV */
/* Intermediate Buffers */
sint length;
@@ -1374,7 +1364,7 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
prwskey = &psecuritypriv->XGrpKey[
((idx >> 6) & 0x3) - 1].skey[0];
if (!psecuritypriv->binstallGrpkey)
- return _FAIL;
+ return;
} else {
prwskey = &stainfo->x_UncstKey.skey[0];
@@ -1384,11 +1374,8 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
prxattrib->iv_len;
aes_decipher(prwskey, prxattrib->hdrlen, pframe,
length);
- } else {
- return _FAIL;
}
}
- return _SUCCESS;
}
void r8712_use_tkipkey_handler(struct timer_list *t)
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index 25b4d379766d..b2dda16cbd0a 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -209,8 +209,8 @@ void r8712_secgetmic(struct mic_data *pmicdata, u8 *dst);
u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe);
u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe);
void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe);
-u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe);
-u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe);
+void r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe);
+void r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe);
void r8712_wep_decrypt(struct _adapter *padapter, u8 *precvframe);
void r8712_use_tkipkey_handler(struct timer_list *t);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 0a26d71e5340..cc5809e49e35 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -49,8 +49,8 @@ void _r8712_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
}
-sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
- struct _adapter *padapter)
+int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ struct _adapter *padapter)
{
sint i;
struct xmit_buf *pxmitbuf;
@@ -79,7 +79,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
- return _FAIL;
+ return -ENOMEM;
}
pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 -
((addr_t) (pxmitpriv->pallocated_frame_buf) & 3);
@@ -119,7 +119,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
if (!pxmitpriv->pallocated_xmitbuf) {
kfree(pxmitpriv->pallocated_frame_buf);
pxmitpriv->pallocated_frame_buf = NULL;
- return _FAIL;
+ return -ENOMEM;
}
pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
@@ -129,12 +129,12 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ,
GFP_ATOMIC);
if (!pxmitbuf->pallocated_buf)
- return _FAIL;
+ return -ENOMEM;
pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
((addr_t) (pxmitbuf->pallocated_buf) &
(XMITBUF_ALIGN_SZ - 1));
if (r8712_xmit_resource_alloc(padapter, pxmitbuf))
- return _FAIL;
+ return -ENOMEM;
list_add_tail(&pxmitbuf->list,
&(pxmitpriv->free_xmitbuf_queue.queue));
pxmitbuf++;
@@ -146,7 +146,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
tasklet_init(&pxmitpriv->xmit_tasklet,
(void(*)(unsigned long))r8712_xmit_bh,
(unsigned long)padapter);
- return _SUCCESS;
+ return 0;
}
void _free_xmit_priv(struct xmit_priv *pxmitpriv)
@@ -173,8 +173,8 @@ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
free_hwxmits(padapter);
}
-sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
- struct pkt_attrib *pattrib)
+int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
+ struct pkt_attrib *pattrib)
{
struct pkt_file pktfile;
struct sta_info *psta = NULL;
@@ -224,7 +224,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
/*firstly, filter packet not belongs to mp*/
if (pattrib->ether_type != 0x8712)
- return _FAIL;
+ return -EINVAL;
/* for mp storing the txcmd per packet,
* according to the info of txcmd to update pattrib
*/
@@ -271,7 +271,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
} else {
psta = r8712_get_stainfo(pstapriv, pattrib->ra);
if (psta == NULL) /* drop the pkt */
- return _FAIL;
+ return -ENOMEM;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
pattrib->mac_id = 5;
else
@@ -283,7 +283,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pattrib->psta = psta;
} else {
/* if we cannot get psta => drop the pkt */
- return _FAIL;
+ return -ENOMEM;
}
pattrib->ack_policy = 0;
@@ -301,7 +301,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pattrib->encrypt = 0;
if ((pattrib->ether_type != 0x888e) &&
!check_fwstate(pmlmepriv, WIFI_MP_STATE))
- return _FAIL;
+ return -EINVAL;
} else {
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
}
@@ -315,7 +315,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pattrib->iv_len = 8;
pattrib->icv_len = 4;
if (padapter->securitypriv.busetkipkey == _FAIL)
- return _FAIL;
+ return -EINVAL;
break;
case _AES_:
pattrib->iv_len = 8;
@@ -339,11 +339,11 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
pattrib->priority =
(le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
- return _SUCCESS;
+ return 0;
}
-static sint xmitframe_addmic(struct _adapter *padapter,
- struct xmit_frame *pxmitframe)
+static int xmitframe_addmic(struct _adapter *padapter,
+ struct xmit_frame *pxmitframe)
{
u32 curfragnum, length;
u8 *pframe, *payload, mic[8];
@@ -372,7 +372,7 @@ static sint xmitframe_addmic(struct _adapter *padapter,
if (!memcmp(psecuritypriv->XGrptxmickey
[psecuritypriv->XGrpKeyid].skey,
null_key, 16))
- return _FAIL;
+ return -ENOMEM;
/*start to calculate the mic code*/
r8712_secmicsetkey(&micdata,
psecuritypriv->
@@ -381,7 +381,7 @@ static sint xmitframe_addmic(struct _adapter *padapter,
} else {
if (!memcmp(&stainfo->tkiptxmickey.skey[0],
null_key, 16))
- return _FAIL;
+ return -ENOMEM;
/* start to calculate the mic code */
r8712_secmicsetkey(&micdata,
&stainfo->tkiptxmickey.skey[0]);
@@ -442,7 +442,7 @@ static sint xmitframe_addmic(struct _adapter *padapter,
payload = payload - pattrib->last_txcmdsz + 8;
}
}
- return _SUCCESS;
+ return 0;
}
static sint xmitframe_swencrypt(struct _adapter *padapter,
@@ -469,8 +469,8 @@ static sint xmitframe_swencrypt(struct _adapter *padapter,
return _SUCCESS;
}
-static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
- struct pkt_attrib *pattrib)
+static int make_wlanhdr(struct _adapter *padapter, u8 *hdr,
+ struct pkt_attrib *pattrib)
{
u16 *qc;
@@ -509,7 +509,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
ETH_ALEN);
} else {
- return _FAIL;
+ return -EINVAL;
}
if (pattrib->encrypt)
@@ -547,7 +547,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
}
}
}
- return _SUCCESS;
+ return 0;
}
static sint r8712_put_snap(u8 *data, u16 h_proto)
@@ -605,7 +605,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
pbuf_start = pxmitframe->buf_addr;
ptxdesc = pbuf_start;
mem_start = pbuf_start + TXDESC_OFFSET;
- if (make_wlanhdr(padapter, mem_start, pattrib) == _FAIL)
+ if (make_wlanhdr(padapter, mem_start, pattrib))
return _FAIL;
_r8712_open_pktfile(pkt, &pktfile);
_r8712_pktfile_read(&pktfile, NULL, (uint) pattrib->pkt_hdrlen);
@@ -696,7 +696,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
memcpy(mem_start, pbuf_start + TXDESC_OFFSET, pattrib->hdrlen);
}
- if (xmitframe_addmic(padapter, pxmitframe) == _FAIL)
+ if (xmitframe_addmic(padapter, pxmitframe))
return _FAIL;
xmitframe_swencrypt(padapter, pxmitframe);
return _SUCCESS;
@@ -753,19 +753,18 @@ struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
return pxmitbuf;
}
-int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+void r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
unsigned long irqL;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
if (pxmitbuf == NULL)
- return _FAIL;
+ return;
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
list_del_init(&pxmitbuf->list);
list_add_tail(&(pxmitbuf->list), &pfree_xmitbuf_queue->queue);
pxmitpriv->free_xmitbuf_cnt++;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
- return _SUCCESS;
}
/*
@@ -894,8 +893,8 @@ static inline struct tx_servq *get_sta_pending(struct _adapter *padapter,
* Will enqueue pxmitframe to the proper queue, and indicate it
* to xx_pending list.....
*/
-sint r8712_xmit_classifier(struct _adapter *padapter,
- struct xmit_frame *pxmitframe)
+int r8712_xmit_classifier(struct _adapter *padapter,
+ struct xmit_frame *pxmitframe)
{
unsigned long irqL0;
struct __queue *pstapending;
@@ -920,7 +919,7 @@ sint r8712_xmit_classifier(struct _adapter *padapter,
}
}
if (psta == NULL)
- return _FAIL;
+ return -EINVAL;
ptxservq = get_sta_pending(padapter, &pstapending,
psta, pattrib->priority);
spin_lock_irqsave(&pstapending->lock, irqL0);
@@ -929,7 +928,7 @@ sint r8712_xmit_classifier(struct _adapter *padapter,
list_add_tail(&pxmitframe->list, &ptxservq->sta_pending.queue);
ptxservq->qcnt++;
spin_unlock_irqrestore(&pstapending->lock, irqL0);
- return _SUCCESS;
+ return 0;
}
static void alloc_hwxmits(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index 4199cb586fb1..b14da38bf652 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -41,7 +41,7 @@ do { \
pattrib_iv[0] = txpn._byte_.TSC0;\
pattrib_iv[1] = txpn._byte_.TSC1;\
pattrib_iv[2] = txpn._byte_.TSC2;\
- pattrib_iv[3] = ((keyidx & 0x3)<<6);\
+ pattrib_iv[3] = ((keyidx & 0x3) << 6);\
txpn.val = (txpn.val == 0xffffff) ? 0 : (txpn.val+1);\
} while (0)
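The shift in the reformatted line packs the 2-bit key index into bits 6-7 of the fourth IV byte written by this macro; a small illustration (the helper is hypothetical, not from this patch):
static inline u8 iv_keyid_byte(u8 keyidx)
{
	return (keyidx & 0x3) << 6;	/* keyidx 1 -> 0x40, keyidx 3 -> 0xc0 */
}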
@@ -249,8 +249,8 @@ struct xmit_priv {
uint free_xmitbuf_cnt;
};
-int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
+void r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len);
struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv);
@@ -258,25 +258,25 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
struct xmit_frame *pxmitframe);
void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
struct __queue *pframequeue);
-sint r8712_xmit_classifier(struct _adapter *padapter,
- struct xmit_frame *pxmitframe);
+int r8712_xmit_classifier(struct _adapter *padapter,
+ struct xmit_frame *pxmitframe);
sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
struct xmit_frame *pxmitframe);
sint _r8712_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
void _r8712_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
-sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
- struct pkt_attrib *pattrib);
+int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
+ struct pkt_attrib *pattrib);
int r8712_txframes_sta_ac_pending(struct _adapter *padapter,
struct pkt_attrib *pattrib);
-sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
- struct _adapter *padapter);
+int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
+ struct _adapter *padapter);
void _free_xmit_priv(struct xmit_priv *pxmitpriv);
void r8712_free_xmitframe_ex(struct xmit_priv *pxmitpriv,
struct xmit_frame *pxmitframe);
int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
int r8712_xmit_enqueue(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
-int r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
+void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
void r8712_xmit_bh(void *priv);
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index d0daae0b8299..ba1288297ee4 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -389,7 +389,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
}
/* step 4. */
status = r8712_init_drv_sw(padapter);
- if (status == _FAIL)
+ if (status)
goto error;
/* step 5. read efuse/eeprom data and get mac_addr */
{
diff --git a/drivers/staging/rtl8712/usb_osintf.h b/drivers/staging/rtl8712/usb_osintf.h
index ddfa405d0c9b..2e512b4a564c 100644
--- a/drivers/staging/rtl8712/usb_osintf.h
+++ b/drivers/staging/rtl8712/usb_osintf.h
@@ -28,8 +28,8 @@ void rtl871x_intf_stop(struct _adapter *padapter);
void r871x_dev_unload(struct _adapter *padapter);
void r8712_stop_drv_threads(struct _adapter *padapter);
void r8712_stop_drv_timers(struct _adapter *padapter);
-u8 r8712_init_drv_sw(struct _adapter *padapter);
-u8 r8712_free_drv_sw(struct _adapter *padapter);
+int r8712_init_drv_sw(struct _adapter *padapter);
+void r8712_free_drv_sw(struct _adapter *padapter);
struct net_device *r8712_init_netdev(void);
#endif
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 1a5b966a167e..be731f1a2209 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -300,7 +300,6 @@ static inline unsigned char *get_da(unsigned char *pframe)
return da;
}
-
static inline unsigned char *get_sa(unsigned char *pframe)
{
unsigned char *sa;
@@ -346,8 +345,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
return sa;
}
-
-
/*-----------------------------------------------------------------------------
* Below is for the security related definition
*-----------------------------------------------------------------------------
@@ -392,7 +389,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _RESERVED47_ 47
-
/* ---------------------------------------------------------------------------
* Below is the fixed elements...
* ---------------------------------------------------------------------------
@@ -436,7 +432,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define _WMM_IE_Length_ 7 /* for WMM STA */
#define _WMM_Para_Element_Length_ 24
-
/*-----------------------------------------------------------------------------
* Below is the definition for 802.11n
*------------------------------------------------------------------------------
@@ -456,7 +451,6 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define GetOrderBit(pbuf) (((*(__le16 *)(pbuf)) & \
le16_to_cpu(_ORDER_)) != 0)
-
/**
* struct ieee80211_bar - HT Block Ack Request
*
@@ -476,7 +470,6 @@ struct ieee80211_bar {
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-
/*
* struct ieee80211_ht_cap - HT capabilities
*
@@ -552,7 +545,6 @@ struct ieee80211_ht_addt_info {
*/
#define IEEE80211_MIN_AMPDU_BUF 0x8
-
/* Spatial Multiplexing Power Save Modes */
#define WLAN_HT_CAP_SM_PS_STATIC 0
#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 01d713d027b0..1f67d86c606f 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -160,7 +160,7 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
if (!xmitframe)
goto _xmit_entry_drop;
- if ((!r8712_update_attrib(adapter, pkt, &xmitframe->attrib)))
+ if (r8712_update_attrib(adapter, pkt, &xmitframe->attrib))
goto _xmit_entry_drop;
adapter->ledpriv.LedControlHandler(adapter, LED_CTL_TX);
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index a12cf8dd8ed9..dfe410283ca0 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -60,7 +60,6 @@ r8723bs-y = \
os_dep/osdep_service.o \
os_dep/os_intfs.o \
os_dep/recv_linux.o \
- os_dep/rtw_proc.o \
os_dep/sdio_intf.o \
os_dep/sdio_ops_linux.o \
os_dep/wifi_regd.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 7bd5c61b055c..6d18d23acdc0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -13,11 +13,10 @@ extern unsigned char RTW_WPA_OUI[];
extern unsigned char WMM_OUI[];
extern unsigned char WPS_OUI[];
extern unsigned char P2P_OUI[];
-extern unsigned char WFD_OUI[];
void init_mlme_ap_info(struct adapter *padapter)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -34,9 +33,9 @@ void init_mlme_ap_info(struct adapter *padapter)
void free_mlme_ap_info(struct adapter *padapter)
{
struct sta_info *psta = NULL;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* stop_ap_mode(padapter); */
@@ -58,9 +57,9 @@ void free_mlme_ap_info(struct adapter *padapter)
static void update_BCNTIM(struct adapter *padapter)
{
struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
unsigned char *pie = pnetwork_mlmeext->IEs;
/* DBG_871X("%s\n", __func__); */
@@ -83,7 +82,7 @@ static void update_BCNTIM(struct adapter *padapter)
if (p != NULL && tim_ielen > 0) {
tim_ielen += 2;
- premainder_ie = p+tim_ielen;
+ premainder_ie = p + tim_ielen;
tim_ie_offset = (sint)(p - pie);
@@ -94,7 +93,7 @@ static void update_BCNTIM(struct adapter *padapter)
} else {
tim_ielen = 0;
- /* calucate head_len */
+ /* calculate head_len */
offset = _FIXED_IE_LENGTH_;
/* get ssid_ie len */
@@ -105,7 +104,7 @@ static void update_BCNTIM(struct adapter *padapter)
(pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
);
if (p != NULL)
- offset += tmp_len+2;
+ offset += tmp_len + 2;
/* get supported rates len */
p = rtw_get_ie(
@@ -114,7 +113,7 @@ static void update_BCNTIM(struct adapter *padapter)
(pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
);
if (p != NULL)
- offset += tmp_len+2;
+ offset += tmp_len + 2;
/* DS Parameter Set IE, len =3 */
offset += 3;
@@ -135,7 +134,7 @@ static void update_BCNTIM(struct adapter *padapter)
*dst_ie++ = _TIM_IE_;
- if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fe))
+ if ((pstapriv->tim_bitmap & 0xff00) && (pstapriv->tim_bitmap & 0x00fe))
tim_ielen = 5;
else
tim_ielen = 4;
@@ -143,9 +142,9 @@ static void update_BCNTIM(struct adapter *padapter)
*dst_ie++ = tim_ielen;
*dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM peroid */
+ *dst_ie++ = 1;/* DTIM period */
- if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
+ if (pstapriv->tim_bitmap & BIT(0))/* for bc/mc frames */
*dst_ie++ = BIT(0);/* bitmap ctrl */
else
*dst_ie++ = 0;
@@ -153,7 +152,7 @@ static void update_BCNTIM(struct adapter *padapter)
if (tim_ielen == 4) {
__le16 pvb;
- if (pstapriv->tim_bitmap&0xff00)
+ if (pstapriv->tim_bitmap & 0xff00)
pvb = cpu_to_le16(pstapriv->tim_bitmap >> 8);
else
pvb = tim_bitmap_le;
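Worked example for the TIM construction above (illustrative values): with pstapriv->tim_bitmap = 0x0100 (only AID 8 has buffered frames, nothing queued for bc/mc), the 0xff00 test is true but the 0x00fe test is not, so tim_ielen = 4 and the element body becomes DTIM count 0, DTIM period 1, bitmap control 0x00, and a one-byte partial virtual bitmap of 0x01 (tim_bitmap >> 8).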
@@ -188,8 +187,8 @@ u8 chk_sta_is_alive(struct sta_info *psta)
/* STA_RX_PKTS_ARG(psta) */
, STA_RX_PKTS_DIFF_ARG(psta)
, psta->expire_to
- , psta->state&WIFI_SLEEP_STATE?"PS, ":""
- , psta->state&WIFI_STA_ALIVE_CHK_STATE?"SAC, ":""
+ , psta->state & WIFI_SLEEP_STATE ? "PS, " : ""
+ , psta->state & WIFI_STA_ALIVE_CHK_STATE ? "SAC, " : ""
, psta->sleepq_len
);
#endif
@@ -292,7 +291,7 @@ void expire_timeout_chk(struct adapter *padapter)
if (psta->state & WIFI_SLEEP_STATE) {
if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
- /* to check if alive by another methods if staion is at ps mode. */
+ /* to check if alive by another methods if station is at ps mode. */
psta->expire_to = pstapriv->expire_to;
psta->state |= WIFI_STA_ALIVE_CHK_STATE;
@@ -325,10 +324,10 @@ void expire_timeout_chk(struct adapter *padapter)
updated = ap_free_sta(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
} else {
/* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
- if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt)
+ if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt)
&& padapter->xmitpriv.free_xmitframe_cnt < ((
- NR_XMITFRAME/pstapriv->asoc_list_cnt
- )/2)
+ NR_XMITFRAME / pstapriv->asoc_list_cnt
+ ) / 2)
) {
DBG_871X(
"%s sta:"MAC_FMT", sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n",
@@ -586,8 +585,8 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
phtpriv_sta->rx_ampdu_min_spacing = (
- phtpriv_sta->ht_cap.ampdu_params_info&IEEE80211_HT_CAP_AMPDU_DENSITY
- )>>2;
+ phtpriv_sta->ht_cap.ampdu_params_info & IEEE80211_HT_CAP_AMPDU_DENSITY
+ ) >> 2;
/* bwmode */
if ((
@@ -782,8 +781,8 @@ void start_bss_network(struct adapter *padapter, u8 *pbuf)
/* check if there is wps ie, */
/* if there is wpsie in beacon, the hostapd will update beacon twice when stating hostapd, */
/* and at first time the security ie (RSN/WPA IE) will not include in beacon. */
- if (!rtw_get_wps_ie(pnetwork->IEs+_FIXED_IE_LENGTH_,
- pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
+ if (!rtw_get_wps_ie(pnetwork->IEs + _FIXED_IE_LENGTH_,
+ pnetwork->IELength - _FIXED_IE_LENGTH_, NULL, NULL))
pmlmeext->bstart_bss = true;
/* todo: update wmm, ht cap */
@@ -861,7 +860,7 @@ void start_bss_network(struct adapter *padapter, u8 *pbuf)
(pnetwork->IELength - sizeof(struct ndis_802_11_fix_ie))
);
if (p && ie_len) {
- pht_info = (struct HT_info_element *)(p+2);
+ pht_info = (struct HT_info_element *)(p + 2);
if (cur_channel > 14) {
if ((pregpriv->bw_mode & 0xf0) > 0)
@@ -916,7 +915,7 @@ void start_bss_network(struct adapter *padapter, u8 *pbuf)
UpdateBrateTbl(padapter, pnetwork->SupportedRates);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
- /* udpate capability after cur_wireless_mode updated */
+ /* update capability after cur_wireless_mode updated */
update_capinfo(
padapter,
rtw_get_capability((struct wlan_bssid_ex *)pnetwork)
@@ -1015,7 +1014,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->Ssid.SsidLength = ie_len;
}
- /* chnnel */
+ /* channel */
channel = 0;
pbss_network->Configuration.Length = 0;
p = rtw_get_ie(
@@ -1037,7 +1036,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
(pbss_network->IELength - _BEACON_IE_OFFSET_)
);
if (p != NULL) {
- memcpy(supportRate, p+2, ie_len);
+ memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
@@ -1049,7 +1048,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->IELength - _BEACON_IE_OFFSET_
);
if (p != NULL) {
- memcpy(supportRate+supportRateNum, p+2, ie_len);
+ memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
}
@@ -1088,7 +1087,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (p && ie_len > 0) {
if (rtw_parse_wpa2_ie(
p,
- ie_len+2,
+ ie_len + 2,
&group_cipher,
&pairwise_cipher,
NULL
@@ -1115,10 +1114,10 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
&ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2))
);
- if ((p) && (!memcmp(p+2, OUI1, 4))) {
+ if ((p) && (!memcmp(p + 2, OUI1, 4))) {
if (rtw_parse_wpa_ie(
p,
- ie_len+2,
+ ie_len + 2,
&group_cipher,
&pairwise_cipher,
NULL
@@ -1151,10 +1150,10 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
&ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2))
);
- if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
+ if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
- *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+ *(p + 8) |= BIT(7);/* QoS Info, support U-APSD */
/* disable all ACM bits since the WMM admission control is not supported */
*(p + 10) &= ~BIT(4); /* BE */
@@ -1180,7 +1179,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (p && ie_len > 0) {
u8 rf_type = 0;
u8 max_rx_ampdu_factor = 0;
- struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p+2);
+ struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
pHT_caps_ie = p;
@@ -1205,14 +1204,14 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_RX_STBC_3R));
pht_cap->ampdu_params_info &= ~(
- IEEE80211_HT_CAP_AMPDU_FACTOR|IEEE80211_HT_CAP_AMPDU_DENSITY
+ IEEE80211_HT_CAP_AMPDU_FACTOR | IEEE80211_HT_CAP_AMPDU_DENSITY
);
if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
(psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
} else {
- pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00);
}
rtw_hal_get_def_var(
@@ -1230,7 +1229,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pht_cap->supp_mcs_set[1] = 0x0;
}
- memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+ memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
}
/* parsing HT_INFO_IE */
@@ -1265,8 +1264,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pmlmepriv->htpriv.ht_option = false;
- if ((psecuritypriv->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
- (psecuritypriv->wpa_pairwise_cipher&WPA_CIPHER_TKIP)) {
+ if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
+ (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
/* todo: */
/* ht_cap = false; */
}
@@ -1341,7 +1340,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
MAC_ARG(addr)
);
- if ((NUM_ACL-1) < pacl_list->num)
+ if ((NUM_ACL - 1) < pacl_list->num)
return (-1);
spin_lock_bh(&(pacl_node_q->lock));
@@ -1454,7 +1453,7 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree((u8 *)ph2c);
res = _FAIL;
goto exit;
}
@@ -1604,10 +1603,10 @@ static void update_bcn_erpinfo_ie(struct adapter *padapter)
struct ndis_80211_var_ie *pIE = (struct ndis_80211_var_ie *)p;
if (pmlmepriv->num_sta_non_erp == 1)
- pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION;
+ pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT | RTW_ERP_INFO_USE_PROTECTION;
else
pIE->data[0] &= ~(
- RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION
+ RTW_ERP_INFO_NON_ERP_PRESENT | RTW_ERP_INFO_USE_PROTECTION
);
if (pmlmepriv->num_sta_no_short_preamble > 0)
@@ -1662,8 +1661,8 @@ static void update_bcn_wps_ie(struct adapter *padapter)
DBG_871X("%s\n", __func__);
pwps_ie = rtw_get_wps_ie(
- ie+_FIXED_IE_LENGTH_,
- ielen-_FIXED_IE_LENGTH_,
+ ie + _FIXED_IE_LENGTH_,
+ ielen - _FIXED_IE_LENGTH_,
NULL,
&wps_ielen
);
@@ -1675,7 +1674,7 @@ static void update_bcn_wps_ie(struct adapter *padapter)
if (pwps_ie_src == NULL)
return;
- wps_offset = (uint)(pwps_ie-ie);
+ wps_offset = (uint)(pwps_ie - ie);
premainder_ie = pwps_ie + wps_ielen;
@@ -1688,22 +1687,22 @@ static void update_bcn_wps_ie(struct adapter *padapter)
}
wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
- if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
- memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
+ if ((wps_offset + wps_ielen + 2 + remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen + 2);
pwps_ie += (wps_ielen+2);
if (pbackup_remainder_ie)
memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
/* update IELength */
- pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ pnetwork->IELength = wps_offset + (wps_ielen + 2) + remainder_ielen;
}
kfree(pbackup_remainder_ie);
/* deal with the case without set_tx_beacon_cmd() in update_beacon() */
#if defined(CONFIG_INTERRUPT_BASED_TXBCN)
- if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
u8 sr = 0;
rtw_get_wps_attr_content(
@@ -1827,7 +1826,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
/*
op_mode
-Set to 0 (HT pure) under the followign conditions
+Set to 0 (HT pure) under the following conditions
- all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- all STAs in the BSS are 20 MHz HT in 20 MHz BSS
Set to 1 (HT non-member protection) if there may be non-HT STAs
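A minimal sketch of the decision this comment describes (values follow the 802.11n HT Protection field; the helper name, its inputs, and the mode-3 case are illustrative assumptions, not part of this driver):
static u8 select_ht_op_mode(bool nonht_sta_associated, bool nonht_sta_nearby)
{
	if (nonht_sta_associated)
		return 3;	/* assumed: non-HT mixed mode */
	if (nonht_sta_nearby)
		return 1;	/* HT non-member protection */
	return 0;		/* HT pure */
}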
@@ -2196,7 +2195,7 @@ void rtw_sta_flush(struct adapter *padapter)
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
- if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
return;
spin_lock_bh(&pstapriv->asoc_list_lock);
@@ -2230,7 +2229,7 @@ void sta_info_update(struct adapter *padapter, struct sta_info *psta)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
/* update wmm cap. */
- if (WLAN_STA_WME&flags)
+ if (WLAN_STA_WME & flags)
psta->qos_option = 1;
else
psta->qos_option = 0;
@@ -2239,7 +2238,7 @@ void sta_info_update(struct adapter *padapter, struct sta_info *psta)
psta->qos_option = 0;
/* update 802.11n ht cap. */
- if (WLAN_STA_HT&flags) {
+ if (WLAN_STA_HT & flags) {
psta->htpriv.ht_option = true;
psta->qos_option = 1;
} else {
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index addc55706a3c..8d93c2f26890 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -402,7 +402,7 @@ int rtw_cmd_thread(void *context)
{
u8 ret;
struct cmd_obj *pcmd;
- u8 *pcmdbuf, *prspbuf;
+ u8 *pcmdbuf;
unsigned long cmd_start_time;
unsigned long cmd_process_time;
u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
@@ -414,7 +414,6 @@ int rtw_cmd_thread(void *context)
thread_enter("RTW_CMD_THREAD");
pcmdbuf = pcmdpriv->cmd_buf;
- prspbuf = pcmdpriv->rsp_buf;
pcmdpriv->stop_req = 0;
atomic_set(&(pcmdpriv->cmdthd_running), true);
@@ -768,7 +767,7 @@ exit:
u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
{
- u8 *auth, res = _SUCCESS;
+ u8 res = _SUCCESS;
uint t_len = 0;
struct wlan_bssid_ex *psecnetwork;
struct cmd_obj *pcmd;
@@ -825,7 +824,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
- auth = &psecuritypriv->authenticator_ie[0];
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
if ((psecnetwork->IELength-12) < (256-1)) {
@@ -1819,11 +1817,6 @@ static void rtw_btinfo_hdl(struct adapter *adapter, u8 *buf, u16 buf_len)
len = info->len;
}
-/* define DBG_PROC_SET_BTINFO_EVT */
-#ifdef DBG_PROC_SET_BTINFO_EVT
- btinfo_evt_dump(RTW_DBGDUMP, info);
-#endif
-
/* transform BT-FW btinfo to WiFI-FW C2H format and notify */
if (cmd_idx == BTINFO_WIFI_FETCH)
buf[1] = 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index 695a85999270..c48a8b80af4c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -132,1310 +132,3 @@ void rf_reg_dump(void *sel, struct adapter *adapter)
}
}
}
-
-#ifdef PROC_DEBUG
-ssize_t proc_set_write_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 addr, val, len;
-
- if (count < 3) {
- DBG_871X("argument size is less than 3\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
-
- if (num != 3) {
- DBG_871X("invalid write_reg parameter!\n");
- return count;
- }
-
- switch (len) {
- case 1:
- rtw_write8(padapter, addr, (u8)val);
- break;
- case 2:
- rtw_write16(padapter, addr, (u16)val);
- break;
- case 4:
- rtw_write32(padapter, addr, val);
- break;
- default:
- DBG_871X("error write length =%d", len);
- break;
- }
-
- }
-
- return count;
-
-}
-
-static u32 proc_get_read_addr = 0xeeeeeeee;
-static u32 proc_get_read_len = 0x4;
-
-int proc_get_read_reg(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- if (proc_get_read_addr == 0xeeeeeeee) {
- DBG_871X_SEL_NL(m, "address not initialized\n");
- return 0;
- }
-
- switch (proc_get_read_len) {
- case 1:
- DBG_871X_SEL_NL(m, "rtw_read8(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read8(padapter, proc_get_read_addr));
- break;
- case 2:
- DBG_871X_SEL_NL(m, "rtw_read16(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read16(padapter, proc_get_read_addr));
- break;
- case 4:
- DBG_871X_SEL_NL(m, "rtw_read32(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read32(padapter, proc_get_read_addr));
- break;
- default:
- DBG_871X_SEL_NL(m, "error read length =%d\n", proc_get_read_len);
- break;
- }
-
- return 0;
-}
-
-ssize_t proc_set_read_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- char tmp[16];
- u32 addr, len;
-
- if (count < 2) {
- DBG_871X("argument size is less than 2\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%x %x", &addr, &len);
-
- if (num != 2) {
- DBG_871X("invalid read_reg parameter!\n");
- return count;
- }
-
- proc_get_read_addr = addr;
-
- proc_get_read_len = len;
- }
-
- return count;
-
-}
-
-int proc_get_fwstate(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
-
- DBG_871X_SEL_NL(m, "fwstate = 0x%x\n", get_fwstate(pmlmepriv));
-
- return 0;
-}
-
-int proc_get_sec_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct security_priv *sec = &padapter->securitypriv;
-
- DBG_871X_SEL_NL(m, "auth_alg = 0x%x, enc_alg = 0x%x, auth_type = 0x%x, enc_type = 0x%x\n",
- sec->dot11AuthAlgrthm, sec->dot11PrivacyAlgrthm,
- sec->ndisauthtype, sec->ndisencryptstatus);
-
- DBG_871X_SEL_NL(m, "hw_decrypted =%d\n", sec->hw_decrypted);
-
-#ifdef DBG_SW_SEC_CNT
- DBG_871X_SEL_NL(m, "wep_sw_enc_cnt =%llu, %llu, %llu\n"
- , sec->wep_sw_enc_cnt_bc, sec->wep_sw_enc_cnt_mc, sec->wep_sw_enc_cnt_uc);
- DBG_871X_SEL_NL(m, "wep_sw_dec_cnt =%llu, %llu, %llu\n"
- , sec->wep_sw_dec_cnt_bc, sec->wep_sw_dec_cnt_mc, sec->wep_sw_dec_cnt_uc);
-
- DBG_871X_SEL_NL(m, "tkip_sw_enc_cnt =%llu, %llu, %llu\n"
- , sec->tkip_sw_enc_cnt_bc, sec->tkip_sw_enc_cnt_mc, sec->tkip_sw_enc_cnt_uc);
- DBG_871X_SEL_NL(m, "tkip_sw_dec_cnt =%llu, %llu, %llu\n"
- , sec->tkip_sw_dec_cnt_bc, sec->tkip_sw_dec_cnt_mc, sec->tkip_sw_dec_cnt_uc);
-
- DBG_871X_SEL_NL(m, "aes_sw_enc_cnt =%llu, %llu, %llu\n"
- , sec->aes_sw_enc_cnt_bc, sec->aes_sw_enc_cnt_mc, sec->aes_sw_enc_cnt_uc);
- DBG_871X_SEL_NL(m, "aes_sw_dec_cnt =%llu, %llu, %llu\n"
- , sec->aes_sw_dec_cnt_bc, sec->aes_sw_dec_cnt_mc, sec->aes_sw_dec_cnt_uc);
-#endif /* DBG_SW_SEC_CNT */
-
- return 0;
-}
-
-int proc_get_mlmext_state(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-
- DBG_871X_SEL_NL(m, "pmlmeinfo->state = 0x%x\n", pmlmeinfo->state);
-
- return 0;
-}
-
-int proc_get_roam_flags(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- DBG_871X_SEL_NL(m, "0x%02x\n", rtw_roam_flags(adapter));
-
- return 0;
-}
-
-ssize_t proc_set_roam_flags(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- char tmp[32];
- u8 flags;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%hhx", &flags);
-
- if (num == 1)
- rtw_assign_roam_flags(adapter, flags);
- }
-
- return count;
-
-}
-
-int proc_get_roam_param(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *mlme = &adapter->mlmepriv;
-
- DBG_871X_SEL_NL(m, "%12s %12s %11s\n", "rssi_diff_th", "scanr_exp_ms", "scan_int_ms");
- DBG_871X_SEL_NL(m, "%-12u %-12u %-11u\n"
- , mlme->roam_rssi_diff_th
- , mlme->roam_scanr_exp_ms
- , mlme->roam_scan_int_ms
- );
-
- return 0;
-}
-
-ssize_t proc_set_roam_param(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *mlme = &adapter->mlmepriv;
-
- char tmp[32];
- u8 rssi_diff_th;
- u32 scanr_exp_ms;
- u32 scan_int_ms;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%hhu %u %u", &rssi_diff_th, &scanr_exp_ms, &scan_int_ms);
-
- if (num >= 1)
- mlme->roam_rssi_diff_th = rssi_diff_th;
- if (num >= 2)
- mlme->roam_scanr_exp_ms = scanr_exp_ms;
- if (num >= 3)
- mlme->roam_scan_int_ms = scan_int_ms;
- }
-
- return count;
-
-}
-
-ssize_t proc_set_roam_tgt_addr(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- char tmp[32];
- u8 addr[ETH_ALEN];
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", addr, addr+1, addr+2, addr+3, addr+4, addr+5);
- if (num == 6)
- memcpy(adapter->mlmepriv.roam_tgt_addr, addr, ETH_ALEN);
-
- DBG_871X("set roam_tgt_addr to "MAC_FMT"\n", MAC_ARG(adapter->mlmepriv.roam_tgt_addr));
- }
-
- return count;
-}
-
-int proc_get_qos_option(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
-
- DBG_871X_SEL_NL(m, "qos_option =%d\n", pmlmepriv->qospriv.qos_option);
-
- return 0;
-}
-
-int proc_get_ht_option(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
-
- DBG_871X_SEL_NL(m, "ht_option =%d\n", pmlmepriv->htpriv.ht_option);
-
- return 0;
-}
-
-int proc_get_rf_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- DBG_871X_SEL_NL(m, "cur_ch =%d, cur_bw =%d, cur_ch_offet =%d\n",
- pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
-
- DBG_871X_SEL_NL(m, "oper_ch =%d, oper_bw =%d, oper_ch_offet =%d\n",
- rtw_get_oper_ch(padapter), rtw_get_oper_bw(padapter), rtw_get_oper_choffset(padapter));
-
- return 0;
-}
-
-int proc_get_survey_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct __queue *queue = &(pmlmepriv->scanned_queue);
- struct wlan_network *pnetwork = NULL;
- struct list_head *plist, *phead;
- s32 notify_signal;
- s16 notify_noise = 0;
- u16 index = 0;
-
- spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
- phead = get_list_head(queue);
- plist = phead ? get_next(phead) : NULL;
- if ((!phead) || (!plist)) {
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
- return 0;
- }
-
- DBG_871X_SEL_NL(m, "%5s %-17s %3s %-3s %-4s %-4s %5s %s\n", "index", "bssid", "ch", "RSSI", "SdBm", "Noise", "age", "ssid");
- while (1) {
- if (phead == plist)
- break;
-
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
-
- if (!pnetwork)
- break;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
- notify_signal = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);/*dbm*/
- } else {
- notify_signal = translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength);/*dbm*/
- }
-
- #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
- rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(pnetwork->network.Configuration.DSConfig), &(notify_noise));
- #endif
-
- DBG_871X_SEL_NL(m, "%5d "MAC_FMT" %3d %3d %4d %4d %5d %s\n",
- ++index,
- MAC_ARG(pnetwork->network.MacAddress),
- pnetwork->network.Configuration.DSConfig,
- (int)pnetwork->network.Rssi,
- notify_signal,
- notify_noise,
- jiffies_to_msecs(jiffies - pnetwork->last_scanned),
- /*translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength),*/
- pnetwork->network.Ssid.Ssid);
- plist = get_next(plist);
- }
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
-
- return 0;
-}
-
-int proc_get_ap_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct sta_info *psta;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_network *cur_network = &(pmlmepriv->cur_network);
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
- if (psta) {
- int i;
- struct recv_reorder_ctrl *preorder_ctrl;
-
- DBG_871X_SEL_NL(m, "SSID =%s\n", cur_network->network.Ssid.Ssid);
- DBG_871X_SEL_NL(m, "sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr));
- DBG_871X_SEL_NL(m, "cur_channel =%d, cur_bwmode =%d, cur_ch_offset =%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
- DBG_871X_SEL_NL(m, "wireless_mode = 0x%x, rtsen =%d, cts2slef =%d\n", psta->wireless_mode, psta->rtsen, psta->cts2self);
- DBG_871X_SEL_NL(m, "state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
- DBG_871X_SEL_NL(m, "qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
- DBG_871X_SEL_NL(m, "bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n", psta->bw_mode, psta->htpriv.ch_offset, psta->htpriv.sgi_20m, psta->htpriv.sgi_40m);
- DBG_871X_SEL_NL(m, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
- DBG_871X_SEL_NL(m, "agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
- DBG_871X_SEL_NL(m, "ldpc_cap = 0x%x, stbc_cap = 0x%x, beamform_cap = 0x%x\n", psta->htpriv.ldpc_cap, psta->htpriv.stbc_cap, psta->htpriv.beamform_cap);
-
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- if (preorder_ctrl->enable) {
- DBG_871X_SEL_NL(m, "tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
- }
- }
-
- } else {
- DBG_871X_SEL_NL(m, "can't get sta's macaddr, cur_network's macaddr:" MAC_FMT "\n", MAC_ARG(cur_network->network.MacAddress));
- }
-
- return 0;
-}
-
-int proc_get_adapter_state(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- DBG_871X_SEL_NL(m, "name =%s, bSurpriseRemoved =%d, bDriverStopped =%d\n",
- dev->name, padapter->bSurpriseRemoved, padapter->bDriverStopped);
-
- return 0;
-}
-
-int proc_get_trx_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- int i;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- struct hw_xmit *phwxmit;
-
- DBG_871X_SEL_NL(m, "free_xmitbuf_cnt =%d, free_xmitframe_cnt =%d\n"
- , pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt);
- DBG_871X_SEL_NL(m, "free_ext_xmitbuf_cnt =%d, free_xframe_ext_cnt =%d\n"
- , pxmitpriv->free_xmit_extbuf_cnt, pxmitpriv->free_xframe_ext_cnt);
- DBG_871X_SEL_NL(m, "free_recvframe_cnt =%d\n"
- , precvpriv->free_recvframe_cnt);
-
- for (i = 0; i < 4; i++) {
- phwxmit = pxmitpriv->hwxmits + i;
- DBG_871X_SEL_NL(m, "%d, hwq.accnt =%d\n", i, phwxmit->accnt);
- }
-
- return 0;
-}
-
-int proc_get_rate_ctl(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- if (adapter->fix_rate != 0xff) {
- DBG_871X_SEL_NL(m, "FIX\n");
- DBG_871X_SEL_NL(m, "0x%02x\n", adapter->fix_rate);
- } else {
- DBG_871X_SEL_NL(m, "RA\n");
- }
-
- return 0;
-}
-
-ssize_t proc_set_rate_ctl(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u8 fix_rate;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%hhx", &fix_rate);
-
- if (num >= 1)
- adapter->fix_rate = fix_rate;
- }
-
- return count;
-}
-
-ssize_t proc_set_fwdl_test_case(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- char tmp[32];
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%hhu %hhu", &g_fwdl_chksum_fail, &g_fwdl_wintint_rdy_fail);
- }
-
- return count;
-}
-
-ssize_t proc_set_wait_hiq_empty(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- char tmp[32];
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%u", &g_wait_hiq_empty);
- }
-
- return count;
-}
-
-int proc_get_suspend_resume_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct dvobj_priv *dvobj = padapter->dvobj;
- struct debug_priv *pdbgpriv = &dvobj->drv_dbg;
-
- DBG_871X_SEL_NL(m, "dbg_sdio_alloc_irq_cnt =%d\n", pdbgpriv->dbg_sdio_alloc_irq_cnt);
- DBG_871X_SEL_NL(m, "dbg_sdio_free_irq_cnt =%d\n", pdbgpriv->dbg_sdio_free_irq_cnt);
- DBG_871X_SEL_NL(m, "dbg_sdio_alloc_irq_error_cnt =%d\n", pdbgpriv->dbg_sdio_alloc_irq_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_sdio_free_irq_error_cnt =%d\n", pdbgpriv->dbg_sdio_free_irq_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_sdio_init_error_cnt =%d\n", pdbgpriv->dbg_sdio_init_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_sdio_deinit_error_cnt =%d\n", pdbgpriv->dbg_sdio_deinit_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_suspend_error_cnt =%d\n", pdbgpriv->dbg_suspend_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_suspend_cnt =%d\n", pdbgpriv->dbg_suspend_cnt);
- DBG_871X_SEL_NL(m, "dbg_resume_cnt =%d\n", pdbgpriv->dbg_resume_cnt);
- DBG_871X_SEL_NL(m, "dbg_resume_error_cnt =%d\n", pdbgpriv->dbg_resume_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_deinit_fail_cnt =%d\n", pdbgpriv->dbg_deinit_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_carddisable_cnt =%d\n", pdbgpriv->dbg_carddisable_cnt);
- DBG_871X_SEL_NL(m, "dbg_ps_insuspend_cnt =%d\n", pdbgpriv->dbg_ps_insuspend_cnt);
- DBG_871X_SEL_NL(m, "dbg_dev_unload_inIPS_cnt =%d\n", pdbgpriv->dbg_dev_unload_inIPS_cnt);
- DBG_871X_SEL_NL(m, "dbg_scan_pwr_state_cnt =%d\n", pdbgpriv->dbg_scan_pwr_state_cnt);
- DBG_871X_SEL_NL(m, "dbg_downloadfw_pwr_state_cnt =%d\n", pdbgpriv->dbg_downloadfw_pwr_state_cnt);
- DBG_871X_SEL_NL(m, "dbg_carddisable_error_cnt =%d\n", pdbgpriv->dbg_carddisable_error_cnt);
- DBG_871X_SEL_NL(m, "dbg_fw_read_ps_state_fail_cnt =%d\n", pdbgpriv->dbg_fw_read_ps_state_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_leave_ips_fail_cnt =%d\n", pdbgpriv->dbg_leave_ips_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_leave_lps_fail_cnt =%d\n", pdbgpriv->dbg_leave_lps_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_h2c_leave32k_fail_cnt =%d\n", pdbgpriv->dbg_h2c_leave32k_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_diswow_dload_fw_fail_cnt =%d\n", pdbgpriv->dbg_diswow_dload_fw_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_enwow_dload_fw_fail_cnt =%d\n", pdbgpriv->dbg_enwow_dload_fw_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_ips_drvopen_fail_cnt =%d\n", pdbgpriv->dbg_ips_drvopen_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_poll_fail_cnt =%d\n", pdbgpriv->dbg_poll_fail_cnt);
- DBG_871X_SEL_NL(m, "dbg_rpwm_toggle_cnt =%d\n", pdbgpriv->dbg_rpwm_toggle_cnt);
- DBG_871X_SEL_NL(m, "dbg_rpwm_timeout_fail_cnt =%d\n", pdbgpriv->dbg_rpwm_timeout_fail_cnt);
-
- return 0;
-}
-
-#ifdef CONFIG_DBG_COUNTER
-
-int proc_get_rx_logs(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct rx_logs *rx_logs = &padapter->rx_logs;
-
- DBG_871X_SEL_NL(m,
- "intf_rx =%d\n"
- "intf_rx_err_recvframe =%d\n"
- "intf_rx_err_skb =%d\n"
- "intf_rx_report =%d\n"
- "core_rx =%d\n"
- "core_rx_pre =%d\n"
- "core_rx_pre_ver_err =%d\n"
- "core_rx_pre_mgmt =%d\n"
- "core_rx_pre_mgmt_err_80211w =%d\n"
- "core_rx_pre_mgmt_err =%d\n"
- "core_rx_pre_ctrl =%d\n"
- "core_rx_pre_ctrl_err =%d\n"
- "core_rx_pre_data =%d\n"
- "core_rx_pre_data_wapi_seq_err =%d\n"
- "core_rx_pre_data_wapi_key_err =%d\n"
- "core_rx_pre_data_handled =%d\n"
- "core_rx_pre_data_err =%d\n"
- "core_rx_pre_data_unknown =%d\n"
- "core_rx_pre_unknown =%d\n"
- "core_rx_enqueue =%d\n"
- "core_rx_dequeue =%d\n"
- "core_rx_post =%d\n"
- "core_rx_post_decrypt =%d\n"
- "core_rx_post_decrypt_wep =%d\n"
- "core_rx_post_decrypt_tkip =%d\n"
- "core_rx_post_decrypt_aes =%d\n"
- "core_rx_post_decrypt_wapi =%d\n"
- "core_rx_post_decrypt_hw =%d\n"
- "core_rx_post_decrypt_unknown =%d\n"
- "core_rx_post_decrypt_err =%d\n"
- "core_rx_post_defrag_err =%d\n"
- "core_rx_post_portctrl_err =%d\n"
- "core_rx_post_indicate =%d\n"
- "core_rx_post_indicate_in_oder =%d\n"
- "core_rx_post_indicate_reoder =%d\n"
- "core_rx_post_indicate_err =%d\n"
- "os_indicate =%d\n"
- "os_indicate_ap_mcast =%d\n"
- "os_indicate_ap_forward =%d\n"
- "os_indicate_ap_self =%d\n"
- "os_indicate_err =%d\n"
- "os_netif_ok =%d\n"
- "os_netif_err =%d\n",
- rx_logs->intf_rx,
- rx_logs->intf_rx_err_recvframe,
- rx_logs->intf_rx_err_skb,
- rx_logs->intf_rx_report,
- rx_logs->core_rx,
- rx_logs->core_rx_pre,
- rx_logs->core_rx_pre_ver_err,
- rx_logs->core_rx_pre_mgmt,
- rx_logs->core_rx_pre_mgmt_err_80211w,
- rx_logs->core_rx_pre_mgmt_err,
- rx_logs->core_rx_pre_ctrl,
- rx_logs->core_rx_pre_ctrl_err,
- rx_logs->core_rx_pre_data,
- rx_logs->core_rx_pre_data_wapi_seq_err,
- rx_logs->core_rx_pre_data_wapi_key_err,
- rx_logs->core_rx_pre_data_handled,
- rx_logs->core_rx_pre_data_err,
- rx_logs->core_rx_pre_data_unknown,
- rx_logs->core_rx_pre_unknown,
- rx_logs->core_rx_enqueue,
- rx_logs->core_rx_dequeue,
- rx_logs->core_rx_post,
- rx_logs->core_rx_post_decrypt,
- rx_logs->core_rx_post_decrypt_wep,
- rx_logs->core_rx_post_decrypt_tkip,
- rx_logs->core_rx_post_decrypt_aes,
- rx_logs->core_rx_post_decrypt_wapi,
- rx_logs->core_rx_post_decrypt_hw,
- rx_logs->core_rx_post_decrypt_unknown,
- rx_logs->core_rx_post_decrypt_err,
- rx_logs->core_rx_post_defrag_err,
- rx_logs->core_rx_post_portctrl_err,
- rx_logs->core_rx_post_indicate,
- rx_logs->core_rx_post_indicate_in_oder,
- rx_logs->core_rx_post_indicate_reoder,
- rx_logs->core_rx_post_indicate_err,
- rx_logs->os_indicate,
- rx_logs->os_indicate_ap_mcast,
- rx_logs->os_indicate_ap_forward,
- rx_logs->os_indicate_ap_self,
- rx_logs->os_indicate_err,
- rx_logs->os_netif_ok,
- rx_logs->os_netif_err
- );
-
- return 0;
-}
-
-int proc_get_tx_logs(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct tx_logs *tx_logs = &padapter->tx_logs;
-
- DBG_871X_SEL_NL(m,
- "os_tx =%d\n"
- "os_tx_err_up =%d\n"
- "os_tx_err_xmit =%d\n"
- "os_tx_m2u =%d\n"
- "os_tx_m2u_ignore_fw_linked =%d\n"
- "os_tx_m2u_ignore_self =%d\n"
- "os_tx_m2u_entry =%d\n"
- "os_tx_m2u_entry_err_xmit =%d\n"
- "os_tx_m2u_entry_err_skb =%d\n"
- "os_tx_m2u_stop =%d\n"
- "core_tx =%d\n"
- "core_tx_err_pxmitframe =%d\n"
- "core_tx_err_brtx =%d\n"
- "core_tx_upd_attrib =%d\n"
- "core_tx_upd_attrib_adhoc =%d\n"
- "core_tx_upd_attrib_sta =%d\n"
- "core_tx_upd_attrib_ap =%d\n"
- "core_tx_upd_attrib_unknown =%d\n"
- "core_tx_upd_attrib_dhcp =%d\n"
- "core_tx_upd_attrib_icmp =%d\n"
- "core_tx_upd_attrib_active =%d\n"
- "core_tx_upd_attrib_err_ucast_sta =%d\n"
- "core_tx_upd_attrib_err_ucast_ap_link =%d\n"
- "core_tx_upd_attrib_err_sta =%d\n"
- "core_tx_upd_attrib_err_link =%d\n"
- "core_tx_upd_attrib_err_sec =%d\n"
- "core_tx_ap_enqueue_warn_fwstate =%d\n"
- "core_tx_ap_enqueue_warn_sta =%d\n"
- "core_tx_ap_enqueue_warn_nosta =%d\n"
- "core_tx_ap_enqueue_warn_link =%d\n"
- "core_tx_ap_enqueue_warn_trigger =%d\n"
- "core_tx_ap_enqueue_mcast =%d\n"
- "core_tx_ap_enqueue_ucast =%d\n"
- "core_tx_ap_enqueue =%d\n"
- "intf_tx =%d\n"
- "intf_tx_pending_ac =%d\n"
- "intf_tx_pending_fw_under_survey =%d\n"
- "intf_tx_pending_fw_under_linking =%d\n"
- "intf_tx_pending_xmitbuf =%d\n"
- "intf_tx_enqueue =%d\n"
- "core_tx_enqueue =%d\n"
- "core_tx_enqueue_class =%d\n"
- "core_tx_enqueue_class_err_sta =%d\n"
- "core_tx_enqueue_class_err_nosta =%d\n"
- "core_tx_enqueue_class_err_fwlink =%d\n"
- "intf_tx_direct =%d\n"
- "intf_tx_direct_err_coalesce =%d\n"
- "intf_tx_dequeue =%d\n"
- "intf_tx_dequeue_err_coalesce =%d\n"
- "intf_tx_dump_xframe =%d\n"
- "intf_tx_dump_xframe_err_txdesc =%d\n"
- "intf_tx_dump_xframe_err_port =%d\n",
- tx_logs->os_tx,
- tx_logs->os_tx_err_up,
- tx_logs->os_tx_err_xmit,
- tx_logs->os_tx_m2u,
- tx_logs->os_tx_m2u_ignore_fw_linked,
- tx_logs->os_tx_m2u_ignore_self,
- tx_logs->os_tx_m2u_entry,
- tx_logs->os_tx_m2u_entry_err_xmit,
- tx_logs->os_tx_m2u_entry_err_skb,
- tx_logs->os_tx_m2u_stop,
- tx_logs->core_tx,
- tx_logs->core_tx_err_pxmitframe,
- tx_logs->core_tx_err_brtx,
- tx_logs->core_tx_upd_attrib,
- tx_logs->core_tx_upd_attrib_adhoc,
- tx_logs->core_tx_upd_attrib_sta,
- tx_logs->core_tx_upd_attrib_ap,
- tx_logs->core_tx_upd_attrib_unknown,
- tx_logs->core_tx_upd_attrib_dhcp,
- tx_logs->core_tx_upd_attrib_icmp,
- tx_logs->core_tx_upd_attrib_active,
- tx_logs->core_tx_upd_attrib_err_ucast_sta,
- tx_logs->core_tx_upd_attrib_err_ucast_ap_link,
- tx_logs->core_tx_upd_attrib_err_sta,
- tx_logs->core_tx_upd_attrib_err_link,
- tx_logs->core_tx_upd_attrib_err_sec,
- tx_logs->core_tx_ap_enqueue_warn_fwstate,
- tx_logs->core_tx_ap_enqueue_warn_sta,
- tx_logs->core_tx_ap_enqueue_warn_nosta,
- tx_logs->core_tx_ap_enqueue_warn_link,
- tx_logs->core_tx_ap_enqueue_warn_trigger,
- tx_logs->core_tx_ap_enqueue_mcast,
- tx_logs->core_tx_ap_enqueue_ucast,
- tx_logs->core_tx_ap_enqueue,
- tx_logs->intf_tx,
- tx_logs->intf_tx_pending_ac,
- tx_logs->intf_tx_pending_fw_under_survey,
- tx_logs->intf_tx_pending_fw_under_linking,
- tx_logs->intf_tx_pending_xmitbuf,
- tx_logs->intf_tx_enqueue,
- tx_logs->core_tx_enqueue,
- tx_logs->core_tx_enqueue_class,
- tx_logs->core_tx_enqueue_class_err_sta,
- tx_logs->core_tx_enqueue_class_err_nosta,
- tx_logs->core_tx_enqueue_class_err_fwlink,
- tx_logs->intf_tx_direct,
- tx_logs->intf_tx_direct_err_coalesce,
- tx_logs->intf_tx_dequeue,
- tx_logs->intf_tx_dequeue_err_coalesce,
- tx_logs->intf_tx_dump_xframe,
- tx_logs->intf_tx_dump_xframe_err_txdesc,
- tx_logs->intf_tx_dump_xframe_err_port
- );
-
- return 0;
-}
-
-int proc_get_int_logs(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- DBG_871X_SEL_NL(m,
- "all =%d\n"
- "err =%d\n"
- "tbdok =%d\n"
- "tbder =%d\n"
- "bcnderr =%d\n"
- "bcndma =%d\n"
- "bcndma_e =%d\n"
- "rx =%d\n"
- "rx_rdu =%d\n"
- "rx_fovw =%d\n"
- "txfovw =%d\n"
- "mgntok =%d\n"
- "highdok =%d\n"
- "bkdok =%d\n"
- "bedok =%d\n"
- "vidok =%d\n"
- "vodok =%d\n",
- padapter->int_logs.all,
- padapter->int_logs.err,
- padapter->int_logs.tbdok,
- padapter->int_logs.tbder,
- padapter->int_logs.bcnderr,
- padapter->int_logs.bcndma,
- padapter->int_logs.bcndma_e,
- padapter->int_logs.rx,
- padapter->int_logs.rx_rdu,
- padapter->int_logs.rx_fovw,
- padapter->int_logs.txfovw,
- padapter->int_logs.mgntok,
- padapter->int_logs.highdok,
- padapter->int_logs.bkdok,
- padapter->int_logs.bedok,
- padapter->int_logs.vidok,
- padapter->int_logs.vodok
- );
-
- return 0;
-}
-
-#endif /* CONFIG_DBG_COUNTER*/
-
-int proc_get_rx_signal(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- DBG_871X_SEL_NL(m, "rssi:%d\n", padapter->recvpriv.rssi);
- /*DBG_871X_SEL_NL(m, "rxpwdb:%d\n", padapter->recvpriv.rxpwdb);*/
- DBG_871X_SEL_NL(m, "signal_strength:%u\n", padapter->recvpriv.signal_strength);
- DBG_871X_SEL_NL(m, "signal_qual:%u\n", padapter->recvpriv.signal_qual);
- DBG_871X_SEL_NL(m, "noise:%d\n", padapter->recvpriv.noise);
- rtw_odm_get_perpkt_rssi(m, padapter);
- #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
- rtw_get_raw_rssi_info(m, padapter);
- #endif
- return 0;
-}
-
-
-int proc_get_hw_status(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct dvobj_priv *dvobj = padapter->dvobj;
- struct debug_priv *pdbgpriv = &dvobj->drv_dbg;
-
- DBG_871X_SEL_NL(m, "RX FIFO full count: last_time =%lld, current_time =%lld, differential =%lld\n"
- , pdbgpriv->dbg_rx_fifo_last_overflow, pdbgpriv->dbg_rx_fifo_curr_overflow, pdbgpriv->dbg_rx_fifo_diff_overflow);
-
- return 0;
-}
-
-ssize_t proc_set_rx_signal(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 is_signal_dbg, signal_strength;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
-
- is_signal_dbg = is_signal_dbg == 0?0:1;
-
- if (is_signal_dbg && num != 2)
- return count;
-
- signal_strength = signal_strength > 100?100:signal_strength;
-
- padapter->recvpriv.is_signal_dbg = is_signal_dbg;
- padapter->recvpriv.signal_strength_dbg = signal_strength;
-
- if (is_signal_dbg)
- DBG_871X("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
- else
- DBG_871X("set %s\n", "HW_SIGNAL_STRENGTH");
-
- }
-
- return count;
-
-}
-
-int proc_get_ht_enable(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pregpriv)
- DBG_871X_SEL_NL(m, "%d\n", pregpriv->ht_enable);
-
- return 0;
-}
-
-ssize_t proc_set_ht_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && mode < 2) {
- pregpriv->ht_enable = mode;
- printk("ht_enable =%d\n", pregpriv->ht_enable);
- }
- }
-
- return count;
-
-}
-
-int proc_get_bw_mode(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pregpriv)
- DBG_871X_SEL_NL(m, "0x%02x\n", pregpriv->bw_mode);
-
- return 0;
-}
-
-ssize_t proc_set_bw_mode(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && mode < 2) {
-
- pregpriv->bw_mode = mode;
- printk("bw_mode =%d\n", mode);
-
- }
- }
-
- return count;
-
-}
-
-int proc_get_ampdu_enable(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pregpriv)
- DBG_871X_SEL_NL(m, "%d\n", pregpriv->ampdu_enable);
-
- return 0;
-}
-
-ssize_t proc_set_ampdu_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && mode < 3) {
- pregpriv->ampdu_enable = mode;
- printk("ampdu_enable =%d\n", mode);
- }
-
- }
-
- return count;
-
-}
-
-int proc_get_rx_ampdu(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
-
- if (pregpriv)
- DBG_871X_SEL_NL(m,
- "accept_addba_req = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n",
- pmlmeinfo->accept_addba_req
- );
-
- return 0;
-}
-
-ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && mode < 2) {
- pmlmeinfo->accept_addba_req = mode;
- DBG_871X("pmlmeinfo->accept_addba_req =%d\n",
- pmlmeinfo->accept_addba_req);
- if (mode == 0) {
- /*tear down Rx AMPDU*/
- send_delba(padapter, 0, get_my_bssid(&(pmlmeinfo->network)));/* recipient*/
- }
- }
-
- }
-
- return count;
-}
-
-int proc_get_en_fwps(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pregpriv)
- DBG_871X_SEL_NL(m, "check_fw_ps = %d , 1:enable get FW PS state , 0: disable get FW PS state\n"
- , pregpriv->check_fw_ps);
-
- return 0;
-}
-
-ssize_t proc_set_en_fwps(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && mode < 2) {
- pregpriv->check_fw_ps = mode;
- DBG_871X("pregpriv->check_fw_ps =%d\n", pregpriv->check_fw_ps);
- }
- }
- return count;
-}
-
-int proc_get_rx_stbc(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- if (pregpriv)
- DBG_871X_SEL_NL(m, "%d\n", pregpriv->rx_stbc);
-
- return 0;
-}
-
-ssize_t proc_set_rx_stbc(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%d ", &mode);
-
- if (pregpriv && (mode == 0 || mode == 1 ||
- mode == 2 || mode == 3)) {
- pregpriv->rx_stbc = mode;
- printk("rx_stbc =%d\n", mode);
- }
- }
-
- return count;
-
-}
-
-int proc_get_rssi_disp(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-ssize_t proc_set_rssi_disp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 enable = 0;
-
- if (count < 1) {
- DBG_8192C("argument size is less than 1\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x", &enable);
-
- if (num != 1) {
- DBG_8192C("invalid set_rssi_disp parameter!\n");
- return count;
- }
-
- if (enable) {
- DBG_8192C("Linked info Function Enable\n");
- padapter->bLinkInfoDump = enable;
- } else {
- DBG_8192C("Linked info Function Disable\n");
- padapter->bLinkInfoDump = 0;
- }
- }
- return count;
-}
-
-int proc_get_all_sta_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct sta_info *psta;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct sta_priv *pstapriv = &padapter->stapriv;
- int i, j;
- struct list_head *plist, *phead;
- struct recv_reorder_ctrl *preorder_ctrl;
-
- DBG_871X_SEL_NL(m, "sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- for (i = 0; i < NUM_STA; i++) {
- phead = &(pstapriv->sta_hash[i]);
- plist = get_next(phead);
-
- while (phead != plist) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
-
- plist = get_next(plist);
-
- DBG_871X_SEL_NL(m, "==============================\n");
- DBG_871X_SEL_NL(m, "sta's macaddr:" MAC_FMT "\n",
- MAC_ARG(psta->hwaddr));
- DBG_871X_SEL_NL(m, "rtsen =%d, cts2slef =%d\n",
- psta->rtsen, psta->cts2self);
- DBG_871X_SEL_NL(m, "state = 0x%x, aid =%d, macid =%d, raid =%d\n",
- psta->state, psta->aid, psta->mac_id,
- psta->raid);
- DBG_871X_SEL_NL(m, "qos_en =%d, ht_en =%d, init_rate =%d\n",
- psta->qos_option,
- psta->htpriv.ht_option,
- psta->init_rate);
- DBG_871X_SEL_NL(m, "bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n",
- psta->bw_mode, psta->htpriv.ch_offset,
- psta->htpriv.sgi_20m,
- psta->htpriv.sgi_40m);
- DBG_871X_SEL_NL(m, "ampdu_enable = %d\n",
- psta->htpriv.ampdu_enable);
- DBG_871X_SEL_NL(m, "agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n",
- psta->htpriv.agg_enable_bitmap,
- psta->htpriv.candidate_tid_bitmap);
- DBG_871X_SEL_NL(m, "sleepq_len =%d\n",
- psta->sleepq_len);
- DBG_871X_SEL_NL(m, "sta_xmitpriv.vo_q_qcnt =%d\n",
- psta->sta_xmitpriv.vo_q.qcnt);
- DBG_871X_SEL_NL(m, "sta_xmitpriv.vi_q_qcnt =%d\n",
- psta->sta_xmitpriv.vi_q.qcnt);
- DBG_871X_SEL_NL(m, "sta_xmitpriv.be_q_qcnt =%d\n",
- psta->sta_xmitpriv.be_q.qcnt);
- DBG_871X_SEL_NL(m, "sta_xmitpriv.bk_q_qcnt =%d\n",
- psta->sta_xmitpriv.bk_q.qcnt);
-
- DBG_871X_SEL_NL(m, "capability = 0x%x\n",
- psta->capability);
- DBG_871X_SEL_NL(m, "flags = 0x%x\n", psta->flags);
- DBG_871X_SEL_NL(m, "wpa_psk = 0x%x\n", psta->wpa_psk);
- DBG_871X_SEL_NL(m, "wpa2_group_cipher = 0x%x\n",
- psta->wpa2_group_cipher);
- DBG_871X_SEL_NL(m, "wpa2_pairwise_cipher = 0x%x\n",
- psta->wpa2_pairwise_cipher);
- DBG_871X_SEL_NL(m, "qos_info = 0x%x\n", psta->qos_info);
- DBG_871X_SEL_NL(m, "dot118021XPrivacy = 0x%x\n",
- psta->dot118021XPrivacy);
-
- for (j = 0; j < 16; j++) {
- preorder_ctrl = &psta->recvreorder_ctrl[j];
- if (preorder_ctrl->enable)
- DBG_871X_SEL_NL(m, "tid =%d, indicate_seq =%d\n",
- j, preorder_ctrl->indicate_seq);
- }
- DBG_871X_SEL_NL(m, "==============================\n");
- }
- }
-
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- return 0;
-}
-
-int proc_get_btcoex_dbg(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter;
- char buf[512] = {0};
- padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- hal_btcoex_GetDBG(padapter, buf, 512);
-
- DBG_871X_SEL(m, "%s", buf);
-
- return 0;
-}
-
-ssize_t proc_set_btcoex_dbg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter;
- u8 tmp[80] = {0};
- u32 module[2] = {0};
- u32 num;
-
- padapter = (struct adapter *)rtw_netdev_priv(dev);
-
-/* DBG_871X("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter));*/
-
- if (NULL == buffer) {
- DBG_871X(FUNC_ADPT_FMT ": input buffer is NULL!\n",
- FUNC_ADPT_ARG(padapter));
-
- return -EFAULT;
- }
-
- if (count < 1) {
- DBG_871X(FUNC_ADPT_FMT ": input length is 0!\n",
- FUNC_ADPT_ARG(padapter));
-
- return -EFAULT;
- }
-
- num = count;
- if (num > (sizeof(tmp) - 1))
- num = (sizeof(tmp) - 1);
-
- if (copy_from_user(tmp, buffer, num)) {
- DBG_871X(FUNC_ADPT_FMT ": copy buffer from user space FAIL!\n",
- FUNC_ADPT_ARG(padapter));
-
- return -EFAULT;
- }
-
- num = sscanf(tmp, "%x %x", module, module+1);
- if (num == 1) {
- if (module[0] == 0)
- memset(module, 0, sizeof(module));
- else
- memset(module, 0xFF, sizeof(module));
- } else if (num != 2) {
- DBG_871X(FUNC_ADPT_FMT ": input(\"%s\") format incorrect!\n",
- FUNC_ADPT_ARG(padapter), tmp);
-
- if (num == 0)
- return -EFAULT;
- }
-
- DBG_871X(FUNC_ADPT_FMT ": input 0x%08X 0x%08X\n",
- FUNC_ADPT_ARG(padapter), module[0], module[1]);
- hal_btcoex_SetDBG(padapter, module);
-
- return count;
-}
-
-int proc_get_btcoex_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter;
- const u32 bufsize = 30*100;
- u8 *pbuf = NULL;
-
- padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- pbuf = rtw_zmalloc(bufsize);
- if (!pbuf)
- return -ENOMEM;
-
- hal_btcoex_DisplayBtCoexInfo(padapter, pbuf, bufsize);
-
- DBG_871X_SEL(m, "%s\n", pbuf);
-
- kfree(pbuf);
-
- return 0;
-}
-
-#endif
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
index a92bc19b196a..57168578663a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_io.c
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -142,7 +142,7 @@ u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
struct io_priv *pio_priv = &adapter->iopriv;
struct intf_hdl *pintfhdl = &(pio_priv->intf);
- u32 ret = _SUCCESS;
+ u32 ret;
_write_port = pintfhdl->io_ops._write_port;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index 8eb0ff57925f..eb08569db5ea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -9,13 +9,6 @@
#include <drv_types.h>
#include <rtw_debug.h>
-#define IS_MAC_ADDRESS_BROADCAST(addr) \
-(\
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
-
u8 rtw_validate_bssid(u8 *bssid)
{
u8 ret = true;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 4285844420cb..2128886c9924 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -295,11 +295,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
init_mlme_default_rate_set(padapter);
- if (pmlmeext->cur_channel > 14)
- pmlmeext->tx_rate = IEEE80211_OFDM_RATE_6MB;
- else
- pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
-
+ pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
pmlmeext->sitesurvey_res.channel_idx = 0;
pmlmeext->sitesurvey_res.bss_cnt = 0;
@@ -459,9 +455,8 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, RT_CHANNEL_
return chanset_size;
}
-int init_mlme_ext_priv(struct adapter *padapter)
+void init_mlme_ext_priv(struct adapter *padapter)
{
- int res = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -488,9 +483,6 @@ int init_mlme_ext_priv(struct adapter *padapter)
#ifdef DBG_FIXED_CHAN
pmlmeext->fixed_chan = 0xFF;
#endif
-
- return res;
-
}
void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
@@ -1882,7 +1874,6 @@ unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
{
- unsigned int ret = _FAIL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->u.hdr.rx_data;
@@ -1914,7 +1905,7 @@ unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_fr
}
exit:
- return ret;
+ return _FAIL;
}
unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index ae7fb7046c93..4075de07e0a9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -103,7 +103,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
bool ret = false;
- if (adapter_to_pwrctl(adapter)->bpower_saving == true) {
+ if (adapter_to_pwrctl(adapter)->bpower_saving) {
/* DBG_871X("%s: already in LPS or IPS mode\n", __func__); */
goto exit;
}
@@ -167,7 +167,7 @@ void rtw_ps_processor(struct adapter *padapter)
goto exit;
}
- if (pwrpriv->bInSuspend == true) {/* system suspend or autosuspend */
+ if (pwrpriv->bInSuspend) {/* system suspend or autosuspend */
pdbgpriv->dbg_ps_insuspend_cnt++;
DBG_871X("%s, pwrpriv->bInSuspend == true ignore this process\n", __func__);
return;
@@ -219,10 +219,9 @@ void traffic_check_for_leave_lps(struct adapter *padapter, u8 tx, u32 tx_packets
if (jiffies_to_msecs(jiffies - start_time) > 2000) { /* 2 sec == watch dog timer */
if (xmit_cnt > 8) {
- if ((adapter_to_pwrctl(padapter)->bLeisurePs)
- && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
- && (hal_btcoex_IsBtControlLps(padapter) == false)
- ) {
+ if (adapter_to_pwrctl(padapter)->bLeisurePs
+ && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
+ && !(hal_btcoex_IsBtControlLps(padapter))) {
DBG_871X("leave lps via Tx = %d\n", xmit_cnt);
bLeaveLPS = true;
}
@@ -234,10 +233,9 @@ void traffic_check_for_leave_lps(struct adapter *padapter, u8 tx, u32 tx_packets
} else { /* from rx path */
if (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 4/*2*/) {
- if ((adapter_to_pwrctl(padapter)->bLeisurePs)
- && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
- && (hal_btcoex_IsBtControlLps(padapter) == false)
- ) {
+ if (adapter_to_pwrctl(padapter)->bLeisurePs
+ && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
+ && !(hal_btcoex_IsBtControlLps(padapter))) {
DBG_871X("leave lps via Rx = %d\n", pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod);
bLeaveLPS = true;
}
@@ -267,7 +265,7 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
pslv = PS_STATE(pslv);
- if (pwrpriv->brpwmtimeout == true) {
+ if (pwrpriv->brpwmtimeout) {
DBG_871X("%s: RPWM timeout, force to set RPWM(0x%02X) again!\n", __func__, pslv);
} else {
if ((pwrpriv->rpwm == pslv)
@@ -278,8 +276,7 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
}
}
- if ((padapter->bSurpriseRemoved == true) ||
- (padapter->hw_init_completed == false)) {
+ if ((padapter->bSurpriseRemoved) || !(padapter->hw_init_completed)) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
__func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
@@ -289,7 +286,7 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
return;
}
- if (padapter->bDriverStopped == true) {
+ if (padapter->bDriverStopped) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
@@ -355,14 +352,14 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
- if (true == pwrpriv->bInSuspend && pwrpriv->wowlan_mode)
+ if (pwrpriv->bInSuspend && pwrpriv->wowlan_mode)
return true;
- else if (true == pwrpriv->bInSuspend && pwrpriv->wowlan_ap_mode)
+ else if (pwrpriv->bInSuspend && pwrpriv->wowlan_ap_mode)
return true;
- else if (true == pwrpriv->bInSuspend)
+ else if (pwrpriv->bInSuspend)
return false;
#else
- if (true == pwrpriv->bInSuspend)
+ if (pwrpriv->bInSuspend)
return false;
#endif
@@ -381,7 +378,7 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
)
return false;
- if ((padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) && (padapter->securitypriv.binstallGrpkey == false)) {
+ if ((padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) && !(padapter->securitypriv.binstallGrpkey)) {
DBG_871X("Group handshake still in progress !!!\n");
return false;
}
@@ -417,13 +414,9 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
/* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
if (ps_mode == PS_MODE_ACTIVE) {
- if (1
- && (((hal_btcoex_IsBtControlLps(padapter) == false)
- )
- || ((hal_btcoex_IsBtControlLps(padapter) == true)
- && (hal_btcoex_IsLpsOn(padapter) == false))
- )
- ) {
+ if (!(hal_btcoex_IsBtControlLps(padapter))
+ || (hal_btcoex_IsBtControlLps(padapter)
+ && !(hal_btcoex_IsLpsOn(padapter)))) {
DBG_871X(FUNC_ADPT_FMT" Leave 802.11 power save - %s\n",
FUNC_ADPT_ARG(padapter), msg);
@@ -431,8 +424,7 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
rtw_set_rpwm(padapter, PS_STATE_S4);
#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
- if (pwrpriv->wowlan_mode == true ||
- pwrpriv->wowlan_ap_mode == true) {
+ if (pwrpriv->wowlan_mode || pwrpriv->wowlan_ap_mode) {
unsigned long start_time;
u32 delay_ms;
u8 val8;
@@ -461,8 +453,8 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
}
} else {
if ((PS_RDY_CHECK(padapter) && check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE))
- || ((hal_btcoex_IsBtControlLps(padapter) == true)
- && (hal_btcoex_IsLpsOn(padapter) == true))
+ || ((hal_btcoex_IsBtControlLps(padapter))
+ && (hal_btcoex_IsLpsOn(padapter)))
) {
u8 pslv;
@@ -481,8 +473,8 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
if (pwrpriv->alives == 0)
pslv = PS_STATE_S0;
- if ((hal_btcoex_IsBtDisabled(padapter) == false)
- && (hal_btcoex_IsBtControlLps(padapter) == true)) {
+ if (!(hal_btcoex_IsBtDisabled(padapter))
+ && (hal_btcoex_IsBtControlLps(padapter))) {
u8 val8;
val8 = hal_btcoex_LpsVal(padapter);
@@ -513,10 +505,10 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
start_time = jiffies;
while (1) {
rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
- if (true == bAwake)
+ if (bAwake)
break;
- if (true == padapter->bSurpriseRemoved) {
+ if (padapter->bSurpriseRemoved) {
err = -2;
DBG_871X("%s: device surprise removed!!\n", __func__);
break;
@@ -544,7 +536,7 @@ void LPS_Enter(struct adapter *padapter, const char *msg)
int n_assoc_iface = 0;
char buf[32] = {0};
- if (hal_btcoex_IsBtControlLps(padapter) == true)
+ if (hal_btcoex_IsBtControlLps(padapter))
return;
/* Skip lps enter request if number of assocated adapters is not 1 */
@@ -557,8 +549,8 @@ void LPS_Enter(struct adapter *padapter, const char *msg)
if (get_iface_type(padapter) != IFACE_PORT0)
return;
- if (PS_RDY_CHECK(dvobj->padapters) == false)
- return;
+ if (!PS_RDY_CHECK(dvobj->padapters))
+ return;
if (pwrpriv->bLeisurePs) {
/* Idle for a while if we connect to AP a while ago. */
@@ -589,7 +581,7 @@ void LPS_Leave(struct adapter *padapter, const char *msg)
/* DBG_871X("+LeisurePSLeave\n"); */
- if (hal_btcoex_IsBtControlLps(padapter) == true)
+ if (hal_btcoex_IsBtControlLps(padapter))
return;
if (pwrpriv->bLeisurePs) {
@@ -615,13 +607,13 @@ void LeaveAllPowerSaveModeDirect(struct adapter *Adapter)
DBG_871X("%s.....\n", __func__);
- if (true == Adapter->bSurpriseRemoved) {
+ if (Adapter->bSurpriseRemoved) {
DBG_871X(FUNC_ADPT_FMT ": bSurpriseRemoved =%d Skip!\n",
FUNC_ADPT_ARG(Adapter), Adapter->bSurpriseRemoved);
return;
}
- if ((check_fwstate(pmlmepriv, _FW_LINKED) == true)) { /* connect */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
DBG_871X("%s: Driver Already Leave LPS\n", __func__);
@@ -637,7 +629,7 @@ void LeaveAllPowerSaveModeDirect(struct adapter *Adapter)
rtw_lps_ctrl_wk_cmd(pri_padapter, LPS_CTRL_LEAVE, 0);
} else {
if (pwrpriv->rf_pwrstate == rf_off)
- if (false == ips_leave(pri_padapter))
+ if (!ips_leave(pri_padapter))
DBG_871X("======> ips_leave fail.............\n");
}
}
@@ -675,7 +667,7 @@ void LeaveAllPowerSaveMode(struct adapter *Adapter)
LPS_Leave_check(Adapter);
} else {
if (adapter_to_pwrctl(Adapter)->rf_pwrstate == rf_off) {
- if (false == ips_leave(Adapter))
+ if (!ips_leave(Adapter))
DBG_871X("======> ips_leave fail.............\n");
}
}
@@ -698,15 +690,14 @@ void LPS_Leave_check(
while (1) {
mutex_lock(&pwrpriv->lock);
- if ((padapter->bSurpriseRemoved == true)
- || (padapter->hw_init_completed == false)
- || (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
- )
+ if (padapter->bSurpriseRemoved
+ || !(padapter->hw_init_completed)
+ || (pwrpriv->pwr_mode == PS_MODE_ACTIVE))
bReady = true;
mutex_unlock(&pwrpriv->lock);
- if (true == bReady)
+ if (bReady)
break;
if (jiffies_to_msecs(jiffies - start_time) > 100) {
@@ -830,12 +821,12 @@ static void pwr_rpwm_timeout_handler(struct timer_list *t)
_set_workitem(&pwrpriv->rpwmtimeoutwi);
}
-static __inline void register_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+static inline void register_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
{
pwrctrl->alives |= tag;
}
-static __inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+static inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
{
pwrctrl->alives &= ~tag;
}
@@ -870,7 +861,7 @@ s32 rtw_register_task_alive(struct adapter *padapter, u32 task)
register_task_alive(pwrctrl, task);
- if (pwrctrl->bFwCurrentInPSMode == true) {
+ if (pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("%s: task = 0x%x cpwm = 0x%02x alives = 0x%08x\n",
__func__, task, pwrctrl->cpwm, pwrctrl->alives));
@@ -910,8 +901,8 @@ void rtw_unregister_task_alive(struct adapter *padapter, u32 task)
pwrctrl = adapter_to_pwrctl(padapter);
pslv = PS_STATE_S0;
- if ((hal_btcoex_IsBtDisabled(padapter) == false)
- && (hal_btcoex_IsBtControlLps(padapter) == true)) {
+ if (!(hal_btcoex_IsBtDisabled(padapter))
+ && hal_btcoex_IsBtControlLps(padapter)) {
u8 val8;
val8 = hal_btcoex_LpsVal(padapter);
@@ -924,7 +915,7 @@ void rtw_unregister_task_alive(struct adapter *padapter, u32 task)
unregister_task_alive(pwrctrl, task);
if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
- && (pwrctrl->bFwCurrentInPSMode == true)) {
+ && pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("%s: cpwm = 0x%02x alives = 0x%08x\n",
__func__, pwrctrl->cpwm, pwrctrl->alives));
@@ -965,7 +956,7 @@ s32 rtw_register_tx_alive(struct adapter *padapter)
register_task_alive(pwrctrl, XMIT_ALIVE);
- if (pwrctrl->bFwCurrentInPSMode == true) {
+ if (pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("rtw_register_tx_alive: cpwm = 0x%02x alives = 0x%08x\n",
pwrctrl->cpwm, pwrctrl->alives));
@@ -1014,7 +1005,7 @@ s32 rtw_register_cmd_alive(struct adapter *padapter)
register_task_alive(pwrctrl, CMD_ALIVE);
- if (pwrctrl->bFwCurrentInPSMode == true) {
+ if (pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_info_,
("rtw_register_cmd_alive: cpwm = 0x%02x alives = 0x%08x\n",
pwrctrl->cpwm, pwrctrl->alives));
@@ -1051,8 +1042,8 @@ void rtw_unregister_tx_alive(struct adapter *padapter)
pwrctrl = adapter_to_pwrctl(padapter);
pslv = PS_STATE_S0;
- if ((hal_btcoex_IsBtDisabled(padapter) == false)
- && (hal_btcoex_IsBtControlLps(padapter) == true)) {
+ if (!(hal_btcoex_IsBtDisabled(padapter))
+ && hal_btcoex_IsBtControlLps(padapter)) {
u8 val8;
val8 = hal_btcoex_LpsVal(padapter);
@@ -1065,7 +1056,7 @@ void rtw_unregister_tx_alive(struct adapter *padapter)
unregister_task_alive(pwrctrl, XMIT_ALIVE);
if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
- && (pwrctrl->bFwCurrentInPSMode == true)) {
+ && pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
("%s: cpwm = 0x%02x alives = 0x%08x\n",
__func__, pwrctrl->cpwm, pwrctrl->alives));
@@ -1093,8 +1084,8 @@ void rtw_unregister_cmd_alive(struct adapter *padapter)
pwrctrl = adapter_to_pwrctl(padapter);
pslv = PS_STATE_S0;
- if ((hal_btcoex_IsBtDisabled(padapter) == false)
- && (hal_btcoex_IsBtControlLps(padapter) == true)) {
+ if (!(hal_btcoex_IsBtDisabled(padapter))
+ && hal_btcoex_IsBtControlLps(padapter)) {
u8 val8;
val8 = hal_btcoex_LpsVal(padapter);
@@ -1107,7 +1098,7 @@ void rtw_unregister_cmd_alive(struct adapter *padapter)
unregister_task_alive(pwrctrl, CMD_ALIVE);
if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
- && (pwrctrl->bFwCurrentInPSMode == true)) {
+ && pwrctrl->bFwCurrentInPSMode) {
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_info_,
("%s: cpwm = 0x%02x alives = 0x%08x\n",
__func__, pwrctrl->cpwm, pwrctrl->alives));
@@ -1237,7 +1228,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
DBG_871X("%s wait ps_processing done\n", __func__);
}
- if (pwrpriv->bInternalAutoSuspend == false && pwrpriv->bInSuspend) {
+ if (!(pwrpriv->bInternalAutoSuspend) && pwrpriv->bInSuspend) {
DBG_871X("%s wait bInSuspend...\n", __func__);
while (pwrpriv->bInSuspend
&& jiffies_to_msecs(jiffies - start) <= 3000
@@ -1251,19 +1242,19 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
/* System suspend is not allowed to wakeup */
- if ((pwrpriv->bInternalAutoSuspend == false) && (true == pwrpriv->bInSuspend)) {
+ if (!(pwrpriv->bInternalAutoSuspend) && pwrpriv->bInSuspend) {
ret = _FAIL;
goto exit;
}
/* block??? */
- if ((pwrpriv->bInternalAutoSuspend == true) && (padapter->net_closed == true)) {
+ if (pwrpriv->bInternalAutoSuspend && padapter->net_closed) {
ret = _FAIL;
goto exit;
}
/* I think this should be check in IPS, LPS, autosuspend functions... */
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
ret = _SUCCESS;
goto exit;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 979056c3d397..57cfe06d7d73 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -2290,8 +2290,7 @@ static void gf_mulx(u8 *pad)
static void aes_encrypt_deinit(void *ctx)
{
- memset(ctx, 0, AES_PRIV_SIZE);
- kfree(ctx);
+ kzfree(ctx);
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 76c50377f0fe..ea3ea2a6b314 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -451,7 +451,7 @@ void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigne
mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
}
-__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
+inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
{
return pnetwork->MacAddress;
}
@@ -1996,11 +1996,6 @@ void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
}
}
-void beacon_timing_control(struct adapter *padapter)
-{
- rtw_hal_bcn_related_reg_setting(padapter);
-}
-
void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
{
int i;
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
index 8e4caeeb4070..dd349c506da8 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -1758,7 +1758,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
static s32 up, dn, m, n, WaitCount;
s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
u8 retryCount = 0, btInfoExt;
- bool bWifiBusy = false;
BTC_PRINT(
BTC_MSG_ALGORITHM,
@@ -1766,11 +1765,6 @@ static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
("[BTCoex], TdmaDurationAdjustForAcl()\n")
);
- if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifiStatus)
- bWifiBusy = true;
- else
- bWifiBusy = false;
-
if (
(BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
(BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
index aad86570b59c..7150d54d49ab 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -532,7 +532,6 @@ typedef struct _BTC_COEXIST {
extern BTC_COEXIST GLBtCoexist;
-u8 EXhalbtcoutsrc_InitlizeVariables(void *Adapter);
void EXhalbtcoutsrc_PowerOnSetting(PBTC_COEXIST pBtCoexist);
void EXhalbtcoutsrc_InitHwConfig(PBTC_COEXIST pBtCoexist, u8 bWifiOnly);
void EXhalbtcoutsrc_InitCoexDm(PBTC_COEXIST pBtCoexist);
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 5257287b4f4d..6e4a1fcb8790 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -389,7 +389,6 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
u8 *pu8;
s32 *pS4Tmp;
u32 *pU4Tmp;
- u8 *pU1Tmp;
u8 ret;
@@ -403,7 +402,6 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
pu8 = pOutBuf;
pS4Tmp = pOutBuf;
pU4Tmp = pOutBuf;
- pU1Tmp = pOutBuf;
ret = true;
switch (getType) {
@@ -484,10 +482,8 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
*pU4Tmp = BTC_WIFI_BW_LEGACY;
else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_20)
*pU4Tmp = BTC_WIFI_BW_HT20;
- else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40)
- *pU4Tmp = BTC_WIFI_BW_HT40;
else
- *pU4Tmp = BTC_WIFI_BW_HT40; /* todo */
+ *pU4Tmp = BTC_WIFI_BW_HT40;
break;
case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
@@ -516,32 +512,32 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
break;
case BTC_GET_U1_WIFI_DOT11_CHNL:
- *pU1Tmp = padapter->mlmeextpriv.cur_channel;
+ *pu8 = padapter->mlmeextpriv.cur_channel;
break;
case BTC_GET_U1_WIFI_CENTRAL_CHNL:
- *pU1Tmp = pHalData->CurrentChannel;
+ *pu8 = pHalData->CurrentChannel;
break;
case BTC_GET_U1_WIFI_HS_CHNL:
- *pU1Tmp = 0;
+ *pu8 = 0;
ret = false;
break;
case BTC_GET_U1_MAC_PHY_MODE:
- *pU1Tmp = BTC_SMSP;
+ *pu8 = BTC_SMSP;
/* *pU1Tmp = BTC_DMSP; */
/* *pU1Tmp = BTC_DMDP; */
/* *pU1Tmp = BTC_MP_UNKNOWN; */
break;
case BTC_GET_U1_AP_NUM:
- *pU1Tmp = halbtcoutsrc_GetWifiScanAPNum(padapter);
+ *pu8 = halbtcoutsrc_GetWifiScanAPNum(padapter);
break;
/* 1Ant =========== */
case BTC_GET_U1_LPS_MODE:
- *pU1Tmp = padapter->dvobj->pwrctl_priv.pwr_mode;
+ *pu8 = padapter->dvobj->pwrctl_priv.pwr_mode;
break;
default:
@@ -959,9 +955,13 @@ static u8 EXhalbtcoutsrc_BindBtCoexWithAdapter(void *padapter)
return true;
}
-u8 EXhalbtcoutsrc_InitlizeVariables(void *padapter)
+void hal_btcoex_Initialize(void *padapter)
{
- PBTC_COEXIST pBtCoexist = &GLBtCoexist;
+ PBTC_COEXIST pBtCoexist;
+
+ memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));
+
+ pBtCoexist = &GLBtCoexist;
/* pBtCoexist->statistics.cntBind++; */
@@ -1001,8 +1001,6 @@ u8 EXhalbtcoutsrc_InitlizeVariables(void *padapter)
GLBtcWiFiInScanState = false;
GLBtcWiFiInIQKState = false;
-
- return true;
}
void EXhalbtcoutsrc_PowerOnSetting(PBTC_COEXIST pBtCoexist)
@@ -1337,7 +1335,7 @@ void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
*true Enable BT co-exist mechanism
*false Disable BT co-exist mechanism
*/
-u8 hal_btcoex_IsBtExist(struct adapter *padapter)
+bool hal_btcoex_IsBtExist(struct adapter *padapter)
{
struct hal_com_data *pHalData;
@@ -1384,12 +1382,6 @@ void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath)
EXhalbtcoutsrc_SetSingleAntPath(singleAntPath);
}
-u8 hal_btcoex_Initialize(struct adapter *padapter)
-{
- memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));
- return EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
-}
-
void hal_btcoex_PowerOnSetting(struct adapter *padapter)
{
EXhalbtcoutsrc_PowerOnSetting(&GLBtCoexist);
@@ -1477,9 +1469,9 @@ void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual)
GLBtCoexist.bManualControl = bmanual;
}
-u8 hal_btcoex_IsBtControlLps(struct adapter *padapter)
+bool hal_btcoex_IsBtControlLps(struct adapter *padapter)
{
- if (hal_btcoex_IsBtExist(padapter) == false)
+ if (!hal_btcoex_IsBtExist(padapter))
return false;
if (GLBtCoexist.btInfo.bBtDisabled)
@@ -1491,9 +1483,9 @@ u8 hal_btcoex_IsBtControlLps(struct adapter *padapter)
return false;
}
-u8 hal_btcoex_IsLpsOn(struct adapter *padapter)
+bool hal_btcoex_IsLpsOn(struct adapter *padapter)
{
- if (hal_btcoex_IsBtExist(padapter) == false)
+ if (!hal_btcoex_IsBtExist(padapter))
return false;
if (GLBtCoexist.btInfo.bBtDisabled)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 638b12ae6ee9..eddd56abbb2d 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -152,10 +152,7 @@ bool HAL_IsLegalChannel(struct adapter *Adapter, u32 Channel)
{
bool bLegalChannel = true;
- if (Channel > 14) {
- bLegalChannel = false;
- DBG_871X("Channel > 14 but wireless_mode do not support 5G\n");
- } else if ((Channel <= 14) && (Channel >= 1)) {
+ if ((Channel <= 14) && (Channel >= 1)) {
if (IsSupported24G(Adapter->registrypriv.wireless_mode) == false) {
bLegalChannel = false;
DBG_871X("(Channel <= 14) && (Channel >= 1) but wireless_mode do not support 2.4G\n");
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 336764464e7d..6539bee9b5ba 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -2040,24 +2040,6 @@ void PHY_SetTxPowerLimit(
}
}
-u8 PHY_GetTxPowerIndex(
- struct adapter *padapter,
- u8 RFPath,
- u8 Rate,
- enum CHANNEL_WIDTH BandWidth,
- u8 Channel
-)
-{
- return PHY_GetTxPowerIndex_8723B(padapter, RFPath, Rate, BandWidth, Channel);
-}
-
-void PHY_SetTxPowerIndex(
- struct adapter *padapter, u32 PowerIndex, u8 RFPath, u8 Rate
-)
-{
- PHY_SetTxPowerIndex_8723B(padapter, PowerIndex, RFPath, Rate);
-}
-
void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index acb25978a46c..7d8f21f32fb9 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -369,7 +369,7 @@ void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter)
}
}
-void rtw_hal_bcn_related_reg_setting(struct adapter *padapter)
+void beacon_timing_control(struct adapter *padapter)
{
if (padapter->HalFunc.SetBeaconRelatedRegistersHandler)
padapter->HalFunc.SetBeaconRelatedRegistersHandler(padapter);
diff --git a/drivers/staging/rtl8723bs/hal/hal_phy.c b/drivers/staging/rtl8723bs/hal/hal_phy.c
deleted file mode 100644
index 24a9d8f783f0..000000000000
--- a/drivers/staging/rtl8723bs/hal/hal_phy.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#define _HAL_PHY_C_
-
-#include <drv_types.h>
-
-/* */
-/* ==> RF shadow Operation API Code Section!!! */
-/* */
-/*-----------------------------------------------------------------------------
- * Function: PHY_RFShadowRead
- * PHY_RFShadowWrite
- * PHY_RFShadowCompare
- * PHY_RFShadowRecorver
- * PHY_RFShadowCompareAll
- * PHY_RFShadowRecorverAll
- * PHY_RFShadowCompareFlagSet
- * PHY_RFShadowRecorverFlagSet
- *
- * Overview: When we set RF register, we must write shadow at first.
- * When we are running, we must compare shadow abd locate error addr.
- * Decide to recorver or not.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/20/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-u32 PHY_RFShadowRead(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
-{
- return RF_Shadow[eRFPath][Offset].Value;
-
-} /* PHY_RFShadowRead */
-
-
-void PHY_RFShadowWrite(
- IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u32 Data
-)
-{
- RF_Shadow[eRFPath][Offset].Value = (Data & bRFRegOffsetMask);
- RF_Shadow[eRFPath][Offset].Driver_Write = true;
-
-} /* PHY_RFShadowWrite */
-
-
-bool PHY_RFShadowCompare(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
-{
- u32 reg;
- /* Check if we need to check the register */
- if (RF_Shadow[eRFPath][Offset].Compare == true) {
- reg = rtw_hal_read_rfreg(Adapter, eRFPath, Offset, bRFRegOffsetMask);
- /* Compare shadow and real rf register for 20bits!! */
- if (RF_Shadow[eRFPath][Offset].Value != reg) {
- /* Locate error position. */
- RF_Shadow[eRFPath][Offset].ErrorOrNot = true;
- /* RT_TRACE(COMP_INIT, DBG_LOUD, */
- /* PHY_RFShadowCompare RF-%d Addr%02lx Err = %05lx\n", */
- /* eRFPath, Offset, reg)); */
- }
- return RF_Shadow[eRFPath][Offset].ErrorOrNot;
- }
- return false;
-} /* PHY_RFShadowCompare */
-
-
-void PHY_RFShadowRecorver(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
-{
- /* Check if the address is error */
- if (RF_Shadow[eRFPath][Offset].ErrorOrNot == true) {
- /* Check if we need to recorver the register. */
- if (RF_Shadow[eRFPath][Offset].Recorver == true) {
- rtw_hal_write_rfreg(Adapter, eRFPath, Offset, bRFRegOffsetMask,
- RF_Shadow[eRFPath][Offset].Value);
- /* RT_TRACE(COMP_INIT, DBG_LOUD, */
- /* PHY_RFShadowRecorver RF-%d Addr%02lx=%05lx", */
- /* eRFPath, Offset, RF_Shadow[eRFPath][Offset].Value)); */
- }
- }
-
-} /* PHY_RFShadowRecorver */
-
-
-void PHY_RFShadowCompareAll(IN PADAPTER Adapter)
-{
- u8 eRFPath = 0;
- u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
-
- for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
- for (Offset = 0; Offset < maxReg; Offset++) {
- PHY_RFShadowCompare(Adapter, eRFPath, Offset);
- }
- }
-
-} /* PHY_RFShadowCompareAll */
-
-
-void PHY_RFShadowRecorverAll(IN PADAPTER Adapter)
-{
- u8 eRFPath = 0;
- u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
-
- for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
- for (Offset = 0; Offset < maxReg; Offset++) {
- PHY_RFShadowRecorver(Adapter, eRFPath, Offset);
- }
- }
-
-} /* PHY_RFShadowRecorverAll */
-
-
-void
-PHY_RFShadowCompareFlagSet(
- IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u8 Type
-)
-{
- /* Set True or False!!! */
- RF_Shadow[eRFPath][Offset].Compare = Type;
-
-} /* PHY_RFShadowCompareFlagSet */
-
-
-void PHY_RFShadowRecorverFlagSet(
- IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u8 Type
-)
-{
- /* Set True or False!!! */
- RF_Shadow[eRFPath][Offset].Recorver = Type;
-
-} /* PHY_RFShadowRecorverFlagSet */
-
-
-void PHY_RFShadowCompareFlagSetAll(IN PADAPTER Adapter)
-{
- u8 eRFPath = 0;
- u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
-
- for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
- for (Offset = 0; Offset < maxReg; Offset++) {
- /* 2008/11/20 MH For S3S4 test, we only check reg 26/27 now!!!! */
- if (Offset != 0x26 && Offset != 0x27)
- PHY_RFShadowCompareFlagSet(Adapter, eRFPath, Offset, false);
- else
- PHY_RFShadowCompareFlagSet(Adapter, eRFPath, Offset, true);
- }
- }
-
-} /* PHY_RFShadowCompareFlagSetAll */
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index e3f4307f3d20..aa6631ee4ea7 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -339,13 +339,9 @@ void ODM_TXPowerTrackingCheck(PDM_ODM_T pDM_Odm);
void odm_RateAdaptiveMaskInit(PDM_ODM_T pDM_Odm);
-void odm_TXPowerTrackingThermalMeterInit(PDM_ODM_T pDM_Odm);
-
void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm);
-void odm_TXPowerTrackingCheckCE(PDM_ODM_T pDM_Odm);
-
/* Remove Edca by Yu Chen */
@@ -1259,13 +1255,11 @@ void odm_RSSIMonitorCheckCE(PDM_ODM_T pDM_Odm)
int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
u8 sta_cnt = 0;
u32 PWDB_rssi[NUM_STA] = {0};/* 0~15]:MACID, [16~31]:PWDB_rssi */
- bool FirstConnect = false;
pRA_T pRA_Table = &pDM_Odm->DM_RA_Table;
if (pDM_Odm->bLinked != true)
return;
- FirstConnect = (pDM_Odm->bLinked) && (pRA_Table->firstconnect == false);
pRA_Table->firstconnect = pDM_Odm->bLinked;
/* if (check_fwstate(&Adapter->mlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) == true) */
@@ -1324,11 +1318,6 @@ void odm_RSSIMonitorCheckCE(PDM_ODM_T pDM_Odm)
/* 3 Tx Power Tracking */
/* 3 ============================================================ */
-void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm)
-{
- odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
-}
-
static u8 getSwingIndex(PDM_ODM_T pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
@@ -1353,7 +1342,7 @@ static u8 getSwingIndex(PDM_ODM_T pDM_Odm)
return i;
}
-void odm_TXPowerTrackingThermalMeterInit(PDM_ODM_T pDM_Odm)
+void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm)
{
u8 defaultSwingIndex = getSwingIndex(pDM_Odm);
u8 p = 0;
@@ -1397,14 +1386,8 @@ void odm_TXPowerTrackingThermalMeterInit(PDM_ODM_T pDM_Odm)
}
-
void ODM_TXPowerTrackingCheck(PDM_ODM_T pDM_Odm)
{
- odm_TXPowerTrackingCheckCE(pDM_Odm);
-}
-
-void odm_TXPowerTrackingCheckCE(PDM_ODM_T pDM_Odm)
-{
struct adapter *Adapter = pDM_Odm->Adapter;
if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 6ba77bb70889..fba3b9e1491b 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -1365,10 +1365,6 @@ extern u32 TxScalingTable_Jaguar[TXSCALE_TABLE_SIZE];
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
-/* Remove DIG by yuchen */
-
-void ODM_SetAntenna(PDM_ODM_T pDM_Odm, u8 Antenna);
-
/* Remove BB power saving by Yuchen */
#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index a73304639226..95edd148ac24 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -11,11 +11,6 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
{
PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
- bool bEEPROMCheck;
- struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
-
- bEEPROMCheck = pHalData->EEPROMVersion >= 0x01;
if (pCfoTrack->CrystalCap == CrystalCap)
return;
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index 49fa814068b8..71919a3d81ab 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -89,7 +89,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(
u8 RSSI, total_rssi = 0;
bool isCCKrate = false;
u8 rf_rx_num = 0;
- u8 cck_highpwr = 0;
u8 LNA_idx, VGA_idx;
PPHY_STATUS_RPT_8192CD_T pPhyStaRpt = (PPHY_STATUS_RPT_8192CD_T)pPhyStatus;
@@ -107,16 +106,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(
/* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
/* */
- /* if (pHalData->eRFPowerState == eRfOn) */
- cck_highpwr = pDM_Odm->bCckHighPower;
- /* else */
- /* cck_highpwr = false; */
-
cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a ;
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
- /* In 88E, cck_highpwr is always set to 1 */
LNA_idx = ((cck_agc_rpt & 0xE0)>>5);
VGA_idx = (cck_agc_rpt & 0x1F);
rx_pwr_all = odm_CCKRSSI_8723B(LNA_idx, VGA_idx);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 080e974914b6..7760fd0eb6c9 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -1300,7 +1300,7 @@ static void rtl8723b_set_FwScanOffloadInfo_cmd(struct adapter *padapter, PRSVDPA
}
#endif /* CONFIG_PNO_SUPPORT */
-static void rtl8723b_set_FwWoWlanRelated_cmd(struct adapter *padapter, u8 enable)
+void rtl8723b_set_wowlan_cmd(struct adapter *padapter, u8 enable)
{
struct security_priv *psecpriv = &padapter->securitypriv;
struct pwrctrl_priv *ppwrpriv = adapter_to_pwrctl(padapter);
@@ -1346,11 +1346,6 @@ static void rtl8723b_set_FwWoWlanRelated_cmd(struct adapter *padapter, u8 enable
DBG_871X_LEVEL(_drv_always_, "-%s()-\n", __func__);
}
-
-void rtl8723b_set_wowlan_cmd(struct adapter *padapter, u8 enable)
-{
- rtl8723b_set_FwWoWlanRelated_cmd(padapter, enable);
-}
#endif /* CONFIG_WOWLAN */
#ifdef CONFIG_AP_WOWLAN
@@ -1398,7 +1393,7 @@ static void rtl8723b_set_Fw_AP_Offload_Cmd(struct adapter *padapter, u8 bFuncEn)
H2C_AP_OFFLOAD_LEN, u1H2CAPOffloadCtrlParm);
}
-static void rtl8723b_set_AP_FwWoWlan_cmd(struct adapter *padapter, u8 enable)
+void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable)
{
DBG_871X_LEVEL(_drv_always_, "+%s()+: enable =%d\n", __func__, enable);
if (enable) {
@@ -1411,12 +1406,6 @@ static void rtl8723b_set_AP_FwWoWlan_cmd(struct adapter *padapter, u8 enable)
rtl8723b_set_Fw_AP_Offload_Cmd(padapter, enable);
msleep(10);
DBG_871X_LEVEL(_drv_always_, "-%s()-\n", __func__);
- return ;
-}
-
-void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable)
-{
- rtl8723b_set_AP_FwWoWlan_cmd(padapter, enable);
}
#endif /* CONFIG_AP_WOWLAN */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 25c75b977666..6df2b58bdc67 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -431,14 +431,12 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
u8 sz8723BBRegFile[] = RTL8723B_PHY_REG;
u8 sz8723AGCTableFile[] = RTL8723B_AGC_TAB;
u8 sz8723BBBRegPgFile[] = RTL8723B_PHY_REG_PG;
- u8 sz8723BBRegMpFile[] = RTL8723B_PHY_REG_MP;
u8 sz8723BRFTxPwrLmtFile[] = RTL8723B_TXPWR_LMT;
- u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszBBRegMpFile = NULL, *pszRFTxPwrLmtFile = NULL;
+ u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszRFTxPwrLmtFile = NULL;
pszBBRegFile = sz8723BBRegFile;
pszAGCTableFile = sz8723AGCTableFile;
pszBBRegPgFile = sz8723BBBRegPgFile;
- pszBBRegMpFile = sz8723BBRegMpFile;
pszRFTxPwrLmtFile = sz8723BRFTxPwrLmtFile;
/* Read Tx Power Limit File */
@@ -585,7 +583,7 @@ int PHY_RFConfig8723B(struct adapter *Adapter)
* <20120830, Kordan>
**************************************************************************************************************/
-void PHY_SetTxPowerIndex_8723B(
+void PHY_SetTxPowerIndex(
struct adapter *Adapter,
u32 PowerIndex,
u8 RFPath,
@@ -668,7 +666,7 @@ void PHY_SetTxPowerIndex_8723B(
}
}
-u8 PHY_GetTxPowerIndex_8723B(
+u8 PHY_GetTxPowerIndex(
struct adapter *padapter,
u8 RFPath,
u8 Rate,
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index e23b39ab16c5..0f3301091258 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -486,7 +486,6 @@ initbuferror:
}
if (precvpriv->pallocated_recv_buf) {
- n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
kfree(precvpriv->pallocated_recv_buf);
precvpriv->pallocated_recv_buf = NULL;
}
@@ -503,7 +502,7 @@ exit:
*/
void rtl8723bs_free_recv_priv(struct adapter *padapter)
{
- u32 i, n;
+ u32 i;
struct recv_priv *precvpriv;
struct recv_buf *precvbuf;
@@ -515,9 +514,8 @@ void rtl8723bs_free_recv_priv(struct adapter *padapter)
/* 3 2. free all recv buffers */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
if (precvbuf) {
- n = NR_RECVBUFF;
precvpriv->free_recv_buf_queue_cnt = 0;
- for (i = 0; i < n ; i++) {
+ for (i = 0; i < NR_RECVBUFF; i++) {
list_del_init(&precvbuf->list);
rtw_os_recvbuf_resource_free(padapter, precvbuf);
precvbuf++;
@@ -526,7 +524,6 @@ void rtl8723bs_free_recv_priv(struct adapter *padapter)
}
if (precvpriv->pallocated_recv_buf) {
- n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
kfree(precvpriv->pallocated_recv_buf);
precvpriv->pallocated_recv_buf = NULL;
}
diff --git a/drivers/staging/rtl8723bs/include/autoconf.h b/drivers/staging/rtl8723bs/include/autoconf.h
index 196aca3aed7b..8f4c1e734473 100644
--- a/drivers/staging/rtl8723bs/include/autoconf.h
+++ b/drivers/staging/rtl8723bs/include/autoconf.h
@@ -57,9 +57,5 @@
#define DBG 0 /* for ODM & BTCOEX debug */
#endif /* !DEBUG */
-#ifdef CONFIG_PROC_FS
-#define PROC_DEBUG
-#endif
-
/* define DBG_XMIT_BUF */
/* define DBG_XMIT_BUF_EXT */
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 96346ce064aa..8d7fce1e39b7 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -478,7 +478,7 @@ struct sdio_data intf_data;
#define dvobj_to_pwrctl(dvobj) (&(dvobj->pwrctl_priv))
#define pwrctl_to_dvobj(pwrctl) container_of(pwrctl, struct dvobj_priv, pwrctl_priv)
-__inline static struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
{
{ /* todo: get interface type from dvobj and then return the dev accordingly */
#ifdef RTW_DVOBJ_CHIP_HW_TYPE
@@ -576,8 +576,6 @@ struct adapter {
int bup;
struct net_device_stats stats;
struct iw_statistics iwstats;
- struct proc_dir_entry *dir_dev;/* for proc directory */
- struct proc_dir_entry *dir_odm;
struct wireless_dev *rtw_wdev;
struct rtw_wdev_priv wdev_data;
@@ -636,14 +634,14 @@ struct adapter {
/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */
/* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */
-__inline static void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
+static inline void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
{
int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
df |= func_bit;
atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
}
-__inline static void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
+static inline void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
{
int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
df &= ~(func_bit);
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
index 6f7514be998f..eb03813fdcb9 100644
--- a/drivers/staging/rtl8723bs/include/hal_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -22,13 +22,13 @@ typedef struct _BT_COEXIST
void DBG_BT_INFO(u8 *dbgmsg);
void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist);
-u8 hal_btcoex_IsBtExist(struct adapter *padapter);
+bool hal_btcoex_IsBtExist(struct adapter *padapter);
bool hal_btcoex_IsBtDisabled(struct adapter *);
void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType);
void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum);
void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath);
-u8 hal_btcoex_Initialize(struct adapter *padapter);
+void hal_btcoex_Initialize(void *padapter);
void hal_btcoex_PowerOnSetting(struct adapter *padapter);
void hal_btcoex_InitHwConfig(struct adapter *padapter, u8 bWifiOnly);
@@ -47,8 +47,8 @@ void hal_btcoex_Handler(struct adapter *padapter);
s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter);
void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual);
-u8 hal_btcoex_IsBtControlLps(struct adapter *);
-u8 hal_btcoex_IsLpsOn(struct adapter *);
+bool hal_btcoex_IsBtControlLps(struct adapter *padapter);
+bool hal_btcoex_IsLpsOn(struct adapter *padapter);
u8 hal_btcoex_RpwmVal(struct adapter *);
u8 hal_btcoex_LpsVal(struct adapter *);
u32 hal_btcoex_GetRaMask(struct adapter *);
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index f841546584a7..9167f1e7827f 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -213,23 +213,6 @@ PHY_GetTxPowerTrackingOffset(
u8 RFPath
);
-u8
-PHY_GetTxPowerIndex(
-struct adapter * padapter,
-u8 RFPath,
-u8 Rate,
-enum CHANNEL_WIDTH BandWidth,
-u8 Channel
- );
-
-void
-PHY_SetTxPowerIndex(
-struct adapter * padapter,
-u32 PowerIndex,
-u8 RFPath,
-u8 Rate
- );
-
void
Hal_ChannelPlanToRegulation(
struct adapter * Adapter,
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 3a0c3d079d50..24926ebaf950 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -362,7 +362,7 @@ void rtw_hal_add_ra_tid(struct adapter *padapter, u32 bitmap, u8 *arg, u8 rssi_l
void rtw_hal_start_thread(struct adapter *padapter);
void rtw_hal_stop_thread(struct adapter *padapter);
-void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+void beacon_timing_control(struct adapter *padapter);
u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask);
void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data);
diff --git a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
index 640427f407e3..b40868b2e76f 100644
--- a/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_phy_cfg.h
@@ -65,7 +65,7 @@ int PHY_RFConfig8723B(struct adapter *Adapter );
s32 PHY_MACConfig8723B(struct adapter *padapter);
void
-PHY_SetTxPowerIndex_8723B(
+PHY_SetTxPowerIndex(
struct adapter * Adapter,
u32 PowerIndex,
u8 RFPath,
@@ -73,7 +73,7 @@ u8 Rate
);
u8
-PHY_GetTxPowerIndex_8723B(
+PHY_GetTxPowerIndex(
struct adapter * padapter,
u8 RFPath,
u8 Rate,
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 40313d17a242..fa16139fcce6 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -64,8 +64,6 @@ u16 rtw_recv_select_queue(struct sk_buff *skb);
int rtw_ndev_notifier_register(void);
void rtw_ndev_notifier_unregister(void);
-#include "../os_dep/rtw_proc.h"
-
void rtw_ips_dev_unload(struct adapter *padapter);
int rtw_ips_pwr_up(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index d2616af95ffa..81a9c19ecc6a 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -110,12 +110,12 @@ int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
extern void _rtw_init_queue(struct __queue *pqueue);
-static __inline void thread_enter(char *name)
+static inline void thread_enter(char *name)
{
allow_signal(SIGTERM);
}
-__inline static void flush_signals_thread(void)
+static inline void flush_signals_thread(void)
{
if (signal_pending (current))
{
@@ -125,7 +125,7 @@ __inline static void flush_signals_thread(void)
#define rtw_warn_on(condition) WARN_ON(condition)
-__inline static int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *parg4)
+static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *parg4)
{
int ret = true;
@@ -136,7 +136,7 @@ __inline static int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *p
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
-__inline static u32 _RND4(u32 sz)
+static inline u32 _RND4(u32 sz)
{
u32 val;
@@ -147,7 +147,7 @@ __inline static u32 _RND4(u32 sz)
}
-__inline static u32 _RND8(u32 sz)
+static inline u32 _RND8(u32 sz)
{
u32 val;
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 2f1b51e614fb..c582ede1ac12 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -64,12 +64,12 @@
typedef struct work_struct _workitem;
-__inline static struct list_head *get_next(struct list_head *list)
+static inline struct list_head *get_next(struct list_head *list)
{
return list->next;
}
-__inline static struct list_head *get_list_head(struct __queue *queue)
+static inline struct list_head *get_list_head(struct __queue *queue)
{
return (&(queue->queue));
}
@@ -78,28 +78,28 @@ __inline static struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
container_of(ptr, type, member)
-__inline static void _set_timer(_timer *ptimer, u32 delay_time)
+static inline void _set_timer(_timer *ptimer, u32 delay_time)
{
mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
}
-__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
+static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
{
del_timer_sync(ptimer);
*bcancelled = true;/* true == 1; false == 0 */
}
-__inline static void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
+static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
INIT_WORK(pwork, pfunc);
}
-__inline static void _set_workitem(_workitem *pwork)
+static inline void _set_workitem(_workitem *pwork)
{
schedule_work(pwork);
}
-__inline static void _cancel_workitem_sync(_workitem *pwork)
+static inline void _cancel_workitem_sync(_workitem *pwork)
{
cancel_work_sync(pwork);
}
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
index 216d9492575e..22fc5d730d7b 100644
--- a/drivers/staging/rtl8723bs/include/rtw_debug.h
+++ b/drivers/staging/rtl8723bs/include/rtw_debug.h
@@ -267,81 +267,4 @@ void mac_reg_dump(void *sel, struct adapter *adapter);
void bb_reg_dump(void *sel, struct adapter *adapter);
void rf_reg_dump(void *sel, struct adapter *adapter);
-#ifdef PROC_DEBUG
-ssize_t proc_set_write_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_read_reg(struct seq_file *m, void *v);
-ssize_t proc_set_read_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_fwstate(struct seq_file *m, void *v);
-int proc_get_sec_info(struct seq_file *m, void *v);
-int proc_get_mlmext_state(struct seq_file *m, void *v);
-
-int proc_get_roam_flags(struct seq_file *m, void *v);
-ssize_t proc_set_roam_flags(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_roam_param(struct seq_file *m, void *v);
-ssize_t proc_set_roam_param(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-ssize_t proc_set_roam_tgt_addr(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_qos_option(struct seq_file *m, void *v);
-int proc_get_ht_option(struct seq_file *m, void *v);
-int proc_get_rf_info(struct seq_file *m, void *v);
-int proc_get_survey_info(struct seq_file *m, void *v);
-int proc_get_ap_info(struct seq_file *m, void *v);
-int proc_get_adapter_state(struct seq_file *m, void *v);
-int proc_get_trx_info(struct seq_file *m, void *v);
-int proc_get_rate_ctl(struct seq_file *m, void *v);
-ssize_t proc_set_rate_ctl(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_suspend_resume_info(struct seq_file *m, void *v);
-
-ssize_t proc_set_fwdl_test_case(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-ssize_t proc_set_wait_hiq_empty(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_all_sta_info(struct seq_file *m, void *v);
-
-int proc_get_rx_signal(struct seq_file *m, void *v);
-ssize_t proc_set_rx_signal(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_hw_status(struct seq_file *m, void *v);
-
-int proc_get_ht_enable(struct seq_file *m, void *v);
-ssize_t proc_set_ht_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_bw_mode(struct seq_file *m, void *v);
-ssize_t proc_set_bw_mode(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_ampdu_enable(struct seq_file *m, void *v);
-ssize_t proc_set_ampdu_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_rx_ampdu(struct seq_file *m, void *v);
-ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_rx_stbc(struct seq_file *m, void *v);
-ssize_t proc_set_rx_stbc(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_en_fwps(struct seq_file *m, void *v);
-ssize_t proc_set_en_fwps(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-/* int proc_get_two_path_rssi(struct seq_file *m, void *v); */
-int proc_get_rssi_disp(struct seq_file *m, void *v);
-ssize_t proc_set_rssi_disp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_btcoex_dbg(struct seq_file *m, void *v);
-ssize_t proc_set_btcoex_dbg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_btcoex_info(struct seq_file *m, void *v);
-
-int proc_get_odm_dbg_comp(struct seq_file *m, void *v);
-ssize_t proc_set_odm_dbg_comp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-int proc_get_odm_dbg_level(struct seq_file *m, void *v);
-ssize_t proc_set_odm_dbg_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-int proc_get_odm_adaptivity(struct seq_file *m, void *v);
-ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-
-#ifdef CONFIG_DBG_COUNTER
-int proc_get_rx_logs(struct seq_file *m, void *v);
-int proc_get_tx_logs(struct seq_file *m, void *v);
-int proc_get_int_logs(struct seq_file *m, void *v);
-#endif
-
-#endif /* PROC_DEBUG */
-
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index d3c07d1c36e9..362737b83c3a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -81,15 +81,13 @@ enum dot11AuthAlgrthmNum {
};
/* Scan type including active and passive scan. */
-typedef enum _RT_SCAN_TYPE
-{
+typedef enum _RT_SCAN_TYPE {
SCAN_PASSIVE,
SCAN_ACTIVE,
SCAN_MIX,
}RT_SCAN_TYPE, *PRT_SCAN_TYPE;
-enum _BAND
-{
+enum _BAND {
GHZ24_50 = 0,
GHZ_50,
GHZ_24,
@@ -498,13 +496,13 @@ extern sint rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
extern sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, sint keyid, u8 set_tx, bool enqueue);
extern sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
-__inline static u8 *get_bssid(struct mlme_priv *pmlmepriv)
+static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress => bssid */
/* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress => ibss mac address */
return pmlmepriv->cur_network.network.MacAddress;
}
-__inline static sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
if (pmlmepriv->fw_state & state)
return true;
@@ -512,7 +510,7 @@ __inline static sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
return false;
}
-__inline static sint get_fwstate(struct mlme_priv *pmlmepriv)
+static inline sint get_fwstate(struct mlme_priv *pmlmepriv)
{
return pmlmepriv->fw_state;
}
@@ -524,7 +522,7 @@ __inline static sint get_fwstate(struct mlme_priv *pmlmepriv)
* ### NOTE:#### (!!!!)
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
*/
-__inline static void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
@@ -533,7 +531,7 @@ __inline static void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
}
}
-__inline static void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
+static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
@@ -546,7 +544,7 @@ __inline static void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
* No Limit on the calling context,
* therefore set it to be the critical section...
*/
-__inline static void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
+static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
{
spin_lock_bh(&pmlmepriv->lock);
if (check_fwstate(pmlmepriv, state) == true)
@@ -554,7 +552,7 @@ __inline static void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
spin_unlock_bh(&pmlmepriv->lock);
}
-__inline static void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val)
+static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val)
{
spin_lock_bh(&pmlmepriv->lock);
pmlmepriv->num_of_scanned = val;
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 733bb9425448..fd3cf955c9f8 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -535,7 +535,7 @@ struct mlme_ext_priv
};
void init_mlme_default_rate_set(struct adapter *padapter);
-int init_mlme_ext_priv(struct adapter *padapter);
+void init_mlme_ext_priv(struct adapter *padapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
extern void init_mlme_ext_timer(struct adapter *padapter);
@@ -650,7 +650,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char* MacAddr, unsi
void report_add_sta_event(struct adapter *padapter, unsigned char* MacAddr, int cam_idx);
void report_wmm_edca_update(struct adapter *padapter);
-void beacon_timing_control(struct adapter *padapter);
u8 chk_bmc_sleepq_cmd(struct adapter *padapter);
extern u8 set_tx_beacon_cmd(struct adapter *padapter);
unsigned int setup_beacon_frame(struct adapter *padapter, unsigned char *beacon_frame);
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 5de946e66302..012d8f54814f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -405,7 +405,7 @@ struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
-__inline static u8 *get_rxmem(union recv_frame *precvframe)
+static inline u8 *get_rxmem(union recv_frame *precvframe)
{
/* always return rx_head... */
if (precvframe == NULL)
@@ -414,7 +414,7 @@ __inline static u8 *get_rxmem(union recv_frame *precvframe)
return precvframe->u.hdr.rx_head;
}
-__inline static u8 *get_recvframe_data(union recv_frame *precvframe)
+static inline u8 *get_recvframe_data(union recv_frame *precvframe)
{
/* always return rx_data */
@@ -425,7 +425,7 @@ __inline static u8 *get_recvframe_data(union recv_frame *precvframe)
}
-__inline static u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
{
/* rx_data += sz; move rx_data sz bytes hereafter */
@@ -450,7 +450,7 @@ __inline static u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
}
-__inline static u8 *recvframe_put(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_put(union recv_frame *precvframe, sint sz)
{
/* rx_tail += sz; move rx_tail sz bytes hereafter */
@@ -479,7 +479,7 @@ __inline static u8 *recvframe_put(union recv_frame *precvframe, sint sz)
-__inline static u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
+static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
{
/* rmv data from rx_tail (by yitsen) */
@@ -503,7 +503,7 @@ __inline static u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
}
-__inline static union recv_frame *rxmem_to_recvframe(u8 *rxmem)
+static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
{
/* due to the design of 2048 bytes alignment of recv_frame, we can reference the union recv_frame */
/* from any given member of recv_frame. */
@@ -513,13 +513,13 @@ __inline static union recv_frame *rxmem_to_recvframe(u8 *rxmem)
}
-__inline static sint get_recvframe_len(union recv_frame *precvframe)
+static inline sint get_recvframe_len(union recv_frame *precvframe)
{
return precvframe->u.hdr.len;
}
-__inline static s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
+static inline s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
{
s32 SignalPower; /* in dBm. */
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index b9df42d0677e..3acce5630f8e 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -348,7 +348,7 @@ struct sta_priv {
};
-__inline static u32 wifi_mac_hash(u8 *mac)
+static inline u32 wifi_mac_hash(u8 *mac)
{
u32 x;
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index 8c50bbb20f3b..2faf83704ff0 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -347,7 +347,7 @@ enum WIFI_REG_DOMAIN {
(addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
)
-__inline static int IS_MCAST(unsigned char *da)
+static inline int IS_MCAST(unsigned char *da)
{
if ((*da) & 0x01)
return true;
@@ -355,20 +355,20 @@ __inline static int IS_MCAST(unsigned char *da)
return false;
}
-__inline static unsigned char * get_ra(unsigned char *pframe)
+static inline unsigned char * get_ra(unsigned char *pframe)
{
unsigned char *ra;
ra = GetAddr1Ptr(pframe);
return ra;
}
-__inline static unsigned char * get_ta(unsigned char *pframe)
+static inline unsigned char * get_ta(unsigned char *pframe)
{
unsigned char *ta;
ta = GetAddr2Ptr(pframe);
return ta;
}
-__inline static unsigned char * get_da(unsigned char *pframe)
+static inline unsigned char * get_da(unsigned char *pframe)
{
unsigned char *da;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -392,7 +392,7 @@ __inline static unsigned char * get_da(unsigned char *pframe)
}
-__inline static unsigned char * get_sa(unsigned char *pframe)
+static inline unsigned char * get_sa(unsigned char *pframe)
{
unsigned char *sa;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -415,7 +415,7 @@ __inline static unsigned char * get_sa(unsigned char *pframe)
return sa;
}
-__inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
+static inline unsigned char * get_hdr_bssid(unsigned char *pframe)
{
unsigned char *sa = NULL;
unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
@@ -439,7 +439,7 @@ __inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
}
-__inline static int IsFrameTypeCtrl(unsigned char *pframe)
+static inline int IsFrameTypeCtrl(unsigned char *pframe)
{
if (WIFI_CTRL_TYPE == GetFrameType(pframe))
return true;
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
index 88890b1c3c4c..723fc5b546ef 100644
--- a/drivers/staging/rtl8723bs/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -223,7 +223,7 @@ struct wlan_bssid_ex {
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability information) */
} __packed;
-__inline static uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
{
return (sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength);
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 9bc685632651..f819abb756dc 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -19,8 +19,6 @@
#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 5000 /* ms */
#define RTW_MAX_NUM_PMKIDS 4
-#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
-
static const u32 rtw_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -2024,8 +2022,6 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- padapter->mlmepriv.not_indic_disco = true;
-
old_type = rtw_wdev->iftype;
rtw_set_to_roam(padapter, 0);
@@ -2047,8 +2043,6 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
}
leave_ibss:
- padapter->mlmepriv.not_indic_disco = false;
-
return 0;
}
@@ -2246,8 +2240,6 @@ static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- padapter->mlmepriv.not_indic_disco = true;
-
rtw_set_to_roam(padapter, 0);
rtw_scan_abort(padapter);
@@ -2261,8 +2253,6 @@ static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
rtw_free_assoc_resources(padapter, 1);
rtw_pwr_wakeup(padapter);
- padapter->mlmepriv.not_indic_disco = false;
-
DBG_871X(FUNC_NDEV_FMT" return 0\n", FUNC_NDEV_ARG(ndev));
return 0;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 99e6b1028f71..d1b199e3e5bd 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -21,13 +21,10 @@
#define RATE_COUNT 4
/* combo scan */
-#define WEXT_CSCAN_AMOUNT 9
-#define WEXT_CSCAN_BUF_LEN 360
#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
#define WEXT_CSCAN_HEADER_SIZE 12
#define WEXT_CSCAN_SSID_SECTION 'S'
#define WEXT_CSCAN_CHANNEL_SECTION 'C'
-#define WEXT_CSCAN_NPROBE_SECTION 'N'
#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
@@ -215,8 +212,6 @@ static char *translate_scan(struct adapter *padapter,
} else if (ht_cap) {
if (mcs_rate&0x8000) { /* MCS15 */
max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
- } else if (mcs_rate&0x0080) { /* MCS7 */
- max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
} else { /* default MCS7 */
/* DBG_871X("wx_get_scan, mcs_rate_bitmap = 0x%x\n", mcs_rate); */
max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
@@ -4912,7 +4907,6 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
s32 k;
const iw_handler *priv; /* Private ioctl */
const struct iw_priv_args *priv_args; /* Private ioctl description */
- u32 num_priv; /* Number of ioctl */
u32 num_priv_args; /* Number of descriptions */
iw_handler handler;
int temp;
@@ -4948,7 +4942,6 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
priv = rtw_private_handler;
priv_args = rtw_private_args;
- num_priv = ARRAY_SIZE(rtw_private_handler);
num_priv_args = ARRAY_SIZE(rtw_private_args);
if (num_priv_args == 0) {
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 544e799d0a03..ec3a75485233 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -239,9 +239,6 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->channel = (u8)rtw_channel;
registry_par->wireless_mode = (u8)rtw_wireless_mode;
- if (registry_par->channel > 14)
- registry_par->channel = 1;
-
registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
registry_par->vcs_type = (u8)rtw_vcs_type;
registry_par->rts_thresh = (u16)rtw_rts_thresh;
@@ -448,12 +445,6 @@ static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state
DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev), state);
- switch (state) {
- case NETDEV_CHANGENAME:
- rtw_adapter_proc_replace(dev);
- break;
- }
-
return NOTIFY_DONE;
}
@@ -478,7 +469,6 @@ static int rtw_ndev_init(struct net_device *dev)
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
strncpy(adapter->old_ifname, dev->name, IFNAMSIZ);
- rtw_adapter_proc_init(dev);
return 0;
}
@@ -488,7 +478,6 @@ static void rtw_ndev_uninit(struct net_device *dev)
struct adapter *adapter = rtw_netdev_priv(dev);
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
- rtw_adapter_proc_deinit(dev);
}
static const struct net_device_ops rtw_netdev_ops = {
@@ -768,11 +757,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
goto exit;
}
- if (init_mlme_ext_priv(padapter) == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n"));
- ret8 = _FAIL;
- goto exit;
- }
+ init_mlme_ext_priv(padapter);
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
DBG_871X("Can't _rtw_init_xmit_priv\n");
@@ -1361,13 +1346,12 @@ void rtw_suspend_wow(struct adapter *padapter)
#endif /* ifdef CONFIG_WOWLAN */
#ifdef CONFIG_AP_WOWLAN
-int rtw_suspend_ap_wow(struct adapter *padapter)
+void rtw_suspend_ap_wow(struct adapter *padapter)
{
u8 ch, bw, offset;
struct net_device *pnetdev = padapter->pnetdev;
struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
struct wowlan_ioctl_param poidparam;
- int ret = _SUCCESS;
DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
@@ -1409,7 +1393,6 @@ int rtw_suspend_ap_wow(struct adapter *padapter)
rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, 0, "AP-WOWLAN");
DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
- return ret;
}
#endif /* ifdef CONFIG_AP_WOWLAN */
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 62fdd24ba427..25a80041ce87 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -318,13 +318,9 @@ error:
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
- u32 ori_len;
-
if (!buf || !buf_len)
return;
- ori_len = *buf_len;
-
if (*buf) {
*buf_len = 0;
kfree(*buf);
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
deleted file mode 100644
index 5f950fda48ea..000000000000
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ /dev/null
@@ -1,779 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtw_debug.h>
-#include "rtw_proc.h"
-
-#ifdef PROC_DEBUG
-
-static struct proc_dir_entry *rtw_proc;
-
-#define RTW_PROC_NAME "rtl8723bs"
-
-#define get_proc_net init_net.proc_net
-
-inline struct proc_dir_entry *rtw_proc_create_dir(const char *name, struct proc_dir_entry *parent, void *data)
-{
- struct proc_dir_entry *entry;
-
- entry = proc_mkdir_data(name, S_IRUGO|S_IXUGO, parent, data);
-
- return entry;
-}
-
-inline struct proc_dir_entry *rtw_proc_create_entry(const char *name, struct proc_dir_entry *parent,
- const struct file_operations *fops, void *data)
-{
- struct proc_dir_entry *entry;
-
- entry = proc_create_data(name, S_IFREG|S_IRUGO, parent, fops, data);
-
- return entry;
-}
-
-static int proc_get_dummy(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-static int proc_get_drv_version(struct seq_file *m, void *v)
-{
- dump_drv_version(m);
- return 0;
-}
-
-static int proc_get_log_level(struct seq_file *m, void *v)
-{
- dump_log_level(m);
- return 0;
-}
-
-static ssize_t proc_set_log_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- char tmp[32];
- int log_level;
-
- if (count < 1)
- return -EINVAL;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- sscanf(tmp, "%d ", &log_level);
- if (log_level >= _drv_always_ && log_level <= _drv_debug_) {
- GlobalDebugLevel = log_level;
- printk("%d\n", GlobalDebugLevel);
- }
- } else {
- return -EFAULT;
- }
-
- return count;
-}
-
-/*
-* rtw_drv_proc:
-* init/deinit when register/unregister driver
-*/
-static const struct rtw_proc_hdl drv_proc_hdls[] = {
- {"ver_info", proc_get_drv_version, NULL},
- {"log_level", proc_get_log_level, proc_set_log_level},
-};
-
-static const int drv_proc_hdls_num = sizeof(drv_proc_hdls) / sizeof(struct rtw_proc_hdl);
-
-static int rtw_drv_proc_open(struct inode *inode, struct file *file)
-{
- /* struct net_device *dev = proc_get_parent_data(inode); */
- ssize_t index = (ssize_t)PDE_DATA(inode);
- const struct rtw_proc_hdl *hdl = drv_proc_hdls+index;
-
- return single_open(file, hdl->show, NULL);
-}
-
-static ssize_t rtw_drv_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
-{
- ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
- const struct rtw_proc_hdl *hdl = drv_proc_hdls+index;
- ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
-
- if (write)
- return write(file, buffer, count, pos, NULL);
-
- return -EROFS;
-}
-
-static const struct file_operations rtw_drv_proc_fops = {
- .owner = THIS_MODULE,
- .open = rtw_drv_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = rtw_drv_proc_write,
-};
-
-int rtw_drv_proc_init(void)
-{
- int ret = _FAIL;
- ssize_t i;
- struct proc_dir_entry *entry = NULL;
-
- if (rtw_proc) {
- rtw_warn_on(1);
- goto exit;
- }
-
- rtw_proc = rtw_proc_create_dir(RTW_PROC_NAME, get_proc_net, NULL);
-
- if (!rtw_proc) {
- rtw_warn_on(1);
- goto exit;
- }
-
- for (i = 0; i < drv_proc_hdls_num; i++) {
- entry = rtw_proc_create_entry(drv_proc_hdls[i].name, rtw_proc, &rtw_drv_proc_fops, (void *)i);
- if (!entry) {
- rtw_warn_on(1);
- goto exit;
- }
- }
-
- ret = _SUCCESS;
-
-exit:
- return ret;
-}
-
-void rtw_drv_proc_deinit(void)
-{
- int i;
-
- if (!rtw_proc)
- return;
-
- for (i = 0; i < drv_proc_hdls_num; i++)
- remove_proc_entry(drv_proc_hdls[i].name, rtw_proc);
-
- remove_proc_entry(RTW_PROC_NAME, get_proc_net);
- rtw_proc = NULL;
-}
-
-static int proc_get_sd_f0_reg_dump(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- sd_f0_reg_dump(m, adapter);
-
- return 0;
-}
-
-static int proc_get_mac_reg_dump(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- mac_reg_dump(m, adapter);
-
- return 0;
-}
-
-static int proc_get_bb_reg_dump(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- bb_reg_dump(m, adapter);
-
- return 0;
-}
-
-static int proc_get_rf_reg_dump(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- rf_reg_dump(m, adapter);
-
- return 0;
-}
-static int proc_get_linked_info_dump(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- if (padapter)
- DBG_871X_SEL_NL(m, "linked_info_dump :%s\n", (padapter->bLinkInfoDump)?"enable":"disable");
-
- return 0;
-}
-
-static ssize_t proc_set_linked_info_dump(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- char tmp[2];
- int mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (padapter) {
- /* padapter->bLinkInfoDump = mode; */
- /* DBG_871X("linked_info_dump =%s\n", (padapter->bLinkInfoDump)?"enable":"disable"); */
- linked_info_dump(padapter, mode);
- }
-
- }
-
- return count;
-
-}
-
-static int proc_get_rx_info(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct dvobj_priv *psdpriv = padapter->dvobj;
- struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
-
- /* Counts of packets whose seq_num is less than preorder_ctrl->indicate_seq, Ex delay, retransmission, redundant packets and so on */
- DBG_871X_SEL_NL(m,"Counts of Packets Whose Seq_Num Less Than Reorder Control Seq_Num: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_drop_count);
- /* How many times the Rx Reorder Timer is triggered. */
- DBG_871X_SEL_NL(m,"Rx Reorder Time-out Trigger Counts: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_forced_indicate_count);
- /* Total counts of packets loss */
- DBG_871X_SEL_NL(m,"Rx Packet Loss Counts: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_loss_count);
- DBG_871X_SEL_NL(m,"Duplicate Management Frame Drop Count: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_dup_mgt_frame_drop_count);
- DBG_871X_SEL_NL(m,"AMPDU BA window shift Count: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_window_shift_cnt);
- return 0;
-}
-
-
-static ssize_t proc_reset_rx_info(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct dvobj_priv *psdpriv = padapter->dvobj;
- struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- char cmd[32];
- if (buffer && !copy_from_user(cmd, buffer, sizeof(cmd))) {
- if ('0' == cmd[0]) {
- pdbgpriv->dbg_rx_ampdu_drop_count = 0;
- pdbgpriv->dbg_rx_ampdu_forced_indicate_count = 0;
- pdbgpriv->dbg_rx_ampdu_loss_count = 0;
- pdbgpriv->dbg_rx_dup_mgt_frame_drop_count = 0;
- pdbgpriv->dbg_rx_ampdu_window_shift_cnt = 0;
- }
- }
-
- return count;
-}
-
-static int proc_get_cam(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter;
-
- char tmp[32];
- char cmd[5];
- u8 id;
-
- adapter = (struct adapter *)rtw_netdev_priv(dev);
- if (!adapter)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- /* c <id>: clear specific cam entry */
- /* wfc <id>: write specific cam entry from cam cache */
-
- int num = sscanf(tmp, "%4s %hhu", cmd, &id);
-
- if (num < 2)
- return count;
- if (id >= TOTAL_CAM_ENTRY)
- return -EINVAL;
-
- if (strcmp("c", cmd) == 0) {
- _clear_cam_entry(adapter, id);
- adapter->securitypriv.hw_decrypted = false; /* temporarily set this for TX path to use SW enc */
- } else if (strcmp("wfc", cmd) == 0) {
- write_cam_from_cache(adapter, id);
- }
- }
-
- return count;
-}
-
-static int proc_get_cam_cache(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- u8 i;
-
- DBG_871X_SEL_NL(m, "cam bitmap:0x%016llx\n", dvobj->cam_ctl.bitmap);
-
- DBG_871X_SEL_NL(m, "%-2s %-6s %-17s %-32s %-3s %-7s"
- /* %-2s %-2s %-4s %-5s" */
- "\n"
- , "id", "ctrl", "addr", "key", "kid", "type"
- /* "MK", "GK", "MFB", "valid" */
- );
-
- for (i = 0; i < 32; i++) {
- if (dvobj->cam_cache[i].ctrl != 0)
- DBG_871X_SEL_NL(m, "%2u 0x%04x "MAC_FMT" "KEY_FMT" %3u %-7s"
- /* %2u %2u 0x%02x %5u" */
- "\n", i
- , dvobj->cam_cache[i].ctrl
- , MAC_ARG(dvobj->cam_cache[i].mac)
- , KEY_ARG(dvobj->cam_cache[i].key)
- , (dvobj->cam_cache[i].ctrl)&0x03
- , security_type_str(((dvobj->cam_cache[i].ctrl)>>2)&0x07)
- /* ((dvobj->cam_cache[i].ctrl)>>5)&0x01 */
- /* ((dvobj->cam_cache[i].ctrl)>>6)&0x01 */
- /* ((dvobj->cam_cache[i].ctrl)>>8)&0x7f */
- /* ((dvobj->cam_cache[i].ctrl)>>15)&0x01 */
- );
- }
-
- return 0;
-}
-
-/*
-* rtw_adapter_proc:
-* init/deinit when register/unregister net_device
-*/
-static const struct rtw_proc_hdl adapter_proc_hdls[] = {
- {"write_reg", proc_get_dummy, proc_set_write_reg},
- {"read_reg", proc_get_read_reg, proc_set_read_reg},
- {"fwstate", proc_get_fwstate, NULL},
- {"sec_info", proc_get_sec_info, NULL},
- {"mlmext_state", proc_get_mlmext_state, NULL},
- {"qos_option", proc_get_qos_option, NULL},
- {"ht_option", proc_get_ht_option, NULL},
- {"rf_info", proc_get_rf_info, NULL},
- {"survey_info", proc_get_survey_info, NULL},
- {"ap_info", proc_get_ap_info, NULL},
- {"adapter_state", proc_get_adapter_state, NULL},
- {"trx_info", proc_get_trx_info, NULL},
- {"rate_ctl", proc_get_rate_ctl, proc_set_rate_ctl},
- {"cam", proc_get_cam, proc_set_cam},
- {"cam_cache", proc_get_cam_cache, NULL},
- {"suspend_info", proc_get_suspend_resume_info, NULL},
- {"rx_info", proc_get_rx_info, proc_reset_rx_info},
-
- {"roam_flags", proc_get_roam_flags, proc_set_roam_flags},
- {"roam_param", proc_get_roam_param, proc_set_roam_param},
- {"roam_tgt_addr", proc_get_dummy, proc_set_roam_tgt_addr},
-
- {"sd_f0_reg_dump", proc_get_sd_f0_reg_dump, NULL},
-
- {"fwdl_test_case", proc_get_dummy, proc_set_fwdl_test_case},
- {"wait_hiq_empty", proc_get_dummy, proc_set_wait_hiq_empty},
-
- {"mac_reg_dump", proc_get_mac_reg_dump, NULL},
- {"bb_reg_dump", proc_get_bb_reg_dump, NULL},
- {"rf_reg_dump", proc_get_rf_reg_dump, NULL},
-
- {"all_sta_info", proc_get_all_sta_info, NULL},
-
- {"rx_signal", proc_get_rx_signal, proc_set_rx_signal},
- {"hw_info", proc_get_hw_status, NULL},
-
- {"ht_enable", proc_get_ht_enable, proc_set_ht_enable},
- {"bw_mode", proc_get_bw_mode, proc_set_bw_mode},
- {"ampdu_enable", proc_get_ampdu_enable, proc_set_ampdu_enable},
- {"rx_stbc", proc_get_rx_stbc, proc_set_rx_stbc},
- {"rx_ampdu", proc_get_rx_ampdu, proc_set_rx_ampdu},
-
- {"en_fwps", proc_get_en_fwps, proc_set_en_fwps},
-
- /* path_rssi", proc_get_two_path_rssi, NULL}, */
- {"rssi_disp", proc_get_rssi_disp, proc_set_rssi_disp},
-
- {"btcoex_dbg", proc_get_btcoex_dbg, proc_set_btcoex_dbg},
- {"btcoex", proc_get_btcoex_info, NULL},
-
- {"linked_info_dump", proc_get_linked_info_dump, proc_set_linked_info_dump},
-#ifdef CONFIG_DBG_COUNTER
- {"rx_logs", proc_get_rx_logs, NULL},
- {"tx_logs", proc_get_tx_logs, NULL},
- {"int_logs", proc_get_int_logs, NULL},
-#endif
-};
-
-static const int adapter_proc_hdls_num = sizeof(adapter_proc_hdls) / sizeof(struct rtw_proc_hdl);
-
-static int rtw_adapter_proc_open(struct inode *inode, struct file *file)
-{
- ssize_t index = (ssize_t)PDE_DATA(inode);
- const struct rtw_proc_hdl *hdl = adapter_proc_hdls+index;
-
- return single_open(file, hdl->show, proc_get_parent_data(inode));
-}
-
-static ssize_t rtw_adapter_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
-{
- ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
- const struct rtw_proc_hdl *hdl = adapter_proc_hdls+index;
- ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
-
- if (write)
- return write(file, buffer, count, pos, ((struct seq_file *)file->private_data)->private);
-
- return -EROFS;
-}
-
-static const struct file_operations rtw_adapter_proc_fops = {
- .owner = THIS_MODULE,
- .open = rtw_adapter_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = rtw_adapter_proc_write,
-};
-
-int proc_get_odm_dbg_comp(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- rtw_odm_dbg_comp_msg(m, adapter);
-
- return 0;
-}
-
-ssize_t proc_set_odm_dbg_comp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
-
- u64 dbg_comp;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%llx", &dbg_comp);
-
- if (num != 1)
- return count;
-
- rtw_odm_dbg_comp_set(adapter, dbg_comp);
- }
-
- return count;
-}
-
-int proc_get_odm_dbg_level(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- rtw_odm_dbg_level_msg(m, adapter);
-
- return 0;
-}
-
-ssize_t proc_set_odm_dbg_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
-
- u32 dbg_level;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%u", &dbg_level);
-
- if (num != 1)
- return count;
-
- rtw_odm_dbg_level_set(adapter, dbg_level);
- }
-
- return count;
-}
-
-static int proc_get_odm_ability(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
-
- rtw_odm_ability_msg(m, adapter);
-
- return 0;
-}
-
-static ssize_t proc_set_odm_ability(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
-
- u32 ability;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%x", &ability);
-
- if (num != 1)
- return count;
-
- rtw_odm_ability_set(adapter, ability);
- }
-
- return count;
-}
-
-int proc_get_odm_adaptivity(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- rtw_odm_adaptivity_parm_msg(m, padapter);
-
- return 0;
-}
-
-ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 TH_L2H_ini;
- s8 TH_EDCCA_HL_diff;
- u32 IGI_Base;
- int ForceEDCCA;
- u8 AdapEn_RSSI;
- u8 IGI_LowerBound;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
-
- int num = sscanf(tmp, "%x %hhd %x %d %hhu %hhu",
- &TH_L2H_ini, &TH_EDCCA_HL_diff, &IGI_Base, &ForceEDCCA, &AdapEn_RSSI, &IGI_LowerBound);
-
- if (num != 6)
- return count;
-
- rtw_odm_adaptivity_parm_set(padapter, (s8)TH_L2H_ini, TH_EDCCA_HL_diff, (s8)IGI_Base, (bool)ForceEDCCA, AdapEn_RSSI, IGI_LowerBound);
- }
-
- return count;
-}
-
-/*
-* rtw_odm_proc:
-* init/deinit when register/unregister net_device, along with rtw_adapter_proc
-*/
-static const struct rtw_proc_hdl odm_proc_hdls[] = {
- {"dbg_comp", proc_get_odm_dbg_comp, proc_set_odm_dbg_comp},
- {"dbg_level", proc_get_odm_dbg_level, proc_set_odm_dbg_level},
- {"ability", proc_get_odm_ability, proc_set_odm_ability},
- {"adaptivity", proc_get_odm_adaptivity, proc_set_odm_adaptivity},
-};
-
-static const int odm_proc_hdls_num = sizeof(odm_proc_hdls) / sizeof(struct rtw_proc_hdl);
-
-static int rtw_odm_proc_open(struct inode *inode, struct file *file)
-{
- ssize_t index = (ssize_t)PDE_DATA(inode);
- const struct rtw_proc_hdl *hdl = odm_proc_hdls+index;
-
- return single_open(file, hdl->show, proc_get_parent_data(inode));
-}
-
-static ssize_t rtw_odm_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
-{
- ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
- const struct rtw_proc_hdl *hdl = odm_proc_hdls+index;
- ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
-
- if (write)
- return write(file, buffer, count, pos, ((struct seq_file *)file->private_data)->private);
-
- return -EROFS;
-}
-
-static const struct file_operations rtw_odm_proc_fops = {
- .owner = THIS_MODULE,
- .open = rtw_odm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = rtw_odm_proc_write,
-};
-
-static struct proc_dir_entry *rtw_odm_proc_init(struct net_device *dev)
-{
- struct proc_dir_entry *dir_odm = NULL;
- struct proc_dir_entry *entry = NULL;
- struct adapter *adapter = rtw_netdev_priv(dev);
- ssize_t i;
-
- if (!adapter->dir_dev) {
- rtw_warn_on(1);
- goto exit;
- }
-
- if (adapter->dir_odm) {
- rtw_warn_on(1);
- goto exit;
- }
-
- dir_odm = rtw_proc_create_dir("odm", adapter->dir_dev, dev);
- if (!dir_odm) {
- rtw_warn_on(1);
- goto exit;
- }
-
- adapter->dir_odm = dir_odm;
-
- for (i = 0; i < odm_proc_hdls_num; i++) {
- entry = rtw_proc_create_entry(odm_proc_hdls[i].name, dir_odm, &rtw_odm_proc_fops, (void *)i);
- if (!entry) {
- rtw_warn_on(1);
- goto exit;
- }
- }
-
-exit:
- return dir_odm;
-}
-
-static void rtw_odm_proc_deinit(struct adapter *adapter)
-{
- struct proc_dir_entry *dir_odm = NULL;
- int i;
-
- dir_odm = adapter->dir_odm;
-
- if (!dir_odm) {
- rtw_warn_on(1);
- return;
- }
-
- for (i = 0; i < odm_proc_hdls_num; i++)
- remove_proc_entry(odm_proc_hdls[i].name, dir_odm);
-
- remove_proc_entry("odm", adapter->dir_dev);
-
- adapter->dir_odm = NULL;
-}
-
-struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev)
-{
- struct proc_dir_entry *drv_proc = rtw_proc;
- struct proc_dir_entry *dir_dev = NULL;
- struct proc_dir_entry *entry = NULL;
- struct adapter *adapter = rtw_netdev_priv(dev);
- ssize_t i;
-
- if (!drv_proc) {
- rtw_warn_on(1);
- goto exit;
- }
-
- if (adapter->dir_dev) {
- rtw_warn_on(1);
- goto exit;
- }
-
- dir_dev = rtw_proc_create_dir(dev->name, drv_proc, dev);
- if (!dir_dev) {
- rtw_warn_on(1);
- goto exit;
- }
-
- adapter->dir_dev = dir_dev;
-
- for (i = 0; i < adapter_proc_hdls_num; i++) {
- entry = rtw_proc_create_entry(adapter_proc_hdls[i].name, dir_dev, &rtw_adapter_proc_fops, (void *)i);
- if (!entry) {
- rtw_warn_on(1);
- goto exit;
- }
- }
-
- rtw_odm_proc_init(dev);
-
-exit:
- return dir_dev;
-}
-
-void rtw_adapter_proc_deinit(struct net_device *dev)
-{
- struct proc_dir_entry *drv_proc = rtw_proc;
- struct proc_dir_entry *dir_dev = NULL;
- struct adapter *adapter = rtw_netdev_priv(dev);
- int i;
-
- dir_dev = adapter->dir_dev;
-
- if (!dir_dev) {
- rtw_warn_on(1);
- return;
- }
-
- for (i = 0; i < adapter_proc_hdls_num; i++)
- remove_proc_entry(adapter_proc_hdls[i].name, dir_dev);
-
- rtw_odm_proc_deinit(adapter);
-
- remove_proc_entry(dev->name, drv_proc);
-
- adapter->dir_dev = NULL;
-}
-
-void rtw_adapter_proc_replace(struct net_device *dev)
-{
- struct proc_dir_entry *drv_proc = rtw_proc;
- struct proc_dir_entry *dir_dev = NULL;
- struct adapter *adapter = rtw_netdev_priv(dev);
- int i;
-
- dir_dev = adapter->dir_dev;
-
- if (!dir_dev) {
- rtw_warn_on(1);
- return;
- }
-
- for (i = 0; i < adapter_proc_hdls_num; i++)
- remove_proc_entry(adapter_proc_hdls[i].name, dir_dev);
-
- rtw_odm_proc_deinit(adapter);
-
- remove_proc_entry(adapter->old_ifname, drv_proc);
-
- adapter->dir_dev = NULL;
-
- rtw_adapter_proc_init(dev);
-
-}
-
-#endif /* PROC_DEBUG */
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.h b/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
deleted file mode 100644
index c7e6f62b61ef..000000000000
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_PROC_H__
-#define __RTW_PROC_H__
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-struct rtw_proc_hdl {
- char *name;
- int (*show)(struct seq_file *, void *);
- ssize_t (*write)(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
-};
-
-#ifdef PROC_DEBUG
-
-int rtw_drv_proc_init(void);
-void rtw_drv_proc_deinit(void);
-struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev);
-void rtw_adapter_proc_deinit(struct net_device *dev);
-void rtw_adapter_proc_replace(struct net_device *dev);
-
-#else //!PROC_DEBUG
-
-static inline int rtw_drv_proc_init(void) {return 0;}
-static inline void rtw_drv_proc_deinit(void) {}
-static inline struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev){return NULL;}
-static inline void rtw_adapter_proc_deinit(struct net_device *dev){}
-static inline void rtw_adapter_proc_replace(struct net_device *dev){}
-
-#endif //!PROC_DEBUG
-
-#endif //__RTW_PROC_H__
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 540a7eed621d..d3784c44f6d0 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -371,7 +371,7 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
rtw_hal_chip_configure(padapter);
- hal_btcoex_Initialize(padapter);
+ hal_btcoex_Initialize((void *) padapter);
/* 3 6. read efuse/eeprom data */
rtw_hal_read_chip_info(padapter);
@@ -620,12 +620,10 @@ static int __init rtw_drv_entry(void)
#endif /* BTCOEXVERSION */
sdio_drvpriv.drv_registered = true;
- rtw_drv_proc_init();
ret = sdio_register_driver(&sdio_drvpriv.r871xs_drv);
if (ret != 0) {
sdio_drvpriv.drv_registered = false;
- rtw_drv_proc_deinit();
rtw_ndev_notifier_unregister();
DBG_871X("%s: register driver failed!!(%d)\n", __func__, ret);
goto exit;
@@ -646,7 +644,6 @@ static void __exit rtw_drv_halt(void)
sdio_unregister_driver(&sdio_drvpriv.r871xs_drv);
- rtw_drv_proc_deinit();
rtw_ndev_notifier_unregister();
DBG_871X_LEVEL(_drv_always_, "module exit success\n");
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index aa2f62acc994..578b9f734231 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -33,11 +33,6 @@
REG_RULE(2467 - 10, 2472 + 10, 40, 0, 20, \
NL80211_RRF_PASSIVE_SCAN)
-/* 2G chan 14, PASSIVS SCAN, NO OFDM (B only) */
-#define RTW_2GHZ_CH14 \
- REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, \
- NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
-
static const struct ieee80211_regdomain rtw_regdom_rd = {
.n_reg_rules = 3,
.alpha2 = "99",
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 1128eec3bd08..e853fa9cc950 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -3842,7 +3842,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- int retval = STATUS_FAIL;
+ int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
u8 *buf = NULL;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 8277d7895608..561851cc8780 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -393,10 +393,9 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
*offset = 0;
*index = *index + 1;
}
- if ((i == (sg_cnt - 1)) || !resid)
- option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
- else
- option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ if ((i == sg_cnt - 1) || !resid)
+ option |= RTSX_SG_END;
rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
@@ -541,10 +540,9 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
(unsigned int)addr, len);
+ option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
if (j == (sg_cnt - 1))
- option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA;
- else
- option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
+ option |= RTSX_SG_END;
rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index a06045344301..25c31496757e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -2573,17 +2573,13 @@ SD_UNLOCK_ENTRY:
retval = sd_sdr_tuning(chip);
if (retval != STATUS_SUCCESS) {
- if (sd20_mode) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
goto status_fail;
- } else {
- retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS)
- goto status_fail;
- try_sdio = false;
- sd20_mode = true;
- goto switch_fail;
- }
+ try_sdio = false;
+ sd20_mode = true;
+ goto switch_fail;
}
sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
@@ -2598,17 +2594,13 @@ SD_UNLOCK_ENTRY:
if (read_lba0) {
retval = sd_read_lba0(chip);
if (retval != STATUS_SUCCESS) {
- if (sd20_mode) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
goto status_fail;
- } else {
- retval = sd_init_power(chip);
- if (retval != STATUS_SUCCESS)
- goto status_fail;
- try_sdio = false;
- sd20_mode = true;
- goto switch_fail;
- }
+ try_sdio = false;
+ sd20_mode = true;
+ goto switch_fail;
}
}
}
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 482c1c6ba422..64ef4d258a91 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
- * All rights are reserved. Reproduction or in part is prohibited
- * without the written consent of the copyright owner.
- *
* RegSC.h --- SM718 SDK
* This file contains the definitions for the System Configuration registers.
*/
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 5c0ac747ea2b..0ef8d4ff2ef9 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
- * All rights are reserved. Reproduction or in part is prohibited
- * without the written consent of the copyright owner.
- *
* swi2c.c --- SM750/SM718 DDK
* This file contains the source code for I2C using software
* implementation.
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index 5868feea791b..dfa166060da7 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -2,9 +2,6 @@
/*
* Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
*
- * All rights are reserved. Reproduction or in part is prohibited
- * without the written consent of the copyright owner.
- *
* swi2c.h --- SM750/SM718 DDK
* This file contains the definitions for i2c using software
* implementation.
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 9d4f1dab0968..40dd573e73c3 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1750,7 +1750,8 @@ static int visornic_poll(struct napi_struct *napi, int budget)
}
/* poll_for_irq - checks the status of the response queue
- * @v: Void pointer to the visronic devdata struct.
+ * @t: pointer to the 'struct timer_list' from which we can retrieve
+ * the visornic devdata struct.
*
* Main function of the vnic_incoming thread. Periodically check the response
* queue and drain it if needed.
diff --git a/drivers/uwb/Kconfig b/drivers/staging/uwb/Kconfig
index 259e053e1e09..259e053e1e09 100644
--- a/drivers/uwb/Kconfig
+++ b/drivers/staging/uwb/Kconfig
diff --git a/drivers/uwb/Makefile b/drivers/staging/uwb/Makefile
index 32f4de7afbd6..32f4de7afbd6 100644
--- a/drivers/uwb/Makefile
+++ b/drivers/staging/uwb/Makefile
diff --git a/drivers/staging/uwb/TODO b/drivers/staging/uwb/TODO
new file mode 100644
index 000000000000..abae57000534
--- /dev/null
+++ b/drivers/staging/uwb/TODO
@@ -0,0 +1,8 @@
+TODO: Remove in late 2019 unless there are users
+
+There do not seem to be any real wireless USB devices anywhere in the wild
+anymore. It turned out to be a failed technology :(
+
+This will be removed from the tree if no one objects.
+
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/uwb/address.c b/drivers/staging/uwb/address.c
index 857d5cd56a95..857d5cd56a95 100644
--- a/drivers/uwb/address.c
+++ b/drivers/staging/uwb/address.c
diff --git a/drivers/uwb/allocator.c b/drivers/staging/uwb/allocator.c
index 2e1590124d5f..1f429fba20b7 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/staging/uwb/allocator.c
@@ -6,7 +6,7 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/uwb.h>
+#include "uwb.h"
#include "uwb-internal.h"
diff --git a/drivers/uwb/beacon.c b/drivers/staging/uwb/beacon.c
index c483c19c5ef8..c483c19c5ef8 100644
--- a/drivers/uwb/beacon.c
+++ b/drivers/staging/uwb/beacon.c
diff --git a/drivers/uwb/driver.c b/drivers/staging/uwb/driver.c
index 5755c2e49ffc..5755c2e49ffc 100644
--- a/drivers/uwb/driver.c
+++ b/drivers/staging/uwb/driver.c
diff --git a/drivers/uwb/drp-avail.c b/drivers/staging/uwb/drp-avail.c
index 02392ab82a7d..02392ab82a7d 100644
--- a/drivers/uwb/drp-avail.c
+++ b/drivers/staging/uwb/drp-avail.c
diff --git a/drivers/uwb/drp-ie.c b/drivers/staging/uwb/drp-ie.c
index 4b545b41161c..b2a862cf76de 100644
--- a/drivers/uwb/drp-ie.c
+++ b/drivers/staging/uwb/drp-ie.c
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
-#include <linux/uwb.h>
+#include "uwb.h"
#include "uwb-internal.h"
diff --git a/drivers/uwb/drp.c b/drivers/staging/uwb/drp.c
index 869987bede7b..869987bede7b 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/staging/uwb/drp.c
diff --git a/drivers/uwb/est.c b/drivers/staging/uwb/est.c
index d4141ffdd775..d4141ffdd775 100644
--- a/drivers/uwb/est.c
+++ b/drivers/staging/uwb/est.c
diff --git a/drivers/uwb/hwa-rc.c b/drivers/staging/uwb/hwa-rc.c
index cd03b7f827c1..b6effad749d7 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/staging/uwb/hwa-rc.c
@@ -38,9 +38,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/usb/wusb.h>
-#include <linux/usb/wusb-wa.h>
-#include <linux/uwb.h>
+#include "../wusbcore/include/wusb.h"
+#include "../wusbcore/include/wusb-wa.h"
+#include "uwb.h"
#include "uwb-internal.h"
diff --git a/drivers/uwb/i1480/Makefile b/drivers/staging/uwb/i1480/Makefile
index d26fb9b845ae..d26fb9b845ae 100644
--- a/drivers/uwb/i1480/Makefile
+++ b/drivers/staging/uwb/i1480/Makefile
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/staging/uwb/i1480/dfu/Makefile
index 4739fdac5922..4739fdac5922 100644
--- a/drivers/uwb/i1480/dfu/Makefile
+++ b/drivers/staging/uwb/i1480/dfu/Makefile
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/staging/uwb/i1480/dfu/dfu.c
index ec1af858ead9..9d51ce8faad1 100644
--- a/drivers/uwb/i1480/dfu/dfu.c
+++ b/drivers/staging/uwb/i1480/dfu/dfu.c
@@ -17,9 +17,9 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/device.h>
-#include <linux/uwb.h>
#include <linux/random.h>
#include <linux/export.h>
+#include "../../uwb.h"
/*
* i1480_rceb_check - Check RCEB for expected field values
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/staging/uwb/i1480/dfu/i1480-dfu.h
index 9dd567d174b3..b21d058ecc23 100644
--- a/drivers/uwb/i1480/dfu/i1480-dfu.h
+++ b/drivers/staging/uwb/i1480/dfu/i1480-dfu.h
@@ -50,9 +50,9 @@
#ifndef __i1480_DFU_H__
#define __i1480_DFU_H__
-#include <linux/uwb/spec.h>
#include <linux/types.h>
#include <linux/completion.h>
+#include "../../include/spec.h"
#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/staging/uwb/i1480/dfu/mac.c
index ddc224f01a7f..6e4d6c9cecf5 100644
--- a/drivers/uwb/i1480/dfu/mac.c
+++ b/drivers/staging/uwb/i1480/dfu/mac.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
-#include <linux/uwb.h>
+#include "../../uwb.h"
#include "i1480-dfu.h"
/*
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/staging/uwb/i1480/dfu/phy.c
index 50da4527c113..13512c7dda0b 100644
--- a/drivers/uwb/i1480/dfu/phy.c
+++ b/drivers/staging/uwb/i1480/dfu/phy.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
-#include <linux/usb/wusb.h>
+#include "../../../wusbcore/include/wusb.h"
#include "i1480-dfu.h"
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/staging/uwb/i1480/dfu/usb.c
index 6129a8f4b5f2..d41086bdd783 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/staging/uwb/i1480/dfu/usb.c
@@ -25,9 +25,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/uwb.h>
-#include <linux/usb/wusb.h>
-#include <linux/usb/wusb-wa.h>
+#include "../../uwb.h"
+#include "../../../wusbcore/include/wusb.h"
+#include "../../../wusbcore/include/wusb-wa.h"
#include "i1480-dfu.h"
struct i1480_usb {
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/staging/uwb/i1480/i1480-est.c
index 1346c409d10e..106e0a44b138 100644
--- a/drivers/uwb/i1480/i1480-est.c
+++ b/drivers/staging/uwb/i1480/i1480-est.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb.h>
-#include <linux/uwb.h>
+#include "../uwb.h"
#include "dfu/i1480-dfu.h"
diff --git a/drivers/uwb/ie-rcv.c b/drivers/staging/uwb/ie-rcv.c
index 51a4706e0dd3..51a4706e0dd3 100644
--- a/drivers/uwb/ie-rcv.c
+++ b/drivers/staging/uwb/ie-rcv.c
diff --git a/drivers/uwb/ie.c b/drivers/staging/uwb/ie.c
index b11678dd0380..b11678dd0380 100644
--- a/drivers/uwb/ie.c
+++ b/drivers/staging/uwb/ie.c
diff --git a/drivers/staging/uwb/include/debug-cmd.h b/drivers/staging/uwb/include/debug-cmd.h
new file mode 100644
index 000000000000..f97db6c3bcc0
--- /dev/null
+++ b/drivers/staging/uwb/include/debug-cmd.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ultra Wide Band
+ * Debug interface commands
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ */
+#ifndef __LINUX__UWB__DEBUG_CMD_H__
+#define __LINUX__UWB__DEBUG_CMD_H__
+
+#include <linux/types.h>
+
+/*
+ * Debug interface commands
+ *
+ * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
+ *
+ * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
+ */
+
+enum uwb_dbg_cmd_type {
+ UWB_DBG_CMD_RSV_ESTABLISH = 1,
+ UWB_DBG_CMD_RSV_TERMINATE = 2,
+ UWB_DBG_CMD_IE_ADD = 3,
+ UWB_DBG_CMD_IE_RM = 4,
+ UWB_DBG_CMD_RADIO_START = 5,
+ UWB_DBG_CMD_RADIO_STOP = 6,
+};
+
+struct uwb_dbg_cmd_rsv_establish {
+ __u8 target[6];
+ __u8 type;
+ __u16 max_mas;
+ __u16 min_mas;
+ __u8 max_interval;
+};
+
+struct uwb_dbg_cmd_rsv_terminate {
+ int index;
+};
+
+struct uwb_dbg_cmd_ie {
+ __u8 data[128];
+ int len;
+};
+
+struct uwb_dbg_cmd {
+ __u32 type;
+ union {
+ struct uwb_dbg_cmd_rsv_establish rsv_establish;
+ struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
+ struct uwb_dbg_cmd_ie ie_add;
+ struct uwb_dbg_cmd_ie ie_rm;
+ };
+};
+
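+/*
+ * A minimal sketch (not part of the original header) of how user space
+ * might fill this structure before handing it to the debug interface; the
+ * field values below are illustrative only:
+ *
+ *   struct uwb_dbg_cmd cmd = {
+ *     .type = UWB_DBG_CMD_RSV_ESTABLISH,
+ *     .rsv_establish = {
+ *       .type         = 1,   // reservation type; encoding assumed here
+ *       .max_mas      = 16,
+ *       .min_mas      = 8,
+ *       .max_interval = 4,
+ *     },
+ *   };
+ *   // cmd.rsv_establish.target[] would be filled with the peer's address
+ *   // before the command is written to the debug interface.
+ */
+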
+#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/drivers/staging/uwb/include/spec.h b/drivers/staging/uwb/include/spec.h
new file mode 100644
index 000000000000..5f75caf7b4ba
--- /dev/null
+++ b/drivers/staging/uwb/include/spec.h
@@ -0,0 +1,767 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ultra Wide Band
+ * UWB Standard definitions
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * All these definitions are based on the ECMA-368 standard.
+ *
+ * Note all definitions are Little Endian on the wire, and we will
+ * convert them to host order before operating on the bitfields (that
+ * yes, we use extensively).
+ */
+
+#ifndef __LINUX__UWB_SPEC_H__
+#define __LINUX__UWB_SPEC_H__
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+#include <linux/if_ether.h>
+
+#define i1480_FW 0x00000303
+/* #define i1480_FW 0x00000302 */
+
+/**
+ * Number of Medium Access Slots in a superframe.
+ *
+ * UWB divides time in SuperFrames, each one divided in 256 pieces, or
+ * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
+ * basic bandwidth allocation unit in UWB.
+ */
+enum { UWB_NUM_MAS = 256 };
+
+/**
+ * Number of Zones in a superframe.
+ *
+ * UWB divides the superframe into zones with numbering starting from BPST.
+ * See MBOA MAC[16.8.6]
+ */
+enum { UWB_NUM_ZONES = 16 };
+
+/*
+ * Number of MAS in a zone.
+ */
+#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
+
+/*
+ * Number of MAS required before a row can be considered available.
+ */
+#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
+
+/*
+ * Number of streams per DRP reservation between a pair of devices.
+ *
+ * [ECMA-368] section 16.8.6.
+ */
+enum { UWB_NUM_STREAMS = 8 };
+
+/*
+ * mMasLength
+ *
+ * The length of a MAS in microseconds.
+ *
+ * [ECMA-368] section 17.16.
+ */
+enum { UWB_MAS_LENGTH_US = 256 };
+
+/*
+ * mBeaconSlotLength
+ *
+ * The length of the beacon slot in microseconds.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
+
+/*
+ * mMaxLostBeacons
+ *
+ * The number of beacons missing in consecutive superframes before a
+ * device can be considered unreachable.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_MAX_LOST_BEACONS = 3 };
+
+/*
+ * mDRPBackOffWinMin
+ *
+ * The minimum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
+
+/*
+ * mDRPBackOffWinMax
+ *
+ * The maximum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
+
+/*
+ * Length of a superframe in microseconds.
+ */
+#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
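+
+/*
+ * With the values above this works out to 256 MAS * 256 us = 65536 us,
+ * i.e. a superframe lasts roughly 65.5 ms.
+ */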
+
+/**
+ * UWB MAC address
+ *
+ * It is *imperative* that this struct is exactly 6 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_mac_addr {
+ u8 data[ETH_ALEN];
+} __attribute__((packed));
+
+
+/**
+ * UWB device address
+ *
+ * It is *imperative* that this struct is exactly 2 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_dev_addr {
+ u8 data[2];
+} __attribute__((packed));
+
+
+/**
+ * Types of UWB addresses
+ *
+ * Order matters (by size).
+ */
+enum uwb_addr_type {
+ UWB_ADDR_DEV = 0,
+ UWB_ADDR_MAC = 1,
+};
+
+
+/** Size of a char buffer for printing a MAC/device address */
+enum { UWB_ADDR_STRSIZE = 32 };
+
+
+/** UWB WiMedia protocol IDs. */
+enum uwb_prid {
+ UWB_PRID_WLP_RESERVED = 0x0000,
+ UWB_PRID_WLP = 0x0001,
+ UWB_PRID_WUSB_BOT = 0x0010,
+ UWB_PRID_WUSB = 0x0010,
+ UWB_PRID_WUSB_TOP = 0x001F,
+};
+
+
+/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
+enum uwb_phy_rate {
+ UWB_PHY_RATE_53 = 0,
+ UWB_PHY_RATE_80,
+ UWB_PHY_RATE_106,
+ UWB_PHY_RATE_160,
+ UWB_PHY_RATE_200,
+ UWB_PHY_RATE_320,
+ UWB_PHY_RATE_400,
+ UWB_PHY_RATE_480,
+ UWB_PHY_RATE_INVALID
+};
+
+
+/**
+ * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
+ */
+enum uwb_scan_type {
+ UWB_SCAN_ONLY = 0,
+ UWB_SCAN_OUTSIDE_BP,
+ UWB_SCAN_WHILE_INACTIVE,
+ UWB_SCAN_DISABLED,
+ UWB_SCAN_ONLY_STARTTIME,
+ UWB_SCAN_TOP
+};
+
+
+/** ACK Policy types (MBOA MAC[7.2.1.3]) */
+enum uwb_ack_pol {
+ UWB_ACK_NO = 0,
+ UWB_ACK_INM = 1,
+ UWB_ACK_B = 2,
+ UWB_ACK_B_REQ = 3,
+};
+
+
+/** DRP reservation types ([ECMA-368] table 106) */
+enum uwb_drp_type {
+ UWB_DRP_TYPE_ALIEN_BP = 0,
+ UWB_DRP_TYPE_HARD,
+ UWB_DRP_TYPE_SOFT,
+ UWB_DRP_TYPE_PRIVATE,
+ UWB_DRP_TYPE_PCA,
+};
+
+
+/** DRP Reason Codes ([ECMA-368] table 107) */
+enum uwb_drp_reason {
+ UWB_DRP_REASON_ACCEPTED = 0,
+ UWB_DRP_REASON_CONFLICT,
+ UWB_DRP_REASON_PENDING,
+ UWB_DRP_REASON_DENIED,
+ UWB_DRP_REASON_MODIFIED,
+};
+
+/** Relinquish Request Reason Codes ([ECMA-368] table 113) */
+enum uwb_relinquish_req_reason {
+ UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0,
+ UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION,
+};
+
+/**
+ * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
+ */
+enum uwb_drp_notif_reason {
+ UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
+ UWB_DRP_NOTIF_CONFLICT,
+ UWB_DRP_NOTIF_TERMINATE,
+};
+
+
+/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
+struct uwb_drp_alloc {
+ __le16 zone_bm;
+ __le16 mas_bm;
+} __attribute__((packed));
+
+
+/** General MAC Header format (ECMA-368[16.2]) */
+struct uwb_mac_frame_hdr {
+ __le16 Frame_Control;
+ struct uwb_dev_addr DestAddr;
+ struct uwb_dev_addr SrcAddr;
+ __le16 Sequence_Control;
+ __le16 Access_Information;
+} __attribute__((packed));
+
+
+/**
+ * uwb_beacon_frame - a beacon frame including MAC headers
+ *
+ * [ECMA] section 16.3.
+ */
+struct uwb_beacon_frame {
+ struct uwb_mac_frame_hdr hdr;
+ struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
+ u8 Beacon_Slot_Number;
+ u8 Device_Control;
+ u8 IEData[];
+} __attribute__((packed));
+
+
+/** Information Element codes (MBOA MAC[T54]) */
+enum uwb_ie {
+ UWB_PCA_AVAILABILITY = 2,
+ UWB_IE_DRP_AVAILABILITY = 8,
+ UWB_IE_DRP = 9,
+ UWB_BP_SWITCH_IE = 11,
+ UWB_MAC_CAPABILITIES_IE = 12,
+ UWB_PHY_CAPABILITIES_IE = 13,
+ UWB_APP_SPEC_PROBE_IE = 15,
+ UWB_IDENTIFICATION_IE = 19,
+ UWB_MASTER_KEY_ID_IE = 20,
+ UWB_RELINQUISH_REQUEST_IE = 21,
+ UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
+ UWB_APP_SPEC_IE = 255,
+};
+
+
+/**
+ * Header common to all Information Elements (IEs)
+ */
+struct uwb_ie_hdr {
+ u8 element_id; /* enum uwb_ie */
+ u8 length;
+} __attribute__((packed));
+
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
+struct uwb_ie_drp {
+ struct uwb_ie_hdr hdr;
+ __le16 drp_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
+}
+
+static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
+}
+
+static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
+}
+
+static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
+}
+
+static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
+}
+
+static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
+}
+
+static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
+}
+
+static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
+ enum uwb_drp_reason reason_code)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
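+
+/*
+ * A minimal usage sketch for the accessors above (the field values are
+ * illustrative, not taken from this header):
+ *
+ *   struct uwb_ie_drp *drp_ie;   // allocated and filled by the caller
+ *
+ *   uwb_ie_drp_set_type(drp_ie, UWB_DRP_TYPE_HARD);
+ *   uwb_ie_drp_set_stream_index(drp_ie, 1);
+ *   uwb_ie_drp_set_owner(drp_ie, 1);
+ *   uwb_ie_drp_set_status(drp_ie, 1);
+ *
+ *   // a receiver reads the same drp_control bits back:
+ *   if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_HARD &&
+ *       uwb_ie_drp_owner(drp_ie))
+ *           ...;
+ */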
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
+struct uwb_ie_drp_avail {
+ struct uwb_ie_hdr hdr;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* Relinquish Request IE ([ECMA-368] section 16.8.19). */
+struct uwb_relinquish_request_ie {
+ struct uwb_ie_hdr hdr;
+ __le16 relinquish_req_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie)
+{
+ return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf;
+}
+
+static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie,
+ int reason_code)
+{
+ u16 ctrl = le16_to_cpu(ie->relinquish_req_control);
+ ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0);
+ ie->relinquish_req_control = cpu_to_le16(ctrl);
+}
+
+/**
+ * The Vendor ID is set to an OUI that indicates the vendor of the device.
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_vendor_id {
+ u8 data[3];
+} __attribute__((packed));
+
+/**
+ * The device type ID
+ * FIXME: clarify what this means
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_device_type_id {
+ u8 data[3];
+} __attribute__((packed));
+
+
+/**
+ * UWB device information types
+ * ECMA-368 [16.8.10]
+ */
+enum uwb_dev_info_type {
+ UWB_DEV_INFO_VENDOR_ID = 0,
+ UWB_DEV_INFO_VENDOR_TYPE,
+ UWB_DEV_INFO_NAME,
+};
+
+/**
+ * UWB device information found in Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_dev_info {
+ u8 type; /* enum uwb_dev_info_type */
+ u8 length;
+ u8 data[];
+} __attribute__((packed));
+
+/**
+ * UWB Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_identification_ie {
+ struct uwb_ie_hdr hdr;
+ struct uwb_dev_info info[];
+} __attribute__((packed));
+
+/*
+ * UWB Radio Controller
+ *
+ * These definitions are common to the Radio Control layers as
+ * exported by the WUSB1.0 HWA and WHCI interfaces.
+ */
+
+/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
+struct uwb_rccb {
+ u8 bCommandType; /* enum hwa_cet */
+ __le16 wCommand; /* Command code */
+ u8 bCommandContext; /* Context ID */
+} __attribute__((packed));
+
+
+/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
+struct uwb_rceb {
+ u8 bEventType; /* enum hwa_cet */
+ __le16 wEvent; /* Event code */
+ u8 bEventContext; /* Context ID */
+} __attribute__((packed));
+
+
+enum {
+ UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
+ UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
+};
+
+/* Commands to the radio controller */
+enum uwb_rc_cmd {
+ UWB_RC_CMD_CHANNEL_CHANGE = 16,
+ UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
+ UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
+ UWB_RC_CMD_RESET = 19,
+ UWB_RC_CMD_SCAN = 20, /* Scan management */
+ UWB_RC_CMD_SET_BEACON_FILTER = 21,
+ UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
+ UWB_RC_CMD_SET_IE = 23, /* Information Element management */
+ UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
+ UWB_RC_CMD_SET_TX_POWER = 25,
+ UWB_RC_CMD_SLEEP = 26,
+ UWB_RC_CMD_START_BEACON = 27,
+ UWB_RC_CMD_STOP_BEACON = 28,
+ UWB_RC_CMD_BP_MERGE = 29,
+ UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
+ UWB_RC_CMD_SET_ASIE_NOTIF = 31,
+};
+
+/* Notifications from the radio controller */
+enum uwb_rc_evt {
+ UWB_RC_EVT_IE_RCV = 0,
+ UWB_RC_EVT_BEACON = 1,
+ UWB_RC_EVT_BEACON_SIZE = 2,
+ UWB_RC_EVT_BPOIE_CHANGE = 3,
+ UWB_RC_EVT_BP_SLOT_CHANGE = 4,
+ UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
+ UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
+ UWB_RC_EVT_DRP_AVAIL = 7,
+ UWB_RC_EVT_DRP = 8,
+ UWB_RC_EVT_BP_SWITCH_STATUS = 9,
+ UWB_RC_EVT_CMD_FRAME_RCV = 10,
+ UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
+ /* Events (command responses) use the same code as the command */
+ UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
+};
+
+enum uwb_rc_extended_type_1_cmd {
+ UWB_RC_SET_DAA_ENERGY_MASK = 32,
+ UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
+};
+
+enum uwb_rc_extended_type_1_evt {
+ UWB_RC_DAA_ENERGY_DETECTED = 0,
+};
+
+/* Radio Control Result Code. [WHCI] table 3-3. */
+enum {
+ UWB_RC_RES_SUCCESS = 0,
+ UWB_RC_RES_FAIL,
+ UWB_RC_RES_FAIL_HARDWARE,
+ UWB_RC_RES_FAIL_NO_SLOTS,
+ UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
+ UWB_RC_RES_FAIL_INVALID_PARAMETER,
+ UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
+ UWB_RC_RES_FAIL_INVALID_IE_DATA,
+ UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
+ UWB_RC_RES_FAIL_CANCELLED,
+ UWB_RC_RES_FAIL_INVALID_STATE,
+ UWB_RC_RES_FAIL_INVALID_SIZE,
+ UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
+ UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
+ UWB_RC_RES_FAIL_TIME_OUT = 255,
+};
+
+/* Confirm event. [WHCI] section 3.1.3.1 etc. */
+struct uwb_rc_evt_confirm {
+ struct uwb_rceb rceb;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Device Address Management event. [WHCI] section 3.1.3.2. */
+struct uwb_rc_evt_dev_addr_mgmt {
+ struct uwb_rceb rceb;
+ u8 baAddr[ETH_ALEN];
+ u8 bResultCode;
+} __attribute__((packed));
+
+
+/* Get IE Event. [WHCI] section 3.1.3.3. */
+struct uwb_rc_evt_get_ie {
+ struct uwb_rceb rceb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
+struct uwb_rc_evt_set_drp_ie {
+ struct uwb_rceb rceb;
+ __le16 wRemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Set IE Event. [WHCI] section 3.1.3.8. */
+struct uwb_rc_evt_set_ie {
+ struct uwb_rceb rceb;
+ __le16 RemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Scan command. [WHCI] 3.1.3.5. */
+struct uwb_rc_cmd_scan {
+ struct uwb_rccb rccb;
+ u8 bChannelNumber;
+ u8 bScanState;
+ __le16 wStartTime;
+} __attribute__((packed));
+
+/* Set DRP IE command. [WHCI] section 3.1.3.7. */
+struct uwb_rc_cmd_set_drp_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/* Set IE command. [WHCI] section 3.1.3.8. */
+struct uwb_rc_cmd_set_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
+struct uwb_rc_evt_set_daa_energy_mask {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
+struct uwb_rc_evt_set_notification_filter_ex {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* IE Received notification. [WHCI] section 3.1.4.1. */
+struct uwb_rc_evt_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr SrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Type of the received beacon. [WHCI] section 3.1.4.2. */
+enum uwb_rc_beacon_type {
+ UWB_RC_BEACON_TYPE_SCAN = 0,
+ UWB_RC_BEACON_TYPE_NEIGHBOR,
+ UWB_RC_BEACON_TYPE_OL_ALIEN,
+ UWB_RC_BEACON_TYPE_NOL_ALIEN,
+};
+
+/* Beacon received notification. [WHCI] 3.1.4.2. */
+struct uwb_rc_evt_beacon {
+ struct uwb_rceb rceb;
+ u8 bChannelNumber;
+ u8 bBeaconType;
+ __le16 wBPSTOffset;
+ u8 bLQI;
+ u8 bRSSI;
+ __le16 wBeaconInfoLength;
+ u8 BeaconInfo[];
+} __attribute__((packed));
+
+
+/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
+struct uwb_rc_evt_beacon_size {
+ struct uwb_rceb rceb;
+ __le16 wNewBeaconSize;
+} __attribute__((packed));
+
+
+/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
+struct uwb_rc_evt_bpoie_change {
+ struct uwb_rceb rceb;
+ __le16 wBPOIELength;
+ u8 BPOIE[];
+} __attribute__((packed));
+
+
+/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
+struct uwb_rc_evt_bp_slot_change {
+ struct uwb_rceb rceb;
+ u8 slot_info;
+} __attribute__((packed));
+
+static inline int uwb_rc_evt_bp_slot_change_slot_num(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return evt->slot_info & 0x7f;
+}
+
+static inline int uwb_rc_evt_bp_slot_change_no_slot(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return (evt->slot_info & 0x80) >> 7;
+}
+
+/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
+struct uwb_rc_evt_bp_switch_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
+struct uwb_rc_evt_dev_addr_conflict {
+ struct uwb_rceb rceb;
+} __attribute__((packed));
+
+/* DRP notification. [WHCI] section 3.1.4.9. */
+struct uwb_rc_evt_drp {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr src_addr;
+ u8 reason;
+ u8 beacon_slot_number;
+ __le16 ie_length;
+ u8 ie_data[];
+} __attribute__((packed));
+
+static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
+{
+ return evt->reason & 0x0f;
+}
+
+
+/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
+struct uwb_rc_evt_drp_avail {
+ struct uwb_rceb rceb;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* BP switch status notification. [WHCI] section 3.1.4.10. */
+struct uwb_rc_evt_bp_switch_status {
+ struct uwb_rceb rceb;
+ u8 status;
+ u8 slot_offset;
+ __le16 bpst_offset;
+ u8 move_countdown;
+} __attribute__((packed));
+
+/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
+struct uwb_rc_evt_cmd_frame_rcv {
+ struct uwb_rceb rceb;
+ __le16 receive_time;
+ struct uwb_dev_addr wSrcAddr;
+ struct uwb_dev_addr wDstAddr;
+ __le16 control;
+ __le16 reserved;
+ __le16 dataLength;
+ u8 data[];
+} __attribute__((packed));
+
+/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
+struct uwb_rc_evt_channel_change_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
+struct uwb_rc_evt_daa_energy_detected {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 bandID;
+ u8 reserved;
+ u8 toneBmp[16];
+} __attribute__((packed));
+
+
+/**
+ * Radio Control Interface Class Descriptor
+ *
+ * WUSB 1.0 [8.6.1.2]
+ */
+struct uwb_rc_control_intf_class_desc {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdRCIVersion;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/drivers/staging/uwb/include/umc.h b/drivers/staging/uwb/include/umc.h
new file mode 100644
index 000000000000..ddbceb39ad15
--- /dev/null
+++ b/drivers/staging/uwb/include/umc.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UWB Multi-interface Controller support.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
+ * controller, host controller) are presented as devices on the "umc"
+ * bus.
+ *
+ * The radio controller is not strictly a UMC capability but it's
+ * useful to present it as such.
+ *
+ * References:
+ *
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ *
+ * How this works is kind of convoluted but simple. The whci.ko driver
+ * loads when WHCI devices are detected. These WHCI devices expose
+ * many devices in the same PCI function (they couldn't have reused
+ * functions, no), so for each PCI function that exposes these many
+ * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
+ * with umc_device_create() and adds it to the bus with
+ * umc_device_register().
+ *
+ * umc_device_register() calls device_register() which will push the
+ * bus management code to load your UMC driver's something_probe()
+ * that you have registered for that capability code.
+ *
+ * Now when the WHCI device is removed, whci_remove() will go over
+ * each umc_dev assigned to each of the PCI function's capabilities
+ * and through whci_del_cap() call umc_device_unregister() each
+ * created umc_dev. Of course, if you are bound to the device, your
+ * driver's something_remove() will be called.
+ */
+
+#ifndef _LINUX_UWB_UMC_H_
+#define _LINUX_UWB_UMC_H_
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+/*
+ * UMC capability IDs.
+ *
+ * 0x00 is reserved so use it for the radio controller device.
+ *
+ * [WHCI] table 2-8
+ */
+#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
+#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
+
+/**
+ * struct umc_dev - UMC capability device
+ *
+ * @version: version of the specification this capability conforms to.
+ * @cap_id: capability ID.
+ * @bar: PCI Bar (64 bit) where the resource lies
+ * @resource: register space resource.
+ * @irq: interrupt line.
+ */
+struct umc_dev {
+ u16 version;
+ u8 cap_id;
+ u8 bar;
+ struct resource resource;
+ unsigned irq;
+ struct device dev;
+};
+
+#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
+
+/**
+ * struct umc_driver - UMC capability driver
+ * @cap_id: supported capability ID.
+ * @match: driver specific capability matching function.
+ * @match_data: driver specific data for match() (e.g., a
+ * table of pci_device_id's if umc_match_pci_id() is used).
+ */
+struct umc_driver {
+ char *name;
+ u8 cap_id;
+ int (*match)(struct umc_driver *, struct umc_dev *);
+ const void *match_data;
+
+ int (*probe)(struct umc_dev *);
+ void (*remove)(struct umc_dev *);
+ int (*pre_reset)(struct umc_dev *);
+ int (*post_reset)(struct umc_dev *);
+
+ struct device_driver driver;
+};
+
+#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
+
+extern struct bus_type umc_bus_type;
+
+struct umc_dev *umc_device_create(struct device *parent, int n);
+int __must_check umc_device_register(struct umc_dev *umc);
+void umc_device_unregister(struct umc_dev *umc);
+
+int __must_check __umc_driver_register(struct umc_driver *umc_drv,
+ struct module *mod,
+ const char *mod_name);
+
+/**
+ * umc_driver_register - register a UMC capability driver.
+ * @umc_drv: pointer to the driver.
+ */
+#define umc_driver_register(umc_drv) \
+ __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME)
+
+void umc_driver_unregister(struct umc_driver *umc_drv);
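+
+/*
+ * A minimal sketch of a capability driver built on this interface (the
+ * my_cap_* names and the probe/remove bodies are illustrative only):
+ *
+ *   static int my_cap_probe(struct umc_dev *umc)
+ *   {
+ *       // ioremap umc->resource, request umc->irq, etc.
+ *       return 0;
+ *   }
+ *
+ *   static void my_cap_remove(struct umc_dev *umc)
+ *   {
+ *       // undo whatever probe set up
+ *   }
+ *
+ *   static struct umc_driver my_cap_driver = {
+ *       .name   = "my-cap",
+ *       .cap_id = UMC_CAP_ID_WHCI_RC,
+ *       .probe  = my_cap_probe,
+ *       .remove = my_cap_remove,
+ *   };
+ *
+ *   // module init/exit would then call:
+ *   //   umc_driver_register(&my_cap_driver);
+ *   //   umc_driver_unregister(&my_cap_driver);
+ */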
+
+/*
+ * Utility function you can use to match (umc_driver->match) against a
+ * null-terminated array of 'struct pci_device_id' in
+ * umc_driver->match_data.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
+
+/**
+ * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
+ * @umc_dev: UMC device whose parent PCI device we are looking for
+ *
+ * DIRTY!!! DON'T RELY ON THIS
+ *
+ * FIXME: This is as dirty as it gets, but we need some way to check
+ * the correct type of umc_dev->parent (so that for example, we can
+ * cast to pci_dev). Casting to pci_dev is necessary because at some
+ * point we need to request resources from the device. Mapping is
+ * easily overcome (ioremap and stuff are bus agnostic), but hooking
+ * up to some error handlers (such as pci error handlers) might need
+ * this.
+ *
+ * THIS might (probably will) be removed in the future, so don't count
+ * on it.
+ */
+static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
+{
+ struct pci_dev *pci_dev = NULL;
+ if (dev_is_pci(umc_dev->dev.parent))
+ pci_dev = to_pci_dev(umc_dev->dev.parent);
+ return pci_dev;
+}
+
+/**
+ * umc_dev_get() - reference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ *
+ * NOTE: we are assuming in this whole scheme that the parent device
+ * is referenced at _probe() time and unreferenced at _remove()
+ * time by the parent's subsystem.
+ */
+static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
+{
+ get_device(&umc_dev->dev);
+ return umc_dev;
+}
+
+/**
+ * umc_dev_put() - unreference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void umc_dev_put(struct umc_dev *umc_dev)
+{
+ put_device(&umc_dev->dev);
+}
+
+/**
+ * umc_set_drvdata - set UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ * @data: Data to set.
+ */
+static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
+{
+ dev_set_drvdata(&umc_dev->dev, data);
+}
+
+/**
+ * umc_get_drvdata - recover UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
+{
+ return dev_get_drvdata(&umc_dev->dev);
+}
+
+int umc_controller_reset(struct umc_dev *umc);
+
+#endif /* #ifndef _LINUX_UWB_UMC_H_ */
diff --git a/drivers/staging/uwb/include/whci.h b/drivers/staging/uwb/include/whci.h
new file mode 100644
index 000000000000..1a5c2cc2b008
--- /dev/null
+++ b/drivers/staging/uwb/include/whci.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * References:
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ */
+#ifndef _LINUX_UWB_WHCI_H_
+#define _LINUX_UWB_WHCI_H_
+
+#include <linux/pci.h>
+
+/*
+ * UWB interface capability registers (offsets from UWBBASE)
+ *
+ * [WHCI] section 2.2
+ */
+#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
+# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
+#define UWBCAPDATA(n) (8*(n))
+# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
+# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
+# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
+# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
+# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
+
+/* Size of the WHCI capability data (including the RC capability) for
+ a device with n capabilities. */
+#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
+
+
+/*
+ * URC registers (offsets from URCBASE)
+ *
+ * [WHCI] section 2.3
+ */
+#define URCCMD 0x00
+# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
+# define URCCMD_RS (1 << 30) /* Run/Stop */
+# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
+# define URCCMD_ACTIVE (1 << 15) /* Command is active */
+# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
+# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
+#define URCSTS 0x04
+# define URCSTS_EPS (1 << 17) /* Event Processing Status */
+# define URCSTS_HALTED (1 << 16) /* RC halted */
+# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
+# define URCSTS_ER (1 << 9) /* Event Ready */
+# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
+# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
+# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
+#define URCINTR 0x08
+# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
+#define URCCMDADDR 0x10
+#define URCEVTADDR 0x18
+# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
+
+
+/** Write 32 bit @value to little endian register at @addr */
+static inline
+void le_writel(u32 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+
+/** Read from 32 bit little endian register at @addr */
+static inline
+u32 le_readl(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+
+/** Write 64 bit @value to little endian register at @addr */
+static inline
+void le_writeq(u64 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+ iowrite32(value >> 32, addr + 4);
+}
+
+
+/** Read from 64 bit little endian register at @addr */
+static inline
+u64 le_readq(void __iomem *addr)
+{
+ u64 value;
+ value = ioread32(addr);
+ value |= (u64)ioread32(addr + 4) << 32;
+ return value;
+}
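+
+/*
+ * A minimal sketch of how these helpers pair with the UWBCAPDATA macros
+ * above to walk the capability table (uwb_base is an assumed ioremapped
+ * pointer to UWBBASE):
+ *
+ *   u64 capinfo = le_readq(uwb_base + UWBCAPINFO);
+ *   unsigned int i, n_caps = UWBCAPINFO_TO_N_CAPS(capinfo);
+ *
+ *   for (i = 1; i <= n_caps; i++) {
+ *       u64 capdata = le_readq(uwb_base + UWBCAPDATA(i));
+ *       u8  cap_id  = UWBCAPDATA_TO_CAP_ID(capdata);
+ *       u8  bar     = UWBCAPDATA_TO_BAR(capdata);
+ *       // UWBCAPDATA_TO_OFFSET()/_TO_SIZE() locate the register block
+ *   }
+ */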
+
+extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
+ u32 mask, u32 result,
+ unsigned long max_ms, const char *tag);
+
+#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
diff --git a/drivers/uwb/lc-dev.c b/drivers/staging/uwb/lc-dev.c
index 3e5c07fd6b10..3e5c07fd6b10 100644
--- a/drivers/uwb/lc-dev.c
+++ b/drivers/staging/uwb/lc-dev.c
diff --git a/drivers/uwb/lc-rc.c b/drivers/staging/uwb/lc-rc.c
index ee31b221cdc2..ee31b221cdc2 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/staging/uwb/lc-rc.c
diff --git a/drivers/uwb/neh.c b/drivers/staging/uwb/neh.c
index 1695584be412..1695584be412 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/staging/uwb/neh.c
diff --git a/drivers/uwb/pal.c b/drivers/staging/uwb/pal.c
index 765fd426dbd1..a541e646a603 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/staging/uwb/pal.c
@@ -6,9 +6,9 @@
*/
#include <linux/kernel.h>
#include <linux/debugfs.h>
-#include <linux/uwb.h>
#include <linux/export.h>
+#include "uwb.h"
#include "uwb-internal.h"
/**
diff --git a/drivers/uwb/radio.c b/drivers/staging/uwb/radio.c
index 240dd755927e..6afb75ce1b5f 100644
--- a/drivers/uwb/radio.c
+++ b/drivers/staging/uwb/radio.c
@@ -5,9 +5,9 @@
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
-#include <linux/uwb.h>
#include <linux/export.h>
+#include "uwb.h"
#include "uwb-internal.h"
diff --git a/drivers/uwb/reset.c b/drivers/staging/uwb/reset.c
index 8fc7c14d903e..8fc7c14d903e 100644
--- a/drivers/uwb/reset.c
+++ b/drivers/staging/uwb/reset.c
diff --git a/drivers/uwb/rsv.c b/drivers/staging/uwb/rsv.c
index ec924deb0a32..f45a04ff7275 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/staging/uwb/rsv.c
@@ -5,11 +5,11 @@
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
-#include <linux/uwb.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/export.h>
+#include "uwb.h"
#include "uwb-internal.h"
static void uwb_rsv_timer(struct timer_list *t);
diff --git a/drivers/uwb/scan.c b/drivers/staging/uwb/scan.c
index ffc3f452302d..ffc3f452302d 100644
--- a/drivers/uwb/scan.c
+++ b/drivers/staging/uwb/scan.c
diff --git a/drivers/uwb/umc-bus.c b/drivers/staging/uwb/umc-bus.c
index 0fdc38078eee..8b931f66a720 100644
--- a/drivers/uwb/umc-bus.c
+++ b/drivers/staging/uwb/umc-bus.c
@@ -8,8 +8,8 @@
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/module.h>
-#include <linux/uwb/umc.h>
#include <linux/pci.h>
+#include "include/umc.h"
static int umc_bus_pre_reset_helper(struct device *dev, void *data)
{
diff --git a/drivers/uwb/umc-dev.c b/drivers/staging/uwb/umc-dev.c
index c845ca414bb2..0c71caae00be 100644
--- a/drivers/uwb/umc-dev.c
+++ b/drivers/staging/uwb/umc-dev.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/uwb/umc.h>
+#include "include/umc.h"
static void umc_device_release(struct device *dev)
{
diff --git a/drivers/uwb/umc-drv.c b/drivers/staging/uwb/umc-drv.c
index b141d520efbf..ed3bd220e8c2 100644
--- a/drivers/uwb/umc-drv.c
+++ b/drivers/staging/uwb/umc-drv.c
@@ -6,7 +6,7 @@
*/
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/uwb/umc.h>
+#include "include/umc.h"
int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
const char *mod_name)
diff --git a/drivers/uwb/uwb-debug.c b/drivers/staging/uwb/uwb-debug.c
index 5457b6d42387..dd14df219ef8 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/staging/uwb/uwb-debug.c
@@ -19,8 +19,7 @@
#include <linux/uaccess.h>
#include <linux/seq_file.h>
-#include <linux/uwb/debug-cmd.h>
-
+#include "include/debug-cmd.h"
#include "uwb-internal.h"
/*
diff --git a/drivers/uwb/uwb-internal.h b/drivers/staging/uwb/uwb-internal.h
index 00de0a5333d2..4c2fdac7f610 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/staging/uwb/uwb-internal.h
@@ -17,8 +17,8 @@
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/uwb.h>
#include <linux/mutex.h>
+#include "uwb.h"
struct uwb_beca_e;
diff --git a/drivers/staging/uwb/uwb.h b/drivers/staging/uwb/uwb.h
new file mode 100644
index 000000000000..6a59706ba3a0
--- /dev/null
+++ b/drivers/staging/uwb/uwb.h
@@ -0,0 +1,817 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Ultra Wide Band
+ * UWB API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * FIXME: doc: overview of the API, different parts and pointers
+ */
+
+#ifndef __LINUX__UWB_H__
+#define __LINUX__UWB_H__
+
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <asm/page.h>
+#include "include/spec.h"
+
+struct uwb_dev;
+struct uwb_beca_e;
+struct uwb_rc;
+struct uwb_rsv;
+struct uwb_dbg;
+
+/**
+ * struct uwb_dev - a UWB Device
+ * @rc: UWB Radio Controller that discovered the device (kind of its
+ * parent).
+ * @bce: a beacon cache entry for this device; or NULL if the device
+ * is a local radio controller.
+ * @mac_addr: the EUI-48 address of this device.
+ * @dev_addr: the current DevAddr used by this device.
+ * @beacon_slot: the slot number the beacon is using.
+ * @streams: bitmap of streams allocated to reservations targeted at
+ * this device. For an RC, this is the streams allocated for
+ * reservations targeted at DevAddrs.
+ *
+ * A UWB device may either be a neighbor or part of a local radio
+ * controller.
+ */
+struct uwb_dev {
+ struct mutex mutex;
+ struct list_head list_node;
+ struct device dev;
+ struct uwb_rc *rc; /* radio controller */
+ struct uwb_beca_e *bce; /* Beacon Cache Entry */
+
+ struct uwb_mac_addr mac_addr;
+ struct uwb_dev_addr dev_addr;
+ int beacon_slot;
+ DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
+ DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
+};
+#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
+
+/**
+ * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
+ *
+ * RC[CE]Bs have a 'context ID' field that matches the command with
+ * the event received to confirm it.
+ *
+ * Maximum number of context IDs
+ */
+enum { UWB_RC_CTX_MAX = 256 };
+
+
+/** Notification chain head for UWB generated events to listeners */
+struct uwb_notifs_chain {
+ struct list_head list;
+ struct mutex mutex;
+};
+
+/* Beacon cache list */
+struct uwb_beca {
+ struct list_head list;
+ size_t entries;
+ struct mutex mutex;
+};
+
+/* Event handling thread. */
+struct uwbd {
+ int pid;
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+};
+
+/**
+ * struct uwb_mas_bm - a bitmap of all MAS in a superframe
+ * @bm: a bitmap of length #UWB_NUM_MAS
+ */
+struct uwb_mas_bm {
+ DECLARE_BITMAP(bm, UWB_NUM_MAS);
+ DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
+ int safe;
+ int unsafe;
+};
+
+/**
+ * uwb_rsv_state - UWB Reservation state.
+ *
+ * NONE - reservation is not active (no DRP IE being transmitted).
+ *
+ * Owner reservation states:
+ *
+ * INITIATED - owner has sent an initial DRP request.
+ * PENDING - target responded with pending Reason Code.
+ * MODIFIED - reservation manager is modifying an established
+ * reservation with a different MAS allocation.
+ * ESTABLISHED - the reservation has been successfully negotiated.
+ *
+ * Target reservation states:
+ *
+ * DENIED - request is denied.
+ * ACCEPTED - request is accepted.
+ * PENDING - PAL has yet to make a decision whether to accept or
+ * deny.
+ *
+ * FIXME: further target states TBD.
+ */
+enum uwb_rsv_state {
+ UWB_RSV_STATE_NONE = 0,
+ UWB_RSV_STATE_O_INITIATED,
+ UWB_RSV_STATE_O_PENDING,
+ UWB_RSV_STATE_O_MODIFIED,
+ UWB_RSV_STATE_O_ESTABLISHED,
+ UWB_RSV_STATE_O_TO_BE_MOVED,
+ UWB_RSV_STATE_O_MOVE_EXPANDING,
+ UWB_RSV_STATE_O_MOVE_COMBINING,
+ UWB_RSV_STATE_O_MOVE_REDUCING,
+ UWB_RSV_STATE_T_ACCEPTED,
+ UWB_RSV_STATE_T_DENIED,
+ UWB_RSV_STATE_T_CONFLICT,
+ UWB_RSV_STATE_T_PENDING,
+ UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
+ UWB_RSV_STATE_T_EXPANDING_CONFLICT,
+ UWB_RSV_STATE_T_EXPANDING_PENDING,
+ UWB_RSV_STATE_T_EXPANDING_DENIED,
+ UWB_RSV_STATE_T_RESIZED,
+
+ UWB_RSV_STATE_LAST,
+};
+
+enum uwb_rsv_target_type {
+ UWB_RSV_TARGET_DEV,
+ UWB_RSV_TARGET_DEVADDR,
+};
+
+/**
+ * struct uwb_rsv_target - the target of a reservation.
+ *
+ * Reservations may be unicast and targeted at a single device
+ * (UWB_RSV_TARGET_DEV), or (e.g., in the case of WUSB) targeted at a
+ * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
+ */
+struct uwb_rsv_target {
+ enum uwb_rsv_target_type type;
+ union {
+ struct uwb_dev *dev;
+ struct uwb_dev_addr devaddr;
+ };
+};
+
+struct uwb_rsv_move {
+ struct uwb_mas_bm final_mas;
+ struct uwb_ie_drp *companion_drp_ie;
+ struct uwb_mas_bm companion_mas;
+};
+
+/*
+ * Number of streams reserved for reservations targeted at DevAddrs.
+ */
+#define UWB_NUM_GLOBAL_STREAMS 1
+
+typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
+
+/**
+ * struct uwb_rsv - a DRP reservation
+ *
+ * Data structure management:
+ *
+ * @rc: the radio controller this reservation is for
+ * (as target or owner)
+ * @rc_node: a list node for the RC
+ * @pal_node: a list node for the PAL
+ *
+ * Owner and target parameters:
+ *
+ * @owner: the UWB device owning this reservation
+ * @target: the target UWB device
+ * @type: reservation type
+ *
+ * Owner parameters:
+ *
+ * @max_mas: maximum number of MAS
+ * @min_mas: minimum number of MAS
+ * @sparsity: owner selected sparsity
+ * @is_multicast: true iff multicast
+ *
+ * @callback: callback function when the reservation completes
+ * @pal_priv: private data for the PAL making the reservation
+ *
+ * Reservation status:
+ *
+ * @status: negotiation status
+ * @stream: stream index allocated for this reservation
+ * @tiebreaker: conflict tiebreaker for this reservation
+ * @mas: reserved MAS
+ * @drp_ie: the DRP IE
+ * @ie_valid: true iff the DRP IE matches the reservation parameters
+ *
+ * DRP reservations are uniquely identified by the owner, target and
+ * stream index. However, when using a DevAddr as a target (e.g., for
+ * a WUSB cluster reservation) the responses may be received from
+ * devices with different DevAddrs. In this case, reservations are
+ * uniquely identified by just the stream index. A number of stream
+ * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
+ */
+struct uwb_rsv {
+ struct uwb_rc *rc;
+ struct list_head rc_node;
+ struct list_head pal_node;
+ struct kref kref;
+
+ struct uwb_dev *owner;
+ struct uwb_rsv_target target;
+ enum uwb_drp_type type;
+ int max_mas;
+ int min_mas;
+ int max_interval;
+ bool is_multicast;
+
+ uwb_rsv_cb_f callback;
+ void *pal_priv;
+
+ enum uwb_rsv_state state;
+ bool needs_release_companion_mas;
+ u8 stream;
+ u8 tiebreaker;
+ struct uwb_mas_bm mas;
+ struct uwb_ie_drp *drp_ie;
+ struct uwb_rsv_move mv;
+ bool ie_valid;
+ struct timer_list timer;
+ struct work_struct handle_timeout_work;
+};
+
+static const
+struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
+
+static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
+{
+ bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
+}
+
+/**
+ * struct uwb_drp_avail - a radio controller's view of MAS usage
+ * @global: MAS unused by neighbors (excluding reservations targeted
+ * or owned by the local radio controller) or the beaon period
+ * @local: MAS unused by local established reservations
+ * @pending: MAS unused by local pending reservations
+ * @ie: DRP Availability IE to be included in the beacon
+ * @ie_valid: true iff @ie is valid and does not need to be regenerated from
+ * @global and @local
+ *
+ * Each radio controller maintains a view of MAS usage or
+ * availability. MAS available for a new reservation are determined
+ * from the intersection of @global, @local, and @pending.
+ *
+ * The radio controller must transmit a DRP Availability IE that's the
+ * intersection of @global and @local.
+ *
+ * A set bit indicates the MAS is unused and available.
+ *
+ * rc->rsvs_mutex should be held before accessing this data structure.
+ *
+ * [ECMA-368] section 17.4.3.
+ */
+struct uwb_drp_avail {
+ DECLARE_BITMAP(global, UWB_NUM_MAS);
+ DECLARE_BITMAP(local, UWB_NUM_MAS);
+ DECLARE_BITMAP(pending, UWB_NUM_MAS);
+ struct uwb_ie_drp_avail ie;
+ bool ie_valid;
+};
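+
+/*
+ * A minimal sketch of the intersection described above (avail points to a
+ * struct uwb_drp_avail; bm is a caller-provided scratch bitmap):
+ *
+ *   DECLARE_BITMAP(bm, UWB_NUM_MAS);
+ *
+ *   bitmap_and(bm, avail->global, avail->local, UWB_NUM_MAS);
+ *   bitmap_and(bm, bm, avail->pending, UWB_NUM_MAS);
+ *   // set bits in bm are MAS that could back a new reservation
+ */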
+
+struct uwb_drp_backoff_win {
+ u8 window;
+ u8 n;
+ int total_expired;
+ struct timer_list timer;
+ bool can_reserve_extra_mases;
+};
+
+const char *uwb_rsv_state_str(enum uwb_rsv_state state);
+const char *uwb_rsv_type_str(enum uwb_drp_type type);
+
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
+ void *pal_priv);
+void uwb_rsv_destroy(struct uwb_rsv *rsv);
+
+int uwb_rsv_establish(struct uwb_rsv *rsv);
+int uwb_rsv_modify(struct uwb_rsv *rsv,
+ int max_mas, int min_mas, int sparsity);
+void uwb_rsv_terminate(struct uwb_rsv *rsv);
+
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
+
+void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
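+
+/*
+ * A minimal sketch of the owner-side reservation life cycle built on the
+ * calls above (my_rsv_cb, my_pal_priv and peer are assumed names, and the
+ * MAS parameters are illustrative values):
+ *
+ *   static void my_rsv_cb(struct uwb_rsv *rsv)
+ *   {
+ *       if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
+ *           ...;   // MAS in rsv->mas are now usable
+ *   }
+ *
+ *   struct uwb_rsv *rsv = uwb_rsv_create(rc, my_rsv_cb, my_pal_priv);
+ *
+ *   rsv->target.type  = UWB_RSV_TARGET_DEV;
+ *   rsv->target.dev   = peer;
+ *   rsv->type         = UWB_DRP_TYPE_HARD;
+ *   rsv->max_mas      = 16;
+ *   rsv->min_mas      = 8;
+ *   rsv->max_interval = 4;
+ *
+ *   if (uwb_rsv_establish(rsv) < 0)
+ *       uwb_rsv_destroy(rsv);
+ *   // ...and when the PAL is done with the reservation:
+ *   uwb_rsv_terminate(rsv);
+ *   uwb_rsv_destroy(rsv);
+ */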
+
+/**
+ * Radio Control Interface instance
+ *
+ *
+ * Life cycle rules: those of the UWB Device.
+ *
+ * @index: an index number for this radio controller, as used in the
+ * device name.
+ * @version: version of protocol supported by this device
+ * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
+ * @cmd: Backend implementation to execute commands; rw and call
+ * only with uwb_dev.dev.sem taken.
+ * @reset: Hardware reset of radio controller and any PAL controllers.
+ * @filter: Backend implementation to manipulate data to and from device
+ * to be compliant to specification assumed by driver (WHCI
+ * 0.95).
+ *
+ * uwb_dev.dev.mutex is used to execute commands and update
+ * the corresponding structures; can't use a spinlock
+ * because rc->cmd() can sleep.
+ * @ies: This is a dynamically allocated array caching the
+ * IEs (settable by the host) that the beacon of this
+ * radio controller is currently sending.
+ *
+ * In reality, we store here the full command we set to
+ * the radio controller (which is basically a command
+ * prefix followed by all the IEs the beacon currently
+ * contains). This way we don't have to realloc and
+ * memcpy when setting it.
+ *
+ * We set this up in uwb_rc_ie_setup(), where we alloc
+ * this struct, call get_ie() [so we know which IEs are
+ * currently being sent, if any].
+ *
+ * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
+ * amount used is given by sizeof(*ies) plus ies->wIELength
+ * (which is a little endian quantity all the time).
+ * @ies_mutex: protect the IE cache
+ * @dbg: information for the debug interface
+ */
+struct uwb_rc {
+ struct uwb_dev uwb_dev;
+ int index;
+ u16 version;
+
+ struct module *owner;
+ void *priv;
+ int (*start)(struct uwb_rc *rc);
+ void (*stop)(struct uwb_rc *rc);
+ int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
+ int (*reset)(struct uwb_rc *rc);
+ int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
+ int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
+ size_t *, size_t *);
+
+ spinlock_t neh_lock; /* protects neh_* and ctx_* */
+ struct list_head neh_list; /* Open NE handles */
+ unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
+ u8 ctx_roll;
+
+ int beaconing; /* Beaconing state [channel number] */
+ int beaconing_forced;
+ int scanning;
+ enum uwb_scan_type scan_type:3;
+ unsigned ready:1;
+ struct uwb_notifs_chain notifs_chain;
+ struct uwb_beca uwb_beca;
+
+ struct uwbd uwbd;
+
+ struct uwb_drp_backoff_win bow;
+ struct uwb_drp_avail drp_avail;
+ struct list_head reservations;
+ struct list_head cnflt_alien_list;
+ struct uwb_mas_bm cnflt_alien_bitmap;
+ struct mutex rsvs_mutex;
+ spinlock_t rsvs_lock;
+ struct workqueue_struct *rsv_workq;
+
+ struct delayed_work rsv_update_work;
+ struct delayed_work rsv_alien_bp_work;
+ int set_drp_ie_pending;
+ struct mutex ies_mutex;
+ struct uwb_rc_cmd_set_ie *ies;
+ size_t ies_capacity;
+
+ struct list_head pals;
+ int active_pals;
+
+ struct uwb_dbg *dbg;
+};
+
+
+/**
+ * struct uwb_pal - a UWB PAL
+ * @name: descriptive name for this PAL (wusbhc, wlp, etc.).
+ * @device: a device for the PAL. Used to link the PAL and the radio
+ * controller in sysfs.
+ * @rc: the radio controller the PAL uses.
+ * @channel_changed: called when the channel used by the radio changes.
+ * A channel of -1 means the channel has been stopped.
+ * @new_rsv: called when a peer requests a reservation (may be NULL if
+ * the PAL cannot accept reservation requests).
+ * @channel: channel being used by the PAL; 0 if the PAL isn't using
+ * the radio; -1 if the PAL wishes to use the radio but
+ * cannot.
+ * @debugfs_dir: a debugfs directory which the PAL can use for its own
+ * debugfs files.
+ *
+ * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
+ * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
+ *
+ * The PALs using a radio controller must register themselves to
+ * permit the UWB stack to coordinate usage of the radio between the
+ * various PALs or to allow PALs to respond to certain requests from
+ * peers.
+ *
+ * A struct uwb_pal should be embedded in a containing structure
+ * belonging to the PAL and initialized with uwb_pal_init(). Fields
+ * should be set appropriately by the PAL before registering the PAL
+ * with uwb_pal_register().
+ */
+struct uwb_pal {
+ struct list_head node;
+ const char *name;
+ struct device *device;
+ struct uwb_rc *rc;
+
+ void (*channel_changed)(struct uwb_pal *pal, int channel);
+ void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);
+
+ int channel;
+ struct dentry *debugfs_dir;
+};
+
+void uwb_pal_init(struct uwb_pal *pal);
+int uwb_pal_register(struct uwb_pal *pal);
+void uwb_pal_unregister(struct uwb_pal *pal);
+
+int uwb_radio_start(struct uwb_pal *pal);
+void uwb_radio_stop(struct uwb_pal *pal);
+
+/*
+ * General public API
+ *
+ * This API can be used by UWB device drivers or by those implementing
+ * UWB Radio Controllers.
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+ const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
+static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
+{
+ get_device(&uwb_dev->dev);
+}
+static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
+{
+ put_device(&uwb_dev->dev);
+}
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
+
+/**
+ * Callback function for 'uwb_{dev,rc}_foreach()'.
+ *
+ * @dev: Linux device instance
+ * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
+ * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
+ *
+ * @returns: 0 to continue iterating, any other value to stop
+ * iterating and return the value to the caller of
+ * _foreach().
+ */
+typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
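
For illustration only (not part of this patch), a sketch of a uwb_dev_for_each()
callback that follows the documented return convention; uwb_dev_count_cb() and
the count variable are hypothetical names:

static int uwb_dev_count_cb(struct device *dev, void *priv)
{
	struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
	unsigned int *count = priv;

	(*count)++;
	dev_dbg(&uwb_dev->dev, "device %u in beacon group\n", *count);
	return 0;	/* 0: keep iterating over the remaining devices */
}

/* usage: unsigned int n = 0; uwb_dev_for_each(rc, uwb_dev_count_cb, &n); */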
+
+struct uwb_rc *uwb_rc_alloc(void);
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
+void uwb_rc_put(struct uwb_rc *rc);
+
+typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
+ struct uwb_rceb *reply, ssize_t reply_size);
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg);
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ struct uwb_rceb *reply, size_t reply_size);
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ struct uwb_rceb **preply);
+
+size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
+
+int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
+int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
+int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
+int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
+int __uwb_mac_addr_assigned_check(struct device *, void *);
+int __uwb_dev_addr_assigned_check(struct device *, void *);
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
+ const struct uwb_dev_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 0);
+}
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
+ const struct uwb_mac_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 1);
+}
+
+/* @returns 0 if device addresses @addr2 and @addr1 are equal */
+static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
+ const struct uwb_dev_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
+static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
+ const struct uwb_mac_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns !0 if a MAC @addr is a broadcast address */
+static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr bcast = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ return !uwb_mac_addr_cmp(addr, &bcast);
+}
+
+/* @returns !0 if a MAC @addr is all zeroes */
+static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr unset = {
+ .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+ };
+ return !uwb_mac_addr_cmp(addr, &unset);
+}
+
+/* @returns !0 if the address is in use. */
+static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
+ struct uwb_dev_addr *addr)
+{
+ return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
+}
+
+/*
+ * UWB Radio Controller API
+ *
+ * This API is used (in addition to the general API) to implement UWB
+ * Radio Controllers.
+ */
+void uwb_rc_init(struct uwb_rc *);
+int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
+void uwb_rc_rm(struct uwb_rc *);
+void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
+void uwb_rc_neh_error(struct uwb_rc *, int);
+void uwb_rc_reset_all(struct uwb_rc *rc);
+void uwb_rc_pre_reset(struct uwb_rc *rc);
+int uwb_rc_post_reset(struct uwb_rc *rc);
+
+/**
+ * uwb_rsv_is_owner - is the owner of this reservation the RC?
+ * @rsv: the reservation
+ */
+static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
+{
+ return rsv->owner == &rsv->rc->uwb_dev;
+}
+
+/**
+ * enum uwb_notifs - UWB events that can be passed to any listeners
+ * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
+ * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
+ *
+ * Higher layers can register callback functions with the radio
+ * controller using uwb_notifs_register(). The radio controller
+ * maintains a list of all registered handlers and will notify all
+ * nodes when an event occurs.
+ */
+enum uwb_notifs {
+ UWB_NOTIF_ONAIR,
+ UWB_NOTIF_OFFAIR,
+};
+
+/* Callback function registered with UWB */
+struct uwb_notifs_handler {
+ struct list_head list_node;
+ void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
+ void *data;
+};
+
+int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
+int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
+
+
+/**
+ * UWB radio controller Event Size Entry (for creating entry tables)
+ *
+ * WUSB and WHCI define events and notifications, and they might have
+ * fixed or variable size.
+ *
+ * Each event/notification has a size which is not necessarily known
+ * in advance based on the event code. As well, vendor specific
+ * events/notifications will have a size impossible to determine
+ * unless we know about the device's specific details.
+ *
+ * It was way too smart of the spec writers not to realize that a
+ * generic driver cannot skip over vendor specific
+ * events/notifications if there are no LENGTH fields in the HEADER of
+ * each message... the transaction size cannot be counted on either,
+ * as the spec does not forbid packing more than one event in a
+ * single transaction.
+ *
+ * Thus, we guess sizes with tables (or for events, when you know the
+ * size ahead of time you can use uwb_rc_neh_extra_size*()). We
+ * register tables with the known events and their sizes, and then we
+ * traverse those tables. For those with variable length, we provide a
+ * way to lookup the size inside the event/notification's
+ * payload. This allows device-specific event size tables to be
+ * registered.
+ *
+ * @size: Size of the payload
+ *
+ * @offset: if != 0, at offset @offset-1 starts a field with a length
+ * that has to be added to @size. The format of the field is
+ * given by @type.
+ *
+ * @type: Type and length of the offset field. Most common is LE 16
+ * bits (that's why that is zero); others are there mostly to
+ * cover for bugs and weirdos.
+ */
+struct uwb_est_entry {
+ size_t size;
+ unsigned offset;
+ enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
+};
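
A hypothetical example (not part of this patch) of an event size table built
from struct uwb_est_entry: entry 0 is a fixed 4-byte event, entry 1 carries a
16-bit little-endian length field starting at payload offset 2 (so @offset is
3), as described above:

static const struct uwb_est_entry example_est[] = {
	[0] = { .size = 4 },
	[1] = { .size = 4, .offset = 3, .type = UWB_EST_16 },
};

/* registered with: uwb_est_register(type, code_high, vendor, product,
 *                                   example_est, ARRAY_SIZE(example_est)); */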
+
+int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+ size_t len);
+
+/* -- Misc */
+
+enum {
+ EDC_MAX_ERRORS = 10,
+ EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+ unsigned long timestart;
+ u16 errorcount;
+};
+
+static inline
+void edc_init(struct edc *edc)
+{
+ edc->timestart = jiffies;
+}
+
+/* Called when an error occurs.
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * @returns 1 if the maximum number of acceptable errors per timeframe has been exceeded.
+ */
+static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
+{
+ unsigned long now;
+
+ now = jiffies;
+ if (now - err_hist->timestart > timeframe) {
+ err_hist->errorcount = 1;
+ err_hist->timestart = now;
+ } else if (++err_hist->errorcount > max_err) {
+ err_hist->errorcount = 0;
+ err_hist->timestart = now;
+ return 1;
+ }
+ return 0;
+}
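
For illustration only (not part of this patch), a sketch of typical edc usage
in a completion path; example_edc and example_complete() are hypothetical:

static struct edc example_edc;	/* edc_init(&example_edc) at setup time */

static void example_complete(int status)
{
	if (status == 0)
		return;
	if (edc_inc(&example_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
		pr_err("too many errors in one timeframe, resetting\n");
}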
+
+
+/* Information Element handling */
+
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
+int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
+int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);
+
+/*
+ * Transmission statistics
+ *
+ * UWB uses LQI and RSSI (one byte values) for reporting radio signal
+ * strength and line quality indication. We do quick and dirty
+ * averages of those. They are signed values, btw.
+ *
+ * For 8 bit quantities, we keep the min, the max, an accumulator
+ * (@sigma) and a # of samples. When @samples gets to 255, we compute
+ * the average (@sigma / @samples), place it in @sigma and reset
+ * @samples to 1 (so we use it as the first sample).
+ *
+ * Now, statistically speaking, probably I am kicking the kidneys of
+ * some books I have in my shelves collecting dust, but I just want to
+ * get an approx, not the Nobel.
+ *
+ * LOCKING: there is no locking per se, but we try to keep a lockless
+ * scheme. Only _add_sample() modifies the values--as long as you
+ * have other locking on top that makes sure that no two calls of
+ * _add_sample() happen at the same time, we are fine. For
+ * resetting the values we just set @samples to 0, which makes the
+ * next _add_sample() start with defaults. Reading the values in
+ * _show() currently can race, so you need to make sure the calls are
+ * under the same lock that protects calls to _add_sample(). FIXME:
+ * currently unlocked (It is not ultraprecise but does the trick. Bite
+ * me).
+ */
+struct stats {
+ s8 min, max;
+ s16 sigma;
+ atomic_t samples;
+};
+
+static inline
+void stats_init(struct stats *stats)
+{
+ atomic_set(&stats->samples, 0);
+ wmb();
+}
+
+static inline
+void stats_add_sample(struct stats *stats, s8 sample)
+{
+ s8 min, max;
+ s16 sigma;
+ unsigned samples = atomic_read(&stats->samples);
+ if (samples == 0) { /* it was zero before, so we initialize */
+ min = 127;
+ max = -128;
+ sigma = 0;
+ } else {
+ min = stats->min;
+ max = stats->max;
+ sigma = stats->sigma;
+ }
+
+ if (sample < min) /* compute new values */
+ min = sample;
+ else if (sample > max)
+ max = sample;
+ sigma += sample;
+
+ stats->min = min; /* commit */
+ stats->max = max;
+ stats->sigma = sigma;
+ if (atomic_add_return(1, &stats->samples) > 255) {
+ /* wrapped around! reset */
+ stats->sigma = sigma / 256;
+ atomic_set(&stats->samples, 1);
+ }
+}
+
+static inline ssize_t stats_show(struct stats *stats, char *buf)
+{
+ int min, max, avg;
+ int samples = atomic_read(&stats->samples);
+ if (samples == 0)
+ min = max = avg = 0;
+ else {
+ min = stats->min;
+ max = stats->max;
+ avg = stats->sigma / samples;
+ }
+ return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
+}
+
+static inline ssize_t stats_store(struct stats *stats, const char *buf,
+ size_t size)
+{
+ stats_init(stats);
+ return size;
+}
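
For illustration only (not part of this patch), a sketch of feeding per-frame
RSSI samples into a struct stats and exposing the running min/max/average via
a show handler; the names used here are hypothetical:

static struct stats example_rssi_stats;	/* stats_init() before first use */

static void example_record_rssi(s8 rssi)
{
	stats_add_sample(&example_rssi_stats, rssi);
}

/* in a sysfs show() handler:
 *	return stats_show(&example_rssi_stats, buf);
 */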
+
+#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/drivers/uwb/uwbd.c b/drivers/staging/uwb/uwbd.c
index dc5c743d4f5f..dc5c743d4f5f 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/staging/uwb/uwbd.c
diff --git a/drivers/uwb/whc-rc.c b/drivers/staging/uwb/whc-rc.c
index 22397f70dee2..34020ed351ab 100644
--- a/drivers/uwb/whc-rc.c
+++ b/drivers/staging/uwb/whc-rc.c
@@ -33,9 +33,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/uwb.h>
-#include <linux/uwb/whci.h>
-#include <linux/uwb/umc.h>
+#include "uwb.h"
+#include "include/whci.h"
+#include "include/umc.h"
#include "uwb-internal.h"
diff --git a/drivers/uwb/whci.c b/drivers/staging/uwb/whci.c
index be8a8b8e857b..a8832f64d708 100644
--- a/drivers/uwb/whci.c
+++ b/drivers/staging/uwb/whci.c
@@ -10,8 +10,8 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/uwb/whci.h>
-#include <linux/uwb/umc.h>
+#include "include/whci.h"
+#include "include/umc.h"
struct whci_card {
struct pci_dev *pci;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index ea54cc27e645..d4d1e44b16b2 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -75,34 +75,27 @@ static const struct v4l2_fract
/* video formats */
static struct mmal_fmt formats[] = {
{
- .name = "4:2:0, planar, YUV",
.fourcc = V4L2_PIX_FMT_YUV420,
- .flags = 0,
.mmal = MMAL_ENCODING_I420,
.depth = 12,
.mmal_component = COMP_CAMERA,
.ybbp = 1,
.remove_padding = 1,
}, {
- .name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
- .flags = 0,
.mmal = MMAL_ENCODING_YUYV,
.depth = 16,
.mmal_component = COMP_CAMERA,
.ybbp = 2,
.remove_padding = 0,
}, {
- .name = "RGB24 (LE)",
.fourcc = V4L2_PIX_FMT_RGB24,
- .flags = 0,
.mmal = MMAL_ENCODING_RGB24,
.depth = 24,
.mmal_component = COMP_CAMERA,
.ybbp = 3,
.remove_padding = 0,
}, {
- .name = "JPEG",
.fourcc = V4L2_PIX_FMT_JPEG,
.flags = V4L2_FMT_FLAG_COMPRESSED,
.mmal = MMAL_ENCODING_JPEG,
@@ -111,7 +104,6 @@ static struct mmal_fmt formats[] = {
.ybbp = 0,
.remove_padding = 0,
}, {
- .name = "H264",
.fourcc = V4L2_PIX_FMT_H264,
.flags = V4L2_FMT_FLAG_COMPRESSED,
.mmal = MMAL_ENCODING_H264,
@@ -120,7 +112,6 @@ static struct mmal_fmt formats[] = {
.ybbp = 0,
.remove_padding = 0,
}, {
- .name = "MJPEG",
.fourcc = V4L2_PIX_FMT_MJPEG,
.flags = V4L2_FMT_FLAG_COMPRESSED,
.mmal = MMAL_ENCODING_MJPEG,
@@ -129,72 +120,56 @@ static struct mmal_fmt formats[] = {
.ybbp = 0,
.remove_padding = 0,
}, {
- .name = "4:2:2, packed, YVYU",
.fourcc = V4L2_PIX_FMT_YVYU,
- .flags = 0,
.mmal = MMAL_ENCODING_YVYU,
.depth = 16,
.mmal_component = COMP_CAMERA,
.ybbp = 2,
.remove_padding = 0,
}, {
- .name = "4:2:2, packed, VYUY",
.fourcc = V4L2_PIX_FMT_VYUY,
- .flags = 0,
.mmal = MMAL_ENCODING_VYUY,
.depth = 16,
.mmal_component = COMP_CAMERA,
.ybbp = 2,
.remove_padding = 0,
}, {
- .name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
- .flags = 0,
.mmal = MMAL_ENCODING_UYVY,
.depth = 16,
.mmal_component = COMP_CAMERA,
.ybbp = 2,
.remove_padding = 0,
}, {
- .name = "4:2:0, planar, NV12",
.fourcc = V4L2_PIX_FMT_NV12,
- .flags = 0,
.mmal = MMAL_ENCODING_NV12,
.depth = 12,
.mmal_component = COMP_CAMERA,
.ybbp = 1,
.remove_padding = 1,
}, {
- .name = "RGB24 (BE)",
.fourcc = V4L2_PIX_FMT_BGR24,
- .flags = 0,
.mmal = MMAL_ENCODING_BGR24,
.depth = 24,
.mmal_component = COMP_CAMERA,
.ybbp = 3,
.remove_padding = 0,
}, {
- .name = "4:2:0, planar, YVU",
.fourcc = V4L2_PIX_FMT_YVU420,
- .flags = 0,
.mmal = MMAL_ENCODING_YV12,
.depth = 12,
.mmal_component = COMP_CAMERA,
.ybbp = 1,
.remove_padding = 1,
}, {
- .name = "4:2:0, planar, NV21",
.fourcc = V4L2_PIX_FMT_NV21,
- .flags = 0,
.mmal = MMAL_ENCODING_NV21,
.depth = 12,
.mmal_component = COMP_CAMERA,
.ybbp = 1,
.remove_padding = 1,
}, {
- .name = "RGB32 (BE)",
.fourcc = V4L2_PIX_FMT_BGR32,
- .flags = 0,
.mmal = MMAL_ENCODING_BGRA,
.depth = 32,
.mmal_component = COMP_CAMERA,
@@ -716,9 +691,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
fmt = &formats[f->index];
- strlcpy((char *)f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
- f->flags = fmt->flags;
return 0;
}
@@ -919,9 +892,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
fmt = &formats[f->index];
- strlcpy((char *)f->description, fmt->name, sizeof(f->description));
f->pixelformat = fmt->fourcc;
- f->flags = fmt->flags;
return 0;
}
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index 6f56c517d850..ff5398737b4a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
@@ -26,7 +26,6 @@ struct mmal_msg_context;
/* mapping between v4l and mmal video modes */
struct mmal_fmt {
- char *name;
u32 fourcc; /* v4l2 format id */
int flags; /* v4l2 flags field */
u32 mmal;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index f738e7f99e96..47897e81ec58 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -56,7 +56,7 @@ struct vchiq_mmal_port {
/* component port belongs to, allows simple deref */
struct vchiq_mmal_component *component;
- struct vchiq_mmal_port *connected; /* port conencted to */
+ struct vchiq_mmal_port *connected; /* port connected to */
/* buffer info */
struct vchiq_mmal_port_buffer minimum_buffer;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 61c69f353cdb..8dc730cfe7a6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -141,10 +141,8 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return PTR_ERR(g_regs);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "failed to get IRQ\n");
+ if (irq <= 0)
return irq;
- }
err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
"VCHIQ doorbell", state);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index cc4383d1ec3e..b1595b13dea8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -2824,7 +2824,6 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
- int local_uc, local_entity_uc;
if (!arm_state)
goto out;
@@ -2849,8 +2848,8 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
ret = VCHIQ_ERROR;
goto unlock;
}
- local_uc = --arm_state->videocore_use_count;
- local_entity_uc = --(*entity_uc);
+ --arm_state->videocore_use_count;
+ --(*entity_uc);
if (!vchiq_videocore_wanted(state)) {
if (vchiq_platform_use_suspend_timer() &&
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 183f5cf887e0..56a23a297fa4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -3322,13 +3322,13 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" %s: slots %d-%d tx_pos=%x recycle=%x",
label, shared->slot_first, shared->slot_last,
shared->tx_pos, shared->slot_queue_recycle);
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Slots claimed:");
vchiq_dump(dump_context, buf, len + 1);
@@ -3336,7 +3336,7 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
struct vchiq_slot_info slot_info =
*SLOT_INFO_FROM_INDEX(state, i);
if (slot_info.use_count != slot_info.release_count) {
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" %d: %d/%d", i, slot_info.use_count,
slot_info.release_count);
vchiq_dump(dump_context, buf, len + 1);
@@ -3344,7 +3344,7 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
}
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
- len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
+ len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
debug_names[i], shared->debug[i], shared->debug[i]);
vchiq_dump(dump_context, buf, len + 1);
}
@@ -3357,11 +3357,11 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
int len;
int i;
- len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
+ len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
conn_state_names[state->conn_state]);
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
state->local->tx_pos,
state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
@@ -3369,13 +3369,13 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Version: %d (min %d)",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
vchiq_dump(dump_context, buf, len + 1);
if (VCHIQ_ENABLE_STATS) {
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
"error_count=%d",
state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
@@ -3383,7 +3383,7 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
vchiq_dump(dump_context, buf, len + 1);
}
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Slots: %d available (%d data), %d recyclable, %d stalls "
"(%d data)",
((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
@@ -3416,7 +3416,7 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
+ len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
service->ref_count - 1); /*Don't include the lock just taken*/
@@ -3428,17 +3428,17 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
int tx_pending, rx_pending;
if (service->remoteport != VCHIQ_PORT_FREE) {
- int len2 = snprintf(remoteport, sizeof(remoteport),
+ int len2 = scnprintf(remoteport, sizeof(remoteport),
"%u", service->remoteport);
if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
- snprintf(remoteport + len2,
+ scnprintf(remoteport + len2,
sizeof(remoteport) - len2,
" (client %x)", service->client_id);
} else
strcpy(remoteport, "n/a");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
VCHIQ_FOURCC_AS_4CHARS(fourcc),
remoteport,
@@ -3455,7 +3455,7 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
rx_pending = service->bulk_rx.local_insert -
service->bulk_rx.remote_insert;
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Bulk: tx_pending=%d (size %d),"
" rx_pending=%d (size %d)",
tx_pending,
@@ -3468,7 +3468,7 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
if (VCHIQ_ENABLE_STATS) {
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Ctrl: tx_count=%d, tx_bytes=%llu, "
"rx_count=%d, rx_bytes=%llu",
service->stats.ctrl_tx_count,
@@ -3477,7 +3477,7 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.ctrl_rx_bytes);
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" Bulk: tx_count=%d, tx_bytes=%llu, "
"rx_count=%d, rx_bytes=%llu",
service->stats.bulk_tx_count,
@@ -3486,7 +3486,7 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.bulk_rx_bytes);
vchiq_dump(dump_context, buf, len + 1);
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
" %d quota stalls, %d slot stalls, "
"%d bulk stalls, %d aborted, %d errors",
service->stats.quota_stalls,
@@ -3562,9 +3562,9 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
for (offset = 0; offset < 16; offset++) {
if (offset < num_bytes)
- s += snprintf(s, 4, "%02x ", mem[offset]);
+ s += scnprintf(s, 4, "%02x ", mem[offset]);
else
- s += snprintf(s, 4, " ");
+ s += scnprintf(s, 4, " ");
}
for (offset = 0; offset < 16; offset++) {
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9def0748ffee..4e9cfacf75f2 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -287,12 +287,12 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
buf->duration_a = vnt_get_duration_le(priv,
tx_context->pkt_type, need_ack);
buf->duration_b = vnt_get_duration_le(priv,
- PK_TYPE_11B, need_ack);
+ PK_TYPE_11B, need_ack);
}
buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
buf->time_stamp_off_b = vnt_time_stamp_off(priv,
- priv->top_cck_basic_rate);
+ priv->top_cck_basic_rate);
tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
@@ -325,7 +325,7 @@ static u16 vnt_rxtx_datahead_g_fb(struct vnt_usb_send_context *tx_context,
buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
buf->time_stamp_off_b = vnt_time_stamp_off(priv,
- priv->top_cck_basic_rate);
+ priv->top_cck_basic_rate);
tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
@@ -655,7 +655,7 @@ static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
u8 need_ack = tx_context->need_ack;
buf->rrv_time = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate, need_ack);
+ frame_len, current_rate, need_ack);
if (need_mic)
head = &tx_head->tx_ab.tx.mic.head;
@@ -1036,7 +1036,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
/* Get Duration and TimeStampOff */
short_head->duration = vnt_get_duration_le(priv,
- PK_TYPE_11B, false);
+ PK_TYPE_11B, false);
short_head->time_stamp_off =
vnt_time_stamp_off(priv, current_rate);
}
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index ff351a7a0876..d3304df6bd53 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -216,7 +216,7 @@ static void vnt_submit_rx_urb_complete(struct urb *urb)
}
urb->transfer_buffer = skb_put(rcb->skb,
- skb_tailroom(rcb->skb));
+ skb_tailroom(rcb->skb));
}
if (usb_submit_urb(urb, GFP_ATOMIC)) {
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
index 4f7d1c2be4d0..da5235950a70 100644
--- a/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
+++ b/drivers/staging/wilc1000/microchip,wilc1000,sdio.txt
@@ -10,7 +10,9 @@ Required properties:
Optional:
- bus-width : Number of data lines wired up the slot. Default 1 bit.
-
+- rtc_clk : Clock connected on the rtc clock line. Must be assigned
+ a frequency with assigned-clocks property, and must be
+ connected to a clock provider.
Examples:
mmc1: mmc@fc000000 {
@@ -24,6 +26,10 @@ mmc1: mmc@fc000000 {
wilc_sdio@0 {
compatible = "microchip,wilc1000-sdio";
irq-gpios = <&pioC 27 0>;
+ clocks = <&pck1>;
+ clock-names = "rtc_clk";
+ assigned-clocks = <&pck1>;
+ assigned-clock-rates = <32768>;
status = "okay";
reg = <0>;
bus-width = <4>;
diff --git a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
index 87db87b2d901..34236932dbb6 100644
--- a/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
+++ b/drivers/staging/wilc1000/microchip,wilc1000,spi.txt
@@ -9,6 +9,10 @@ Required properties:
- reg : Chip select address of device
- irq-gpios : Connect to a host IRQ
+Optional:
+- rtc_clk : Clock connected on the rtc clock line. Must be assigned
+ a frequency with assigned-clocks property, and must be
+ connected to a clock provider.
Examples:
@@ -21,6 +25,10 @@ spi1: spi@fc018000 {
spi-max-frequency = <48000000>;
reg = <0>;
irq-gpios = <&pioC 27 0>;
+ clocks = <&pck1>;
+ clock-names = "rtc_clk";
+ assigned-clocks = <&pck1>;
+ assigned-clock-rates = <32768>;
status = "okay";
};
};
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c
index 9345cabe3c93..f2b7d5a1be17 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/wilc_hif.c
@@ -248,7 +248,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
goto error;
}
- if (vif->obtaining_ip || vif->connecting) {
+ if (vif->connecting) {
netdev_err(vif->ndev, "Don't do obss scan\n");
result = -EBUSY;
goto error;
@@ -679,13 +679,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
if (mac_status == WILC_MAC_STATUS_CONNECTED &&
conn_info->status == WLAN_STATUS_SUCCESS) {
ether_addr_copy(hif_drv->assoc_bssid, conn_info->bssid);
- wilc_set_power_mgmt(vif, 0, 0);
-
hif_drv->hif_state = HOST_IF_CONNECTED;
-
- vif->obtaining_ip = true;
- mod_timer(&vif->during_ip_timer,
- jiffies + msecs_to_jiffies(10000));
} else {
hif_drv->hif_state = HOST_IF_IDLE;
}
@@ -708,15 +702,11 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
handle_scan_done(vif, SCAN_EVENT_ABORTED);
}
- if (hif_drv->conn_info.conn_result) {
- vif->obtaining_ip = false;
- wilc_set_power_mgmt(vif, 0, 0);
-
+ if (hif_drv->conn_info.conn_result)
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
0, hif_drv->conn_info.arg);
- } else {
+ else
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- }
eth_zero_addr(hif_drv->assoc_bssid);
@@ -772,9 +762,6 @@ int wilc_disconnect(struct wilc_vif *vif)
wid.val = (s8 *)&dummy_reason_code;
wid.size = sizeof(char);
- vif->obtaining_ip = false;
- wilc_set_power_mgmt(vif, 0, 0);
-
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result) {
netdev_err(vif->ndev, "Failed to send disconnect\n");
@@ -811,15 +798,6 @@ int wilc_disconnect(struct wilc_vif *vif)
return 0;
}
-void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
-{
- if (!vif->hif_drv)
- return;
- if (vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP ||
- vif->hif_drv->hif_state == HOST_IF_CONNECTING)
- wilc_disconnect(vif);
-}
-
int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
{
struct wid wid_list[5];
@@ -924,7 +902,7 @@ static int handle_remain_on_chan(struct wilc_vif *vif,
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
return -EBUSY;
- if (vif->obtaining_ip || vif->connecting)
+ if (vif->connecting)
return -EBUSY;
remain_on_chan_flag = true;
@@ -1069,13 +1047,9 @@ static void handle_scan_timer(struct work_struct *work)
static void handle_scan_complete(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
- struct wilc *wilc = msg->vif->wilc;
del_timer(&msg->vif->hif_drv->scan_timer);
- if (!wilc_wlan_get_num_conn_ifcs(wilc))
- wilc_chip_sleep_manually(wilc);
-
handle_scan_done(msg->vif, SCAN_EVENT_DONE);
kfree(msg);
@@ -1426,18 +1400,14 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
return result;
}
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
- u8 ifc_id)
+int wilc_set_operation_mode(struct wilc_vif *vif, int index, u8 mode,
+ u8 ifc_id)
{
struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
int result;
struct wilc_drv_handler drv;
- if (!hif_drv)
- return -EFAULT;
-
- wid.id = WID_SET_DRV_HANDLER;
+ wid.id = WID_SET_OPERATION_MODE;
wid.type = WID_STR;
wid.size = sizeof(drv);
wid.val = (u8 *)&drv;
@@ -1452,26 +1422,6 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
return result;
}
-int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
-{
- struct wid wid;
- struct wilc_op_mode op_mode;
- int result;
-
- wid.id = WID_SET_OPERATION_MODE;
- wid.type = WID_INT;
- wid.size = sizeof(op_mode);
- wid.val = (u8 *)&op_mode;
-
- op_mode.mode = cpu_to_le32(mode);
-
- result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
- if (result)
- netdev_err(vif->ndev, "Failed to set operation mode\n");
-
- return result;
-}
-
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, u32 *out_val)
{
struct wid wid;
@@ -1610,7 +1560,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
*hif_drv_handler = hif_drv;
vif->hif_drv = hif_drv;
- vif->obtaining_ip = false;
if (wilc->clients_count == 0)
mutex_init(&wilc->deinit_lock);
@@ -1648,8 +1597,6 @@ int wilc_deinit(struct wilc_vif *vif)
del_timer_sync(&vif->periodic_rssi);
del_timer_sync(&hif_drv->remain_on_ch_timer);
- wilc_set_wfi_drv_handler(vif, 0, 0, 0);
-
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
hif_drv->usr_scan_req.arg);
@@ -2024,9 +1971,6 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
int result;
s8 power_mode;
- if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
- return 0;
-
if (enabled)
power_mode = WILC_FW_MIN_FAST_PS;
else
diff --git a/drivers/staging/wilc1000/wilc_hif.h b/drivers/staging/wilc1000/wilc_hif.h
index be1d2497cde9..ac5fe57f872b 100644
--- a/drivers/staging/wilc1000/wilc_hif.h
+++ b/drivers/staging/wilc1000/wilc_hif.h
@@ -219,11 +219,9 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie,
void *user_arg);
int wilc_listen_state_expired(struct wilc_vif *vif, u64 cookie);
void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
- u8 ifc_id);
-int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode);
+int wilc_set_operation_mode(struct wilc_vif *vif, int index, u8 mode,
+ u8 ifc_id);
int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
-void wilc_resolve_disconnect_aberration(struct wilc_vif *vif);
int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/wilc_mon.c
index 7d7933d40924..d6f14f69ad64 100644
--- a/drivers/staging/wilc1000/wilc_mon.c
+++ b/drivers/staging/wilc1000/wilc_mon.c
@@ -35,8 +35,7 @@ void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size)
return;
/* Get WILC header */
- memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
- le32_to_cpus(&header);
+ header = get_unaligned_le32(buff - HOST_HDR_OFFSET);
/*
* The packet offset field contain info about what type of management
* the frame we are dealing with and ack status
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/wilc_netdev.c
index 565e2b5d0616..508acb8bb089 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/wilc_netdev.c
@@ -11,6 +11,7 @@
#include <linux/inetdevice.h>
#include "wilc_wfi_cfgoperations.h"
+#include "wilc_wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
@@ -59,7 +60,7 @@ static int init_irq(struct net_device *dev)
ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine,
isr_bh_routine,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"WILC_IRQ", dev);
if (ret < 0)
netdev_err(dev, "Failed to request IRQ\n");
@@ -474,7 +475,7 @@ static void wilc_wlan_deinitialize(struct net_device *dev)
wlan_deinitialize_threads(dev);
deinit_irq(dev);
- wilc_wlan_stop(wl);
+ wilc_wlan_stop(wl, vif);
wilc_wlan_cleanup(dev);
wlan_deinit_locks(dev);
@@ -503,12 +504,6 @@ static int wlan_initialize_threads(struct net_device *dev)
return 0;
}
-static int dev_state_ev_handler(struct notifier_block *this,
- unsigned long event, void *ptr);
-static struct notifier_block g_dev_notifier = {
- .notifier_call = dev_state_ev_handler
-};
-
static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
{
int ret = 0;
@@ -574,12 +569,11 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
ret = -EIO;
goto fail_fw_start;
}
- register_inetaddr_notifier(&g_dev_notifier);
wl->initialized = true;
return 0;
fail_fw_start:
- wilc_wlan_stop(wl);
+ wilc_wlan_stop(wl, vif);
fail_irq_enable:
if (!wl->dev_irq_num &&
@@ -632,10 +626,8 @@ static int wilc_mac_open(struct net_device *ndev)
return ret;
}
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif), vif->iftype,
- vif->idx);
- wilc_set_operation_mode(vif, vif->iftype);
-
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+ vif->idx);
wilc_get_mac_address(vif, mac_add);
netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
ether_addr_copy(ndev->dev_addr, mac_add);
@@ -780,7 +772,6 @@ static int wilc_mac_close(struct net_device *ndev)
if (wl->open_ifcs == 0) {
netdev_dbg(ndev, "Deinitializing wilc1000\n");
wl->close = 1;
- unregister_inetaddr_notifier(&g_dev_notifier);
wilc_wlan_deinitialize(ndev);
}
@@ -863,63 +854,6 @@ static const struct net_device_ops wilc_netdev_ops = {
.ndo_set_rx_mode = wilc_set_multicast_list,
};
-static int dev_state_ev_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct in_ifaddr *dev_iface = ptr;
- struct wilc_priv *priv;
- struct host_if_drv *hif_drv;
- struct net_device *dev;
- struct wilc_vif *vif;
-
- if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev)
- return NOTIFY_DONE;
-
- dev = (struct net_device *)dev_iface->ifa_dev->dev;
- if (dev->netdev_ops != &wilc_netdev_ops)
- return NOTIFY_DONE;
-
- if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
- return NOTIFY_DONE;
-
- vif = netdev_priv(dev);
- priv = &vif->priv;
-
- hif_drv = (struct host_if_drv *)priv->hif_drv;
-
- switch (event) {
- case NETDEV_UP:
- if (vif->iftype == WILC_STATION_MODE ||
- vif->iftype == WILC_CLIENT_MODE) {
- hif_drv->ifc_up = 1;
- vif->obtaining_ip = false;
- del_timer(&vif->during_ip_timer);
- }
-
- if (vif->wilc->enable_ps)
- wilc_set_power_mgmt(vif, 1, 0);
-
- break;
-
- case NETDEV_DOWN:
- if (vif->iftype == WILC_STATION_MODE ||
- vif->iftype == WILC_CLIENT_MODE) {
- hif_drv->ifc_up = 0;
- vif->obtaining_ip = false;
- wilc_set_power_mgmt(vif, 0, 0);
- }
-
- wilc_resolve_disconnect_aberration(vif);
-
- break;
-
- default:
- break;
- }
-
- return NOTIFY_DONE;
-}
-
void wilc_netdev_cleanup(struct wilc *wilc)
{
int i;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 4c1c81fed11f..c787c5da8f2b 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -4,6 +4,7 @@
* All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/host.h>
@@ -151,6 +152,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->dev = &func->dev;
wilc->gpio_irq = gpio;
+ wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc_clk");
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
}
@@ -162,6 +169,10 @@ static void wilc_sdio_remove(struct sdio_func *func)
/* free the GPIO in module remove */
if (wilc->gpio_irq)
gpiod_put(wilc->gpio_irq);
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
wilc_netdev_cleanup(wilc);
}
@@ -193,9 +204,10 @@ static int wilc_sdio_suspend(struct device *dev)
dev_info(dev, "sdio suspend\n");
chip_wakeup(wilc);
- if (!wilc->suspend_event) {
- wilc_chip_sleep_manually(wilc);
- } else {
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
+ if (wilc->suspend_event) {
host_sleep_notify(wilc);
chip_allow_sleep(wilc);
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index d72fdd333050..22f21831649b 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -70,15 +70,6 @@ struct wilc_p2p_mgmt_data {
static const u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
static const u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
-#define WILC_IP_TIMEOUT_MS 15000
-
-static void clear_during_ip(struct timer_list *t)
-{
- struct wilc_vif *vif = from_timer(vif, t, during_ip_timer);
-
- vif->obtaining_ip = false;
-}
-
static void cfg_scan_result(enum scan_event scan_event,
struct wilc_rcvd_net_info *info, void *user_void)
{
@@ -176,7 +167,6 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
u16 reason = 0;
- vif->obtaining_ip = false;
priv->p2p.local_random = 0x01;
priv->p2p.recv_random = 0x00;
priv->p2p.is_wilc_ie = false;
@@ -1038,8 +1028,7 @@ void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
s32 freq;
__le16 fc;
- memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
- le32_to_cpus(&header);
+ header = get_unaligned_le32(buff - HOST_HDR_OFFSET);
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -1404,8 +1393,7 @@ static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
if (!priv->hif_drv)
return -EIO;
- if (vif->wilc->enable_ps)
- wilc_set_power_mgmt(vif, enabled, timeout);
+ wilc_set_power_mgmt(vif, enabled, timeout);
return 0;
}
@@ -1421,8 +1409,6 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
priv->p2p.local_random = 0x01;
priv->p2p.recv_random = 0x00;
priv->p2p.is_wilc_ie = false;
- vif->obtaining_ip = false;
- del_timer(&vif->during_ip_timer);
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -1433,13 +1419,11 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)
wilc_wfi_deinit_mon_interface(wl, true);
vif->iftype = WILC_STATION_MODE;
- wilc_set_operation_mode(vif, WILC_STATION_MODE);
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
memset(priv->assoc_stainfo.sta_associated_bss, 0,
WILC_MAX_NUM_STA * ETH_ALEN);
-
- wl->enable_ps = true;
- wilc_set_power_mgmt(vif, 1, 0);
break;
case NL80211_IFTYPE_P2P_CLIENT:
@@ -1448,37 +1432,26 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
priv->wdev.iftype = type;
vif->monitor_flag = 0;
vif->iftype = WILC_CLIENT_MODE;
- wilc_set_operation_mode(vif, WILC_STATION_MODE);
-
- wl->enable_ps = false;
- wilc_set_power_mgmt(vif, 0, 0);
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
break;
case NL80211_IFTYPE_AP:
- wl->enable_ps = false;
dev->ieee80211_ptr->iftype = type;
priv->wdev.iftype = type;
vif->iftype = WILC_AP_MODE;
- if (wl->initialized) {
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
- 0, vif->idx);
- wilc_set_operation_mode(vif, WILC_AP_MODE);
- wilc_set_power_mgmt(vif, 0, 0);
- }
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_AP_MODE, vif->idx);
break;
case NL80211_IFTYPE_P2P_GO:
- vif->obtaining_ip = true;
- mod_timer(&vif->during_ip_timer,
- jiffies + msecs_to_jiffies(WILC_IP_TIMEOUT_MS));
- wilc_set_operation_mode(vif, WILC_AP_MODE);
dev->ieee80211_ptr->iftype = type;
priv->wdev.iftype = type;
vif->iftype = WILC_GO_MODE;
-
- wl->enable_ps = false;
- wilc_set_power_mgmt(vif, 0, 0);
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_AP_MODE, vif->idx);
break;
default:
@@ -1500,7 +1473,6 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
netdev_err(dev, "Error in setting channel\n");
wilc_wlan_set_bssid(dev, dev->dev_addr, WILC_AP_MODE);
- wilc_set_power_mgmt(vif, 0, 0);
return wilc_add_beacon(vif, settings->beacon_interval,
settings->dtim_period, &settings->beacon);
@@ -1687,16 +1659,16 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
vif->monitor_flag = 0;
mutex_lock(&wl->vif_mutex);
- wilc_set_wfi_drv_handler(vif, 0, 0, 0);
- for (i = vif->idx; i < wl->vif_num ; i++) {
+ wilc_set_operation_mode(vif, 0, 0, 0);
+ for (i = vif->idx; i < wl->vif_num; i++) {
if ((i + 1) >= wl->vif_num) {
wl->vif[i] = NULL;
} else {
vif = wl->vif[i + 1];
vif->idx = i;
wl->vif[i] = vif;
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
- vif->iftype, vif->idx);
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ vif->iftype, vif->idx);
}
}
wl->vif_num--;
@@ -1851,7 +1823,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
*wilc = wl;
wl->io_type = io_type;
wl->hif_func = ops;
- wl->enable_ps = false;
wl->chip_ps_state = WILC_CHIP_WAKEDUP;
INIT_LIST_HEAD(&wl->txq_head.list);
INIT_LIST_HEAD(&wl->rxq_head.list);
@@ -1949,8 +1920,6 @@ int wilc_init_host_int(struct net_device *net)
struct wilc_vif *vif = netdev_priv(net);
struct wilc_priv *priv = &vif->priv;
- timer_setup(&vif->during_ip_timer, clear_during_ip, 0);
-
priv->p2p_listen_state = false;
mutex_init(&priv->scan_req_lock);
@@ -1969,11 +1938,10 @@ void wilc_deinit_host_int(struct net_device *net)
priv->p2p_listen_state = false;
+ flush_workqueue(vif->wilc->hif_workqueue);
mutex_destroy(&priv->scan_req_lock);
ret = wilc_deinit(vif);
- del_timer_sync(&vif->during_ip_timer);
-
if (ret)
netdev_err(net, "Error while deinitializing host interface\n");
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 1e74a08e7cf1..978a8bdbfc40 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -203,7 +203,6 @@ struct wilc_vif {
struct net_device *ndev;
u8 mode;
struct timer_list during_ip_timer;
- bool obtaining_ip;
struct timer_list periodic_rssi;
struct rf_info periodic_stat;
struct tcp_ack_filter ack_filter;
@@ -217,6 +216,7 @@ struct wilc {
int io_type;
s8 mac_status;
struct gpio_desc *gpio_irq;
+ struct clk *rtc_clk;
bool initialized;
int dev_irq_num;
int close;
@@ -262,7 +262,6 @@ struct wilc {
struct device *dev;
bool suspend_event;
- bool enable_ps;
int clients_count;
struct workqueue_struct *hif_workqueue;
enum chip_ps_states chip_ps_state;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index d46876edcfeb..771d8cb68dc1 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -455,20 +455,6 @@ void chip_wakeup(struct wilc *wilc)
}
EXPORT_SYMBOL_GPL(chip_wakeup);
-void wilc_chip_sleep_manually(struct wilc *wilc)
-{
- if (wilc->chip_ps_state != WILC_CHIP_WAKEDUP)
- return;
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
-
- chip_allow_sleep(wilc);
- wilc->hif_func->hif_write_reg(wilc, 0x10a8, 1);
-
- wilc->chip_ps_state = WILC_CHIP_SLEEPING_MANUAL;
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
-}
-EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
-
void host_wakeup_notify(struct wilc *wilc)
{
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
@@ -703,8 +689,7 @@ static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size)
do {
buff_ptr = buffer + offset;
- memcpy(&header, buff_ptr, 4);
- le32_to_cpus(&header);
+ header = get_unaligned_le32(buff_ptr);
is_cfg_packet = (header >> 31) & 0x1;
pkt_offset = (header >> 22) & 0x1ff;
@@ -773,26 +758,6 @@ static void wilc_unknown_isr_ext(struct wilc *wilc)
wilc->hif_func->hif_clear_int_ext(wilc, 0);
}
-static void wilc_pllupdate_isr_ext(struct wilc *wilc, u32 int_stats)
-{
- int trials = 10;
-
- wilc->hif_func->hif_clear_int_ext(wilc, PLL_INT_CLR);
-
- if (wilc->io_type == WILC_HIF_SDIO)
- mdelay(WILC_PLL_TO_SDIO);
- else
- mdelay(WILC_PLL_TO_SPI);
-
- while (!(is_wilc1000(wilc_get_chipid(wilc, true)) && --trials))
- mdelay(1);
-}
-
-static void wilc_sleeptimer_isr_ext(struct wilc *wilc, u32 int_stats1)
-{
- wilc->hif_func->hif_clear_int_ext(wilc, SLEEP_INT_CLR);
-}
-
static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
{
u32 offset = wilc->rx_buffer_offset;
@@ -842,15 +807,9 @@ void wilc_handle_isr(struct wilc *wilc)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
wilc->hif_func->hif_read_int(wilc, &int_status);
- if (int_status & PLL_INT_EXT)
- wilc_pllupdate_isr_ext(wilc, int_status);
-
if (int_status & DATA_INT_EXT)
wilc_wlan_handle_isr_ext(wilc, int_status);
- if (int_status & SLEEP_INT_EXT)
- wilc_sleeptimer_isr_ext(wilc, int_status);
-
if (!(int_status & (ALL_INT_EXT)))
wilc_unknown_isr_ext(wilc);
@@ -874,10 +833,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
offset = 0;
do {
- memcpy(&addr, &buffer[offset], 4);
- memcpy(&size, &buffer[offset + 4], 4);
- le32_to_cpus(&addr);
- le32_to_cpus(&size);
+ addr = get_unaligned_le32(&buffer[offset]);
+ size = get_unaligned_le32(&buffer[offset + 4]);
acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
@@ -985,72 +942,52 @@ int wilc_wlan_start(struct wilc *wilc)
return (ret < 0) ? ret : 0;
}
-int wilc_wlan_stop(struct wilc *wilc)
+int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
{
u32 reg = 0;
int ret;
- u8 timeout = 10;
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
if (!ret) {
+ netdev_err(vif->ndev, "Error while reading reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return ret;
+ return -EIO;
}
- reg &= ~BIT(10);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
+ (reg | WILC_ABORT_REQ_BIT));
if (!ret) {
+ netdev_err(vif->ndev, "Error while writing reg\n");
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return ret;
+ return -EIO;
}
- do {
- ret = wilc->hif_func->hif_read_reg(wilc,
- WILC_GLB_RESET_0, &reg);
- if (!ret) {
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return ret;
- }
-
- if ((reg & BIT(10))) {
- reg &= ~BIT(10);
- ret = wilc->hif_func->hif_write_reg(wilc,
- WILC_GLB_RESET_0,
- reg);
- timeout--;
- } else {
- ret = wilc->hif_func->hif_read_reg(wilc,
- WILC_GLB_RESET_0,
- &reg);
- if (!ret) {
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return ret;
- }
- break;
- }
-
- } while (timeout);
- reg = (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(8) | BIT(9) | BIT(26) |
- BIT(29) | BIT(30) | BIT(31));
-
- wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
- reg = (u32)~BIT(10);
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
+ if (!ret) {
+ netdev_err(vif->ndev, "Error while reading reg\n");
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return -EIO;
+ }
+ reg = BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
+ if (!ret) {
+ netdev_err(vif->ndev, "Error while writing reg\n");
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return -EIO;
+ }
release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return ret;
+ return 0;
}
void wilc_wlan_cleanup(struct net_device *dev)
{
struct txq_entry_t *tqe;
struct rxq_entry_t *rqe;
- u32 reg = 0;
- int ret;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
@@ -1075,23 +1012,6 @@ void wilc_wlan_cleanup(struct net_device *dev)
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
wilc->tx_buffer = NULL;
-
- acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
-
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (!ret) {
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return;
- }
-
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
- (reg | ABORT_INT));
- if (!ret) {
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- return;
- }
-
- release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
wilc->hif_func->hif_deinit(NULL);
}
@@ -1196,11 +1116,6 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
return ret_size;
}
-int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer, u32 buffer_size)
-{
- return wilc_wlan_cfg_get_wid_value(wl, wid, buffer, buffer_size);
-}
-
int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
u32 count)
{
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index d2eef7b4c3b7..7469fa47d588 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -98,6 +98,7 @@
#define WILC_VMM_TBL_RX_SHADOW_BASE WILC_AHB_SHARE_MEM_BASE
#define WILC_VMM_TBL_RX_SHADOW_SIZE 256
+#define WILC_FW_HOST_COMM 0x13c0
#define WILC_GP_REG_0 0x149c
#define WILC_GP_REG_1 0x14a0
@@ -127,9 +128,7 @@
#define WILC_CFG_RSP_STATUS 2
#define WILC_CFG_RSP_SCAN 3
-#define WILC_PLL_TO_SDIO 4
-#define WILC_PLL_TO_SPI 2
-#define ABORT_INT BIT(31)
+#define WILC_ABORT_REQ_BIT BIT(31)
#define WILC_RX_BUFF_SIZE (96 * 1024)
#define WILC_TX_BUFF_SIZE (64 * 1024)
@@ -184,14 +183,10 @@
#define EN_VMM BIT(8)
#define DATA_INT_EXT INT_0
-#define PLL_INT_EXT INT_1
-#define SLEEP_INT_EXT INT_2
-#define ALL_INT_EXT (DATA_INT_EXT | PLL_INT_EXT | SLEEP_INT_EXT)
-#define NUM_INT_EXT 3
+#define ALL_INT_EXT DATA_INT_EXT
+#define NUM_INT_EXT 1
#define DATA_INT_CLR CLR_INT0
-#define PLL_INT_CLR CLR_INT1
-#define SLEEP_INT_CLR CLR_INT2
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
@@ -280,7 +275,7 @@ struct wilc_vif;
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
-int wilc_wlan_stop(struct wilc *wilc);
+int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif);
int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size,
void (*tx_complete_fn)(void *, int));
@@ -291,12 +286,8 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler);
int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler);
-int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
- u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size, void (*func)(void *, int));
-void wilc_chip_sleep_manually(struct wilc *wilc);
-
void wilc_enable_tcp_ack_filter(struct wilc_vif *vif, bool value);
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc);
netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 9dc5de4eb08d..3f53807cee0f 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -52,57 +52,35 @@ static const struct wilc_cfg_str g_cfg_str[] = {
static int wilc_wlan_cfg_set_byte(u8 *frame, u32 offset, u16 id, u8 val8)
{
- u8 *buf;
-
if ((offset + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
-
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
- buf[2] = 1;
- buf[3] = 0;
- buf[4] = val8;
+ put_unaligned_le16(id, &frame[offset]);
+ put_unaligned_le16(1, &frame[offset + 2]);
+ frame[offset + 4] = val8;
return 5;
}
static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16)
{
- u8 *buf;
-
if ((offset + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
-
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
- buf[2] = 2;
- buf[3] = 0;
- buf[4] = (u8)val16;
- buf[5] = (u8)(val16 >> 8);
+ put_unaligned_le16(id, &frame[offset]);
+ put_unaligned_le16(2, &frame[offset + 2]);
+ put_unaligned_le16(val16, &frame[offset + 4]);
return 6;
}
static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
{
- u8 *buf;
-
if ((offset + 7) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
-
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
- buf[2] = 4;
- buf[3] = 0;
- buf[4] = (u8)val32;
- buf[5] = (u8)(val32 >> 8);
- buf[6] = (u8)(val32 >> 16);
- buf[7] = (u8)(val32 >> 24);
+ put_unaligned_le16(id, &frame[offset]);
+ put_unaligned_le16(4, &frame[offset + 2]);
+ put_unaligned_le32(val32, &frame[offset + 4]);
return 8;
}
@@ -110,46 +88,35 @@ static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str,
u32 size)
{
- u8 *buf;
-
if ((offset + size + 4) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
-
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
- buf[2] = (u8)size;
- buf[3] = (u8)(size >> 8);
-
+ put_unaligned_le16(id, &frame[offset]);
+ put_unaligned_le16(size, &frame[offset + 2]);
if (str && size != 0)
- memcpy(&buf[4], str, size);
+ memcpy(&frame[offset + 4], str, size);
return (size + 4);
}
static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
{
- u8 *buf;
u32 i;
u8 checksum = 0;
if ((offset + size + 5) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
- buf[2] = (u8)size;
- buf[3] = (u8)(size >> 8);
+ put_unaligned_le16(id, &frame[offset]);
+ put_unaligned_le16(size, &frame[offset + 2]);
if ((b) && size != 0) {
- memcpy(&buf[4], b, size);
+ memcpy(&frame[offset + 4], b, size);
for (i = 0; i < size; i++)
- checksum += buf[i + 4];
+ checksum += frame[offset + i + 4];
}
- buf[size + 4] = checksum;
+ frame[offset + size + 4] = checksum;
return (size + 5);
}
@@ -307,21 +274,16 @@ int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id)
{
- u8 *buf;
-
if ((offset + 2) >= WILC_MAX_CFG_FRAME_SIZE)
return 0;
- buf = &frame[offset];
-
- buf[0] = (u8)id;
- buf[1] = (u8)(id >> 8);
+ put_unaligned_le16(id, &frame[offset]);
return 2;
}
-int wilc_wlan_cfg_get_wid_value(struct wilc *wl, u16 wid, u8 *buffer,
- u32 buffer_size)
+int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
+ u32 buffer_size)
{
u32 type = (wid >> 12) & 0xf;
int i, ret = 0;
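
(For reference on the conversion above: the open-coded byte stores that were
removed and the put_unaligned_le*() helpers emit exactly the same little-endian
bytes. A minimal sketch assuming only the generic kernel unaligned helpers; the
function and buffer names below are illustrative, not part of the driver.)

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustration only: both halves below store the same two bytes of 'id'. */
static void demo_le16_equivalence(u8 *buf, u16 id)
{
	buf[0] = (u8)id;		/* old style: low byte first */
	buf[1] = (u8)(id >> 8);		/* old style: then high byte */

	put_unaligned_le16(id, buf);	/* new style: same two bytes */
}
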
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h
index e5ca6cea0682..614c5673f232 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h
@@ -44,8 +44,8 @@ struct wilc_cfg {
struct wilc;
int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size);
int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id);
-int wilc_wlan_cfg_get_wid_value(struct wilc *wl, u16 wid, u8 *buffer,
- u32 buffer_size);
+int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer,
+ u32 buffer_size);
void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
struct wilc_cfg_rsp *rsp);
int wilc_wlan_cfg_init(struct wilc *wl);
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index b89d0e0f04cc..70eac586f80c 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -724,7 +724,6 @@ enum {
/* NMAC Integer WID list */
/* Custom Integer WID list */
WID_GET_INACTIVE_TIME = 0x2084,
- WID_SET_OPERATION_MODE = 0X2086,
/* EMAC String WID list */
WID_SSID = 0x3000,
WID_FIRMWARE_VERSION = 0x3001,
@@ -755,9 +754,9 @@ enum {
WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */
WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */
WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */
- WID_SET_DRV_HANDLER = 0x3079,
/* NMAC String WID list */
+ WID_SET_OPERATION_MODE = 0x3079,
WID_11N_P_ACTION_REQ = 0x3080,
WID_HUT_TEST_ID = 0x3081,
WID_PMKID_INFO = 0x3082,
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index ab734534093b..28d372a0663a 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -226,11 +226,9 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
/*---------------------------------------------------*/
/* Low level req/resp CTLX formatters and submitters */
-static int
+static inline int
hfa384x_docmd(struct hfa384x *hw,
- enum cmd_mode mode,
- struct hfa384x_metacmd *cmd,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
+ struct hfa384x_metacmd *cmd);
static int
hfa384x_dorrid(struct hfa384x *hw,
@@ -250,21 +248,17 @@ hfa384x_dowrid(struct hfa384x *hw,
static int
hfa384x_dormem(struct hfa384x *hw,
- enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
- unsigned int len,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
+ unsigned int len);
static int
hfa384x_dowmem(struct hfa384x *hw,
- enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
- unsigned int len,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
+ unsigned int len);
static int hfa384x_isgood_pdrcode(u16 pdrcode);
@@ -820,99 +814,6 @@ static void hfa384x_cb_status(struct hfa384x *hw,
}
}
-static inline int hfa384x_docmd_wait(struct hfa384x *hw,
- struct hfa384x_metacmd *cmd)
-{
- return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
-}
-
-static inline int
-hfa384x_docmd_async(struct hfa384x *hw,
- struct hfa384x_metacmd *cmd,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_docmd(hw, DOASYNC, cmd, cmdcb, usercb, usercb_data);
-}
-
-static inline int
-hfa384x_dorrid_wait(struct hfa384x *hw, u16 rid, void *riddata,
- unsigned int riddatalen)
-{
- return hfa384x_dorrid(hw, DOWAIT,
- rid, riddata, riddatalen, NULL, NULL, NULL);
-}
-
-static inline int
-hfa384x_dorrid_async(struct hfa384x *hw,
- u16 rid, void *riddata, unsigned int riddatalen,
- ctlx_cmdcb_t cmdcb,
- ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_dorrid(hw, DOASYNC,
- rid, riddata, riddatalen,
- cmdcb, usercb, usercb_data);
-}
-
-static inline int
-hfa384x_dowrid_wait(struct hfa384x *hw, u16 rid, void *riddata,
- unsigned int riddatalen)
-{
- return hfa384x_dowrid(hw, DOWAIT,
- rid, riddata, riddatalen, NULL, NULL, NULL);
-}
-
-static inline int
-hfa384x_dowrid_async(struct hfa384x *hw,
- u16 rid, void *riddata, unsigned int riddatalen,
- ctlx_cmdcb_t cmdcb,
- ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_dowrid(hw, DOASYNC,
- rid, riddata, riddatalen,
- cmdcb, usercb, usercb_data);
-}
-
-static inline int
-hfa384x_dormem_wait(struct hfa384x *hw,
- u16 page, u16 offset, void *data, unsigned int len)
-{
- return hfa384x_dormem(hw, DOWAIT,
- page, offset, data, len, NULL, NULL, NULL);
-}
-
-static inline int
-hfa384x_dormem_async(struct hfa384x *hw,
- u16 page, u16 offset, void *data, unsigned int len,
- ctlx_cmdcb_t cmdcb,
- ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_dormem(hw, DOASYNC,
- page, offset, data, len,
- cmdcb, usercb, usercb_data);
-}
-
-static inline int
-hfa384x_dowmem_wait(struct hfa384x *hw,
- u16 page, u16 offset, void *data, unsigned int len)
-{
- return hfa384x_dowmem(hw, DOWAIT,
- page, offset, data, len, NULL, NULL, NULL);
-}
-
-static inline int
-hfa384x_dowmem_async(struct hfa384x *hw,
- u16 page,
- u16 offset,
- void *data,
- unsigned int len,
- ctlx_cmdcb_t cmdcb,
- ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_dowmem(hw, DOASYNC,
- page, offset, data, len,
- cmdcb, usercb, usercb_data);
-}
-
/*----------------------------------------------------------------
* hfa384x_cmd_initialize
*
@@ -944,7 +845,7 @@ int hfa384x_cmd_initialize(struct hfa384x *hw)
cmd.parm1 = 0;
cmd.parm2 = 0;
- result = hfa384x_docmd_wait(hw, &cmd);
+ result = hfa384x_docmd(hw, &cmd);
pr_debug("cmdresp.init: status=0x%04x, resp0=0x%04x, resp1=0x%04x, resp2=0x%04x\n",
cmd.result.status,
@@ -990,7 +891,7 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
cmd.parm1 = 0;
cmd.parm2 = 0;
- return hfa384x_docmd_wait(hw, &cmd);
+ return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1024,7 +925,7 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
cmd.parm1 = 0;
cmd.parm2 = 0;
- return hfa384x_docmd_wait(hw, &cmd);
+ return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1067,7 +968,7 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
cmd.parm1 = 0;
cmd.parm2 = 0;
- return hfa384x_docmd_wait(hw, &cmd);
+ return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1124,7 +1025,7 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
cmd.parm1 = highaddr;
cmd.parm2 = codelen;
- return hfa384x_docmd_wait(hw, &cmd);
+ return hfa384x_docmd(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1284,13 +1185,8 @@ cleanup:
*
* Arguments:
* hw device structure
- * mode DOWAIT or DOASYNC
* cmd cmd structure. Includes all arguments and result
 * data points. All in host order.
- * cmdcb command-specific callback
- * usercb user callback for async calls, NULL for DOWAIT calls
- * usercb_data user supplied data pointer for async calls, NULL
- * for DOWAIT calls
*
* Returns:
* 0 success
@@ -1306,11 +1202,9 @@ cleanup:
* process
*----------------------------------------------------------------
*/
-static int
+static inline int
hfa384x_docmd(struct hfa384x *hw,
- enum cmd_mode mode,
- struct hfa384x_metacmd *cmd,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
+ struct hfa384x_metacmd *cmd)
{
int result;
struct hfa384x_usbctlx *ctlx;
@@ -1333,15 +1227,15 @@ hfa384x_docmd(struct hfa384x *hw,
pr_debug("cmdreq: cmd=0x%04x parm0=0x%04x parm1=0x%04x parm2=0x%04x\n",
cmd->cmd, cmd->parm0, cmd->parm1, cmd->parm2);
- ctlx->reapable = mode;
- ctlx->cmdcb = cmdcb;
- ctlx->usercb = usercb;
- ctlx->usercb_data = usercb_data;
+ ctlx->reapable = DOWAIT;
+ ctlx->cmdcb = NULL;
+ ctlx->usercb = NULL;
+ ctlx->usercb_data = NULL;
result = hfa384x_usbctlx_submit(hw, ctlx);
if (result != 0) {
kfree(ctlx);
- } else if (mode == DOWAIT) {
+ } else {
struct usbctlx_cmd_completor cmd_completor;
struct usbctlx_completor *completor;
@@ -1540,14 +1434,10 @@ done:
*
* Arguments:
* hw device structure
- * mode DOWAIT or DOASYNC
* page MAC address space page (CMD format)
* offset MAC address space offset
* data Ptr to data buffer to receive read
* len Length of the data to read (max == 2048)
- * cmdcb command callback for async calls, NULL for DOWAIT calls
- * usercb user callback for async calls, NULL for DOWAIT calls
- * usercb_data user supplied data pointer for async calls
*
* Returns:
* 0 success
@@ -1559,18 +1449,15 @@ done:
* Side effects:
*
* Call context:
- * interrupt (DOASYNC)
- * process (DOWAIT or DOASYNC)
+ * process (DOWAIT)
*----------------------------------------------------------------
*/
static int
hfa384x_dormem(struct hfa384x *hw,
- enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
- unsigned int len,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
+ unsigned int len)
{
int result;
struct hfa384x_usbctlx *ctlx;
@@ -1598,15 +1485,15 @@ hfa384x_dormem(struct hfa384x *hw,
pr_debug("pktsize=%zd\n", ROUNDUP64(sizeof(ctlx->outbuf.rmemreq)));
- ctlx->reapable = mode;
- ctlx->cmdcb = cmdcb;
- ctlx->usercb = usercb;
- ctlx->usercb_data = usercb_data;
+ ctlx->reapable = DOWAIT;
+ ctlx->cmdcb = NULL;
+ ctlx->usercb = NULL;
+ ctlx->usercb_data = NULL;
result = hfa384x_usbctlx_submit(hw, ctlx);
if (result != 0) {
kfree(ctlx);
- } else if (mode == DOWAIT) {
+ } else {
struct usbctlx_rmem_completor completor;
result =
@@ -1632,14 +1519,10 @@ done:
*
* Arguments:
* hw device structure
- * mode DOWAIT or DOASYNC
* page MAC address space page (CMD format)
* offset MAC address space offset
* data Ptr to data buffer containing write data
* len Length of the data to read (max == 2048)
- * cmdcb command callback for async calls, NULL for DOWAIT calls
- * usercb user callback for async calls, NULL for DOWAIT calls
- * usercb_data user supplied data pointer for async calls.
*
* Returns:
* 0 success
@@ -1652,17 +1535,15 @@ done:
*
* Call context:
* interrupt (DOWAIT)
- * process (DOWAIT or DOASYNC)
+ * process (DOWAIT)
*----------------------------------------------------------------
*/
static int
hfa384x_dowmem(struct hfa384x *hw,
- enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
- unsigned int len,
- ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
+ unsigned int len)
{
int result;
struct hfa384x_usbctlx *ctlx;
@@ -1689,15 +1570,15 @@ hfa384x_dowmem(struct hfa384x *hw,
sizeof(ctlx->outbuf.wmemreq.offset) +
sizeof(ctlx->outbuf.wmemreq.page) + len;
- ctlx->reapable = mode;
- ctlx->cmdcb = cmdcb;
- ctlx->usercb = usercb;
- ctlx->usercb_data = usercb_data;
+ ctlx->reapable = DOWAIT;
+ ctlx->cmdcb = NULL;
+ ctlx->usercb = NULL;
+ ctlx->usercb_data = NULL;
result = hfa384x_usbctlx_submit(hw, ctlx);
if (result != 0) {
kfree(ctlx);
- } else if (mode == DOWAIT) {
+ } else {
struct usbctlx_cmd_completor completor;
struct hfa384x_cmdresult wmemresult;
@@ -2004,10 +1885,10 @@ int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
writelen = writelen > HFA384x_USB_RWMEM_MAXLEN ?
HFA384x_USB_RWMEM_MAXLEN : writelen;
- result = hfa384x_dowmem_wait(hw,
- writepage,
- writeoffset,
- writebuf, writelen);
+ result = hfa384x_dowmem(hw,
+ writepage,
+ writeoffset,
+ writebuf, writelen);
}
/* set the download 'write flash' mode */
@@ -2061,7 +1942,7 @@ exit_proc:
*/
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
- return hfa384x_dorrid_wait(hw, rid, buf, len);
+ return hfa384x_dorrid(hw, DOWAIT, rid, buf, len, NULL, NULL, NULL);
}
/*----------------------------------------------------------------
@@ -2094,8 +1975,8 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
void *buf,
u16 len, ctlx_usercb_t usercb, void *usercb_data)
{
- return hfa384x_dowrid_async(hw, rid, buf, len,
- hfa384x_cb_status, usercb, usercb_data);
+ return hfa384x_dowrid(hw, DOASYNC, rid, buf, len, hfa384x_cb_status,
+ usercb, usercb_data);
}
/*----------------------------------------------------------------
@@ -2261,12 +2142,11 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
currlen = HFA384x_USB_RWMEM_MAXLEN;
/* Do blocking ctlx */
- result = hfa384x_dowmem_wait(hw,
- currpage,
- curroffset,
- data +
- (i * HFA384x_USB_RWMEM_MAXLEN),
- currlen);
+ result = hfa384x_dowmem(hw,
+ currpage,
+ curroffset,
+ data + (i * HFA384x_USB_RWMEM_MAXLEN),
+ currlen);
if (result)
break;
@@ -2338,8 +2218,8 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
curroffset = HFA384x_ADDR_CMD_MKOFF(pdaloc[i].cardaddr);
/* units of bytes */
- result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
- len);
+ result = hfa384x_dormem(hw, currpage, curroffset, buf,
+ len);
if (result) {
netdev_warn(hw->wlandev->netdev,
@@ -2422,7 +2302,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
*/
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
{
- return hfa384x_dowrid_wait(hw, rid, buf, len);
+ return hfa384x_dowrid(hw, DOWAIT, rid, buf, len, NULL, NULL, NULL);
}
/*----------------------------------------------------------------
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 1eba5fa28d8f..7d7d77b04255 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -126,13 +126,6 @@ static int prism2mib_privacyinvoked(struct mibrec *mib,
struct p80211msg_dot11req_mibset *msg,
void *data);
-static int prism2mib_excludeunencrypted(struct mibrec *mib,
- int isget,
- struct wlandevice *wlandev,
- struct hfa384x *hw,
- struct p80211msg_dot11req_mibset *msg,
- void *data);
-
static int
prism2mib_fragmentationthreshold(struct mibrec *mib,
int isget,
@@ -176,7 +169,7 @@ static struct mibrec mibtab[] = {
{DIDMIB_DOT11SMT_PRIVACYTABLE_EXCLUDEUNENCRYPTED,
F_STA | F_READ | F_WRITE,
HFA384x_RID_CNFWEPFLAGS, HFA384x_WEPFLAGS_EXCLUDE, 0,
- prism2mib_excludeunencrypted},
+ prism2mib_flag},
/* dot11mac MIB's */
@@ -594,41 +587,6 @@ static int prism2mib_privacyinvoked(struct mibrec *mib,
}
/*
- * prism2mib_excludeunencrypted
- *
- * Get/set the dot11ExcludeUnencrypted value.
- *
- * MIB record parameters:
- * parm1 Prism2 RID value.
- * parm2 Bit value for ExcludeUnencrypted flag.
- * parm3 Not used.
- *
- * Arguments:
- * mib MIB record.
- * isget MIBGET/MIBSET flag.
- * wlandev wlan device structure.
- * priv "priv" structure.
- * hw "hw" structure.
- * msg Message structure.
- * data Data buffer.
- *
- * Returns:
- * 0 - Success.
- * ~0 - Error.
- *
- */
-
-static int prism2mib_excludeunencrypted(struct mibrec *mib,
- int isget,
- struct wlandevice *wlandev,
- struct hfa384x *hw,
- struct p80211msg_dot11req_mibset *msg,
- void *data)
-{
- return prism2mib_flag(mib, isget, wlandev, hw, msg, data);
-}
-
-/*
* prism2mib_fragmentationthreshold
*
* Get/set the fragmentation threshold.
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index fb5441399131..8f25496188aa 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -846,7 +846,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
snum, HFA384x_RID_NICSERIALNUMBER_LEN);
if (!result) {
- netdev_info(wlandev->netdev, "Prism2 card SN: %*pEhp\n",
+ netdev_info(wlandev->netdev, "Prism2 card SN: %*pE\n",
HFA384x_RID_NICSERIALNUMBER_LEN, snum);
} else {
netdev_err(wlandev->netdev, "Failed to retrieve Prism2 Card SN\n");
diff --git a/drivers/staging/wusbcore/Documentation/wusb-cbaf b/drivers/staging/wusbcore/Documentation/wusb-cbaf
new file mode 100644
index 000000000000..8b3d43efce90
--- /dev/null
+++ b/drivers/staging/wusbcore/Documentation/wusb-cbaf
@@ -0,0 +1,130 @@
+#! /bin/bash
+#
+
+set -e
+
+progname=$(basename $0)
+function help
+{
+ cat <<EOF
+Usage: $progname COMMAND DEVICEs [ARGS]
+
+Command for manipulating the pairing/authentication credentials of a
+Wireless USB device that supports wired-mode Cable-Based-Association.
+
+Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
+
+
+DEVICE
+
+ sysfs path to the device to authenticate; for example, both of
+ these refer to the same device:
+
+ /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
+ /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
+
+COMMAND/ARGS are
+
+ start
+
+ Start a WUSB host controller (by setting up a CHID)
+
+ set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
+
+ Sets host information in the device; after this you can call
+ get-cdid to see how this device reports itself to us.
+
+ get-cdid DEVICE
+
+ Get the device ID associated with the HOST-CHID we sent with
+ 'set-chid'. We might not know about it.
+
+ set-cc DEVICE
+
+ If we allow the device to connect, set a random new CDID and CK
+ (connection key). The device saves them for the next time it wants
+ to connect wirelessly. We save them as well, so that next time we
+ can authenticate the device (when we see the CDID it uses to
+ identify itself) and use the CK for encrypted communication with it.
+
+CHID is always 16 hex bytes in 'XX YY ZZ...' form
+BANDGROUP is almost always 0001
+
+Examples:
+
+ You can default most arguments to '' to get a sane value:
+
+ $ $progname set-chid '' '' '' "My host name"
+
+ A full sequence:
+
+ $ $progname set-chid '' '' '' "My host name"
+ $ $progname get-cdid ''
+ $ $progname set-cc ''
+
+EOF
+}
+
+
+# Defaults
+# FIXME: CHID should come from a database :), band group from the host
+host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
+host_band_group="0001"
+host_name=$(hostname)
+
+devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
+hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
+
+result=0
+case $1 in
+ start)
+ for dev in ${2:-$hdevs}
+ do
+ echo $host_CHID > $dev/wusb_chid
+ echo I: started host $(basename $dev) >&2
+ done
+ ;;
+ stop)
+ for dev in ${2:-$hdevs}
+ do
+ echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
+ echo I: stopped host $(basename $dev) >&2
+ done
+ ;;
+ set-chid)
+ shift
+ for dev in ${2:-$devs}; do
+ echo "${4:-$host_name}" > $dev/wusb_host_name
+ echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
+ echo ${2:-$host_CHID} > $dev/wusb_chid
+ done
+ ;;
+ get-cdid)
+ for dev in ${2:-$devs}
+ do
+ cat $dev/wusb_cdid
+ done
+ ;;
+ set-cc)
+ for dev in ${2:-$devs}; do
+ shift
+ CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
+ CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
+ echo "$CDID" > $dev/wusb_cdid
+ echo "$CK" > $dev/wusb_ck
+
+ echo I: CC set >&2
+ echo "CHID: $(cat $dev/wusb_chid)"
+ echo "CDID:$CDID"
+ echo "CK: $CK"
+ done
+ ;;
+ help|h|--help|-h)
+ help
+ ;;
+ *)
+ echo "E: Unknown usage" 1>&2
+ help 1>&2
+ result=1
+esac
+exit $result
diff --git a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
new file mode 100644
index 000000000000..dc5e21609bb5
--- /dev/null
+++ b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
@@ -0,0 +1,457 @@
+================================
+Linux UWB + Wireless USB + WiNET
+================================
+
+ Copyright (C) 2005-2006 Intel Corporation
+
+ Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License version
+ 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+
+Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
+updated content.
+
+ * Design-overview.txt-1.8
+
+This code implements an Ultra Wide Band stack for Linux, as well as
+drivers for the USB based UWB radio controllers defined in the
+Wireless USB 1.0 specification (including Wireless USB host controller
+and an Intel WiNET controller).
+
+.. Contents
+ 1. Introduction
+ 1. HWA: Host Wire adapters, your Wireless USB dongle
+
+ 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired
+ devices
+ 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host
+ adapter
+ 2. The UWB stack
+ 1. Devices and hosts: the basic structure
+
+ 2. Host Controller life cycle
+
+ 3. On the air: beacons and enumerating the radio neighborhood
+
+ 4. Device lists
+ 5. Bandwidth allocation
+
+ 3. Wireless USB Host Controller drivers
+
+ 4. Glossary
+
+
+Introduction
+============
+
+UWB is a wide-band communication protocol that is to serve also as the
+low-level protocol for others (much like TCP sits on IP). Currently
+these others are Wireless USB and TCP/IP, but it seems Bluetooth and
+Firewire/1394 are coming along.
+
+UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
+~-41 dBm/MHz (or 0.074 uW/MHz--geography-specific data is still being
+negotiated w/ regulators, so watch for changes). That band is divided
+into a bunch of ~1.5 GHz wide channels (or band groups) composed of
+three subbands/subchannels (528 MHz each). Each channel is independent
+of the others, so you could consider them different "busses". Initially
+this driver considers them all a single one.
+
+Radio time is divided into 65536 us long /superframes/, each one divided
+into 256 /MASs/ (Media Allocation Slots) of 256 us each, which are the
+basic time/media allocation units for transferring data. At the beginning
+of each superframe there is a Beacon Period (BP), where every device
+transmits its beacon on a single MAS. The length of the BP depends on how
+many devices are present and the length of their beacons.
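+(256 MASs of 256 us each add up to exactly the 65536 us superframe.)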
+
+Devices have a MAC address (fixed, 48 bit) and a device address
+(changeable, 16 bit) and send periodic beacons to advertise themselves
+and pass info on what they are and do. They advertise their capabilities
+and a bunch of other stuff.
+
+The different logical parts of this driver are:
+
+ *
+
+ *UWB*: the Ultra-Wide-Band stack -- manages the radio and
+ associated spectrum to allow for devices sharing it. Allows
+ control of bandwidth assignment, beaconing, scanning, etc.
+
+ *
+
+ *WUSB*: the layer that sits on top of UWB to provide Wireless USB.
+ The Wireless USB spec defines means to control a UWB radio and to
+ do the actual WUSB.
+
+
+HWA: Host Wire adapters, your Wireless USB dongle
+-------------------------------------------------
+
+WUSB also defines a device called a Host Wire Adaptor (HWA), which in
+simple terms is a USB dongle that enables your PC to have UWB and
+Wireless USB. The Wireless USB Host Controller in an HWA looks to the
+host like a [Wireless] USB controller connected via USB (!)
+
+The HWA itself is broken in two or three main interfaces:
+
+ *
+
+ *RC*: Radio control -- this implements an interface to the
+ Ultra-Wide-Band radio controller. The driver for this implements a
+ USB-based UWB Radio Controller to the UWB stack.
+
+ *
+
+ *HC*: the wireless USB host controller. It looks like a USB host
+ whose root port is the radio and the WUSB devices connect to it.
+ To the system it looks like a separate USB host. The driver (will)
+ implement a USB host controller (similar to UHCI, OHCI or EHCI)
+ for which the root hub is the radio...To reiterate: it is a USB
+ controller that is connected via USB instead of PCI.
+
+ *
+
+ *WINET*: some HW provides a WiNET interface (IP over UWB). This
+ package provides a driver for it (it looks like a network
+ interface, winetX). The driver detects when there is a link up for
+ its type and kicks into gear.
+
+
+DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
+---------------------------------------------------------------
+
+These are the complement to HWAs. A DWA is a USB host for connecting
+wired devices, but it is connected to your PC via Wireless USB. To the
+system it looks like yet another USB host. To the untrained eye, it
+looks like a hub that connects upstream wirelessly.
+
+We still offer no support for this; however, it should share a lot of
+code with the HWA-RC driver; there is a bunch of factorization work that
+has been done to support that in upcoming releases.
+
+
+WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
+-------------------------------------------------------------------
+
+This is your usual PCI device that implements WHCI. Similar in concept
+to EHCI, it allows your wireless USB devices (including DWAs) to connect
+to your host via a PCI interface. As in the case of the HWA, it has a
+Radio Control interface and the WUSB Host Controller interface per se.
+
+There is still no driver support for this, but there will be in
+upcoming releases.
+
+
+The UWB stack
+=============
+
+The main mission of the UWB stack is to keep a tally of which devices
+are in radio proximity to allow drivers to connect to them. As well, it
+provides an API for controlling the local radio controllers (RCs from
+now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
+
+
+Devices and hosts: the basic structure
+--------------------------------------
+
+The main building block here is the UWB device (struct uwb_dev). For
+each device that pops up in radio presence (ie: the UWB host receives a
+beacon from it) you get a struct uwb_dev that will show up in
+/sys/bus/uwb/devices.
+
+For each RC that is detected, a new struct uwb_rc and struct uwb_dev are
+created. An entry is also created in /sys/class/uwb_rc for each RC.
+
+Each RC driver is implemented by a separate driver that plugs into the
+interface that the UWB stack provides through a struct uwb_rc_ops. The
+spec creators have been nice enough to make the message format the same
+for HWA and WHCI RCs, so the driver is really a very thin transport that
+moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
+and sends the replies and notifications back to the API
+[/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, which
+is chartered, among other things, to keep tabs on how the UWB radio
+neighborhood looks, creating and destroying devices as they show up or
+disappear.
+
+Command execution is very simple: a command block is sent and an event
+block or reply is expected back. For sending/receiving command/events, a
+handle called /neh/ (Notification/Event Handle) is opened with
+/uwb_rc_neh_open()/.
+
+The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
+the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
+for the PCI connected WHCI controller.
+
+
+Host Controller life cycle
+--------------------------
+
+So let's say we connect a dongle to the system: it is detected and
+firmware uploaded if needed [for Intel's i1480
+/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
+Now we have a real HWA device connected and
+/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up; it will set up the
+Wire-Adaptor environment and then suck it into the UWB stack's vision of
+the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
+
+ *
+
+ [*] The stack should put a new RC to scan for devices
+ [/uwb_rc_scan()/] so it finds what's available around and tries to
+ connect to them, but this is policy stuff and should be driven
+ from user space. As of now, the operator is expected to do it
+ manually; see the release notes for documentation on the procedure.
+
+When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
+takes care of tearing everything down safely (or not...).
+
+
+On the air: beacons and enumerating the radio neighborhood
+----------------------------------------------------------
+
+So assuming we have devices and we have agreed on a channel to connect
+on (let's say 9), we put the new RC to beacon:
+
+ *
+
+ $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
+
+Now it is visible. If there were other devices in the same radio channel
+and beacon group (that's what the zero is for), the dongle's radio
+control interface will send beacon notifications on its
+notification/event endpoint (NEEP). The beacon notifications are part of
+the event stream that is funneled into the API with
+/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to UWBD, the UWB
+daemon, through a notification list.
+
+UWBD wakes up and scans the event list; it finds a beacon and adds it to
+the BEACON CACHE (/uwb_beca/). If it receives a number of beacons from
+the same device, it considers it to be 'onair' and creates a new device
+[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
+are received in some time, the device is considered gone and wiped out
+[uwbd periodically calls /uwb/beacon.c:uwb_beca_purge()/, which will
+purge the beacon cache of dead devices].
+
+
+Device lists
+------------
+
+All UWB devices are kept in the list of the struct bus_type uwb_bus_type.
+
+
+Bandwidth allocation
+--------------------
+
+The UWB stack maintains a local copy of DRP availability through
+processing of incoming *DRP Availability Change* notifications. This
+local copy is currently used to present the current bandwidth
+availability to the user through the sysfs file
+/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
+availability information will be used by the bandwidth reservation
+routines.
+
+The bandwidth reservation routines are in progress and are thus not
+present in the current release. When completed they will enable a user
+to initiate DRP reservation requests through interaction with sysfs. DRP
+reservation requests from remote UWB devices will also be handled. The
+bandwidth management done by the UWB stack will include callbacks to
+the higher layers, which will enable the higher layers to use the
+reservations upon completion. [Note: The bandwidth reservation work is
+in progress and subject to change.]
+
+
+Wireless USB Host Controller drivers
+====================================
+
+*WARNING* This section needs a lot of work!
+
+As explained above, there are three different types of HCs in the WUSB
+world: HWA-HC, DWA-HC and WHCI-HC.
+
+HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
+connected controllers), and their transfer management system is almost
+identical. So is their notification delivery system.
+
+HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
+they have to deal with the WUSB device life cycle and maintenance, and
+the wireless root hub.
+
+HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
+three endpoints (Notifications, Data Transfer In and Data Transfer
+Out--known as NEP, DTI and DTO in the code).
+
+We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
+ID and tell the HC to use all that. Then we start it. This means the HC
+starts sending MMCs.
+
+ *
+
+ The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
+ that define a stream in the UWB channel time allocated for sending
+ WUSB IEs (host to device commands/notifications) and Device
+ Notifications (device initiated to host). Each host defines a
+ unique Wireless USB cluster through MMCs. Devices can connect to a
+ single cluster at a time. The IEs are Information Elements, and
+ among them are the bandwidth allocations that tell each device
+ when they can transmit or receive.
+
+Now it all depends on external stimuli.
+
+New device connection
+---------------------
+
+A new device pops up and scans the radio looking for MMCs that give out
+the existence of Wireless USB channels. Once one (or more) are found, it
+selects which one to connect to. It sends a /DN_Connect/ (device
+notification connect) during the DNTS (Device Notification Time
+Slot--announced in the MMCs).
+
+HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery
+into /devconnect/). This starts the authentication process for the
+device. First we allocate a /fake port/ and assign an
+unauthenticated address (128 to 255--what we really do is
+0x80 | fake_port_idx). We fiddle with the fake port status and /hub_wq/
+sees a new connection, so it moves on to enable the fake port with a reset.
+
+So now we are in the reset path -- we know we have a not-yet-enumerated
+device with an unauthorized address; we ask user space to authenticate
+(FIXME: not yet done, similar to bluetooth pairing), then we do the key
+exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
+device to the default state. Device is authenticated.
+
+From here, the USB stack takes control through the usb_hcd ops. hub_wq
+has seen the port status changes, as we have been toggling them. It will
+start enumerating and doing transfers through usb_hcd->urb_enqueue() to
+read descriptors and move our data.
+
+Device life cycle and keep alives
+---------------------------------
+
+Every time there is a successful transfer to/from a device, we update a
+per-device activity timestamp. If not, every now and then we check and
+if the activity timestamp gets old, we ping the device by sending it a
+Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
+arrives to us as a notification through
+devconnect.c:wusb_handle_dn_alive()). If a device times out, we
+disconnect it from the system (cleaning up internal information and
+toggling the bits in the fake hub port, which kicks hub_wq into removing
+the rest of the stuff).
+
+This is done through devconnect:__wusb_check_devs(), which will scan the
+device list looking for devices that need refreshing.
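+
+To make the timeout logic concrete, here is a rough C sketch of the
+decision made for each device; the names are hypothetical stand-ins,
+not the actual wusbcore code::
+
+    #include <time.h>
+
+    enum demo_action { DEMO_NOTHING, DEMO_SEND_KEEPALIVE, DEMO_DISCONNECT };
+
+    struct demo_dev {
+        time_t last_activity;   /* updated on every successful transfer */
+        int    keepalive_sent;  /* 1 if a Keep Alive IE is already out  */
+    };
+
+    static enum demo_action demo_check_dev(const struct demo_dev *dev,
+                                           time_t now, time_t timeout)
+    {
+        if (now - dev->last_activity < timeout)
+            return DEMO_NOTHING;          /* device is still active     */
+        if (!dev->keepalive_sent)
+            return DEMO_SEND_KEEPALIVE;   /* ping it, wait for DN_Alive */
+        return DEMO_DISCONNECT;           /* no pong arrived: drop it   */
+    }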
+
+If the device wants to disconnect, it will either die (ugly) or send a
+/DN_Disconnect/ that will prompt a disconnection from the system.
+
+Sending and receiving data
+--------------------------
+
+Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
+/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
+DWAs.
+
+Each HC has a number of rpipes and buffers that can be assigned to them;
+when doing a data transfer (xfer), first the rpipe has to be aimed and
+prepared (buffers assigned), then we can start queueing requests for
+data in or out.
+
+Data buffers have to be segmented out before sending--so we first send a
+header (segment request) and then, if there is any data, a data buffer
+immediately after to the DTI interface (yep, even the request). If our
+buffer is bigger than the max segment size, then we just do multiple
+requests.
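+
+To make the chopping concrete, a rough C sketch of the segment math;
+DEMO_MAX_SEG and the helper names are hypothetical stand-ins, not the
+actual wusbcore code::
+
+    #include <stddef.h>
+
+    #define DEMO_MAX_SEG 2048   /* stand-in for the rpipe's segment limit */
+
+    /* Number of segment requests needed for a 'len'-byte buffer. */
+    static size_t demo_nr_segments(size_t len)
+    {
+        return (len + DEMO_MAX_SEG - 1) / DEMO_MAX_SEG;
+    }
+
+    /* Length of segment 'i' (0-based, i < demo_nr_segments(len)). */
+    static size_t demo_segment_len(size_t len, size_t i)
+    {
+        size_t remaining = len - i * DEMO_MAX_SEG;
+
+        return remaining > DEMO_MAX_SEG ? DEMO_MAX_SEG : remaining;
+    }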
+
+[This sucks, because doing USB scatter gather in Linux is resource
+intensive, if any...not that the current approach is not. It just has to
+be cleaned up a lot :)].
+
+If reading, we don't send data buffers, just the segment headers saying
+we want to read segments.
+
+When the xfer is executed, we receive a notification that says data is
+ready in the DTI endpoint (handled through
+xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
+descriptor that gives us the status of the transfer, its identification
+(given when we issued it) and the segment number. If it was a data read,
+we issue another URB to read into the destination buffer the chunk of
+data coming out of the remote endpoint. Done, wait for the next guy. The
+callbacks for the URBs issued from here are the ones that will declare
+the xfer complete at some point and call its callback.
+
+Seems simple, but the implementation is not trivial.
+
+ *
+
+ *WARNING* Old!!
+
+The main xfer descriptor, wa_xfer (equivalent to a URB) contains an
+array of segments, tallies on segments and buffers, and callback
+information. Buried in there is a lot of URBs for executing the segments
+and buffer transfers.
+
+For OUT xfers, there is an array of segments, one URB for each, and
+another array of buffer URBs. When submitting, we submit URBs for
+segment request 1, buffer 1, segment 2, buffer 2...etc. Then we wait on
+the DTI for xfer
+result data; when all the segments are complete, we call the callback to
+finalize the transfer.
+
+For IN xfers, we only issue URBs for the segments we want to read and
+then wait for the xfer result data.
+
+URB mapping into xfers
+^^^^^^^^^^^^^^^^^^^^^^
+
+This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
+rpipe to the endpoint where we have to transmit, create a transfer
+context (wa_xfer) and submit it. When the xfer is done, our callback is
+called and we assign the status bits and release the xfer resources.
+
+In dequeue() we are basically cancelling/aborting the transfer. We issue
+a xfer abort request to the HC, cancel all the URBs we had submitted
+and not yet done and when all that is done, the xfer callback will be
+called--this will call the URB callback.
+
+
+Glossary
+========
+
+*DWA* -- Device Wire Adapter
+
+USB host, wired for downstream devices, upstream connects wirelessly
+with Wireless USB.
+
+*EVENT* -- Response to a command on the NEEP
+
+*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
+
+*NEH* -- Notification/Event Handle
+
+Handle/file descriptor for receiving notifications or events. The WA
+code requires you to get one of these to listen for notifications or
+events on the NEEP.
+
+*NEEP* -- Notification/Event EndPoint
+
+Stuff related to the management of the first endpoint of a HWA USB
+dongle that is used to deliver a stream of events and notifications to
+the host.
+
+*NOTIFICATION* -- Message coming in on the NEEP in response to something.
+
+*RC* -- Radio Control
+
+Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
+InakyPerezGonzalez)
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/staging/wusbcore/Kconfig
index abc0f361021f..a559d023b508 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/staging/wusbcore/Kconfig
@@ -4,7 +4,7 @@
#
config USB_WUSB
tristate "Enable Wireless USB extensions"
- depends on UWB
+ depends on UWB && USB
select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
@@ -36,3 +36,4 @@ config USB_WUSB_CBAF_DEBUG
to the system log. Select this if you are having a problem with
CBA support and want to see more of what is going on.
+source "drivers/staging/wusbcore/host/Kconfig"
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/staging/wusbcore/Makefile
index d604ccdd916f..b47b874268ac 100644
--- a/drivers/usb/wusbcore/Makefile
+++ b/drivers/staging/wusbcore/Makefile
@@ -24,3 +24,5 @@ wusb-wa-y := \
wa-nep.o \
wa-rpipe.o \
wa-xfer.o
+
+obj-y += host/
diff --git a/drivers/staging/wusbcore/TODO b/drivers/staging/wusbcore/TODO
new file mode 100644
index 000000000000..abae57000534
--- /dev/null
+++ b/drivers/staging/wusbcore/TODO
@@ -0,0 +1,8 @@
+TODO: Remove in late 2019 unless there are users
+
+There seems to not be any real wireless USB devices anywhere in the wild
+anymore. It turned out to be a failed technology :(
+
+This will be removed from the tree if no one objects.
+
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/staging/wusbcore/cbaf.c
index af77064c7456..57062eaf7558 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/staging/wusbcore/cbaf.c
@@ -80,9 +80,9 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/uwb.h>
-#include <linux/usb/wusb.h>
-#include <linux/usb/association.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+#include "include/association.h"
#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/staging/wusbcore/crypto.c
index 9ee66483ee54..d7d55ed19a98 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/staging/wusbcore/crypto.c
@@ -38,10 +38,10 @@
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
-#include <linux/uwb.h>
#include <linux/slab.h>
-#include <linux/usb/wusb.h>
#include <linux/scatterlist.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
static int debug_crypto_verify;
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/staging/wusbcore/dev-sysfs.c
index 67b0a4c412b2..67b0a4c412b2 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/staging/wusbcore/dev-sysfs.c
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/staging/wusbcore/devconnect.c
index a93837d57d53..1170f8baf608 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/staging/wusbcore/devconnect.c
@@ -49,7 +49,7 @@
* for processing a DN_Alive pong from a device.
*
* wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to
- * process a disconenct request from a
+ * process a disconnect request from a
* device.
*
* __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
diff --git a/drivers/staging/wusbcore/host/Kconfig b/drivers/staging/wusbcore/host/Kconfig
new file mode 100644
index 000000000000..9a73f9360a08
--- /dev/null
+++ b/drivers/staging/wusbcore/host/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_WHCI_HCD
+ tristate "Wireless USB Host Controller Interface (WHCI) driver"
+ depends on USB_PCI && USB && UWB
+ select USB_WUSB
+ select UWB_WHCI
+ help
+ A driver for PCI-based Wireless USB Host Controllers that are
+ compliant with the WHCI specification.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "whci-hcd".
+
+config USB_HWA_HCD
+ tristate "Host Wire Adapter (HWA) driver"
+ depends on USB && UWB
+ select USB_WUSB
+ select UWB_HWA
+ help
+ This driver enables you to connect Wireless USB devices to
+ your system using a Host Wire Adaptor USB dongle. This is a
+ UWB Radio Controller and WUSB Host Controller connected to
+ your machine via USB (specified in WUSB1.0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called "hwa-hc".
+
diff --git a/drivers/staging/wusbcore/host/Makefile b/drivers/staging/wusbcore/host/Makefile
new file mode 100644
index 000000000000..d65ee8a73e21
--- /dev/null
+++ b/drivers/staging/wusbcore/host/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_USB_WHCI_HCD) += whci/
+obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/staging/wusbcore/host/hwa-hc.c
index 09a8ebd95588..8d959e91fe27 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/staging/wusbcore/host/hwa-hc.c
@@ -45,8 +45,8 @@
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/completion.h>
-#include "../wusbcore/wa-hc.h"
-#include "../wusbcore/wusbhc.h"
+#include "../wa-hc.h"
+#include "../wusbhc.h"
struct hwahc {
struct wusbhc wusbhc; /* has to be 1st */
@@ -159,7 +159,7 @@ out:
return result;
error_set_cluster_id:
- wusb_cluster_id_put(wusbhc->cluster_id);
+ wusb_cluster_id_put(addr);
error_cluster_id_get:
goto out;
diff --git a/drivers/usb/host/whci/Makefile b/drivers/staging/wusbcore/host/whci/Makefile
index 859d20079df6..859d20079df6 100644
--- a/drivers/usb/host/whci/Makefile
+++ b/drivers/staging/wusbcore/host/whci/Makefile
diff --git a/drivers/usb/host/whci/asl.c b/drivers/staging/wusbcore/host/whci/asl.c
index 276fb34c8efd..a2b9a50cfb80 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/staging/wusbcore/host/whci/asl.c
@@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
-#include <linux/uwb/umc.h>
#include <linux/usb.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/debug.c b/drivers/staging/wusbcore/host/whci/debug.c
index 8ddfe3f1f693..443da6719147 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/staging/wusbcore/host/whci/debug.c
@@ -10,7 +10,7 @@
#include <linux/seq_file.h>
#include <linux/export.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/staging/wusbcore/host/whci/hcd.c
index 8af9dcfea127..bee1ff2d35be 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/staging/wusbcore/host/whci/hcd.c
@@ -7,9 +7,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/uwb/umc.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/hw.c b/drivers/staging/wusbcore/host/whci/hw.c
index 22b3b7f7419d..e4e8914abf42 100644
--- a/drivers/usb/host/whci/hw.c
+++ b/drivers/staging/wusbcore/host/whci/hw.c
@@ -6,9 +6,9 @@
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
-#include <linux/uwb/umc.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/init.c b/drivers/staging/wusbcore/host/whci/init.c
index 82416973f773..55fd458a8f30 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/staging/wusbcore/host/whci/init.c
@@ -7,9 +7,9 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
-#include <linux/uwb/umc.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/int.c b/drivers/staging/wusbcore/host/whci/int.c
index 7e4ad1b8f3e3..bdbe35e9366f 100644
--- a/drivers/usb/host/whci/int.c
+++ b/drivers/staging/wusbcore/host/whci/int.c
@@ -5,9 +5,9 @@
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
-#include <linux/uwb/umc.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/staging/wusbcore/host/whci/pzl.c
index ef52aeb02fde..6dfc075f5798 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/staging/wusbcore/host/whci/pzl.c
@@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
-#include <linux/uwb/umc.h>
#include <linux/usb.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/qset.c b/drivers/staging/wusbcore/host/whci/qset.c
index 925166a207aa..66459b77dc77 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/staging/wusbcore/host/whci/qset.c
@@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/uwb/umc.h>
#include <linux/usb.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/staging/wusbcore/host/whci/whcd.h
index 139476997e7c..a442a2589e83 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/staging/wusbcore/host/whci/whcd.h
@@ -7,10 +7,10 @@
#ifndef __WHCD_H
#define __WHCD_H
-#include <linux/uwb/whci.h>
-#include <linux/uwb/umc.h>
#include <linux/workqueue.h>
+#include "../../../uwb/include/whci.h"
+#include "../../../uwb/include/umc.h"
#include "whci-hc.h"
/* Generic command timeout. */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/staging/wusbcore/host/whci/whci-hc.h
index 5a86a57a80cc..5a86a57a80cc 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/staging/wusbcore/host/whci/whci-hc.h
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/staging/wusbcore/host/whci/wusb.c
index 8a4d805ff63a..6d0068ab35e4 100644
--- a/drivers/usb/host/whci/wusb.c
+++ b/drivers/staging/wusbcore/host/whci/wusb.c
@@ -5,9 +5,9 @@
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
-#include <linux/uwb/umc.h>
-#include "../../wusbcore/wusbhc.h"
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
#include "whcd.h"
diff --git a/drivers/staging/wusbcore/include/association.h b/drivers/staging/wusbcore/include/association.h
new file mode 100644
index 000000000000..d7f3cb9b9db5
--- /dev/null
+++ b/drivers/staging/wusbcore/include/association.h
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB - Cable Based Association
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_USB_ASSOCIATION_H
+#define __LINUX_USB_ASSOCIATION_H
+
+
+/*
+ * Association attributes
+ *
+ * Association Models Supplement to WUSB 1.0 T[3-1]
+ *
+ * Each field in the structures has its ID, its length and then the
+ * value. This is the actual definition of the field's ID and its
+ * length.
+ */
+struct wusb_am_attr {
+ __u8 id;
+ __u8 len;
+};
+
+/* Different fields defined by the spec */
+#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
+#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
+#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
+#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
+#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
+#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
+#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
+#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
+#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
+
+/* CBAF Control Requests (AMS1.0[T4-1] */
+enum {
+ CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
+ CBAF_REQ_GET_ASSOCIATION_REQUEST,
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE
+};
+
+/*
+ * CBAF USB-interface definitions
+ *
+ * No altsettings, one optional interrupt endpoint.
+ */
+enum {
+ CBAF_IFACECLASS = 0xef,
+ CBAF_IFACESUBCLASS = 0x03,
+ CBAF_IFACEPROTOCOL = 0x01,
+};
+
+/* Association Information (AMS1.0[T4-3]) */
+struct wusb_cbaf_assoc_info {
+ __le16 Length;
+ __u8 NumAssociationRequests;
+ __le16 Flags;
+ __u8 AssociationRequestsArray[];
+} __attribute__((packed));
+
+/* Association Request (AMS1.0[T4-4]) */
+struct wusb_cbaf_assoc_request {
+ __u8 AssociationDataIndex;
+ __u8 Reserved;
+ __le16 AssociationTypeId;
+ __le16 AssociationSubTypeId;
+ __le32 AssociationTypeInfoSize;
+} __attribute__((packed));
+
+enum {
+ AR_TYPE_WUSB = 0x0001,
+ AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
+ AR_TYPE_WUSB_ASSOCIATE = 0x0001,
+};
+
+/* Association Attribute header (AMS1.0[3.8]) */
+struct wusb_cbaf_attr_hdr {
+ __le16 id;
+ __le16 len;
+} __attribute__((packed));
+
+/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */
+struct wusb_cbaf_host_info {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr CHID_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
+ __u8 HostFriendlyName[];
+} __attribute__((packed));
+
+/* Device Info (AMS1.0[T4-8])
+ *
+ * I still don't get this tag'n'header stuff for each goddamn
+ * field...
+ */
+struct wusb_cbaf_device_info {
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr CDID_hdr;
+ struct wusb_ckhdid CDID;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
+ __u8 DeviceFriendlyName[];
+} __attribute__((packed));
+
+/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
+struct wusb_cbaf_cc_data {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_ckhdid CDID;
+ struct wusb_ckhdid CK;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+} __attribute__((packed));
+
+/* CC_DATA - Failure case (AMS1.0[T4-10]) */
+struct wusb_cbaf_cc_data_fail {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le16 Length;
+ struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
+ __u32 AssociationStatus;
+} __attribute__((packed));
+
+#endif /* __LINUX_USB_ASSOCIATION_H */
diff --git a/drivers/staging/wusbcore/include/wusb-wa.h b/drivers/staging/wusbcore/include/wusb-wa.h
new file mode 100644
index 000000000000..64a840b5106e
--- /dev/null
+++ b/drivers/staging/wusbcore/include/wusb-wa.h
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Wire Adapter constants and structures.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation.
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ *
+ * References:
+ * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
+ */
+#ifndef __LINUX_USB_WUSB_WA_H
+#define __LINUX_USB_WUSB_WA_H
+
+/**
+ * Radio Command Request for the Radio Control Interface
+ *
+ * Radio Control Interface command and event codes are the same as
+ * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
+ */
+enum {
+ WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
+};
+
+/* Wireless Adapter Requests ([WUSB] table 8-51) */
+enum {
+ WUSB_REQ_ADD_MMC_IE = 20,
+ WUSB_REQ_REMOVE_MMC_IE = 21,
+ WUSB_REQ_SET_NUM_DNTS = 22,
+ WUSB_REQ_SET_CLUSTER_ID = 23,
+ WUSB_REQ_SET_DEV_INFO = 24,
+ WUSB_REQ_GET_TIME = 25,
+ WUSB_REQ_SET_STREAM_IDX = 26,
+ WUSB_REQ_SET_WUSB_MAS = 27,
+ WUSB_REQ_CHAN_STOP = 28,
+};
+
+
+/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
+enum {
+ WUSB_TIME_ADJ = 0,
+ WUSB_TIME_BPST = 1,
+ WUSB_TIME_WUSB = 2,
+};
+
+enum {
+ WA_ENABLE = 0x01,
+ WA_RESET = 0x02,
+ RPIPE_PAUSE = 0x1,
+ RPIPE_STALL = 0x2,
+};
+
+/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
+enum {
+ WA_STATUS_ENABLED = 0x01,
+ WA_STATUS_RESETTING = 0x02
+};
+
+enum rpipe_crs {
+ RPIPE_CRS_CTL = 0x01,
+ RPIPE_CRS_ISO = 0x02,
+ RPIPE_CRS_BULK = 0x04,
+ RPIPE_CRS_INTR = 0x08
+};
+
+/**
+ * RPipe descriptor ([WUSB] section 8.5.2.11)
+ *
+ * FIXME: explain rpipes
+ */
+struct usb_rpipe_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 wRPipeIndex;
+ __le16 wRequests;
+ __le16 wBlocks; /* rw if 0 */
+ __le16 wMaxPacketSize; /* rw */
+ union {
+ u8 dwa_bHSHubAddress; /* rw: DWA. */
+ u8 hwa_bMaxBurst; /* rw: HWA. */
+ };
+ union {
+ u8 dwa_bHSHubPort; /* rw: DWA. */
+ u8 hwa_bDeviceInfoIndex; /* rw: HWA. */
+ };
+ u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
+ union {
+ u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */
+ u8 hwa_reserved; /* rw: HWA. */
+ };
+ u8 bEndpointAddress; /* rw: Target EP address */
+ u8 bDataSequence; /* ro: Current Data sequence */
+ __le32 dwCurrentWindow; /* ro */
+ u8 bMaxDataSequence; /* ro?: max supported seq */
+ u8 bInterval; /* rw: */
+ u8 bOverTheAirInterval; /* rw: */
+ u8 bmAttribute; /* ro? */
+ u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
+ u8 bmRetryOptions; /* rw? */
+ __le16 wNumTransactionErrors; /* rw */
+} __attribute__ ((packed));
+
+/**
+ * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
+ *
+ * These are the notifications coming on the notification endpoint of
+ * an HWA and a DWA.
+ */
+enum wa_notif_type {
+ DWA_NOTIF_RWAKE = 0x91,
+ DWA_NOTIF_PORTSTATUS = 0x92,
+ WA_NOTIF_TRANSFER = 0x93,
+ HWA_NOTIF_BPST_ADJ = 0x94,
+ HWA_NOTIF_DN = 0x95,
+};
+
+/**
+ * Wire Adapter notification header
+ *
+ * Notifications coming from a wire adapter use a common header
+ * defined in [WUSB] sections 8.4.5 & 8.5.4.
+ */
+struct wa_notif_hdr {
+ u8 bLength;
+ u8 bNotifyType; /* enum wa_notif_type */
+} __packed;
+
+/**
+ * HWA DN Received notification ([WUSB] section 8.5.4.2)
+ *
+ * The DNData is specified in WUSB1.0[7.6]. For each device
+ * notification we receive, we just need to dispatch it.
+ *
+ * @dndata: this is really an array of notifications, but all start
+ * with the same header.
+ */
+struct hwa_notif_dn {
+ struct wa_notif_hdr hdr;
+ u8 bSourceDeviceAddr; /* from errata 2005/07 */
+ u8 bmAttributes;
+ struct wusb_dn_hdr dndata[];
+} __packed;
+
+/* [WUSB] section 8.3.3 */
+enum wa_xfer_type {
+ WA_XFER_TYPE_CTL = 0x80,
+ WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
+ WA_XFER_TYPE_ISO = 0x82,
+ WA_XFER_RESULT = 0x83,
+ WA_XFER_ABORT = 0x84,
+ WA_XFER_ISO_PACKET_INFO = 0xA0,
+ WA_XFER_ISO_PACKET_STATUS = 0xA1,
+};
+
+/* [WUSB] section 8.3.3 */
+struct wa_xfer_hdr {
+ u8 bLength; /* 0x18 */
+ u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+ __le32 dwTransferLength; /* Length of data to xfer */
+ u8 bTransferSegment;
+} __packed;
+
+struct wa_xfer_ctl {
+ struct wa_xfer_hdr hdr;
+ u8 bmAttribute;
+ __le16 wReserved;
+ struct usb_ctrlrequest baSetupData;
+} __packed;
+
+struct wa_xfer_bi {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wReserved;
+} __packed;
+
+/* [WUSB] section 8.5.5 */
+struct wa_xfer_hwaiso {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wPresentationTime;
+ __le32 dwNumOfPackets;
+} __packed;
+
+struct wa_xfer_packet_info_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ __le16 PacketLength[0];
+} __packed;
+
+struct wa_xfer_packet_status_len_hwaiso {
+ __le16 PacketLength;
+ __le16 PacketStatus;
+} __packed;
+
+struct wa_xfer_packet_status_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
+} __packed;
+
+/* [WUSB] section 8.3.3.5 */
+struct wa_xfer_abort {
+ u8 bLength;
+ u8 bRequestType;
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+} __packed;
+
+/**
+ * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
+ *
+ */
+struct wa_notif_xfer {
+ struct wa_notif_hdr hdr;
+ u8 bEndpoint;
+ u8 Reserved;
+} __packed;
+
+/** Transfer result basic codes [WUSB] table 8-15 */
+enum {
+ WA_XFER_STATUS_SUCCESS,
+ WA_XFER_STATUS_HALTED,
+ WA_XFER_STATUS_DATA_BUFFER_ERROR,
+ WA_XFER_STATUS_BABBLE,
+ WA_XFER_RESERVED,
+ WA_XFER_STATUS_NOT_FOUND,
+ WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
+ WA_XFER_STATUS_TRANSACTION_ERROR,
+ WA_XFER_STATUS_ABORTED,
+ WA_XFER_STATUS_RPIPE_NOT_READY,
+ WA_XFER_INVALID_FORMAT,
+ WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
+ WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
+};
+
+/** [WUSB] section 8.3.3.4 */
+struct wa_xfer_result {
+ struct wa_notif_hdr hdr;
+ __le32 dwTransferID;
+ __le32 dwTransferLength;
+ u8 bTransferSegment;
+ u8 bTransferStatus;
+ __le32 dwNumOfPackets;
+} __packed;
+
+/**
+ * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
+ *
+ * NOTE: u16 fields are read Little Endian from the hardware.
+ *
+ * @bNumPorts is the original max number of devices that the host can
+ * connect; we might chop this so the stack can handle
+ * it. In case you need to access it, use wusbhc->ports_max
+ * if it is a Wireless USB WA.
+ */
+struct usb_wa_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdWAVersion;
+ u8 bNumPorts; /* don't use!! */
+ u8 bmAttributes; /* Reserved == 0 */
+ __le16 wNumRPipes;
+ __le16 wRPipeMaxBlock;
+ u8 bRPipeBlockSize;
+ u8 bPwrOn2PwrGood;
+ u8 bNumMMCIEs;
+ u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
+} __packed;
+
+/**
+ * HWA Device Information Buffer (WUSB1.0[T8.54])
+ */
+struct hwa_dev_info {
+ u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
+ u8 bDeviceAddress;
+ __le16 wPHYRates;
+ u8 bmDeviceAttribute;
+} __packed;
+
+#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/drivers/staging/wusbcore/include/wusb.h b/drivers/staging/wusbcore/include/wusb.h
new file mode 100644
index 000000000000..09771d1da7bc
--- /dev/null
+++ b/drivers/staging/wusbcore/include/wusb.h
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Standard Definitions
+ * Event Size Tables
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ */
+
+#ifndef __WUSB_H__
+#define __WUSB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/usb/ch9.h>
+#include <linux/param.h>
+#include "../../uwb/include/spec.h"
+
+/**
+ * WUSB Information Element header
+ *
+ * I don't know why they decided to make it different from the MBOA MAC
+ * IE Header; beats me.
+ */
+struct wuie_hdr {
+ u8 bLength;
+ u8 bIEIdentifier;
+} __attribute__((packed));
+
+enum {
+ WUIE_ID_WCTA = 0x80,
+ WUIE_ID_CONNECTACK,
+ WUIE_ID_HOST_INFO,
+ WUIE_ID_CHANGE_ANNOUNCE,
+ WUIE_ID_DEVICE_DISCONNECT,
+ WUIE_ID_HOST_DISCONNECT,
+ WUIE_ID_KEEP_ALIVE = 0x89,
+ WUIE_ID_ISOCH_DISCARD,
+ WUIE_ID_RESET_DEVICE,
+};
+
+/**
+ * Maximum number of array elements in a WUSB IE.
+ *
+ * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that
+ * are "arrays" have to be limited to 4 elements. So we define it
+ * like that to ease up and submit only the needed size.
+ */
+#define WUIE_ELT_MAX 4
+
+/**
+ * Wrapper for the data that defines a CHID, a CDID or a CK
+ *
+ * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
+ * data. In order to avoid confusion and enforce types, we wrap it.
+ *
+ * Make it packed, as we use it in some hw definitions.
+ */
+struct wusb_ckhdid {
+ u8 data[16];
+} __attribute__((packed));
+
+static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+
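+/* Presumably 3 chars per byte ("%02x " style) for the 16 data bytes, plus a terminating NUL */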
+#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
+
+/**
+ * WUSB IE: Host Information (WUSB1.0[7.5.2])
+ *
+ * Used to provide information about the host to the Wireless USB
+ * devices in range (CHID can be used as an ASCII string).
+ */
+struct wuie_host_info {
+ struct wuie_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+ struct wuie_hdr hdr;
+ struct {
+ struct wusb_ckhdid CDID;
+ u8 bDeviceAddress; /* 0 means unused */
+ u8 bReserved;
+ } blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+ WUIE_HI_CAP_RECONNECT = 0,
+ WUIE_HI_CAP_LIMITED,
+ WUIE_HI_CAP_RESERVED,
+ WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+ struct wuie_hdr hdr;
+ u8 attributes;
+ u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+ struct wuie_hdr hdr;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress;
+ u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */
+struct wuie_host_disconnect {
+ struct wuie_hdr hdr;
+} __attribute__((packed));
+
+/**
+ * WUSB Device Notification header (WUSB1.0[7.6])
+ */
+struct wusb_dn_hdr {
+ u8 bType;
+ u8 notifdata[];
+} __attribute__((packed));
+
+/** Device Notification codes (WUSB1.0[Table 7-54]) */
+enum WUSB_DN {
+ WUSB_DN_CONNECT = 0x01,
+ WUSB_DN_DISCONNECT = 0x02,
+ WUSB_DN_EPRDY = 0x03,
+ WUSB_DN_MASAVAILCHANGED = 0x04,
+ WUSB_DN_RWAKE = 0x05,
+ WUSB_DN_SLEEP = 0x06,
+ WUSB_DN_ALIVE = 0x07,
+};
+
+/** WUSB Device Notification Connect */
+struct wusb_dn_connect {
+ struct wusb_dn_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
+{
+ return le16_to_cpu(dn->attributes) & 0xff;
+}
+
+static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
+}
+
+static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
+}
+
+/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
+struct wusb_dn_alive {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/** Device is disconnecting (WUSB1.0[7.6.2]) */
+struct wusb_dn_disconnect {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/* General constants */
+enum {
+ WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
+};
+
+/*
+ * WUSB Crypto stuff (WUSB1.0[6])
+ */
+
+extern const char *wusb_et_name(u8);
+
+/**
+ * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
+ * the host or the device.
+ */
+static inline u8 wusb_key_index(int index, int type, int originator)
+{
+ return (originator << 6) | (type << 4) | index;
+}
+
+#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
+#define WUSB_KEY_INDEX_TYPE_ASSOC 1
+#define WUSB_KEY_INDEX_TYPE_GTK 2
+#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
+#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+/* bits 0-3 used for the key index. */
+#define WUSB_KEY_INDEX_MAX 15
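+
+/*
+ * Worked example (illustrative only): a host-originated GTK at key index 1
+ * is encoded as wusb_key_index(1, WUSB_KEY_INDEX_TYPE_GTK,
+ * WUSB_KEY_INDEX_ORIGINATOR_HOST) == (0 << 6) | (2 << 4) | 1 == 0x21.
+ */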
+
+/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
+struct aes_ccm_nonce {
+ u8 sfn[6]; /* Little Endian */
+ u8 tkid[3]; /* LE */
+ struct uwb_dev_addr dest_addr;
+ struct uwb_dev_addr src_addr;
+} __attribute__((packed));
+
+/* A CCM operation label, defined on WUSB1.0[6.5.x] */
+struct aes_ccm_label {
+ u8 data[14];
+} __attribute__((packed));
+
+/*
+ * Input to the key derivation sequence defined in
+ * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
+ * PRF function.
+ */
+struct wusb_keydvt_in {
+ u8 hnonce[16];
+ u8 dnonce[16];
+} __attribute__((packed));
+
+/*
+ * Output from the key derivation sequence defined in
+ * WUSB1.0[6.5.1].
+ */
+struct wusb_keydvt_out {
+ u8 kck[16];
+ u8 ptk[16];
+} __attribute__((packed));
+
+/* Pseudo Random Function WUSB1.0[6.5] */
+extern int wusb_crypto_init(void);
+extern void wusb_crypto_exit(void);
+extern ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len);
+
+static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 64);
+}
+
+static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 128);
+}
+
+static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 256);
+}
+
+/* Key derivation WUSB1.0[6.5.1] */
+static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
+ const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct wusb_keydvt_in *keydvt_in)
+{
+ const struct aes_ccm_label a = { .data = "Pair-wise keys" };
+ return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
+ keydvt_in, sizeof(*keydvt_in));
+}
+
+/*
+ * Out-of-band MIC Generation WUSB1.0[6.5.2]
+ *
+ * Compute the MIC over @key, @n and @hs and place it in @mic_out.
+ *
+ * @mic_out: Where to place the 8 byte MIC tag
+ * @key: KCK from the derivation process
+ * @n: CCM nonce, n->sfn == 0, TKID as established in the
+ * process.
+ * @hs: Handshake struct for phase 2 of the 4-way.
+ * hs->bStatus and hs->bReserved are zero.
+ * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
+ * hs->dest_addr is the device's USB address padded with 0
+ * hs->src_addr is the host's UWB device address
+ * hs->MIC is ignored (as we compute that value).
+ */
+static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct usb_handshake *hs)
+{
+ const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
+ return wusb_prf_64(mic_out, 8, key, n, &a,
+ hs, sizeof(*hs) - sizeof(hs->MIC));
+}
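+
+/*
+ * Illustrative use only (hypothetical variable names): after key derivation
+ * the host would typically compute the MIC over the device's handshake
+ * message 2 and compare it with the MIC the device sent back:
+ *
+ *	u8 mic[8];
+ *
+ *	if (wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs2) < 0 ||
+ *	    memcmp(mic, hs2.MIC, sizeof(mic)))
+ *		goto error_handshake;
+ */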
+
+#endif /* #ifndef __WUSB_H__ */
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/staging/wusbcore/mmc.c
index acce0d551eb2..881e1f20d718 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/staging/wusbcore/mmc.c
@@ -22,9 +22,9 @@
* FIXME:
* - add timers that autoremove intervalled IEs?
*/
-#include <linux/usb/wusb.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include "include/wusb.h"
#include "wusbhc.h"
/* Initialize the MMCIEs handling mechanism */
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/staging/wusbcore/pal.c
index 30f569131471..30f569131471 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/staging/wusbcore/pal.c
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/staging/wusbcore/reservation.c
index 6dcfc6825f55..b921faac698b 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/staging/wusbcore/reservation.c
@@ -5,8 +5,8 @@
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*/
#include <linux/kernel.h>
-#include <linux/uwb.h>
+#include "../uwb/uwb.h"
#include "wusbhc.h"
/*
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/staging/wusbcore/rh.c
index 20c08cd9dcbf..20c08cd9dcbf 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/staging/wusbcore/rh.c
diff --git a/drivers/usb/wusbcore/security.c b/drivers/staging/wusbcore/security.c
index 14ac8c98ac9e..14ac8c98ac9e 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/staging/wusbcore/security.c
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/staging/wusbcore/wa-hc.c
index 6827075fb8a1..6827075fb8a1 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/staging/wusbcore/wa-hc.c
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/staging/wusbcore/wa-hc.h
index ec90fff21deb..5a38465724c2 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/staging/wusbcore/wa-hc.h
@@ -70,9 +70,9 @@
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/uwb.h>
-#include <linux/usb/wusb.h>
-#include <linux/usb/wusb-wa.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+#include "include/wusb-wa.h"
struct wusbhc;
struct wahc;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/staging/wusbcore/wa-nep.c
index 5f0656db5482..5f0656db5482 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/staging/wusbcore/wa-nep.c
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/staging/wusbcore/wa-rpipe.c
index a5734cbcd5ad..a5734cbcd5ad 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/staging/wusbcore/wa-rpipe.c
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/staging/wusbcore/wa-xfer.c
index abf88cea37bb..abf88cea37bb 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/staging/wusbcore/wa-xfer.c
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/staging/wusbcore/wusbhc.c
index d0b404d258e8..d0b404d258e8 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/staging/wusbcore/wusbhc.c
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/staging/wusbcore/wusbhc.h
index 7681d796ca5b..716244a2ec44 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/staging/wusbcore/wusbhc.h
@@ -45,8 +45,8 @@
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/usb/hcd.h>
-#include <linux/uwb.h>
-#include <linux/usb/wusb.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
/*
* Time from a WUSB channel stop request to the last transmitted MMC.
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 22dd4c457d6a..c70caf4ea490 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
return 0;
if (caps & DCB_CAP_DCBX_VER_IEEE) {
- iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
-
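+ /* Prefer the IEEE stream selector; fall back to the legacy 'any' selector if no app entry matched */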
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
-
+ if (!ret) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ }
} else if (caps & DCB_CAP_DCBX_VER_CEE) {
iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 343b129c2cfa..e877b917c15f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work)
iscsi_app = &dcb_work->dcb_app;
if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
- if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
+ (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
goto out;
priority = iscsi_app->app.priority;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
- uint32_t read_len = se_cmd->data_length;
+ uint32_t read_len;
/*
* cmd has been completed already from timeout, just reclaim
* data area space and free cmd
*/
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON_ONCE(se_cmd);
goto out;
+ }
list_del_init(&cmd->queue_entry);
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
goto done;
}
+ read_len = se_cmd->data_length;
if (se_cmd->data_direction == DMA_FROM_DEVICE &&
(entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
list_del_init(&cmd->queue_entry);
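+ /* Clear se_cmd so the eventual ring completion (which now warns if it is still set) only reclaims the data area */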
+ cmd->se_cmd = NULL;
} else {
list_del_init(&cmd->queue_entry);
idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index aa942703ae65..13b0269a0abc 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -148,6 +148,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
*/
optee_cq_wait_for_completion(&optee->call_queue, &w);
} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
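+ /* Handling the RPC below may block, so warn (via might_sleep) if we are in atomic context */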
+ might_sleep();
param.a0 = res.a0;
param.a1 = res.a1;
param.a2 = res.a2;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 4c5db59a619b..391f39776c6a 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
@@ -66,8 +67,6 @@ struct time_in_idle {
* @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @clipped_freq: integer value representing the absolute value of the clipped
- * frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @freq_table: Freq table in descending order of frequencies
@@ -84,12 +83,12 @@ struct cpufreq_cooling_device {
int id;
u32 last_load;
unsigned int cpufreq_state;
- unsigned int clipped_freq;
unsigned int max_level;
struct freq_table *freq_table; /* In descending order */
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
+ struct dev_pm_qos_request qos_req;
};
static DEFINE_IDA(cpufreq_ida);
@@ -119,59 +118,6 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
}
/**
- * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
- * @nb: struct notifier_block * with callback info.
- * @event: value showing cpufreq event for which this function invoked.
- * @data: callback-specific data
- *
- * Callback to hijack the notification on cpufreq policy transition.
- * Every time there is a change in policy, we will intercept and
- * update the cpufreq policy with thermal constraints.
- *
- * Return: 0 (success)
- */
-static int cpufreq_thermal_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct cpufreq_policy *policy = data;
- unsigned long clipped_freq;
- struct cpufreq_cooling_device *cpufreq_cdev;
-
- if (event != CPUFREQ_ADJUST)
- return NOTIFY_DONE;
-
- mutex_lock(&cooling_list_lock);
- list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
- /*
- * A new copy of the policy is sent to the notifier and can't
- * compare that directly.
- */
- if (policy->cpu != cpufreq_cdev->policy->cpu)
- continue;
-
- /*
- * policy->max is the maximum allowed frequency defined by user
- * and clipped_freq is the maximum that thermal constraints
- * allow.
- *
- * If clipped_freq is lower than policy->max, then we need to
- * readjust policy->max.
- *
- * But, if clipped_freq is greater than policy->max, we don't
- * need to do anything.
- */
- clipped_freq = cpufreq_cdev->clipped_freq;
-
- if (policy->max > clipped_freq)
- cpufreq_verify_within_limits(policy, 0, clipped_freq);
- break;
- }
- mutex_unlock(&cooling_list_lock);
-
- return NOTIFY_OK;
-}
-
-/**
* update_freq_table() - Update the freq table with power numbers
* @cpufreq_cdev: the cpufreq cooling device in which to update the table
* @capacitance: dynamic power coefficient for these cpus
@@ -374,7 +320,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- unsigned int clip_freq;
/* Request state should be less than max_level */
if (WARN_ON(state > cpufreq_cdev->max_level))
@@ -384,13 +329,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
if (cpufreq_cdev->cpufreq_state == state)
return 0;
- clip_freq = cpufreq_cdev->freq_table[state].frequency;
cpufreq_cdev->cpufreq_state = state;
- cpufreq_cdev->clipped_freq = clip_freq;
-
- cpufreq_update_policy(cpufreq_cdev->policy->cpu);
- return 0;
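+ /* The max-frequency PM QoS request now does the clamping; no need to poke the cpufreq policy directly */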
+ return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+ cpufreq_cdev->freq_table[state].frequency);
}
/**
@@ -554,11 +496,6 @@ static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
.power2state = cpufreq_power2state,
};
-/* Notifier for cpufreq policy change */
-static struct notifier_block thermal_cpufreq_notifier_block = {
- .notifier_call = cpufreq_thermal_notifier,
-};
-
static unsigned int find_next_max(struct cpufreq_frequency_table *table,
unsigned int prev_max)
{
@@ -596,9 +533,16 @@ __cpufreq_cooling_register(struct device_node *np,
struct cpufreq_cooling_device *cpufreq_cdev;
char dev_name[THERMAL_NAME_LENGTH];
unsigned int freq, i, num_cpus;
+ struct device *dev;
int ret;
struct thermal_cooling_device_ops *cooling_ops;
- bool first;
+
+ dev = get_cpu_device(policy->cpu);
+ if (unlikely(!dev)) {
+ pr_warn("No cpu device for cpu %d\n", policy->cpu);
+ return ERR_PTR(-ENODEV);
+ }
+
if (IS_ERR_OR_NULL(policy)) {
pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
@@ -671,25 +615,29 @@ __cpufreq_cooling_register(struct device_node *np,
cooling_ops = &cpufreq_cooling_ops;
}
+ ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ cpufreq_cdev->freq_table[0].frequency);
+ if (ret < 0) {
+ pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+ ret);
+ cdev = ERR_PTR(ret);
+ goto remove_ida;
+ }
+
cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
cooling_ops);
if (IS_ERR(cdev))
- goto remove_ida;
-
- cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
+ goto remove_qos_req;
mutex_lock(&cooling_list_lock);
- /* Register the notifier for first cpufreq cooling device */
- first = list_empty(&cpufreq_cdev_list);
list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
- if (first)
- cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
-
return cdev;
+remove_qos_req:
+ dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
@@ -777,7 +725,6 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
struct cpufreq_cooling_device *cpufreq_cdev;
- bool last;
if (!cdev)
return;
@@ -786,15 +733,10 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
mutex_lock(&cooling_list_lock);
list_del(&cpufreq_cdev->node);
- /* Unregister the notifier for the last cpufreq cooling device */
- last = list_empty(&cpufreq_cdev_list);
mutex_unlock(&cooling_list_lock);
- if (last)
- cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
-
thermal_cooling_device_unregister(cdev);
+ dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 213ab3cc6b80..d3446acf9bbd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep);
if (ret < 0) {
powercap_unregister_control_type(rapl_mmio_priv.control_type);
+ rapl_mmio_priv.control_type = NULL;
return ret;
}
rapl_mmio_priv.pcap_rapl_online = ret;
@@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
static void proc_thermal_rapl_remove(void)
{
+ if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type))
+ return;
+
cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online);
powercap_unregister_control_type(rapl_mmio_priv.control_type);
}
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 5149a817456b..53216dcbe173 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -430,7 +430,7 @@ static void clamp_idle_injection_func(struct kthread_work *work)
if (should_skip)
goto balance;
- play_idle(jiffies_to_msecs(w_data->duration_jiffies));
+ play_idle(jiffies_to_usecs(w_data->duration_jiffies));
balance:
if (clamping && w_data->clamping && cpu_online(w_data->cpu))
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 3f55cb3c81b2..001187c577bf 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
-thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2427d73be731..2ec1af8f7968 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -930,6 +930,23 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
return res;
}
+static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
+ const struct tb_cfg_result *res)
+{
+ /*
+ * For unimplemented ports, access to the port config space may return
+ * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
+ * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
+ * that the caller can mark the port as disabled.
+ */
+ if (space == TB_CFG_PORT &&
+ res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
+ return -ENODEV;
+
+ tb_cfg_print_error(ctl, res);
+ return -EIO;
+}
+
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -942,8 +959,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
case 1:
/* Thunderbolt error, tb_error holds the actual number */
- tb_cfg_print_error(ctl, &res);
- return -EIO;
+ return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
@@ -969,8 +985,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
case 1:
/* Thunderbolt error, tb_error holds the actual number */
- tb_cfg_print_error(ctl, &res);
- return -EIO;
+ return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 81e8ac4c5805..ee5196479854 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -414,7 +414,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
struct device *dev = &sw->tb->nhi->pdev->dev;
int len, res;
- len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+ len = device_property_count_u8(dev, "ThunderboltDROM");
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
@@ -525,10 +525,6 @@ int tb_drom_read(struct tb_switch *sw)
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].dual_link_port = &sw->ports[3];
- /* Port 5 is inaccessible on this gen 1 controller */
- if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
- sw->ports[5].disabled = true;
-
return 0;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fbdcef56a676..245588f691e7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -55,16 +55,20 @@
* @safe_mode: ICM is in safe mode
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
* @rpm: Does the controller support runtime PM (RTD3)
+ * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
+ * @veto: Is RTD3 veto in effect
* @is_supported: Checks if we can support ICM on this controller
* @cio_reset: Trigger CIO reset
* @get_mode: Read and return the ICM firmware mode (optional)
* @get_route: Find a route string for given switch
* @save_devices: Ask ICM to save devices to ACL when suspending (optional)
* @driver_ready: Send driver ready message to ICM
+ * @set_uuid: Set UUID for the root switch (optional)
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
* @xdomain_connected - Handle XDomain connected ICM message
* @xdomain_disconnected - Handle XDomain disconnected ICM message
+ * @rtd3_veto: Handle RTD3 veto notification ICM message
*/
struct icm {
struct mutex request_lock;
@@ -74,6 +78,8 @@ struct icm {
int vnd_cap;
bool safe_mode;
bool rpm;
+ bool can_upgrade_nvm;
+ bool veto;
bool (*is_supported)(struct tb *tb);
int (*cio_reset)(struct tb *tb);
int (*get_mode)(struct tb *tb);
@@ -82,6 +88,7 @@ struct icm {
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
size_t *nboot_acl, bool *rpm);
+ void (*set_uuid)(struct tb *tb);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
@@ -90,6 +97,7 @@ struct icm {
const struct icm_pkg_header *hdr);
void (*xdomain_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
+ void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};
struct icm_notification {
@@ -294,6 +302,43 @@ static int icm_request(struct tb *tb, const void *request, size_t request_size,
return -ETIMEDOUT;
}
+/*
+ * If rescan is queued to run (we are resuming), postpone it to give the
+ * firmware some more time to send device connected notifications for next
+ * devices in the chain.
+ */
+static void icm_postpone_rescan(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (delayed_work_pending(&icm->rescan_work))
+ mod_delayed_work(tb->wq, &icm->rescan_work,
+ msecs_to_jiffies(500));
+}
+
+static void icm_veto_begin(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (!icm->veto) {
+ icm->veto = true;
+ /* Keep the domain powered while veto is in effect */
+ pm_runtime_get(&tb->dev);
+ }
+}
+
+static void icm_veto_end(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (icm->veto) {
+ icm->veto = false;
+ /* Allow the domain suspend now */
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+ }
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -517,14 +562,16 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static void add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id, u8 connection_key,
- u8 link, u8 depth, enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid, const u8 *ep_name,
+ size_t ep_name_size, u8 connection_id,
+ u8 connection_key, u8 link, u8 depth,
+ enum tb_security_level security_level,
+ bool authorized, bool boot)
{
const struct intel_vss *vss;
struct tb_switch *sw;
+ int ret;
pm_runtime_get_sync(&parent_sw->dev);
@@ -555,14 +602,18 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
- if (tb_switch_add(sw)) {
+ ret = tb_switch_add(sw);
+ if (ret) {
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_put(sw);
+ sw = ERR_PTR(ret);
}
out:
pm_runtime_mark_last_busy(&parent_sw->dev);
pm_runtime_put_autosuspend(&parent_sw->dev);
+
+ return sw;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -654,6 +705,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u64 route;
int ret;
+ icm_postpone_rescan(tb);
+
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1084,7 +1137,8 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
}
static void
-icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
+ bool force_rtd3)
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
@@ -1094,6 +1148,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
bool authorized, boot;
u64 route;
+ icm_postpone_rescan(tb);
+
/*
* Currently we don't use the QoS information coming with the
* device connected message so simply just ignore that extra
@@ -1149,14 +1205,22 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- 0, 0, 0, security_level, authorized, boot);
+ sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
+ security_level, authorized, boot);
+ if (!IS_ERR(sw) && force_rtd3)
+ sw->rpm = true;
tb_switch_put(parent_sw);
}
static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, false);
+}
+
+static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_tr_event_device_disconnected *pkg =
@@ -1466,6 +1530,61 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
return 0;
}
+static int
+icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_tr_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, 20000);
+ if (ret)
+ return ret;
+
+ /* Ice Lake always supports RTD3 */
+ if (rpm)
+ *rpm = true;
+
+ return 0;
+}
+
+static void icm_icl_set_uuid(struct tb *tb)
+{
+ struct tb_nhi *nhi = tb->nhi;
+ u32 uuid[4];
+
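+ /* Build the root switch UUID from two vendor specific config dwords; pad the rest with 0xffffffff */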
+ pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
+ pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+
+ tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static void
+icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, true);
+}
+
+static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_icl_event_rtd3_veto *pkg =
+ (const struct icm_icl_event_rtd3_veto *)hdr;
+
+ tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
+
+ if (pkg->veto_reason)
+ icm_veto_begin(tb);
+ else
+ icm_veto_end(tb);
+}
+
static void icm_handle_notification(struct work_struct *work)
{
struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -1493,6 +1612,9 @@ static void icm_handle_notification(struct work_struct *work)
case ICM_EVENT_XDOMAIN_DISCONNECTED:
icm->xdomain_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_RTD3_VETO:
+ icm->rtd3_veto(tb, n->pkg);
+ break;
}
}
@@ -1851,6 +1973,13 @@ static void icm_complete(struct tb *tb)
if (tb->nhi->going_away)
return;
+ /*
+ * If RTD3 was vetoed before we entered system suspend, allow it
+ * again now before driver ready is sent. Firmware sends a new RTD3
+ * veto if it is still needed after we have sent it the driver ready
+ * command.
+ */
+ icm_veto_end(tb);
icm_unplug_children(tb->root_switch);
/*
@@ -1913,14 +2042,12 @@ static int icm_start(struct tb *tb)
if (IS_ERR(tb->root_switch))
return PTR_ERR(tb->root_switch);
- /*
- * NVM upgrade has not been tested on Apple systems and they
- * don't provide images publicly either. To be on the safe side
- * prevent root switch NVM upgrade on Macs for now.
- */
- tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+ tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
tb->root_switch->rpm = icm->rpm;
+ if (icm->set_uuid)
+ icm->set_uuid(tb);
+
ret = tb_switch_add(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
@@ -2005,6 +2132,19 @@ static const struct tb_cm_ops icm_tr_ops = {
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};
+/* Ice Lake */
+static const struct tb_cm_ops icm_icl_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
+ .handle_event = icm_handle_event,
+ .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
struct tb *icm_probe(struct tb_nhi *nhi)
{
struct icm *icm;
@@ -2021,6 +2161,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ icm->can_upgrade_nvm = true;
icm->is_supported = icm_fr_is_supported;
icm->get_route = icm_fr_get_route;
icm->save_devices = icm_fr_save_devices;
@@ -2038,6 +2179,13 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ /*
+ * NVM upgrade has not been tested on Apple systems and
+ * they don't provide images publicly either. To be on
+ * the safe side prevent root switch NVM upgrade on Macs
+ * for now.
+ */
+ icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_ar_cio_reset;
icm->get_mode = icm_ar_get_mode;
@@ -2054,6 +2202,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_tr_cio_reset;
icm->get_mode = icm_ar_get_mode;
@@ -2064,6 +2213,19 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
tb->cm_ops = &icm_tr_ops;
break;
+
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ icm->is_supported = icm_ar_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 27fbe62c7ddd..641b21b54460 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/property.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -143,9 +144,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
- iowrite16(value, ring_desc_base(ring) + offset);
+ /*
+ * The other 16 bits in the register are read-only and writes to them
+ * are ignored by the hardware, so we can save one ioread32() by
+ * filling the read-only bits with zeroes.
+ */
+ iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+ /* See ring_iowrite_cons() above for explanation */
+ iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -197,7 +209,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+ if (ring->is_tx)
+ ring_iowrite_prod(ring, ring->head);
+ else
+ ring_iowrite_cons(ring, ring->head);
}
}
@@ -662,7 +677,7 @@ void tb_ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+ ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
@@ -845,12 +860,52 @@ static irqreturn_t nhi_msi(int irq, void *data)
return IRQ_HANDLED;
}
-static int nhi_suspend_noirq(struct device *dev)
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_suspend_noirq(tb);
+ if (ret)
+ return ret;
+
+ if (nhi->ops && nhi->ops->suspend_noirq) {
+ ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+ return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+ u8 val;
+
+ /*
+ * If power rails are sustainable for wakeup from S4 this
+ * property is set by the BIOS.
+ */
+ if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+ return !!val;
+
+ return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool wakeup;
- return tb_domain_suspend_noirq(tb);
+ wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+ return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +928,24 @@ static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
/*
* Check that the device is still there. It may be that the user
* unplugged last device which causes the host controller to go
* away on PCs.
*/
- if (!pci_device_is_present(pdev))
- tb->nhi->going_away = true;
- else
+ if (!pci_device_is_present(pdev)) {
+ nhi->going_away = true;
+ } else {
+ if (nhi->ops && nhi->ops->resume_noirq) {
+ ret = nhi->ops->resume_noirq(nhi);
+ if (ret)
+ return ret;
+ }
nhi_enable_int_throttling(tb->nhi);
+ }
return tb_domain_resume_noirq(tb);
}
@@ -915,16 +978,35 @@ static int nhi_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_runtime_suspend(tb);
+ if (ret)
+ return ret;
- return tb_domain_runtime_suspend(tb);
+ if (nhi->ops && nhi->ops->runtime_suspend) {
+ ret = nhi->ops->runtime_suspend(tb->nhi);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
- nhi_enable_int_throttling(tb->nhi);
+ if (nhi->ops && nhi->ops->runtime_resume) {
+ ret = nhi->ops->runtime_resume(nhi);
+ if (ret)
+ return ret;
+ }
+
+ nhi_enable_int_throttling(nhi);
return tb_domain_runtime_resume(tb);
}
@@ -952,6 +1034,9 @@ static void nhi_shutdown(struct tb_nhi *nhi)
flush_work(&nhi->interrupt_work);
}
ida_destroy(&nhi->msix_ida);
+
+ if (nhi->ops && nhi->ops->shutdown)
+ nhi->ops->shutdown(nhi);
}
static int nhi_init_msi(struct tb_nhi *nhi)
@@ -996,12 +1081,27 @@ static int nhi_init_msi(struct tb_nhi *nhi)
return 0;
}
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+ u8 val;
+
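+ /* The BIOS presumably sets this property once it has validated the firmware image; assume valid when the property is absent */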
+ if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+ return !!val;
+
+ return true;
+}
+
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
struct tb *tb;
int res;
+ if (!nhi_imr_valid(pdev)) {
+ dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+ return -ENODEV;
+ }
+
res = pcim_enable_device(pdev);
if (res) {
dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
@@ -1019,6 +1119,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
nhi->pdev = pdev;
+ nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated bin pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
@@ -1051,6 +1152,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ if (nhi->ops && nhi->ops->init) {
+ res = nhi->ops->init(nhi);
+ if (res)
+ return res;
+ }
+
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
@@ -1111,6 +1218,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
.freeze = nhi_suspend,
+ .poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
.runtime_suspend = nhi_runtime_suspend,
@@ -1158,6 +1266,10 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ 0,}
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1b5d47ecd3ed..b7b973949f8e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -30,6 +30,26 @@ enum nhi_mailbox_cmd {
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+/**
+ * struct tb_nhi_ops - NHI specific optional operations
+ * @init: NHI specific initialization
+ * @suspend_noirq: NHI specific suspend_noirq hook
+ * @resume_noirq: NHI specific resume_noirq hook
+ * @runtime_suspend: NHI specific runtime_suspend hook
+ * @runtime_resume: NHI specific runtime_resume hook
+ * @shutdown: NHI specific shutdown
+ */
+struct tb_nhi_ops {
+ int (*init)(struct tb_nhi *nhi);
+ int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
+ int (*resume_noirq)(struct tb_nhi *nhi);
+ int (*runtime_suspend)(struct tb_nhi *nhi);
+ int (*runtime_resume)(struct tb_nhi *nhi);
+ void (*shutdown)(struct tb_nhi *nhi);
+};
+
+extern const struct tb_nhi_ops icl_nhi_ops;
+
/*
* PCI IDs used in this driver from Win Ridge forward. There is no
* need for the PCI quirk anymore as we will use ICM also on Apple
@@ -51,5 +71,7 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
+#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#endif
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
new file mode 100644
index 000000000000..61cd09cef943
--- /dev/null
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHI specific operations
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+/* Ice Lake specific NHI operations */
+
+#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */
+
+static int check_for_device(struct device *dev, void *data)
+{
+ return tb_is_switch(dev);
+}
+
+static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
+{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
+ int ret;
+
+ ret = device_for_each_child(&tb->root_switch->dev, NULL,
+ check_for_device);
+ return ret > 0;
+}
+
+static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
+{
+ u32 vs_cap;
+
+ /*
+ * The Thunderbolt host controller is always present in Ice Lake
+ * but the firmware may not be loaded and running (depending on
+ * whether there is a device connected and so on). Each time the
+ * controller is used we need to "Force Power" it first and wait
+ * for the firmware to indicate it is up and running. This "Force
+ * Power" is really not about actually powering the controller
+ * on/off, so it is accessible even if "Force Power" is off.
+ *
+ * The actual power management happens inside shared ACPI power
+ * resources using standard ACPI methods.
+ */
+ pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
+ if (power) {
+ vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
+ vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
+ vs_cap |= VS_CAP_22_FORCE_POWER;
+ } else {
+ vs_cap &= ~VS_CAP_22_FORCE_POWER;
+ }
+ pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
+
+ if (power) {
+ unsigned int retries = 10;
+ u32 val;
+
+		/* Wait until the firmware tells us it is up and running */
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
+ if (val & VS_CAP_9_FW_READY)
+ return 0;
+ msleep(250);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
+{
+ u32 data;
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
+ data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
+}
+
+static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
+{
+ unsigned long end;
+ u32 data;
+
+ if (!timeout)
+ goto clear;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
+ if (data & VS_CAP_18_DONE)
+ goto clear;
+ msleep(100);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+
+clear:
+ /* Clear the valid bit */
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
+ return 0;
+}
+
+static void icl_nhi_set_ltr(struct tb_nhi *nhi)
+{
+ u32 max_ltr, ltr;
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
+ max_ltr &= 0xffff;
+ /* Program the same value for both snoop and no-snoop */
+ ltr = max_ltr << 16 | max_ltr;
+ pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
+}
+
+static int icl_nhi_suspend(struct tb_nhi *nhi)
+{
+ int ret;
+
+ if (icl_nhi_is_device_connected(nhi))
+ return 0;
+
+ /*
+	 * If there is no device connected we need to perform both a
+	 * handshake through the LC mailbox and a force power-down before
+	 * entering D3.
+ */
+ icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+ ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return icl_nhi_force_power(nhi, false);
+}
+
+static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
+{
+ enum icl_lc_mailbox_cmd cmd;
+
+ if (!pm_suspend_via_firmware())
+ return icl_nhi_suspend(nhi);
+
+ cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
+ icl_nhi_lc_mailbox_cmd(nhi, cmd);
+ return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+}
+
+static int icl_nhi_resume(struct tb_nhi *nhi)
+{
+ int ret;
+
+ ret = icl_nhi_force_power(nhi, true);
+ if (ret)
+ return ret;
+
+ icl_nhi_set_ltr(nhi);
+ return 0;
+}
+
+static void icl_nhi_shutdown(struct tb_nhi *nhi)
+{
+ icl_nhi_force_power(nhi, false);
+}
+
+const struct tb_nhi_ops icl_nhi_ops = {
+ .init = icl_nhi_resume,
+ .suspend_noirq = icl_nhi_suspend_noirq,
+ .resume_noirq = icl_nhi_resume,
+ .runtime_suspend = icl_nhi_suspend,
+ .runtime_resume = icl_nhi_resume,
+ .shutdown = icl_nhi_shutdown,
+};
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index a60bd98c1d04..0d4970dcef84 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -124,4 +124,41 @@ struct ring_desc {
#define REG_FW_STS_ICM_EN_INVERT BIT(1)
#define REG_FW_STS_ICM_EN BIT(0)
+/* ICL NHI VSEC registers */
+
+/* FW ready */
+#define VS_CAP_9 0xc8
+#define VS_CAP_9_FW_READY BIT(31)
+/* UUID */
+#define VS_CAP_10 0xcc
+#define VS_CAP_11 0xd0
+/* LTR */
+#define VS_CAP_15 0xe0
+#define VS_CAP_16 0xe4
+/* TBT2PCIe */
+#define VS_CAP_18 0xec
+#define VS_CAP_18_DONE BIT(0)
+/* PCIe2TBT */
+#define VS_CAP_19 0xf0
+#define VS_CAP_19_VALID BIT(0)
+#define VS_CAP_19_CMD_SHIFT 1
+#define VS_CAP_19_CMD_MASK GENMASK(7, 1)
+/* Force power */
+#define VS_CAP_22 0xfc
+#define VS_CAP_22_FORCE_POWER BIT(1)
+#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24)
+#define VS_CAP_22_DMA_DELAY_SHIFT 24
+
+/**
+ * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
+ * @ICL_LC_GO2SX: Ask LC to enter Sx with wake
+ * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake
+ * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
+ */
+enum icl_lc_mailbox_cmd {
+ ICL_LC_GO2SX = 0x02,
+ ICL_LC_GO2SX_NO_WAKE = 0x03,
+ ICL_LC_PREPARE_FOR_RESET = 0x21,
+};
+
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5668a44e0653..410bf1bceeee 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -364,12 +364,14 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
nvm->active = nvm_dev;
}
- nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
- if (IS_ERR(nvm_dev)) {
- ret = PTR_ERR(nvm_dev);
- goto err_nvm_active;
+ if (!sw->no_nvm_upgrade) {
+ nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_nvm_active;
+ }
+ nvm->non_active = nvm_dev;
}
- nvm->non_active = nvm_dev;
sw->nvm = nvm;
return 0;
@@ -398,7 +400,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
if (!nvm->authenticating)
nvm_clear_auth_status(sw);
- nvmem_unregister(nvm->non_active);
+ if (nvm->non_active)
+ nvmem_unregister(nvm->non_active);
if (nvm->active)
nvmem_unregister(nvm->active);
ida_simple_remove(&nvm_ida, nvm->id);
@@ -611,8 +614,14 @@ static int tb_init_port(struct tb_port *port)
int cap;
res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
- if (res)
+ if (res) {
+ if (res == -ENODEV) {
+ tb_dbg(port->sw->tb, " Port %d: not implemented\n",
+ port->port);
+ return 0;
+ }
return res;
+ }
/* Port 0 is the switch itself and has no PHY. */
if (port->config.type == TB_TYPE_PORT && port->port != 0) {
@@ -1331,14 +1340,29 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct tb_switch *sw = tb_to_switch(dev);
- if (attr == &dev_attr_key.attr) {
+ if (attr == &dev_attr_device.attr) {
+ if (!sw->device)
+ return 0;
+ } else if (attr == &dev_attr_device_name.attr) {
+ if (!sw->device_name)
+ return 0;
+ } else if (attr == &dev_attr_vendor.attr) {
+ if (!sw->vendor)
+ return 0;
+ } else if (attr == &dev_attr_vendor_name.attr) {
+ if (!sw->vendor_name)
+ return 0;
+ } else if (attr == &dev_attr_key.attr) {
if (tb_route(sw) &&
sw->tb->security_level == TB_SECURITY_SECURE &&
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
- } else if (attr == &dev_attr_nvm_authenticate.attr ||
- attr == &dev_attr_nvm_version.attr) {
+ } else if (attr == &dev_attr_nvm_authenticate.attr) {
+ if (sw->dma_port && !sw->no_nvm_upgrade)
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_version.attr) {
if (sw->dma_port)
return attr->mode;
return 0;
@@ -1446,6 +1470,8 @@ static int tb_switch_get_generation(struct tb_switch *sw)
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
return 3;
default:
@@ -1689,13 +1715,17 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
break;
}
- if (sw->no_nvm_upgrade)
+ /* Root switch DMA port requires running firmware */
+ if (!tb_route(sw) && sw->config.enabled)
return 0;
sw->dma_port = dma_port_alloc(sw);
if (!sw->dma_port)
return 0;
+ if (sw->no_nvm_upgrade)
+ return 0;
+
/*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
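
The switch_attr_is_visible() changes earlier in this file's diff rely on the standard sysfs convention: an attribute_group's .is_visible callback returns 0 to hide an attribute and returns the attribute's mode to expose it. A generic, hedged sketch of that pattern (all names below are illustrative, not taken from switch.c):

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* Hide the attribute when the backing data is absent */
	if (attr == &dev_attr_example.attr && !dev_get_drvdata(dev))
		return 0;
	return attr->mode;
}

static const struct attribute_group example_group = {
	.attrs		= example_attrs,
	.is_visible	= example_attr_is_visible,
};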
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index afbe1d29bb03..4b641e4ee0c5 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -104,10 +104,11 @@ enum icm_pkg_code {
};
enum icm_event_code {
- ICM_EVENT_DEVICE_CONNECTED = 3,
- ICM_EVENT_DEVICE_DISCONNECTED = 4,
- ICM_EVENT_XDOMAIN_CONNECTED = 6,
- ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
+ ICM_EVENT_DEVICE_CONNECTED = 0x3,
+ ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
+ ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+ ICM_EVENT_RTD3_VETO = 0xa,
};
struct icm_pkg_header {
@@ -463,6 +464,13 @@ struct icm_tr_pkg_disconnect_xdomain_response {
uuid_t remote_uuid;
};
+/* Ice Lake messages */
+
+struct icm_icl_event_rtd3_veto {
+ struct icm_pkg_header hdr;
+ u32 veto_reason;
+};
+
/* XDomain messages */
struct tb_xdomain_header {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 31d0234837e4..5a99234826e7 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -211,7 +211,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return NULL;
}
tb_pci_init_path(path);
- tunnel->paths[TB_PCI_PATH_UP] = path;
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
"PCIe Up");
@@ -220,7 +220,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return NULL;
}
tb_pci_init_path(path);
- tunnel->paths[TB_PCI_PATH_DOWN] = path;
+ tunnel->paths[TB_PCI_PATH_UP] = path;
return tunnel;
}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 5118d46702d5..4e17a7c7bf0a 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -636,7 +636,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
- return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+ return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index cb4db1b3ca3c..ee0604cd9c6b 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -47,7 +47,7 @@
* using the 2.6 Linux kernel kref construct.
*
* For direction on installation and usage of this driver please reference
- * Documentation/powerpc/hvcs.txt.
+ * Documentation/powerpc/hvcs.rst.
*/
#include <linux/device.h>
@@ -871,8 +871,8 @@ static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
hvcsd->p_partition_ID = pi->partition_ID;
/* copy the null-term char too */
- strlcpy(&hvcsd->p_location_code[0],
- &pi->location_code[0], sizeof(hvcsd->p_location_code));
+ strlcpy(hvcsd->p_location_code, pi->location_code,
+ sizeof(hvcsd->p_location_code));
}
/*
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index e04a43e89f6b..fc38f96475bf 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -553,7 +553,6 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
tty = tty_port_tty_get(&port->port);
if (tty == NULL) {
- word_count = byte_count >> 1;
while (byte_count > 1) {
inw(base);
byte_count -= 2;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c4e16b31f9ab..36a3eb4ad4c5 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1716,7 +1716,7 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- tty_vhangup(tty);
+ tty_hangup(tty);
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
@@ -2171,6 +2171,16 @@ static inline void mux_put(struct gsm_mux *gsm)
kref_put(&gsm->ref, gsm_free_muxr);
}
+static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
+{
+ return gsm->num * NUM_DLCI;
+}
+
+static inline unsigned int mux_line_to_num(unsigned int line)
+{
+ return line / NUM_DLCI;
+}
+
/**
* gsm_alloc_mux - allocate a mux
*
@@ -2351,7 +2361,8 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- int ret, i, base;
+ unsigned int base;
+ int ret, i;
gsm->tty = tty_kref_get(tty);
gsm->output = gsmld_output;
@@ -2361,7 +2372,7 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
else {
/* Don't register device 0 - this is the control channel and not
a usable tty interface */
- base = gsm->num << 6; /* Base for this MUX */
+ base = mux_num_to_base(gsm); /* Base for this MUX */
for (i = 1; i < NUM_DLCI; i++)
tty_register_device(gsm_tty_driver, base + i, NULL);
}
@@ -2379,8 +2390,8 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
+ unsigned int base = mux_num_to_base(gsm); /* Base for this MUX */
int i;
- int base = gsm->num << 6; /* Base for this MUX */
WARN_ON(tty != gsm->tty);
for (i = 1; i < NUM_DLCI; i++)
@@ -2602,6 +2613,7 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
{
struct gsm_config c;
struct gsm_mux *gsm = tty->disc_data;
+ unsigned int base;
switch (cmd) {
case GSMIOC_GETCONF:
@@ -2613,6 +2625,9 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
if (copy_from_user(&c, (void *)arg, sizeof(c)))
return -EFAULT;
return gsm_config(gsm, &c);
+ case GSMIOC_GETFIRST:
+ base = mux_num_to_base(gsm);
+ return put_user(base + 1, (__u32 __user *)arg);
default:
return n_tty_ioctl_helper(tty, file, cmd, arg);
}
@@ -2908,7 +2923,7 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
struct gsm_mux *gsm;
struct gsm_dlci *dlci;
unsigned int line = tty->index;
- unsigned int mux = line >> 6;
+ unsigned int mux = mux_line_to_num(line);
bool alloc = false;
int ret;
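
The new GSMIOC_GETFIRST ioctl above lets userspace ask for the first DLCI tty minor of a mux (base + 1) instead of hard-coding the old "mux number times 64" mapping. A hedged userspace sketch, assuming the matching GSMIOC_GETFIRST definition lands in linux/gsmmux.h as part of the same series and that the nodes are named /dev/gsmttyN:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>

/* fd: serial port that already has the N_GSM0710 line discipline attached */
static int print_first_dlci(int fd)
{
	__u32 first;

	if (ioctl(fd, GSMIOC_GETFIRST, &first) < 0)
		return -1;

	printf("first DLCI tty: /dev/gsmtty%u\n", first);
	return 0;
}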
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 3214e22e79f3..ed99948f3b7f 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1282,7 +1282,7 @@ static void nozomi_setup_private_data(struct nozomi *dc)
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dc->card_type);
}
@@ -1291,7 +1291,7 @@ static DEVICE_ATTR_RO(card_type);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dc->open_ttys);
}
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index bd53661103eb..8ce700c1a7fc 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -56,10 +56,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
/* get the interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "irq not found - %i", ret);
+ if (ret < 0)
return ret;
- }
data->uart.port.irq = ret;
/* map the main registers */
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index df3bcc0b2d74..e682390ce0de 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1026,10 +1026,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (!has_acpi_companion(uart->port.dev)) {
gpios = mctrl_gpio_init(&uart->port, 0);
if (IS_ERR(gpios)) {
- if (PTR_ERR(gpios) != -ENOSYS) {
- ret = PTR_ERR(gpios);
- goto out_unlock;
- }
+ ret = PTR_ERR(gpios);
+ goto out_unlock;
} else {
uart->gpios = gpios;
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 284e8d052fc3..1c72fdc2dd37 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -27,66 +27,36 @@
#include <asm/byteorder.h>
-#include "8250.h"
+#include "8250_dwlib.h"
/* Offsets for the DesignWare specific registers */
#define DW_UART_USR 0x1f /* UART Status Register */
-#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
-#define DW_UART_CPR 0xf4 /* Component Parameter Register */
-#define DW_UART_UCV 0xf8 /* UART Component Version */
-
-/* Component Parameter Register bits */
-#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
-#define DW_UART_CPR_AFCE_MODE (1 << 4)
-#define DW_UART_CPR_THRE_MODE (1 << 5)
-#define DW_UART_CPR_SIR_MODE (1 << 6)
-#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
-#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
-#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
-#define DW_UART_CPR_FIFO_STAT (1 << 10)
-#define DW_UART_CPR_SHADOW (1 << 11)
-#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
-#define DW_UART_CPR_DMA_EXTRA (1 << 13)
-#define DW_UART_CPR_FIFO_MODE (0xff << 16)
-/* Helper for fifo size calculation */
-#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
/* DesignWare specific register fields */
#define DW_UART_MCR_SIRE BIT(6)
struct dw8250_data {
+ struct dw8250_port_data data;
+
u8 usr_reg;
- u8 dlf_size;
- int line;
int msr_mask_on;
int msr_mask_off;
struct clk *clk;
struct clk *pclk;
struct reset_control *rst;
- struct uart_8250_dma dma;
unsigned int skip_autocfg:1;
unsigned int uart_16550_compatible:1;
};
-static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
{
- if (p->iotype == UPIO_MEM32BE)
- return ioread32be(p->membase + offset);
- return readl(p->membase + offset);
-}
-
-static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
-{
- if (p->iotype == UPIO_MEM32BE)
- iowrite32be(reg, p->membase + offset);
- else
- writel(reg, p->membase + offset);
+ return container_of(data, struct dw8250_data, data);
}
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
/* Override any modem control signals if needed */
if (offset == UART_MSR) {
@@ -160,7 +130,7 @@ static void dw8250_tx_wait_empty(struct uart_port *p)
static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
/* Allow the TX to drain before we reconfigure */
if (offset == UART_LCR)
@@ -175,7 +145,7 @@ static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
writeb(value, p->membase + (offset << p->regshift));
@@ -202,7 +172,7 @@ static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
value &= 0xff;
__raw_writeq(value, p->membase + (offset << p->regshift));
@@ -216,7 +186,7 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
writel(value, p->membase + (offset << p->regshift));
@@ -233,7 +203,7 @@ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
static void dw8250_serial_out32be(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
iowrite32be(value, p->membase + (offset << p->regshift));
@@ -252,7 +222,7 @@ static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
static int dw8250_handle_irq(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
unsigned int iir = p->serial_in(p, UART_IIR);
unsigned int status;
unsigned long flags;
@@ -306,7 +276,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- struct dw8250_data *d = p->private_data;
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
long rate;
int ret;
@@ -368,37 +338,6 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev;
}
-/*
- * divisor = div(I) + div(F)
- * "I" means integer, "F" means fractional
- * quot = div(I) = clk / (16 * baud)
- * frac = div(F) * 2^dlf_size
- *
- * let rem = clk % (16 * baud)
- * we have: div(F) * (16 * baud) = rem
- * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud)
- */
-static unsigned int dw8250_get_divisor(struct uart_port *p,
- unsigned int baud,
- unsigned int *frac)
-{
- unsigned int quot, rem, base_baud = baud * 16;
- struct dw8250_data *d = p->private_data;
-
- quot = p->uartclk / base_baud;
- rem = p->uartclk % base_baud;
- *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud);
-
- return quot;
-}
-
-static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
-{
- dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
- serial8250_do_set_divisor(p, baud, quot, quot_frac);
-}
-
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
if (p->dev->of_node) {
@@ -437,65 +376,18 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
/* Platforms with iDMA 64-bit */
if (platform_get_resource_byname(to_platform_device(p->dev),
IORESOURCE_MEM, "lpss_priv")) {
- data->dma.rx_param = p->dev->parent;
- data->dma.tx_param = p->dev->parent;
- data->dma.fn = dw8250_idma_filter;
- }
-}
-
-static void dw8250_setup_port(struct uart_port *p)
-{
- struct uart_8250_port *up = up_to_u8250p(p);
- u32 reg;
-
- /*
- * If the Component Version Register returns zero, we know that
- * ADDITIONAL_FEATURES are not enabled. No need to go any further.
- */
- reg = dw8250_readl_ext(p, DW_UART_UCV);
- if (!reg)
- return;
-
- dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
- (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
-
- dw8250_writel_ext(p, DW_UART_DLF, ~0U);
- reg = dw8250_readl_ext(p, DW_UART_DLF);
- dw8250_writel_ext(p, DW_UART_DLF, 0);
-
- if (reg) {
- struct dw8250_data *d = p->private_data;
-
- d->dlf_size = fls(reg);
- p->get_divisor = dw8250_get_divisor;
- p->set_divisor = dw8250_set_divisor;
- }
-
- reg = dw8250_readl_ext(p, DW_UART_CPR);
- if (!reg)
- return;
-
- /* Select the type based on fifo */
- if (reg & DW_UART_CPR_FIFO_MODE) {
- p->type = PORT_16550A;
- p->flags |= UPF_FIXED_TYPE;
- p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
- up->capabilities = UART_CAP_FIFO;
+ data->data.dma.rx_param = p->dev->parent;
+ data->data.dma.tx_param = p->dev->parent;
+ data->data.dma.fn = dw8250_idma_filter;
}
-
- if (reg & DW_UART_CPR_AFCE_MODE)
- up->capabilities |= UART_CAP_AFE;
-
- if (reg & DW_UART_CPR_SIR_MODE)
- up->capabilities |= UART_CAP_IRDA;
}
static int dw8250_probe(struct platform_device *pdev)
{
- struct uart_8250_port uart = {};
+ struct uart_8250_port uart = {}, *up = &uart;
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int irq = platform_get_irq(pdev, 0);
- struct uart_port *p = &uart.port;
+ struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
int err;
@@ -534,9 +426,9 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->dma.fn = dw8250_fallback_dma_filter;
+ data->data.dma.fn = dw8250_fallback_dma_filter;
data->usr_reg = DW_UART_USR;
- p->private_data = data;
+ p->private_data = &data->data;
data->uart_16550_compatible = device_property_read_bool(dev,
"snps,uart-16550-compatible");
@@ -632,14 +524,14 @@ static int dw8250_probe(struct platform_device *pdev)
/* If we have a valid fifosize, try hooking up DMA */
if (p->fifosize) {
- data->dma.rxconf.src_maxburst = p->fifosize / 4;
- data->dma.txconf.dst_maxburst = p->fifosize / 4;
- uart.dma = &data->dma;
+ data->data.dma.rxconf.src_maxburst = p->fifosize / 4;
+ data->data.dma.txconf.dst_maxburst = p->fifosize / 4;
+ up->dma = &data->data.dma;
}
- data->line = serial8250_register_8250_port(&uart);
- if (data->line < 0) {
- err = data->line;
+ data->data.line = serial8250_register_8250_port(up);
+ if (data->data.line < 0) {
+ err = data->data.line;
goto err_reset;
}
@@ -667,10 +559,11 @@ err_clk:
static int dw8250_remove(struct platform_device *pdev)
{
struct dw8250_data *data = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
- serial8250_unregister_port(data->line);
+ serial8250_unregister_port(data->data.line);
reset_control_assert(data->rst);
@@ -680,8 +573,8 @@ static int dw8250_remove(struct platform_device *pdev)
if (!IS_ERR(data->clk))
clk_disable_unprepare(data->clk);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
return 0;
}
@@ -691,7 +584,7 @@ static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- serial8250_suspend_port(data->line);
+ serial8250_suspend_port(data->data.line);
return 0;
}
@@ -700,7 +593,7 @@ static int dw8250_resume(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- serial8250_resume_port(data->line);
+ serial8250_resume_port(data->data.line);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
new file mode 100644
index 000000000000..6d6a78eead3e
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Synopsys DesignWare 8250 library. */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+
+#include "8250_dwlib.h"
+
+/* Offsets for the DesignWare specific registers */
+#define DW_UART_DLF 0xc0 /* Divisor Latch Fraction Register */
+#define DW_UART_CPR 0xf4 /* Component Parameter Register */
+#define DW_UART_UCV 0xf8 /* UART Component Version */
+
+/* Component Parameter Register bits */
+#define DW_UART_CPR_ABP_DATA_WIDTH (3 << 0)
+#define DW_UART_CPR_AFCE_MODE (1 << 4)
+#define DW_UART_CPR_THRE_MODE (1 << 5)
+#define DW_UART_CPR_SIR_MODE (1 << 6)
+#define DW_UART_CPR_SIR_LP_MODE (1 << 7)
+#define DW_UART_CPR_ADDITIONAL_FEATURES (1 << 8)
+#define DW_UART_CPR_FIFO_ACCESS (1 << 9)
+#define DW_UART_CPR_FIFO_STAT (1 << 10)
+#define DW_UART_CPR_SHADOW (1 << 11)
+#define DW_UART_CPR_ENCODED_PARMS (1 << 12)
+#define DW_UART_CPR_DMA_EXTRA (1 << 13)
+#define DW_UART_CPR_FIFO_MODE (0xff << 16)
+
+/* Helper for FIFO size calculation */
+#define DW_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16)
+
+static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ return ioread32be(p->membase + offset);
+ return readl(p->membase + offset);
+}
+
+static inline void dw8250_writel_ext(struct uart_port *p, int offset, u32 reg)
+{
+ if (p->iotype == UPIO_MEM32BE)
+ iowrite32be(reg, p->membase + offset);
+ else
+ writel(reg, p->membase + offset);
+}
+
+/*
+ * divisor = div(I) + div(F)
+ * "I" means integer, "F" means fractional
+ * quot = div(I) = clk / (16 * baud)
+ * frac = div(F) * 2^dlf_size
+ *
+ * let rem = clk % (16 * baud)
+ * we have: div(F) * (16 * baud) = rem
+ * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud)
+ */
+static unsigned int dw8250_get_divisor(struct uart_port *p, unsigned int baud,
+ unsigned int *frac)
+{
+ unsigned int quot, rem, base_baud = baud * 16;
+ struct dw8250_port_data *d = p->private_data;
+
+ quot = p->uartclk / base_baud;
+ rem = p->uartclk % base_baud;
+ *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud);
+
+ return quot;
+}
+
+static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ dw8250_writel_ext(p, DW_UART_DLF, quot_frac);
+ serial8250_do_set_divisor(p, baud, quot, quot_frac);
+}
+
+void dw8250_setup_port(struct uart_port *p)
+{
+ struct uart_8250_port *up = up_to_u8250p(p);
+ u32 reg;
+
+ /*
+ * If the Component Version Register returns zero, we know that
+ * ADDITIONAL_FEATURES are not enabled. No need to go any further.
+ */
+ reg = dw8250_readl_ext(p, DW_UART_UCV);
+ if (!reg)
+ return;
+
+ dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
+ (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+
+ dw8250_writel_ext(p, DW_UART_DLF, ~0U);
+ reg = dw8250_readl_ext(p, DW_UART_DLF);
+ dw8250_writel_ext(p, DW_UART_DLF, 0);
+
+ if (reg) {
+ struct dw8250_port_data *d = p->private_data;
+
+ d->dlf_size = fls(reg);
+ p->get_divisor = dw8250_get_divisor;
+ p->set_divisor = dw8250_set_divisor;
+ }
+
+ reg = dw8250_readl_ext(p, DW_UART_CPR);
+ if (!reg)
+ return;
+
+ /* Select the type based on FIFO */
+ if (reg & DW_UART_CPR_FIFO_MODE) {
+ p->type = PORT_16550A;
+ p->flags |= UPF_FIXED_TYPE;
+ p->fifosize = DW_UART_CPR_FIFO_SIZE(reg);
+ up->capabilities = UART_CAP_FIFO;
+ }
+
+ if (reg & DW_UART_CPR_AFCE_MODE)
+ up->capabilities |= UART_CAP_AFE;
+
+ if (reg & DW_UART_CPR_SIR_MODE)
+ up->capabilities |= UART_CAP_IRDA;
+}
+EXPORT_SYMBOL_GPL(dw8250_setup_port);
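
As a quick sanity check of the fractional divisor math moved into dw8250_get_divisor() above, here is a standalone sketch with illustrative numbers (48 MHz uartclk, 115200 baud and dlf_size = 4 are assumptions, not values from the driver):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

static unsigned int example_dw_divisor(void)
{
	unsigned int uartclk = 48000000, baud = 115200, dlf_size = 4;
	unsigned int base_baud = baud * 16;		/* 1843200 */
	unsigned int quot = uartclk / base_baud;	/* 26, goes to DLL/DLM */
	unsigned int rem = uartclk % base_baud;		/* 76800 */
	unsigned int frac = DIV_ROUND_CLOSEST(rem << dlf_size, base_baud); /* 1, goes to DW_UART_DLF */

	/* Effective divisor 26 + 1/16 = 26.0625 vs. the ideal 26.0417 */
	return (frac << 16) | quot;	/* packed only to return one value */
}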
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
new file mode 100644
index 000000000000..87a4db2a8aba
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Synopsys DesignWare 8250 library header file. */
+
+#include <linux/types.h>
+
+#include "8250.h"
+
+struct dw8250_port_data {
+ /* Port properties */
+ int line;
+
+ /* DMA operations */
+ struct uart_8250_dma dma;
+
+ /* Hardware configuration */
+ u8 dlf_size;
+};
+
+void dw8250_setup_port(struct uart_port *p);
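
Glue drivers embed struct dw8250_port_data (usually as the first member of their own private struct), point uart_port->private_data at the embedded member and recover the outer struct with container_of(), which is exactly what the 8250_dw.c and 8250_lpss.c hunks do. A minimal hedged sketch of the pattern with made-up names:

#include <linux/serial_core.h>

#include "8250_dwlib.h"

struct example8250_data {
	struct dw8250_port_data data;	/* referenced by port->private_data */
	int example_quirk;		/* driver-specific members follow */
};

static inline struct example8250_data *to_example8250_data(struct dw8250_port_data *d)
{
	return container_of(d, struct example8250_data, data);
}

/* probe() sets p->private_data = &priv->data; callbacks then do: */
static int example8250_handle(struct uart_port *p)
{
	struct example8250_data *priv = to_example8250_data(p->private_data);

	return priv->example_quirk;
}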
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index edd6dfe055bf..597eb9d16f21 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/8250_pci.h>
+#include <linux/delay.h>
#include <asm/byteorder.h>
@@ -36,6 +37,8 @@
#define UART_EXAR_INT0 0x80
#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
+#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
+#define UART_EXAR_DVID 0x8d /* Device identification */
#define UART_EXAR_FCTR 0x08 /* Feature Control Register */
#define UART_FCTR_EXAR_IRDA 0x10 /* IrDa data encode select */
@@ -127,18 +130,95 @@ struct exar8250 {
int line[0];
};
+static void exar_pm(struct uart_port *port, unsigned int state, unsigned int old)
+{
+ /*
+	 * Exar UARTs have a SLEEP register that enables or disables sleep
+	 * mode for each UART separately. On the XR17V35x the register is
+	 * accessible to each UART at the UART_EXAR_SLEEP offset, but each
+	 * UART channel may only write to its corresponding bit.
+ */
+ serial_port_out(port, UART_EXAR_SLEEP, state ? 0xff : 0);
+}
+
+/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD).
+ * Calculate the divisor with an extra 4-bit fractional portion.
+ */
+static unsigned int xr17v35x_get_divisor(struct uart_port *p, unsigned int baud,
+ unsigned int *frac)
+{
+ unsigned int quot_16;
+
+ quot_16 = DIV_ROUND_CLOSEST(p->uartclk, baud);
+ *frac = quot_16 & 0x0f;
+
+ return quot_16 >> 4;
+}
+
+static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ serial8250_do_set_divisor(p, baud, quot, quot_frac);
+
+ /* Preserve bits not related to baudrate; DLD[7:4]. */
+ quot_frac |= serial_port_in(p, 0x2) & 0xf0;
+ serial_port_out(p, 0x2, quot_frac);
+}
+
+static void exar_shutdown(struct uart_port *port)
+{
+ unsigned char lsr;
+	bool tx_complete = false;
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct circ_buf *xmit = &port->state->xmit;
+ int i = 0;
+
+ do {
+ lsr = serial_in(up, UART_LSR);
+ if (lsr & (UART_LSR_TEMT | UART_LSR_THRE))
+ tx_complete = 1;
+ else
+ tx_complete = 0;
+ usleep_range(1000, 1100);
+ } while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000);
+
+ serial8250_do_shutdown(port);
+}
+
static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
int idx, unsigned int offset,
struct uart_8250_port *port)
{
const struct exar8250_board *board = priv->board;
unsigned int bar = 0;
+ unsigned char status;
port->port.iotype = UPIO_MEM;
port->port.mapbase = pci_resource_start(pcidev, bar) + offset;
port->port.membase = priv->virt + offset;
port->port.regshift = board->reg_shift;
+ /*
+	 * XR17V35x UARTs have an extra divisor register, DLD, that gets
+	 * enabled when DLAB is set. This causes the device to incorrectly
+	 * match and assign the port type to PORT_16650, since the EFR for
+	 * this UART is found at offset 0x09. Instead, check the Device ID
+	 * (DVID) register for a 2-, 4- or 8-port UART.
+ */
+ status = readb(port->port.membase + UART_EXAR_DVID);
+ if (status == 0x82 || status == 0x84 || status == 0x88) {
+ port->port.type = PORT_XR17V35X;
+
+ port->port.get_divisor = xr17v35x_get_divisor;
+ port->port.set_divisor = xr17v35x_set_divisor;
+ } else {
+ port->port.type = PORT_XR17D15X;
+ }
+
+ port->port.pm = exar_pm;
+ port->port.shutdown = exar_shutdown;
+
return 0;
}
@@ -434,6 +514,16 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
port->port.private_data = NULL;
}
+static inline void exar_misc_clear(struct exar8250 *priv)
+{
+ /* Clear all PCI interrupts by reading INT0. No effect on IIR */
+ readb(priv->virt + UART_EXAR_INT0);
+
+ /* Clear INT0 for Expansion Interface slave ports, too */
+ if (priv->board->num_ports > 8)
+ readb(priv->virt + 0x2000 + UART_EXAR_INT0);
+}
+
/*
* These Exar UARTs have an extra interrupt indicator that could fire for a
* few interrupts that are not presented/cleared through IIR. One of which is
@@ -445,14 +535,7 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
*/
static irqreturn_t exar_misc_handler(int irq, void *data)
{
- struct exar8250 *priv = data;
-
- /* Clear all PCI interrupts by reading INT0. No effect on IIR */
- readb(priv->virt + UART_EXAR_INT0);
-
- /* Clear INT0 for Expansion Interface slave ports, too */
- if (priv->board->num_ports > 8)
- readb(priv->virt + 0x2000 + UART_EXAR_INT0);
+ exar_misc_clear(data);
return IRQ_HANDLED;
}
@@ -478,9 +561,7 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nr_ports = board->num_ports ? board->num_ports : pcidev->device & 0x0f;
- priv = devm_kzalloc(&pcidev->dev, sizeof(*priv) +
- sizeof(unsigned int) * nr_ports,
- GFP_KERNEL);
+ priv = devm_kzalloc(&pcidev->dev, struct_size(priv, line, nr_ports), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -496,8 +577,7 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
return rc;
memset(&uart, 0, sizeof(uart));
- uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ
- | UPF_EXAR_EFR;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_EXAR_EFR | UPF_FIXED_TYPE | UPF_FIXED_PORT;
uart.port.irq = pci_irq_vector(pcidev, 0);
uart.port.dev = &pcidev->dev;
@@ -506,6 +586,9 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
if (rc)
return rc;
+ /* Clear interrupts */
+ exar_misc_clear(priv);
+
for (i = 0; i < nr_ports && i < maxnr; i++) {
rc = board->setup(priv, pcidev, &uart, i);
if (rc) {
@@ -561,10 +644,11 @@ static int __maybe_unused exar_suspend(struct device *dev)
static int __maybe_unused exar_resume(struct device *dev)
{
- struct pci_dev *pcidev = to_pci_dev(dev);
- struct exar8250 *priv = pci_get_drvdata(pcidev);
+ struct exar8250 *priv = dev_get_drvdata(dev);
unsigned int i;
+ exar_misc_clear(priv);
+
for (i = 0; i < priv->nr; i++)
if (priv->line[i] >= 0)
serial8250_resume_port(priv->line[i]);
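
To see what the new xr17v35x_get_divisor()/xr17v35x_set_divisor() pair computes, take 115200 baud with an assumed 62.5 MHz uartclk (the clock value is illustrative, not from this patch):

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

static unsigned int example_xr17v35x_divisor(void)
{
	unsigned int uartclk = 62500000, baud = 115200;
	unsigned int quot_16 = DIV_ROUND_CLOSEST(uartclk, baud);	/* 543 */
	unsigned int frac = quot_16 & 0x0f;	/* 15, written to DLD[3:0] */
	unsigned int quot = quot_16 >> 4;	/* 33, written to DLL/DLM */

	/* Resulting rate: 62500000 / (16 * (33 + 15/16)) ~ 115101 baud */
	return (frac << 16) | quot;	/* packed only to return one value */
}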
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index eddf119374e1..570e25d6f37e 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -106,10 +106,8 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
int irq, ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "irq not found");
+ if (irq < 0)
return irq;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 53ca9ba6ab4b..5f72ef3ea574 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -14,7 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/dma/dw.h>
-#include "8250.h"
+#include "8250_dwlib.h"
#define PCI_DEVICE_ID_INTEL_QRK_UARTx 0x0936
@@ -24,6 +24,13 @@
#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+#define PCI_DEVICE_ID_INTEL_EHL_UART0 0x4b96
+#define PCI_DEVICE_ID_INTEL_EHL_UART1 0x4b97
+#define PCI_DEVICE_ID_INTEL_EHL_UART2 0x4b98
+#define PCI_DEVICE_ID_INTEL_EHL_UART3 0x4b99
+#define PCI_DEVICE_ID_INTEL_EHL_UART4 0x4b9a
+#define PCI_DEVICE_ID_INTEL_EHL_UART5 0x4b9b
+
#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
@@ -48,21 +55,25 @@ struct lpss8250_board {
};
struct lpss8250 {
- int line;
+ struct dw8250_port_data data;
struct lpss8250_board *board;
/* DMA parameters */
- struct uart_8250_dma dma;
struct dw_dma_chip dma_chip;
struct dw_dma_slave dma_param;
u8 dma_maxburst;
};
+static inline struct lpss8250 *to_lpss8250(struct dw8250_port_data *data)
+{
+ return container_of(data, struct lpss8250, data);
+}
+
static void byt_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
- struct lpss8250 *lpss = p->private_data;
+ struct lpss8250 *lpss = to_lpss8250(p->private_data);
unsigned long fref = lpss->board->freq, fuart = baud * 16;
unsigned long w = BIT(15) - 1;
unsigned long m, n;
@@ -109,7 +120,6 @@ static unsigned int byt_get_mctrl(struct uart_port *port)
static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
{
struct dw_dma_slave *param = &lpss->dma_param;
- struct uart_8250_port *up = up_to_u8250p(port);
struct pci_dev *pdev = to_pci_dev(port->dev);
unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn);
@@ -135,10 +145,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
param->m_master = 0;
param->p_master = 1;
- /* TODO: Detect FIFO size automaticaly for DesignWare 8250 */
- port->fifosize = 64;
- up->tx_loadsz = 64;
-
lpss->dma_maxburst = 16;
port->set_termios = byt_set_termios;
@@ -163,16 +169,19 @@ static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
{
- struct uart_8250_dma *dma = &lpss->dma;
+ struct uart_8250_dma *dma = &lpss->data.dma;
struct dw_dma_chip *chip = &lpss->dma_chip;
struct dw_dma_slave *param = &lpss->dma_param;
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
+ chip->pdata = &qrk_serial_dma_pdata;
chip->dev = &pdev->dev;
+ chip->id = pdev->devfn;
chip->irq = pci_irq_vector(pdev, 0);
chip->regs = pci_ioremap_bar(pdev, 1);
- chip->pdata = &qrk_serial_dma_pdata;
+ if (!chip->regs)
+ return;
/* Falling back to PIO mode if DMA probing fails */
ret = dw_dma_probe(chip);
@@ -195,11 +204,15 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
static void qrk_serial_exit_dma(struct lpss8250 *lpss)
{
+ struct dw_dma_chip *chip = &lpss->dma_chip;
struct dw_dma_slave *param = &lpss->dma_param;
if (!param->dma_dev)
return;
- dw_dma_remove(&lpss->dma_chip);
+
+ dw_dma_remove(chip);
+
+ pci_iounmap(to_pci_dev(chip->dev), chip->regs);
}
#else /* CONFIG_SERIAL_8250_DMA */
static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port) {}
@@ -241,7 +254,7 @@ static bool lpss8250_dma_filter(struct dma_chan *chan, void *param)
static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port)
{
- struct uart_8250_dma *dma = &lpss->dma;
+ struct uart_8250_dma *dma = &lpss->data.dma;
struct dw_dma_slave *rx_param, *tx_param;
struct device *dev = port->port.dev;
@@ -290,7 +303,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
uart.port.dev = &pdev->dev;
uart.port.irq = pdev->irq;
- uart.port.private_data = lpss;
+ uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
uart.port.iotype = UPIO_MEM;
uart.port.regshift = 2;
@@ -306,6 +319,8 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ dw8250_setup_port(&uart.port);
+
ret = lpss8250_dma_setup(lpss, &uart);
if (ret)
goto err_exit;
@@ -314,7 +329,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret < 0)
goto err_exit;
- lpss->line = ret;
+ lpss->data.line = ret;
pci_set_drvdata(pdev, lpss);
return 0;
@@ -329,7 +344,7 @@ static void lpss8250_remove(struct pci_dev *pdev)
{
struct lpss8250 *lpss = pci_get_drvdata(pdev);
- serial8250_unregister_port(lpss->line);
+ serial8250_unregister_port(lpss->data.line);
if (lpss->board->exit)
lpss->board->exit(lpss);
@@ -341,6 +356,11 @@ static const struct lpss8250_board byt_board = {
.setup = byt_serial_setup,
};
+static const struct lpss8250_board ehl_board = {
+ .freq = 200000000,
+ .base_baud = 12500000,
+};
+
static const struct lpss8250_board qrk_board = {
.freq = 44236800,
.base_baud = 2764800,
@@ -348,17 +368,21 @@ static const struct lpss8250_board qrk_board = {
.exit = qrk_serial_exit,
};
-#define LPSS_DEVICE(id, board) { PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&board }
-
static const struct pci_device_id pci_ids[] = {
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_QRK_UARTx, qrk_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BYT_UART1, byt_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BYT_UART2, byt_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BSW_UART1, byt_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BSW_UART2, byt_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BDW_UART1, byt_board),
- LPSS_DEVICE(PCI_DEVICE_ID_INTEL_BDW_UART2, byt_board),
- { },
+ { PCI_DEVICE_DATA(INTEL, QRK_UARTx, &qrk_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART0, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART1, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART2, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART3, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART4, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, EHL_UART5, &ehl_board) },
+ { PCI_DEVICE_DATA(INTEL, BYT_UART1, &byt_board) },
+ { PCI_DEVICE_DATA(INTEL, BYT_UART2, &byt_board) },
+ { PCI_DEVICE_DATA(INTEL, BSW_UART1, &byt_board) },
+ { PCI_DEVICE_DATA(INTEL, BSW_UART2, &byt_board) },
+ { PCI_DEVICE_DATA(INTEL, BDW_UART1, &byt_board) },
+ { PCI_DEVICE_DATA(INTEL, BDW_UART2, &byt_board) },
+ { }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
diff --git a/drivers/tty/serial/8250/8250_moxa.c b/drivers/tty/serial/8250/8250_moxa.c
deleted file mode 100644
index 1ee4cd94d4fa..000000000000
--- a/drivers/tty/serial/8250/8250_moxa.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * 8250_moxa.c - MOXA Smartio/Industio MUE multiport serial driver.
- *
- * Author: Mathieu OTHACEHE <m.othacehe@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include "8250.h"
-
-#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
-#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
-#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
-#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144
-#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160
-#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161
-#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182
-#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183
-#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322
-#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342
-#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381
-#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683
-
-#define MOXA_BASE_BAUD 921600
-#define MOXA_UART_OFFSET 0x200
-#define MOXA_BASE_BAR 1
-
-struct moxa8250_board {
- unsigned int num_ports;
- int line[0];
-};
-
-enum {
- moxa8250_2p = 0,
- moxa8250_4p,
- moxa8250_8p
-};
-
-static struct moxa8250_board moxa8250_boards[] = {
- [moxa8250_2p] = { .num_ports = 2},
- [moxa8250_4p] = { .num_ports = 4},
- [moxa8250_8p] = { .num_ports = 8},
-};
-
-static int moxa8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct uart_8250_port uart;
- struct moxa8250_board *brd;
- void __iomem *ioaddr;
- resource_size_t baseaddr;
- unsigned int i, nr_ports;
- unsigned int offset;
- int ret;
-
- brd = &moxa8250_boards[id->driver_data];
- nr_ports = brd->num_ports;
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- brd = devm_kzalloc(&pdev->dev, sizeof(struct moxa8250_board) +
- sizeof(unsigned int) * nr_ports, GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->num_ports = nr_ports;
-
- memset(&uart, 0, sizeof(struct uart_8250_port));
-
- uart.port.dev = &pdev->dev;
- uart.port.irq = pdev->irq;
- uart.port.uartclk = MOXA_BASE_BAUD * 16;
- uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
-
- baseaddr = pci_resource_start(pdev, MOXA_BASE_BAR);
- ioaddr = pcim_iomap(pdev, MOXA_BASE_BAR, 0);
- if (!ioaddr)
- return -ENOMEM;
-
- for (i = 0; i < nr_ports; i++) {
-
- /*
- * MOXA Smartio MUE boards with 4 ports have
- * a different offset for port #3
- */
- if (nr_ports == 4 && i == 3)
- offset = 7 * MOXA_UART_OFFSET;
- else
- offset = i * MOXA_UART_OFFSET;
-
- uart.port.iotype = UPIO_MEM;
- uart.port.iobase = 0;
- uart.port.mapbase = baseaddr + offset;
- uart.port.membase = ioaddr + offset;
- uart.port.regshift = 0;
-
- dev_dbg(&pdev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
- uart.port.iobase, uart.port.irq, uart.port.iotype);
-
- brd->line[i] = serial8250_register_8250_port(&uart);
- if (brd->line[i] < 0) {
- dev_err(&pdev->dev,
- "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
- uart.port.iobase, uart.port.irq,
- uart.port.iotype, brd->line[i]);
- break;
- }
- }
-
- pci_set_drvdata(pdev, brd);
- return 0;
-}
-
-static void moxa8250_remove(struct pci_dev *pdev)
-{
- struct moxa8250_board *brd = pci_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < brd->num_ports; i++)
- serial8250_unregister_port(brd->line[i]);
-}
-
-#define MOXA_DEVICE(id, data) { PCI_VDEVICE(MOXA, id), (kernel_ulong_t)data }
-
-static const struct pci_device_id pci_ids[] = {
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP102E, moxa8250_2p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP102EL, moxa8250_2p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP104EL_A, moxa8250_4p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP114EL, moxa8250_4p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP116E_A_A, moxa8250_8p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP116E_A_B, moxa8250_8p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP118EL_A, moxa8250_8p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP118E_A_I, moxa8250_8p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP132EL, moxa8250_2p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP134EL_A, moxa8250_4p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP138E_A, moxa8250_8p),
- MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP168EL_A, moxa8250_8p),
- {0}
-};
-MODULE_DEVICE_TABLE(pci, pci_ids);
-
-static struct pci_driver moxa8250_pci_driver = {
- .name = "8250_moxa",
- .id_table = pci_ids,
- .probe = moxa8250_probe,
- .remove = moxa8250_remove,
-};
-
-module_pci_driver(moxa8250_pci_driver);
-
-MODULE_AUTHOR("Mathieu OTHACEHE");
-MODULE_DESCRIPTION("MOXA SmartIO MUE driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 3ef65cbd2478..c68e2b3a1634 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1234,7 +1234,16 @@ static int omap8250_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+
+ /*
+	 * Disable runtime PM until the user sets an autosuspend delay via
+	 * sysfs. This is the historic way to prevent an unsafe default
+	 * policy with lossy characters on wake-up. For serdev devices this
+	 * is not needed; the policy can be managed by the serdev driver.
+ */
+ if (!of_get_available_child_count(pdev->dev.of_node))
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 7f740b37700b..6adbadd6a56a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -43,6 +43,11 @@ struct pci_serial_quirk {
void (*exit)(struct pci_dev *dev);
};
+struct f815xxa_data {
+ spinlock_t lock;
+ int idx;
+};
+
#define PCI_NUM_BAR_RESOURCES 6
struct serial_private {
@@ -53,6 +58,16 @@ struct serial_private {
int line[0];
};
+static const struct pci_device_id pci_use_msi[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x1000) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912,
+ 0xA000, 0x1000) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
+ 0xA000, 0x1000) },
+ { }
+};
+
static int pci_default_setup(struct serial_private*,
const struct pciserial_board*, struct uart_8250_port *, int);
@@ -730,8 +745,16 @@ static int pci_ni8430_init(struct pci_dev *dev)
}
/* UART Port Control Register */
-#define NI8430_PORTCON 0x0f
-#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
+#define NI16550_PCR_OFFSET 0x0f
+#define NI16550_PCR_RS422 0x00
+#define NI16550_PCR_ECHO_RS485 0x01
+#define NI16550_PCR_DTR_RS485 0x02
+#define NI16550_PCR_AUTO_RS485 0x03
+#define NI16550_PCR_WIRE_MODE_MASK 0x03
+#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
+#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
+#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
+#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
static int
pci_ni8430_setup(struct serial_private *priv,
@@ -753,14 +776,117 @@ pci_ni8430_setup(struct serial_private *priv,
return -ENOMEM;
/* enable the transceiver */
- writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
- p + offset + NI8430_PORTCON);
+ writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
+ p + offset + NI16550_PCR_OFFSET);
iounmap(p);
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_ni8431_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ u8 pcr, acr;
+ struct uart_8250_port *up;
+
+ up = container_of(port, struct uart_8250_port, port);
+ acr = up->acr;
+ pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* RS-485 */
+ if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
+ (rs485->flags & SER_RS485_RTS_ON_SEND)) {
+ dev_dbg(port->dev, "Invalid 2-wire mode\n");
+ return -EINVAL;
+ }
+
+ if (rs485->flags & SER_RS485_RX_DURING_TX) {
+ /* Echo */
+ dev_vdbg(port->dev, "2-wire DTR with echo\n");
+ pcr |= NI16550_PCR_ECHO_RS485;
+ acr |= NI16550_ACR_DTR_MANUAL_DTR;
+ } else {
+ /* Auto or DTR */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+ /* Auto */
+ dev_vdbg(port->dev, "2-wire Auto\n");
+ pcr |= NI16550_PCR_AUTO_RS485;
+ acr |= NI16550_ACR_DTR_AUTO_DTR;
+ } else {
+ /* DTR-controlled */
+ /* No Echo */
+ dev_vdbg(port->dev, "2-wire DTR no echo\n");
+ pcr |= NI16550_PCR_DTR_RS485;
+ acr |= NI16550_ACR_DTR_MANUAL_DTR;
+ }
+ }
+ } else {
+ /* RS-422 */
+ dev_vdbg(port->dev, "4-wire\n");
+ pcr |= NI16550_PCR_RS422;
+ acr |= NI16550_ACR_DTR_MANUAL_DTR;
+ }
+
+ dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
+ port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+
+ up->acr = acr;
+ port->serial_out(port, UART_SCR, UART_ACR);
+ port->serial_out(port, UART_ICR, up->acr);
+
+ /* Update the cache. */
+ port->rs485 = *rs485;
+
+ return 0;
+}
+
+static int pci_ni8431_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *uart, int idx)
+{
+ u8 pcr, acr;
+ struct pci_dev *dev = priv->dev;
+ void __iomem *addr;
+ unsigned int bar, offset = board->first_offset;
+
+ if (idx >= board->num_ports)
+ return 1;
+
+ bar = FL_GET_BASE(board->flags);
+ offset += idx * board->uart_offset;
+
+ addr = pci_ioremap_bar(dev, bar);
+ if (!addr)
+ return -ENOMEM;
+
+ /* enable the transceiver */
+ writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
+ addr + NI16550_PCR_OFFSET);
+
+ pcr = readb(addr + NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+
+ /* set wire mode to default RS-422 */
+ pcr |= NI16550_PCR_RS422;
+ acr = NI16550_ACR_DTR_MANUAL_DTR;
+
+ /* write port configuration to register */
+ writeb(pcr, addr + NI16550_PCR_OFFSET);
+
+ /* access and write to UART acr register */
+ writeb(UART_ACR, addr + UART_SCR);
+ writeb(acr, addr + UART_ICR);
+
+ uart->port.rs485_config = &pci_ni8431_config_rs485;
+
+ iounmap(addr);
+
+ return setup_port(priv, uart, bar, offset, board->reg_shift);
+}
+
static int pci_netmos_9900_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
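
pci_ni8431_config_rs485() above maps the generic serial_rs485 flags onto the NI wire modes (RS-422, auto/DTR/echo RS-485). Userspace selects a mode through the standard TIOCSRS485 ioctl; a hedged sketch putting a port into 2-wire auto RS-485 (the device path is illustrative):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

static int set_auto_rs485(const char *dev)
{
	struct serial_rs485 rs485;
	int ret, fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	memset(&rs485, 0, sizeof(rs485));
	/* SER_RS485_ENABLED + RTS_ON_SEND selects the "2-wire Auto" case above */
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;

	ret = ioctl(fd, TIOCSRS485, &rs485);
	close(fd);
	return ret;
}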
@@ -1596,6 +1722,77 @@ static int pci_fintek_init(struct pci_dev *dev)
return max_port;
}
+static void f815xxa_mem_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct f815xxa_data *data = p->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ writeb(value, p->membase + offset);
+	readb(p->membase + UART_SCR); /* Dummy read to flush the PCIe TX queue */
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int pci_fintek_f815xxa_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ struct pci_dev *pdev = priv->dev;
+ struct f815xxa_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->idx = idx;
+ spin_lock_init(&data->lock);
+
+ port->port.private_data = data;
+ port->port.iotype = UPIO_MEM;
+ port->port.flags |= UPF_IOREMAP;
+ port->port.mapbase = pci_resource_start(pdev, 0) + 8 * idx;
+ port->port.serial_out = f815xxa_mem_serial_out;
+
+ return 0;
+}
+
+static int pci_fintek_f815xxa_init(struct pci_dev *dev)
+{
+ u32 max_port, i;
+ int config_base;
+
+ if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM))
+ return -ENODEV;
+
+ switch (dev->device) {
+ case 0x1204: /* 4 ports */
+ case 0x1208: /* 8 ports */
+ max_port = dev->device & 0xff;
+ break;
+ case 0x1212: /* 12 ports */
+ max_port = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set to mmio decode */
+ pci_write_config_byte(dev, 0x209, 0x40);
+
+ for (i = 0; i < max_port; ++i) {
+		/* UART0 configuration offset starts from 0x2A0 */
+ config_base = 0x2A0 + 0x08 * i;
+
+ /* Select 128-byte FIFO and 8x FIFO threshold */
+ pci_write_config_byte(dev, config_base + 0x01, 0x33);
+
+ /* Enable UART I/O port */
+ pci_write_config_byte(dev, config_base + 0, 0x01);
+ }
+
+ return max_port;
+}
+
static int skip_tx_en_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1692,6 +1889,46 @@ pci_wch_ch38x_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int
+pci_sunix_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ int bar;
+ int offset;
+
+ port->port.flags |= UPF_FIXED_TYPE;
+ port->port.type = PORT_SUNIX;
+
+ if (idx < 4) {
+ bar = 0;
+ offset = idx * board->uart_offset;
+ } else {
+ bar = 1;
+ idx -= 4;
+ idx = div_s64_rem(idx, 4, &offset);
+ offset = idx * 64 + offset * board->uart_offset;
+ }
+
+ return setup_port(priv, port, bar, offset, 0);
+}
+
+static int
+pci_moxa_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ unsigned int bar = FL_GET_BASE(board->flags);
+ int offset;
+
+ if (board->num_ports == 4 && idx == 3)
+ offset = 7 * board->uart_offset;
+ else
+ offset = idx * board->uart_offset;
+
+ return setup_port(priv, port, bar, offset, 0);
+}
+
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
@@ -1786,7 +2023,28 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-
+#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
+#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
+#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
+#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
+#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
+#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
+#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
+#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
+#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
+
+#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
+#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
+#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
+#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144
+#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160
+#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161
+#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182
+#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183
+#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322
+#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342
+#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381
+#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2011,6 +2269,87 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8430_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8430_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8431_4852,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8431_4854,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8431_4858,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8433_4852,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCI_DEVICE_ID_NI_PXI8433_4854,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_NI,
+ .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_ni8430_init,
+ .setup = pci_ni8431_setup,
+ .exit = pci_ni8430_exit,
+ },
/* Quatech */
{
.vendor = PCI_VENDOR_ID_QUATECH,
@@ -2289,21 +2628,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_timedia_setup,
},
/*
- * SUNIX (Timedia) cards
- * Do not "probe" for these cards as there is at least one combination
- * card that should be handled by parport_pc that doesn't match the
- * rule in pci_timedia_probe.
- * It is part number is MIO5079A but its subdevice ID is 0x0102.
- * There are some boards with part number SER5037AL that report
- * subdevice ID 0x0002.
+ * Sunix PCI serial boards
*/
{
.vendor = PCI_VENDOR_ID_SUNIX,
.device = PCI_DEVICE_ID_SUNIX_1999,
.subvendor = PCI_VENDOR_ID_SUNIX,
.subdevice = PCI_ANY_ID,
- .init = pci_timedia_init,
- .setup = pci_timedia_setup,
+ .setup = pci_sunix_setup,
},
/*
* Xircom cards
@@ -2563,6 +2895,40 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_fintek_setup,
.init = pci_fintek_init,
},
+ /*
+ * MOXA
+ */
+ {
+ .vendor = PCI_VENDOR_ID_MOXA,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_moxa_setup,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1204,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_f815xxa_setup,
+ .init = pci_fintek_f815xxa_init,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1208,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_f815xxa_setup,
+ .init = pci_fintek_f815xxa_init,
+ },
+ {
+ .vendor = 0x1c29,
+ .device = 0x1212,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_fintek_f815xxa_setup,
+ .init = pci_fintek_f815xxa_init,
+ },
/*
* Default "match everything" terminator entry
@@ -2740,6 +3106,13 @@ enum pci_board_num_t {
pbn_ni8430_4,
pbn_ni8430_8,
pbn_ni8430_16,
+ pbn_ni8430_pxie_8,
+ pbn_ni8430_pxie_16,
+ pbn_ni8431_2,
+ pbn_ni8431_4,
+ pbn_ni8431_8,
+ pbn_ni8431_pxie_8,
+ pbn_ni8431_pxie_16,
pbn_ADDIDATA_PCIe_1_3906250,
pbn_ADDIDATA_PCIe_2_3906250,
pbn_ADDIDATA_PCIe_4_3906250,
@@ -2751,12 +3124,23 @@ enum pci_board_num_t {
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_fintek_F81504A,
+ pbn_fintek_F81508A,
+ pbn_fintek_F81512A,
pbn_wch382_2,
pbn_wch384_4,
pbn_pericom_PI7C9X7951,
pbn_pericom_PI7C9X7952,
pbn_pericom_PI7C9X7954,
pbn_pericom_PI7C9X7958,
+ pbn_sunix_pci_1s,
+ pbn_sunix_pci_2s,
+ pbn_sunix_pci_4s,
+ pbn_sunix_pci_8s,
+ pbn_sunix_pci_16s,
+ pbn_moxa8250_2p,
+ pbn_moxa8250_4p,
+ pbn_moxa8250_8p,
};
/*
@@ -3381,6 +3765,55 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x10,
.first_offset = 0x800,
},
+ [pbn_ni8430_pxie_16] = {
+ .flags = FL_BASE0,
+ .num_ports = 16,
+ .base_baud = 3125000,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8430_pxie_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 3125000,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8431_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8431_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8431_2] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 3686400,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8431_pxie_16] = {
+ .flags = FL_BASE0,
+ .num_ports = 16,
+ .base_baud = 3125000,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+ [pbn_ni8431_pxie_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 3125000,
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
/*
* ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
*/
@@ -3453,6 +3886,21 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
+ [pbn_fintek_F81504A] = {
+ .num_ports = 4,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ },
+ [pbn_fintek_F81508A] = {
+ .num_ports = 8,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ },
+ [pbn_fintek_F81512A] = {
+ .num_ports = 12,
+ .uart_offset = 8,
+ .base_baud = 115200,
+ },
[pbn_wch382_2] = {
.flags = FL_BASE0,
.num_ports = 2,
@@ -3494,6 +3942,49 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.uart_offset = 0x8,
},
+ [pbn_sunix_pci_1s] = {
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_sunix_pci_2s] = {
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_sunix_pci_4s] = {
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_sunix_pci_8s] = {
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_sunix_pci_16s] = {
+ .num_ports = 16,
+ .base_baud = 921600,
+ .uart_offset = 0x8,
+ },
+ [pbn_moxa8250_2p] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+ [pbn_moxa8250_4p] = {
+ .flags = FL_BASE1,
+ .num_ports = 4,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
+ [pbn_moxa8250_8p] = {
+ .flags = FL_BASE1,
+ .num_ports = 8,
+ .base_baud = 921600,
+ .uart_offset = 0x200,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3507,20 +3998,6 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
- /* Moxa Smartio MUE boards handled by 8250_moxa */
- { PCI_VDEVICE(MOXA, 0x1024), },
- { PCI_VDEVICE(MOXA, 0x1025), },
- { PCI_VDEVICE(MOXA, 0x1045), },
- { PCI_VDEVICE(MOXA, 0x1144), },
- { PCI_VDEVICE(MOXA, 0x1160), },
- { PCI_VDEVICE(MOXA, 0x1161), },
- { PCI_VDEVICE(MOXA, 0x1182), },
- { PCI_VDEVICE(MOXA, 0x1183), },
- { PCI_VDEVICE(MOXA, 0x1322), },
- { PCI_VDEVICE(MOXA, 0x1342), },
- { PCI_VDEVICE(MOXA, 0x1381), },
- { PCI_VDEVICE(MOXA, 0x1683), },
-
/* Intel platforms with MID UART */
{ PCI_VDEVICE(INTEL, 0x081b), },
{ PCI_VDEVICE(INTEL, 0x081c), },
@@ -3688,7 +4165,22 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
memset(&uart, 0, sizeof(uart));
uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.uartclk = board->base_baud * 16;
- uart.port.irq = get_pci_irq(dev, board);
+
+ if (pci_match_id(pci_use_msi, dev)) {
+ dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+ pci_set_master(dev);
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ } else {
+ dev_dbg(&dev->dev, "Using legacy interrupts\n");
+ rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ }
+ if (rc < 0) {
+ kfree(priv);
+ priv = ERR_PTR(rc);
+ goto err_deinit;
+ }
+
+ uart.port.irq = pci_irq_vector(dev, 0);
uart.port.dev = &dev->dev;
for (i = 0; i < nr_ports; i++) {
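A note on the interrupt change above (a generic sketch, not the patch itself): devices matched by the pci_use_msi table get bus mastering and a single MSI or MSI-X vector, everything else falls back to one legacy INTx vector, and in either case the IRQ number handed to the 8250 core is looked up with pci_irq_vector(). The pattern in isolation:

#include <linux/pci.h>

/* Illustrative only: allocate exactly one interrupt vector for a PCI
 * serial function, preferring MSI/MSI-X when "want_msi" says so, and
 * return the Linux IRQ number to hand to the UART core.
 */
static int serial_pci_one_vector(struct pci_dev *dev, bool want_msi)
{
	unsigned int flags = want_msi ? PCI_IRQ_ALL_TYPES : PCI_IRQ_LEGACY;
	int ret;

	if (want_msi)
		pci_set_master(dev);

	ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
	if (ret < 0)
		return ret;

	return pci_irq_vector(dev, 0);	/* IRQ for vector 0 */
}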
@@ -3859,8 +4351,7 @@ static void pciserial_remove_one(struct pci_dev *dev)
#ifdef CONFIG_PM_SLEEP
static int pciserial_suspend_one(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct serial_private *priv = pci_get_drvdata(pdev);
+ struct serial_private *priv = dev_get_drvdata(dev);
if (priv)
pciserial_suspend_ports(priv);
@@ -4532,17 +5023,29 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b0_bt_1_921600 },
/*
- * SUNIX (TIMEDIA)
+ * Sunix PCI serial boards
*/
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
- PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00,
- pbn_b0_bt_1_921600 },
-
+ PCI_VENDOR_ID_SUNIX, 0x0001, 0, 0,
+ pbn_sunix_pci_1s },
{ PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
- PCI_VENDOR_ID_SUNIX, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
- pbn_b0_bt_1_921600 },
+ PCI_VENDOR_ID_SUNIX, 0x0002, 0, 0,
+ pbn_sunix_pci_2s },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, 0x0004, 0, 0,
+ pbn_sunix_pci_4s },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, 0x0084, 0, 0,
+ pbn_sunix_pci_4s },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, 0x0008, 0, 0,
+ pbn_sunix_pci_8s },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, 0x0088, 0, 0,
+ pbn_sunix_pci_8s },
+ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999,
+ PCI_VENDOR_ID_SUNIX, 0x0010, 0, 0,
+ pbn_sunix_pci_16s },
/*
* AFAVLAB serial card, from Harald Welte <laforge@gnumonks.org>
@@ -5064,6 +5567,73 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ni8430_4 },
+ { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_pxie_8 },
+ { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_pxie_16 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_4 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_8 },
+ { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_pxie_8 },
+ { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_pxie_16 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_2 },
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8431_4 },
+
+ /*
+ * MOXA
+ */
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102E,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_2p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102EL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_2p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104EL_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_4p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP114EL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_4p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP116E_A_B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118EL_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118E_A_I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132EL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_2p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP134EL_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_4p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP138E_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
+ { PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP168EL_A,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_moxa8250_8p },
/*
* ADDI-DATA GmbH communication cards <info@addi-data.com>
@@ -5292,6 +5862,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
{ PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
{ PCI_DEVICE(0x1c29, 0x1112), .driver_data = pbn_fintek_12 },
+ { PCI_DEVICE(0x1c29, 0x1204), .driver_data = pbn_fintek_F81504A },
+ { PCI_DEVICE(0x1c29, 0x1208), .driver_data = pbn_fintek_F81508A },
+ { PCI_DEVICE(0x1c29, 0x1212), .driver_data = pbn_fintek_F81512A },
/* MKS Tenta SCOM-080x serial cards */
{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index dfca33141fcc..de90d681b64c 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -498,10 +498,9 @@ static void serial_pnp_remove(struct pnp_dev *dev)
serial8250_unregister_port(line - 1);
}
-#ifdef CONFIG_PM
-static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
+static int __maybe_unused serial_pnp_suspend(struct device *dev)
{
- long line = (long)pnp_get_drvdata(dev);
+ long line = (long)dev_get_drvdata(dev);
if (!line)
return -ENODEV;
@@ -509,26 +508,25 @@ static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
return 0;
}
-static int serial_pnp_resume(struct pnp_dev *dev)
+static int __maybe_unused serial_pnp_resume(struct device *dev)
{
- long line = (long)pnp_get_drvdata(dev);
+ long line = (long)dev_get_drvdata(dev);
if (!line)
return -ENODEV;
serial8250_resume_port(line - 1);
return 0;
}
-#else
-#define serial_pnp_suspend NULL
-#define serial_pnp_resume NULL
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(serial_pnp_pm_ops, serial_pnp_suspend, serial_pnp_resume);
static struct pnp_driver serial_pnp_driver = {
.name = "serial",
.probe = serial_pnp_probe,
.remove = serial_pnp_remove,
- .suspend = serial_pnp_suspend,
- .resume = serial_pnp_resume,
+ .driver = {
+ .pm = &serial_pnp_pm_ops,
+ },
.id_table = pnp_dev_table,
};
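The 8250_pnp hunk above replaces the #ifdef CONFIG_PM suspend/resume pair with dev_pm_ops. As a generic illustration (function names are placeholders): SIMPLE_DEV_PM_OPS() only wires the callbacks in when CONFIG_PM_SLEEP is enabled, and __maybe_unused keeps the compiler quiet when they end up unreferenced, so the preprocessor guards become unnecessary:

#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* restore hardware state here */
}

/* Expands to a dev_pm_ops that references the callbacks only when
 * CONFIG_PM_SLEEP=y; otherwise they are simply left unused.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);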
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c1cec808571b..8407166610ce 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -40,13 +40,6 @@
#include "8250.h"
-/*
- * These are definitions for the Exar XR17V35X and XR17(C|D)15X
- */
-#define UART_EXAR_INT0 0x80
-#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
-#define UART_EXAR_DVID 0x8d /* Device identification */
-
/* Nuvoton NPCM timeout register */
#define UART_NPCM_TOR 7
#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
@@ -308,6 +301,14 @@ static const struct serial8250_config uart_config[] = {
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
+ [PORT_SUNIX] = {
+ .name = "Sunix",
+ .fifo_size = 128,
+ .tx_loadsz = 128,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .rxtrig_bytes = {1, 32, 64, 112},
+ .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
+ },
};
/* Uart divisor latch read */
@@ -709,19 +710,8 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx);
static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
{
unsigned char lcr = 0, efr = 0;
- /*
- * Exar UARTs have a SLEEP register that enables or disables
- * each UART to enter sleep mode separately. On the XR17V35x the
- * register is accessible to each UART at the UART_EXAR_SLEEP
- * offset but the UART channel may only write to the corresponding
- * bit.
- */
+
serial8250_rpm_get(p);
- if ((p->port.type == PORT_XR17V35X) ||
- (p->port.type == PORT_XR17D15X)) {
- serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
- goto out;
- }
if (p->capabilities & UART_CAP_SLEEP) {
if (p->capabilities & UART_CAP_EFR) {
@@ -738,7 +728,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial_out(p, UART_LCR, lcr);
}
}
-out:
+
serial8250_rpm_put(p);
}
@@ -1012,27 +1002,6 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->capabilities |= UART_CAP_FIFO;
/*
- * XR17V35x UARTs have an extra divisor register, DLD
- * that gets enabled with when DLAB is set which will
- * cause the device to incorrectly match and assign
- * port type to PORT_16650. The EFR for this UART is
- * found at offset 0x09. Instead check the Deice ID (DVID)
- * register for a 2, 4 or 8 port UART.
- */
- if (up->port.flags & UPF_EXAR_EFR) {
- status1 = serial_in(up, UART_EXAR_DVID);
- if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
- DEBUG_AUTOCONF("Exar XR17V35x ");
- up->port.type = PORT_XR17V35X;
- up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP;
-
- return;
- }
-
- }
-
- /*
* Check for presence of the EFR when DLAB is set.
* Only ST16C650V1 UARTs pass this test.
*/
@@ -1171,18 +1140,6 @@ static void autoconfig_16550a(struct uart_8250_port *up)
serial_out(up, UART_IER, iersave);
/*
- * Exar uarts have EFR in a weird location
- */
- if (up->port.flags & UPF_EXAR_EFR) {
- DEBUG_AUTOCONF("Exar XR17D15x ");
- up->port.type = PORT_XR17D15X;
- up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
- UART_CAP_SLEEP;
-
- return;
- }
-
- /*
* We distinguish between 16550A and U6 16550A by counting
* how many bytes are in the FIFO.
*/
@@ -2184,8 +2141,6 @@ int serial8250_do_startup(struct uart_port *port)
serial_port_in(port, UART_RX);
serial_port_in(port, UART_IIR);
serial_port_in(port, UART_MSR);
- if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
- serial_port_in(port, UART_EXAR_INT0);
/*
* At this point, there's no way the LSR could still be 0xff;
@@ -2343,8 +2298,6 @@ dont_test_tx_en:
serial_port_in(port, UART_RX);
serial_port_in(port, UART_IIR);
serial_port_in(port, UART_MSR);
- if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
- serial_port_in(port, UART_EXAR_INT0);
up->lsr_saved_flags = 0;
up->msr_saved_flags = 0;
@@ -2453,23 +2406,6 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
-/*
- * XR17V35x UARTs have an extra fractional divisor register (DLD)
- * Calculate divisor with extra 4-bit fractional portion
- */
-static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
- unsigned int baud,
- unsigned int *frac)
-{
- struct uart_port *port = &up->port;
- unsigned int quot_16;
-
- quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
- *frac = quot_16 & 0x0f;
-
- return quot_16 >> 4;
-}
-
/* Nuvoton NPCM UARTs have a custom divisor calculation */
static unsigned int npcm_get_divisor(struct uart_8250_port *up,
unsigned int baud)
@@ -2497,8 +2433,6 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
baud == (port->uartclk/8))
quot = 0x8002;
- else if (up->port.type == PORT_XR17V35X)
- quot = xr17v35x_get_divisor(up, baud, frac);
else if (up->port.type == PORT_NPCM)
quot = npcm_get_divisor(up, baud);
else
@@ -2585,13 +2519,6 @@ void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
serial_dl_write(up, quot);
-
- /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
- if (up->port.type == PORT_XR17V35X) {
- /* Preserve bits not related to baudrate; DLD[7:4]. */
- quot_frac |= serial_port_in(port, 0x2) & 0xf0;
- serial_port_out(port, 0x2, quot_frac);
- }
}
EXPORT_SYMBOL_GPL(serial8250_do_set_divisor);
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 164ba133437a..e0b73a5402db 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -176,10 +176,8 @@ static int uniphier_uart_probe(struct platform_device *pdev)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get IRQ number\n");
+ if (irq < 0)
return irq;
- }
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 509f6a3bb9ff..7ef60f8b6e2c 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -314,6 +314,9 @@ config SERIAL_8250_RSA
If you don't have such card, or if unsure, say N.
+config SERIAL_8250_DWLIB
+ bool
+
config SERIAL_8250_ACORN
tristate "Acorn expansion card serial port support"
depends on ARCH_ACORN && SERIAL_8250
@@ -354,6 +357,7 @@ config SERIAL_8250_FSL
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"
depends on SERIAL_8250
+ select SERIAL_8250_DWLIB
help
Selecting this option will enable handling of the extra features
present in the Synopsys DesignWare APB UART.
@@ -440,6 +444,7 @@ config SERIAL_8250_LPSS
default SERIAL_8250
depends on SERIAL_8250 && PCI
depends on X86 || COMPILE_TEST
+ select SERIAL_8250_DWLIB
select DW_DMAC_CORE if SERIAL_8250_DMA
select DW_DMAC_PCI if (SERIAL_8250_DMA && X86_INTEL_LPSS)
select RATIONAL
@@ -463,16 +468,6 @@ config SERIAL_8250_MID
present on the UART found on Intel Medfield SOC and various other
Intel platforms.
-config SERIAL_8250_MOXA
- tristate "MOXA SmartIO MUE support"
- depends on SERIAL_8250 && PCI
- help
- Say Y here if you have a Moxa SmartIO MUE multiport serial card.
- If unsure, say N.
-
- This driver can also be built as a module. The module will be called
- 8250_moxa. If you want to do that, say M here.
-
config SERIAL_8250_PXA
tristate "PXA serial port support"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 18751bc63a84..08c1d8117506 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
8250_base-y := 8250_port.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
+8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o
8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
@@ -34,7 +35,6 @@ obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
-obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fd385c8c53a5..4789b5d62f63 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -197,23 +197,6 @@ config SERIAL_KGDB_NMI
If unsure, say N.
-config SERIAL_KS8695
- bool "Micrel KS8695 (Centaur) serial port support"
- depends on ARCH_KS8695
- select SERIAL_CORE
- help
- This selects the Micrel Centaur KS8695 UART. Say Y here.
-
-config SERIAL_KS8695_CONSOLE
- bool "Support for console on KS8695 (Centaur) serial port"
- depends on SERIAL_KS8695=y
- select SERIAL_CORE_CONSOLE
- help
- Say Y here if you wish to use a KS8695 (Centaur) UART as the
- system console (the system console is the device which
- receives all kernel messages and warnings and which allows
- logins in single user mode).
-
config SERIAL_MESON
tristate "Meson serial port support"
depends on ARCH_MESON
@@ -739,7 +722,8 @@ config SERIAL_PNX8XXX_CONSOLE
config SERIAL_HS_LPC32XX
tristate "LPC32XX high speed serial port support"
- depends on ARCH_LPC32XX && OF
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on OF
select SERIAL_CORE
help
Support for the LPC32XX high speed serial ports (up to 900kbps).
@@ -855,16 +839,6 @@ config SERIAL_CPM_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_SGI_L1_CONSOLE
- bool "SGI Altix L1 serial console support"
- depends on IA64_GENERIC || IA64_SGI_SN2
- select SERIAL_CORE
- select SERIAL_CORE_CONSOLE
- help
- If you have an SGI Altix and you would like to use the system
- controller serial port as your console (you want this!),
- say Y. Otherwise, say N.
-
config SERIAL_PIC32
tristate "Microchip PIC32 serial support"
depends on MACH_PIC32
@@ -982,23 +956,6 @@ config SERIAL_JSM
To compile this driver as a module, choose M here: the
module will be called jsm.
-config SERIAL_SGI_IOC4
- tristate "SGI IOC4 controller serial support"
- depends on (IA64_GENERIC || IA64_SGI_SN2) && SGI_IOC4
- select SERIAL_CORE
- help
- If you have an SGI Altix with an IOC4 based Base IO card
- and wish to use the serial ports on this card, say Y.
- Otherwise, say N.
-
-config SERIAL_SGI_IOC3
- tristate "SGI Altix IOC3 serial support"
- depends on (IA64_GENERIC || IA64_SGI_SN2) && SGI_IOC3
- select SERIAL_CORE
- help
- If you have an SGI Altix with an IOC3 serial card,
- say Y or M. Otherwise, say N.
-
config SERIAL_MSM
tristate "MSM on-chip serial port support"
depends on ARCH_QCOM
@@ -1035,25 +992,6 @@ config SERIAL_VT8500_CONSOLE
depends on SERIAL_VT8500=y
select SERIAL_CORE_CONSOLE
-config SERIAL_NETX
- tristate "NetX serial port support"
- depends on ARCH_NETX
- select SERIAL_CORE
- help
- If you have a machine based on a Hilscher NetX SoC you
- can enable its onboard serial port by enabling this option.
-
- To compile this driver as a module, choose M here: the
- module will be called netx-serial.
-
-config SERIAL_NETX_CONSOLE
- bool "Console on NetX serial port"
- depends on SERIAL_NETX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have enabled the serial port on the Hilscher NetX SoC
- you can make it the console by answering Y to this option.
-
config SERIAL_OMAP
tristate "OMAP serial port support"
depends on ARCH_OMAP2PLUS
@@ -1452,6 +1390,22 @@ config SERIAL_FSL_LPUART_CONSOLE
If you have enabled the lpuart serial port on the Freescale SoCs,
you can make it the console by answering Y to this option.
+config SERIAL_FSL_LINFLEXUART
+ tristate "Freescale linflexuart serial port support"
+ depends on PRINTK
+ select SERIAL_CORE
+ help
+ Support for the on-chip linflexuart on some Freescale SOCs.
+
+config SERIAL_FSL_LINFLEXUART_CONSOLE
+ bool "Console on Freescale linflexuart serial port"
+ depends on SERIAL_FSL_LINFLEXUART=y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ help
+ If you have enabled the linflexuart serial port on the Freescale
+ SoCs, you can make it the console by answering Y to this option.
+
config SERIAL_CONEXANT_DIGICOLOR
tristate "Conexant Digicolor CX92xxx USART serial port support"
depends on OF
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 7cd7cabfa6c4..863f47056539 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_ZS) += zs.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
-obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
obj-$(CONFIG_SERIAL_IMX) += imx.o
obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
@@ -53,14 +52,10 @@ obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
-obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
-obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
-obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
-obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
@@ -82,6 +77,7 @@ obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o
obj-$(CONFIG_SERIAL_ARC) += arc_uart.o
obj-$(CONFIG_SERIAL_RP2) += rp2.o
obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o
+obj-$(CONFIG_SERIAL_FSL_LINFLEXUART) += fsl_linflexuart.o
obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o
obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 5921a33b2a07..3a7d1a66f79c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2718,11 +2718,8 @@ static int sbsa_uart_probe(struct platform_device *pdev)
return -ENOMEM;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "cannot obtain irq\n");
+ if (ret < 0)
return ret;
- }
uap->port.irq = ret;
#ifdef CONFIG_ACPI_SPCR_TABLE
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 0b4f36905321..a8dc8af83f39 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -294,50 +294,6 @@ static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
tasklet_schedule(t);
}
-static unsigned int atmel_get_lines_status(struct uart_port *port)
-{
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- unsigned int status, ret = 0;
-
- status = atmel_uart_readl(port, ATMEL_US_CSR);
-
- mctrl_gpio_get(atmel_port->gpios, &ret);
-
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
- UART_GPIO_CTS))) {
- if (ret & TIOCM_CTS)
- status &= ~ATMEL_US_CTS;
- else
- status |= ATMEL_US_CTS;
- }
-
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
- UART_GPIO_DSR))) {
- if (ret & TIOCM_DSR)
- status &= ~ATMEL_US_DSR;
- else
- status |= ATMEL_US_DSR;
- }
-
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
- UART_GPIO_RI))) {
- if (ret & TIOCM_RI)
- status &= ~ATMEL_US_RI;
- else
- status |= ATMEL_US_RI;
- }
-
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
- UART_GPIO_DCD))) {
- if (ret & TIOCM_CD)
- status &= ~ATMEL_US_DCD;
- else
- status |= ATMEL_US_DCD;
- }
-
- return status;
-}
-
/* Enable or disable the rs485 support */
static int atmel_config_rs485(struct uart_port *port,
struct serial_rs485 *rs485conf)
@@ -1400,7 +1356,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
atmel_port->hd_start_rx = false;
atmel_start_rx(port);
- return;
}
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
@@ -1454,7 +1409,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
spin_lock(&atmel_port->lock_suspended);
do {
- status = atmel_get_lines_status(port);
+ status = atmel_uart_readl(port, ATMEL_US_CSR);
mask = atmel_uart_readl(port, ATMEL_US_IMR);
pending = status & mask;
if (!pending)
@@ -2003,7 +1958,7 @@ static int atmel_startup(struct uart_port *port)
}
/* Save current CSR for comparison in atmel_tasklet_func() */
- atmel_port->irq_status_prev = atmel_get_lines_status(port);
+ atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR);
/*
* Finally, enable the serial port
@@ -2888,7 +2843,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
struct atmel_uart_port *atmel_port;
struct device_node *np = pdev->dev.parent->of_node;
void *data;
- int ret = -ENODEV;
+ int ret;
bool rs485_enabled;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
new file mode 100644
index 000000000000..68d74f2b5106
--- /dev/null
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -0,0 +1,937 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale linflexuart serial port driver
+ *
+ * Copyright 2012-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
+ defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/console.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty_flip.h>
+#include <linux/delay.h>
+
+/* All registers are 32-bit width */
+
+#define LINCR1 0x0000 /* LIN control register */
+#define LINIER 0x0004 /* LIN interrupt enable register */
+#define LINSR 0x0008 /* LIN status register */
+#define LINESR 0x000C /* LIN error status register */
+#define UARTCR 0x0010 /* UART mode control register */
+#define UARTSR 0x0014 /* UART mode status register */
+#define LINTCSR 0x0018 /* LIN timeout control status register */
+#define LINOCR 0x001C /* LIN output compare register */
+#define LINTOCR 0x0020 /* LIN timeout control register */
+#define LINFBRR 0x0024 /* LIN fractional baud rate register */
+#define LINIBRR 0x0028 /* LIN integer baud rate register */
+#define LINCFR 0x002C /* LIN checksum field register */
+#define LINCR2 0x0030 /* LIN control register 2 */
+#define BIDR 0x0034 /* Buffer identifier register */
+#define BDRL 0x0038 /* Buffer data register least significant */
+#define BDRM 0x003C /* Buffer data register most significant */
+#define IFER 0x0040 /* Identifier filter enable register */
+#define IFMI 0x0044 /* Identifier filter match index */
+#define IFMR 0x0048 /* Identifier filter mode register */
+#define GCR 0x004C /* Global control register */
+#define UARTPTO 0x0050 /* UART preset timeout register */
+#define UARTCTO 0x0054 /* UART current timeout register */
+
+/*
+ * Register field definitions
+ */
+
+#define LINFLEXD_LINCR1_INIT BIT(0)
+#define LINFLEXD_LINCR1_MME BIT(4)
+#define LINFLEXD_LINCR1_BF BIT(7)
+
+#define LINFLEXD_LINSR_LINS_INITMODE BIT(12)
+#define LINFLEXD_LINSR_LINS_MASK (0xF << 12)
+
+#define LINFLEXD_LINIER_SZIE BIT(15)
+#define LINFLEXD_LINIER_OCIE BIT(14)
+#define LINFLEXD_LINIER_BEIE BIT(13)
+#define LINFLEXD_LINIER_CEIE BIT(12)
+#define LINFLEXD_LINIER_HEIE BIT(11)
+#define LINFLEXD_LINIER_FEIE BIT(8)
+#define LINFLEXD_LINIER_BOIE BIT(7)
+#define LINFLEXD_LINIER_LSIE BIT(6)
+#define LINFLEXD_LINIER_WUIE BIT(5)
+#define LINFLEXD_LINIER_DBFIE BIT(4)
+#define LINFLEXD_LINIER_DBEIETOIE BIT(3)
+#define LINFLEXD_LINIER_DRIE BIT(2)
+#define LINFLEXD_LINIER_DTIE BIT(1)
+#define LINFLEXD_LINIER_HRIE BIT(0)
+
+#define LINFLEXD_UARTCR_OSR_MASK (0xF << 24)
+#define LINFLEXD_UARTCR_OSR(uartcr) (((uartcr) \
+ & LINFLEXD_UARTCR_OSR_MASK) >> 24)
+
+#define LINFLEXD_UARTCR_ROSE BIT(23)
+
+#define LINFLEXD_UARTCR_RFBM BIT(9)
+#define LINFLEXD_UARTCR_TFBM BIT(8)
+#define LINFLEXD_UARTCR_WL1 BIT(7)
+#define LINFLEXD_UARTCR_PC1 BIT(6)
+
+#define LINFLEXD_UARTCR_RXEN BIT(5)
+#define LINFLEXD_UARTCR_TXEN BIT(4)
+#define LINFLEXD_UARTCR_PC0 BIT(3)
+
+#define LINFLEXD_UARTCR_PCE BIT(2)
+#define LINFLEXD_UARTCR_WL0 BIT(1)
+#define LINFLEXD_UARTCR_UART BIT(0)
+
+#define LINFLEXD_UARTSR_SZF BIT(15)
+#define LINFLEXD_UARTSR_OCF BIT(14)
+#define LINFLEXD_UARTSR_PE3 BIT(13)
+#define LINFLEXD_UARTSR_PE2 BIT(12)
+#define LINFLEXD_UARTSR_PE1 BIT(11)
+#define LINFLEXD_UARTSR_PE0 BIT(10)
+#define LINFLEXD_UARTSR_RMB BIT(9)
+#define LINFLEXD_UARTSR_FEF BIT(8)
+#define LINFLEXD_UARTSR_BOF BIT(7)
+#define LINFLEXD_UARTSR_RPS BIT(6)
+#define LINFLEXD_UARTSR_WUF BIT(5)
+#define LINFLEXD_UARTSR_4 BIT(4)
+
+#define LINFLEXD_UARTSR_TO BIT(3)
+
+#define LINFLEXD_UARTSR_DRFRFE BIT(2)
+#define LINFLEXD_UARTSR_DTFTFF BIT(1)
+#define LINFLEXD_UARTSR_NF BIT(0)
+#define LINFLEXD_UARTSR_PE (LINFLEXD_UARTSR_PE0 |\
+ LINFLEXD_UARTSR_PE1 |\
+ LINFLEXD_UARTSR_PE2 |\
+ LINFLEXD_UARTSR_PE3)
+
+#define LINFLEX_LDIV_MULTIPLIER (16)
+
+#define DRIVER_NAME "fsl-linflexuart"
+#define DEV_NAME "ttyLF"
+#define UART_NR 4
+
+#define EARLYCON_BUFFER_INITIAL_CAP 8
+
+#define PREINIT_DELAY 2000 /* us */
+
+static const struct of_device_id linflex_dt_ids[] = {
+ {
+ .compatible = "fsl,s32v234-linflexuart",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, linflex_dt_ids);
+
+#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE
+static struct uart_port *earlycon_port;
+static bool linflex_earlycon_same_instance;
+static DEFINE_SPINLOCK(init_lock);
+static bool during_init;
+
+static struct {
+ char *content;
+ unsigned int len, cap;
+} earlycon_buf;
+#endif
+
+static void linflex_stop_tx(struct uart_port *port)
+{
+ unsigned long ier;
+
+ ier = readl(port->membase + LINIER);
+ ier &= ~(LINFLEXD_LINIER_DTIE);
+ writel(ier, port->membase + LINIER);
+}
+
+static void linflex_stop_rx(struct uart_port *port)
+{
+ unsigned long ier;
+
+ ier = readl(port->membase + LINIER);
+ writel(ier & ~LINFLEXD_LINIER_DRIE, port->membase + LINIER);
+}
+
+static inline void linflex_transmit_buffer(struct uart_port *sport)
+{
+ struct circ_buf *xmit = &sport->state->xmit;
+ unsigned char c;
+ unsigned long status;
+
+ while (!uart_circ_empty(xmit)) {
+ c = xmit->buf[xmit->tail];
+ writeb(c, sport->membase + BDRL);
+
+ /* Wait for the data transmission to complete. */
+ while (((status = readl(sport->membase + UARTSR)) &
+ LINFLEXD_UARTSR_DTFTFF) !=
+ LINFLEXD_UARTSR_DTFTFF)
+ ;
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->icount.tx++;
+
+ writel(status | LINFLEXD_UARTSR_DTFTFF,
+ sport->membase + UARTSR);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(sport);
+
+ if (uart_circ_empty(xmit))
+ linflex_stop_tx(sport);
+}
+
+static void linflex_start_tx(struct uart_port *port)
+{
+ unsigned long ier;
+
+ linflex_transmit_buffer(port);
+ ier = readl(port->membase + LINIER);
+ writel(ier | LINFLEXD_LINIER_DTIE, port->membase + LINIER);
+}
+
+static irqreturn_t linflex_txint(int irq, void *dev_id)
+{
+ struct uart_port *sport = dev_id;
+ struct circ_buf *xmit = &sport->state->xmit;
+ unsigned long flags;
+ unsigned long status;
+
+ spin_lock_irqsave(&sport->lock, flags);
+
+ if (sport->x_char) {
+ writeb(sport->x_char, sport->membase + BDRL);
+
+ /* wait for the data transmission to complete */
+ while (((status = readl(sport->membase + UARTSR)) &
+ LINFLEXD_UARTSR_DTFTFF) != LINFLEXD_UARTSR_DTFTFF)
+ ;
+
+ writel(status | LINFLEXD_UARTSR_DTFTFF,
+ sport->membase + UARTSR);
+
+ goto out;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(sport)) {
+ linflex_stop_tx(sport);
+ goto out;
+ }
+
+ linflex_transmit_buffer(sport);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(sport);
+
+out:
+ spin_unlock_irqrestore(&sport->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t linflex_rxint(int irq, void *dev_id)
+{
+ struct uart_port *sport = dev_id;
+ unsigned int flg;
+ struct tty_port *port = &sport->state->port;
+ unsigned long flags, status;
+ unsigned char rx;
+
+ spin_lock_irqsave(&sport->lock, flags);
+
+ status = readl(sport->membase + UARTSR);
+ while (status & LINFLEXD_UARTSR_RMB) {
+ rx = readb(sport->membase + BDRM);
+ flg = TTY_NORMAL;
+ sport->icount.rx++;
+
+ if (status & (LINFLEXD_UARTSR_BOF | LINFLEXD_UARTSR_SZF |
+ LINFLEXD_UARTSR_FEF | LINFLEXD_UARTSR_PE)) {
+ if (status & LINFLEXD_UARTSR_SZF)
+ status |= LINFLEXD_UARTSR_SZF;
+ if (status & LINFLEXD_UARTSR_BOF)
+ status |= LINFLEXD_UARTSR_BOF;
+ if (status & LINFLEXD_UARTSR_FEF)
+ status |= LINFLEXD_UARTSR_FEF;
+ if (status & LINFLEXD_UARTSR_PE)
+ status |= LINFLEXD_UARTSR_PE;
+ }
+
+ writel(status | LINFLEXD_UARTSR_RMB | LINFLEXD_UARTSR_DRFRFE,
+ sport->membase + UARTSR);
+ status = readl(sport->membase + UARTSR);
+
+ if (uart_handle_sysrq_char(sport, (unsigned char)rx))
+ continue;
+
+#ifdef SUPPORT_SYSRQ
+ sport->sysrq = 0;
+#endif
+ tty_insert_flip_char(port, rx, flg);
+ }
+
+ spin_unlock_irqrestore(&sport->lock, flags);
+
+ tty_flip_buffer_push(port);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t linflex_int(int irq, void *dev_id)
+{
+ struct uart_port *sport = dev_id;
+ unsigned long status;
+
+ status = readl(sport->membase + UARTSR);
+
+ if (status & LINFLEXD_UARTSR_DRFRFE)
+ linflex_rxint(irq, dev_id);
+ if (status & LINFLEXD_UARTSR_DTFTFF)
+ linflex_txint(irq, dev_id);
+
+ return IRQ_HANDLED;
+}
+
+/* return TIOCSER_TEMT when transmitter is not busy */
+static unsigned int linflex_tx_empty(struct uart_port *port)
+{
+ unsigned long status;
+
+ status = readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF;
+
+ return status ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int linflex_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void linflex_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void linflex_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void linflex_setup_watermark(struct uart_port *sport)
+{
+ unsigned long cr, ier, cr1;
+
+ /* Disable transmission/reception */
+ ier = readl(sport->membase + LINIER);
+ ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
+ writel(ier, sport->membase + LINIER);
+
+ cr = readl(sport->membase + UARTCR);
+ cr &= ~(LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN);
+ writel(cr, sport->membase + UARTCR);
+
+ /* Enter initialization mode by setting INIT bit */
+
+ /* set the Linflex in master mode and activate by-pass filter */
+ cr1 = LINFLEXD_LINCR1_BF | LINFLEXD_LINCR1_MME
+ | LINFLEXD_LINCR1_INIT;
+ writel(cr1, sport->membase + LINCR1);
+
+ /* wait for init mode entry */
+ while ((readl(sport->membase + LINSR)
+ & LINFLEXD_LINSR_LINS_MASK)
+ != LINFLEXD_LINSR_LINS_INITMODE)
+ ;
+
+ /*
+ * UART = 0x1; - Linflex working in UART mode
+ * TXEN = 0x1; - Enable transmission of data now
+ * RXEN = 0x1; - Receiver enabled
+ * WL0 = 0x1; - 8 bit data
+ * PCE = 0x0; - No parity
+ */
+
+ /* set UART bit to allow writing other bits */
+ writel(LINFLEXD_UARTCR_UART, sport->membase + UARTCR);
+
+ cr = (LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN |
+ LINFLEXD_UARTCR_WL0 | LINFLEXD_UARTCR_UART);
+
+ writel(cr, sport->membase + UARTCR);
+
+ cr1 &= ~(LINFLEXD_LINCR1_INIT);
+
+ writel(cr1, sport->membase + LINCR1);
+
+ ier = readl(sport->membase + LINIER);
+ ier |= LINFLEXD_LINIER_DRIE;
+ ier |= LINFLEXD_LINIER_DTIE;
+
+ writel(ier, sport->membase + LINIER);
+}
+
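An aside on the init-mode wait in linflex_setup_watermark() above (a sketch, not part of the patch): the driver open-codes an unbounded poll of LINSR for the INIT state. A bounded equivalent using the kernel's iopoll helper might look as follows; the register offset and field values mirror the LINSR definitions earlier in this file, and the atomic variant is chosen because callers hold the port spinlock:

#include <linux/iopoll.h>
#include <linux/types.h>

/* Illustration only: a bounded wait for LinFlexD init mode. "base" is
 * the mapped register block (port->membase); 0x0008 is LINSR and the
 * field values match LINFLEXD_LINSR_LINS_MASK / _INITMODE above.
 */
static int linflex_wait_init_mode(void __iomem *base)
{
	u32 sr;

	return readl_poll_timeout_atomic(base + 0x0008, sr,
					 (sr & (0xf << 12)) == (1u << 12),
					 10, 100000);
}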
+static int linflex_startup(struct uart_port *port)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ linflex_setup_watermark(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
+ DRIVER_NAME, port);
+
+ return ret;
+}
+
+static void linflex_shutdown(struct uart_port *port)
+{
+ unsigned long ier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* disable interrupts */
+ ier = readl(port->membase + LINIER);
+ ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
+ writel(ier, port->membase + LINIER);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ devm_free_irq(port->dev, port->irq, port);
+}
+
+static void
+linflex_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned long cr, old_cr, cr1;
+ unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
+
+ cr = readl(port->membase + UARTCR);
+ old_cr = cr;
+
+ /* Enter initialization mode by setting INIT bit */
+ cr1 = readl(port->membase + LINCR1);
+ cr1 |= LINFLEXD_LINCR1_INIT;
+ writel(cr1, port->membase + LINCR1);
+
+ /* wait for init mode entry */
+ while ((readl(port->membase + LINSR)
+ & LINFLEXD_LINSR_LINS_MASK)
+ != LINFLEXD_LINSR_LINS_INITMODE)
+ ;
+
+ /*
+ * Only CS8 and CS7 are supported; for CS7, parity (PE) must be enabled.
+ * Supported modes:
+ * - (7,e/o,1)
+ * - (8,n,1)
+ * - (8,e/o,1)
+ */
+ /* put the UART into configuration mode */
+
+ while ((termios->c_cflag & CSIZE) != CS8 &&
+ (termios->c_cflag & CSIZE) != CS7) {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= old_csize;
+ old_csize = CS8;
+ }
+
+ if ((termios->c_cflag & CSIZE) == CS7) {
+ /* Word length: WL1WL0:00 */
+ cr = old_cr & ~LINFLEXD_UARTCR_WL1 & ~LINFLEXD_UARTCR_WL0;
+ }
+
+ if ((termios->c_cflag & CSIZE) == CS8) {
+ /* Word length: WL1WL0:01 */
+ cr = (old_cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1;
+ }
+
+ if (termios->c_cflag & CMSPAR) {
+ if ((termios->c_cflag & CSIZE) != CS8) {
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ }
+ /* has a space/sticky bit */
+ cr |= LINFLEXD_UARTCR_WL0;
+ }
+
+ if (termios->c_cflag & CSTOPB)
+ termios->c_cflag &= ~CSTOPB;
+
+ /* parity must be enabled when CS7 to match 8-bits format */
+ if ((termios->c_cflag & CSIZE) == CS7)
+ termios->c_cflag |= PARENB;
+
+ if ((termios->c_cflag & PARENB)) {
+ cr |= LINFLEXD_UARTCR_PCE;
+ if (termios->c_cflag & PARODD)
+ cr = (cr | LINFLEXD_UARTCR_PC0) &
+ (~LINFLEXD_UARTCR_PC1);
+ else
+ cr = cr & (~LINFLEXD_UARTCR_PC1 &
+ ~LINFLEXD_UARTCR_PC0);
+ } else {
+ cr &= ~LINFLEXD_UARTCR_PCE;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->read_status_mask = 0;
+
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (LINFLEXD_UARTSR_FEF |
+ LINFLEXD_UARTSR_PE0 |
+ LINFLEXD_UARTSR_PE1 |
+ LINFLEXD_UARTSR_PE2 |
+ LINFLEXD_UARTSR_PE3);
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= LINFLEXD_UARTSR_FEF;
+
+ /* characters to ignore */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= LINFLEXD_UARTSR_PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= LINFLEXD_UARTSR_PE;
+ /*
+ * if we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= LINFLEXD_UARTSR_BOF;
+ }
+
+ writel(cr, port->membase + UARTCR);
+
+ cr1 &= ~(LINFLEXD_LINCR1_INIT);
+
+ writel(cr1, port->membase + LINCR1);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *linflex_type(struct uart_port *port)
+{
+ return "FSL_LINFLEX";
+}
+
+static void linflex_release_port(struct uart_port *port)
+{
+ /* nothing to do */
+}
+
+static int linflex_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/* configure/auto-configure the port */
+static void linflex_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_LINFLEXUART;
+}
+
+static const struct uart_ops linflex_pops = {
+ .tx_empty = linflex_tx_empty,
+ .set_mctrl = linflex_set_mctrl,
+ .get_mctrl = linflex_get_mctrl,
+ .stop_tx = linflex_stop_tx,
+ .start_tx = linflex_start_tx,
+ .stop_rx = linflex_stop_rx,
+ .break_ctl = linflex_break_ctl,
+ .startup = linflex_startup,
+ .shutdown = linflex_shutdown,
+ .set_termios = linflex_set_termios,
+ .type = linflex_type,
+ .request_port = linflex_request_port,
+ .release_port = linflex_release_port,
+ .config_port = linflex_config_port,
+};
+
+static struct uart_port *linflex_ports[UART_NR];
+
+#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE
+static void linflex_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned long cr;
+
+ cr = readl(port->membase + UARTCR);
+
+ writeb(ch, port->membase + BDRL);
+
+ if (!(cr & LINFLEXD_UARTCR_TFBM))
+ while ((readl(port->membase + UARTSR) &
+ LINFLEXD_UARTSR_DTFTFF)
+ != LINFLEXD_UARTSR_DTFTFF)
+ ;
+ else
+ while (readl(port->membase + UARTSR) &
+ LINFLEXD_UARTSR_DTFTFF)
+ ;
+
+ if (!(cr & LINFLEXD_UARTCR_TFBM)) {
+ writel((readl(port->membase + UARTSR) |
+ LINFLEXD_UARTSR_DTFTFF),
+ port->membase + UARTSR);
+ }
+}
+
+static void linflex_earlycon_putchar(struct uart_port *port, int ch)
+{
+ unsigned long flags;
+ char *ret;
+
+ if (!linflex_earlycon_same_instance) {
+ linflex_console_putchar(port, ch);
+ return;
+ }
+
+ spin_lock_irqsave(&init_lock, flags);
+ if (!during_init)
+ goto outside_init;
+
+ if (earlycon_buf.len >= 1 << CONFIG_LOG_BUF_SHIFT)
+ goto init_release;
+
+ if (!earlycon_buf.cap) {
+ earlycon_buf.content = kmalloc(EARLYCON_BUFFER_INITIAL_CAP,
+ GFP_ATOMIC);
+ earlycon_buf.cap = earlycon_buf.content ?
+ EARLYCON_BUFFER_INITIAL_CAP : 0;
+ } else if (earlycon_buf.len == earlycon_buf.cap) {
+ ret = krealloc(earlycon_buf.content, earlycon_buf.cap << 1,
+ GFP_ATOMIC);
+ if (ret) {
+ earlycon_buf.content = ret;
+ earlycon_buf.cap <<= 1;
+ }
+ }
+
+ if (earlycon_buf.len < earlycon_buf.cap)
+ earlycon_buf.content[earlycon_buf.len++] = ch;
+
+ goto init_release;
+
+outside_init:
+ linflex_console_putchar(port, ch);
+init_release:
+ spin_unlock_irqrestore(&init_lock, flags);
+}
+
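Worth spelling out (an illustrative userspace sketch, not driver code): while the regular console is registering over the same LinFlexD instance, earlycon output is parked in a heap buffer that starts at 8 bytes, doubles on demand, is capped at the kernel log buffer size, and is replayed from linflex_console_setup() once initialization finishes. The buffering idiom on its own:

#include <stdlib.h>

struct byte_buf {
	char *content;
	unsigned int len, cap;
};

/* Append one byte, growing the buffer from 8 bytes by doubling; bytes
 * beyond "limit" are silently dropped, mirroring the earlycon path.
 */
static void buf_push(struct byte_buf *b, char ch, unsigned int limit)
{
	if (b->len >= limit)
		return;

	if (!b->cap) {
		b->content = malloc(8);
		b->cap = b->content ? 8 : 0;
	} else if (b->len == b->cap) {
		char *bigger = realloc(b->content, b->cap * 2);

		if (bigger) {
			b->content = bigger;
			b->cap *= 2;
		}
	}

	if (b->len < b->cap)
		b->content[b->len++] = ch;
}

int main(void)
{
	struct byte_buf b = { 0 };

	for (char c = 'a'; c <= 'z'; c++)
		buf_push(&b, c, 1u << 17);	/* stand-in for 1 << CONFIG_LOG_BUF_SHIFT */

	free(b.content);
	return 0;
}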
+static void linflex_string_write(struct uart_port *sport, const char *s,
+ unsigned int count)
+{
+ unsigned long cr, ier = 0;
+
+ ier = readl(sport->membase + LINIER);
+ linflex_stop_tx(sport);
+
+ cr = readl(sport->membase + UARTCR);
+ cr |= (LINFLEXD_UARTCR_TXEN);
+ writel(cr, sport->membase + UARTCR);
+
+ uart_console_write(sport, s, count, linflex_console_putchar);
+
+ writel(ier, sport->membase + LINIER);
+}
+
+static void
+linflex_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_port *sport = linflex_ports[co->index];
+ unsigned long flags;
+ int locked = 1;
+
+ if (sport->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->lock, flags);
+ else
+ spin_lock_irqsave(&sport->lock, flags);
+
+ linflex_string_write(sport, s, count);
+
+ if (locked)
+ spin_unlock_irqrestore(&sport->lock, flags);
+}
+
+/*
+ * if the port was already initialised (eg, by a boot loader),
+ * try to determine the current setup.
+ */
+static void __init
+linflex_console_get_options(struct uart_port *sport, int *parity, int *bits)
+{
+ unsigned long cr;
+
+ cr = readl(sport->membase + UARTCR);
+ cr &= LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN;
+
+ if (!cr)
+ return;
+
+ /* ok, the port was enabled */
+
+ *parity = 'n';
+ if (cr & LINFLEXD_UARTCR_PCE) {
+ if (cr & LINFLEXD_UARTCR_PC0)
+ *parity = 'o';
+ else
+ *parity = 'e';
+ }
+
+ if ((cr & LINFLEXD_UARTCR_WL0) && ((cr & LINFLEXD_UARTCR_WL1) == 0)) {
+ if (cr & LINFLEXD_UARTCR_PCE)
+ *bits = 9;
+ else
+ *bits = 8;
+ }
+}
+
+static int __init linflex_console_setup(struct console *co, char *options)
+{
+ struct uart_port *sport;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+ int i;
+ unsigned long flags;
+ /*
+ * check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index == -1 || co->index >= ARRAY_SIZE(linflex_ports))
+ co->index = 0;
+
+ sport = linflex_ports[co->index];
+ if (!sport)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ linflex_console_get_options(sport, &parity, &bits);
+
+ if (earlycon_port && sport->mapbase == earlycon_port->mapbase) {
+ linflex_earlycon_same_instance = true;
+
+ spin_lock_irqsave(&init_lock, flags);
+ during_init = true;
+ spin_unlock_irqrestore(&init_lock, flags);
+
+ /* Workaround for character loss or output of many invalid
+ * characters when INIT mode is entered shortly after a
+ * character has been printed.
+ */
+ udelay(PREINIT_DELAY);
+ }
+
+ linflex_setup_watermark(sport);
+
+ ret = uart_set_options(sport, co, baud, parity, bits, flow);
+
+ if (!linflex_earlycon_same_instance)
+ goto done;
+
+ spin_lock_irqsave(&init_lock, flags);
+
+ /* Emptying buffer */
+ if (earlycon_buf.len) {
+ for (i = 0; i < earlycon_buf.len; i++)
+ linflex_console_putchar(earlycon_port,
+ earlycon_buf.content[i]);
+
+ kfree(earlycon_buf.content);
+ earlycon_buf.len = 0;
+ }
+
+ during_init = false;
+ spin_unlock_irqrestore(&init_lock, flags);
+
+done:
+ return ret;
+}
+
+static struct uart_driver linflex_reg;
+static struct console linflex_console = {
+ .name = DEV_NAME,
+ .write = linflex_console_write,
+ .device = uart_console_device,
+ .setup = linflex_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &linflex_reg,
+};
+
+static void linflex_earlycon_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, linflex_earlycon_putchar);
+}
+
+static int __init linflex_early_console_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = linflex_earlycon_write;
+ earlycon_port = &device->port;
+
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(linflex, "fsl,s32v234-linflexuart",
+ linflex_early_console_setup);
+
+#define LINFLEX_CONSOLE (&linflex_console)
+#else
+#define LINFLEX_CONSOLE NULL
+#endif
+
+static struct uart_driver linflex_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = DEV_NAME,
+ .nr = ARRAY_SIZE(linflex_ports),
+ .cons = LINFLEX_CONSOLE,
+};
+
+static int linflex_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct uart_port *sport;
+ struct resource *res;
+ int ret;
+
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+ if (!sport)
+ return -ENOMEM;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
+ if (ret >= UART_NR) {
+ dev_err(&pdev->dev, "driver limited to %d serial ports\n",
+ UART_NR);
+ return -ENOMEM;
+ }
+
+ sport->line = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ sport->mapbase = res->start;
+ sport->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sport->membase))
+ return PTR_ERR(sport->membase);
+
+ sport->dev = &pdev->dev;
+ sport->type = PORT_LINFLEXUART;
+ sport->iotype = UPIO_MEM;
+ sport->irq = platform_get_irq(pdev, 0);
+ sport->ops = &linflex_pops;
+ sport->flags = UPF_BOOT_AUTOCONF;
+
+ linflex_ports[sport->line] = sport;
+
+ platform_set_drvdata(pdev, sport);
+
+ ret = uart_add_one_port(&linflex_reg, sport);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int linflex_remove(struct platform_device *pdev)
+{
+ struct uart_port *sport = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&linflex_reg, sport);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int linflex_suspend(struct device *dev)
+{
+ struct uart_port *sport = dev_get_drvdata(dev);
+
+ uart_suspend_port(&linflex_reg, sport);
+
+ return 0;
+}
+
+static int linflex_resume(struct device *dev)
+{
+ struct uart_port *sport = dev_get_drvdata(dev);
+
+ uart_resume_port(&linflex_reg, sport);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume);
+
+static struct platform_driver linflex_driver = {
+ .probe = linflex_probe,
+ .remove = linflex_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = linflex_dt_ids,
+ .pm = &linflex_pm_ops,
+ },
+};
+
+static int __init linflex_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&linflex_reg);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&linflex_driver);
+ if (ret)
+ uart_unregister_driver(&linflex_reg);
+
+ return ret;
+}
+
+static void __exit linflex_serial_exit(void)
+{
+ platform_driver_unregister(&linflex_driver);
+ uart_unregister_driver(&linflex_reg);
+}
+
+module_init(linflex_serial_init);
+module_exit(linflex_serial_exit);
+
+MODULE_DESCRIPTION("Freescale linflex serial port driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 92dad2b4ec36..3e17bb8a0b16 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -214,6 +214,7 @@
#define UARTFIFO_TXSIZE_OFF 4
#define UARTFIFO_RXFE 0x00000008
#define UARTFIFO_RXSIZE_OFF 0
+#define UARTFIFO_DEPTH(x) (0x1 << ((x) ? ((x) + 1) : 0))
#define UARTWATER_COUNT_MASK 0xff
#define UARTWATER_TXCNT_OFF 8
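A reading note on the new UARTFIFO_DEPTH() macro (standalone sketch): the LPUART FIFO size fields encode a depth of one entry for the value 0 and 2^(x+1) entries for any other value x, so a field value of 1 means 4 entries and 7 means 256:

#include <stdio.h>

/* Same encoding as UARTFIFO_DEPTH(): a register field of 0 means a
 * 1-entry FIFO, any other value x means a FIFO of 2^(x+1) entries.
 */
static unsigned int uartfifo_depth(unsigned int x)
{
	return x ? 1u << (x + 1) : 1;
}

int main(void)
{
	for (unsigned int x = 0; x <= 7; x++)
		printf("field %u -> %u entries\n", x, uartfifo_depth(x));
	return 0;
}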
@@ -451,6 +452,11 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
dma_async_issue_pending(sport->dma_tx_chan);
}
+static bool lpuart_stopped_or_empty(struct uart_port *port)
+{
+ return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port);
+}
+
static void lpuart_dma_tx_complete(void *arg)
{
struct lpuart_port *sport = arg;
@@ -478,7 +484,7 @@ static void lpuart_dma_tx_complete(void *arg)
spin_lock_irqsave(&sport->port.lock, flags);
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
+ if (!lpuart_stopped_or_empty(&sport->port))
lpuart_dma_tx(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -517,9 +523,16 @@ static int lpuart_dma_tx_request(struct uart_port *port)
return 0;
}
+static bool lpuart_is_32(struct lpuart_port *sport)
+{
+ return sport->port.iotype == UPIO_MEM32 ||
+ sport->port.iotype == UPIO_MEM32BE;
+}
+
static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ u32 val;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
@@ -529,6 +542,30 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
dmaengine_terminate_all(sport->dma_tx_chan);
}
+
+ if (lpuart_is_32(sport)) {
+ val = lpuart32_read(&sport->port, UARTFIFO);
+ val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(&sport->port, val, UARTFIFO);
+ } else {
+ val = readb(sport->port.membase + UARTPFIFO);
+ val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
+ writeb(val, sport->port.membase + UARTCFIFO);
+ }
+}
+
+static void lpuart_wait_bit_set(struct uart_port *port, unsigned int offset,
+ u8 bit)
+{
+ while (!(readb(port->membase + offset) & bit))
+ cpu_relax();
+}
+
+static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
+ u32 bit)
+{
+ while (!(lpuart32_read(port, offset) & bit))
+ cpu_relax();
}
#if defined(CONFIG_CONSOLE_POLL)
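The lpuart_wait_bit_set()/lpuart32_wait_bit_set() helpers added above replace the open-coded drain loops further down, but like the originals they spin without bound. Where a bound is preferred, a hypothetical variant built on readb_poll_timeout_atomic() from <linux/iopoll.h> could look like the sketch below; the helper name and the 100 µs budget are assumptions for illustration, not something this patch introduces.

#include <linux/iopoll.h>

static int lpuart_wait_bit_set_timeout(struct uart_port *port,
				       unsigned int offset, u8 bit)
{
	u8 val;

	/* Poll every 1 us, give up with -ETIMEDOUT after 100 us. */
	return readb_poll_timeout_atomic(port->membase + offset, val,
					 val & bit, 1, 100);
}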
@@ -574,9 +611,7 @@ static int lpuart_poll_init(struct uart_port *port)
static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
{
/* drain */
- while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
- barrier();
-
+ lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
writeb(c, port->membase + UARTDR);
}
@@ -599,26 +634,26 @@ static int lpuart32_poll_init(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
/* Disable Rx & Tx */
- writel(0, sport->port.membase + UARTCTRL);
+ lpuart32_write(&sport->port, UARTCTRL, 0);
- temp = readl(sport->port.membase + UARTFIFO);
+ temp = lpuart32_read(&sport->port, UARTFIFO);
/* Enable Rx and Tx FIFO */
- writel(temp | UARTFIFO_RXFE | UARTFIFO_TXFE,
- sport->port.membase + UARTFIFO);
+ lpuart32_write(&sport->port, UARTFIFO,
+ temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
/* flush Tx and Rx FIFO */
- writel(UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH,
- sport->port.membase + UARTFIFO);
+ lpuart32_write(&sport->port, UARTFIFO,
+ UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
/* explicitly clear RDRF */
- if (readl(sport->port.membase + UARTSTAT) & UARTSTAT_RDRF) {
- readl(sport->port.membase + UARTDATA);
- writel(UARTFIFO_RXUF, sport->port.membase + UARTFIFO);
+ if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
+ lpuart32_read(&sport->port, UARTDATA);
+ lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
}
/* Enable Rx and Tx */
- writel(UARTCTRL_RE | UARTCTRL_TE, sport->port.membase + UARTCTRL);
+ lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
@@ -626,18 +661,16 @@ static int lpuart32_poll_init(struct uart_port *port)
static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
{
- while (!(readl(port->membase + UARTSTAT) & UARTSTAT_TDRE))
- barrier();
-
- writel(c, port->membase + UARTDATA);
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
+ lpuart32_write(port, UARTDATA, c);
}
static int lpuart32_poll_get_char(struct uart_port *port)
{
- if (!(readl(port->membase + UARTSTAT) & UARTSTAT_RDRF))
+ if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
return NO_POLL_CHAR;
- return readl(port->membase + UARTDATA);
+ return lpuart32_read(port, UARTDATA);
}
#endif
@@ -645,6 +678,18 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
{
struct circ_buf *xmit = &sport->port.state->xmit;
+ if (sport->port.x_char) {
+ writeb(sport->port.x_char, sport->port.membase + UARTDR);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ if (lpuart_stopped_or_empty(&sport->port)) {
+ lpuart_stop_tx(&sport->port);
+ return;
+ }
+
while (!uart_circ_empty(xmit) &&
(readb(sport->port.membase + UARTTCFIFO) < sport->txfifo_size)) {
writeb(xmit->buf[xmit->tail], sport->port.membase + UARTDR);
@@ -664,6 +709,18 @@ static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long txcnt;
+ if (sport->port.x_char) {
+ lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ if (lpuart_stopped_or_empty(&sport->port)) {
+ lpuart32_stop_tx(&sport->port);
+ return;
+ }
+
txcnt = lpuart32_read(&sport->port, UARTWATER);
txcnt = txcnt >> UARTWATER_TXCNT_OFF;
txcnt &= UARTWATER_COUNT_MASK;
@@ -687,14 +744,13 @@ static void lpuart_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- struct circ_buf *xmit = &sport->port.state->xmit;
unsigned char temp;
temp = readb(port->membase + UARTCR2);
writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
if (sport->lpuart_dma_tx_use) {
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+ if (!lpuart_stopped_or_empty(port))
lpuart_dma_tx(sport);
} else {
if (readb(port->membase + UARTSR1) & UARTSR1_TDRE)
@@ -705,11 +761,10 @@ static void lpuart_start_tx(struct uart_port *port)
static void lpuart32_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long temp;
if (sport->lpuart_dma_tx_use) {
- if (!uart_circ_empty(xmit) && !uart_tx_stopped(port))
+ if (!lpuart_stopped_or_empty(port))
lpuart_dma_tx(sport);
} else {
temp = lpuart32_read(port, UARTCTRL);
@@ -753,52 +808,18 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
return 0;
}
-static bool lpuart_is_32(struct lpuart_port *sport)
-{
- return sport->port.iotype == UPIO_MEM32 ||
- sport->port.iotype == UPIO_MEM32BE;
-}
-
-static irqreturn_t lpuart_txint(int irq, void *dev_id)
+static void lpuart_txint(struct lpuart_port *sport)
{
- struct lpuart_port *sport = dev_id;
- struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
- if (sport->port.x_char) {
- if (lpuart_is_32(sport))
- lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
- else
- writeb(sport->port.x_char, sport->port.membase + UARTDR);
- goto out;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
- if (lpuart_is_32(sport))
- lpuart32_stop_tx(&sport->port);
- else
- lpuart_stop_tx(&sport->port);
- goto out;
- }
-
- if (lpuart_is_32(sport))
- lpuart32_transmit_buffer(sport);
- else
- lpuart_transmit_buffer(sport);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&sport->port);
-
-out:
+ lpuart_transmit_buffer(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
- return IRQ_HANDLED;
}
-static irqreturn_t lpuart_rxint(int irq, void *dev_id)
+static void lpuart_rxint(struct lpuart_port *sport)
{
- struct lpuart_port *sport = dev_id;
- unsigned int flg, ignored = 0;
+ unsigned int flg, ignored = 0, overrun = 0;
struct tty_port *port = &sport->port.state->port;
unsigned long flags;
unsigned char rx, sr;
@@ -825,7 +846,7 @@ static irqreturn_t lpuart_rxint(int irq, void *dev_id)
sport->port.icount.frame++;
if (sr & UARTSR1_OR)
- sport->port.icount.overrun++;
+ overrun++;
if (sr & sport->port.ignore_status_mask) {
if (++ignored > 100)
@@ -852,15 +873,33 @@ static irqreturn_t lpuart_rxint(int irq, void *dev_id)
}
out:
+ if (overrun) {
+ sport->port.icount.overrun += overrun;
+
+ /*
+ * Overruns cause FIFO pointers to become misaligned.
+ * Flushing the receive FIFO reinitializes the pointers.
+ */
+ writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
+ writeb(UARTSFIFO_RXOF, sport->port.membase + UARTSFIFO);
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
tty_flip_buffer_push(port);
- return IRQ_HANDLED;
}
-static irqreturn_t lpuart32_rxint(int irq, void *dev_id)
+static void lpuart32_txint(struct lpuart_port *sport)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ lpuart32_transmit_buffer(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+}
+
+static void lpuart32_rxint(struct lpuart_port *sport)
{
- struct lpuart_port *sport = dev_id;
unsigned int flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
unsigned long flags;
@@ -919,7 +958,6 @@ out:
spin_unlock_irqrestore(&sport->port.lock, flags);
tty_flip_buffer_push(port);
- return IRQ_HANDLED;
}
static irqreturn_t lpuart_int(int irq, void *dev_id)
@@ -929,11 +967,11 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
sts = readb(sport->port.membase + UARTSR1);
- if (sts & UARTSR1_RDRF)
- lpuart_rxint(irq, dev_id);
+ if (sts & UARTSR1_RDRF && !sport->lpuart_dma_rx_use)
+ lpuart_rxint(sport);
- if (sts & UARTSR1_TDRE)
- lpuart_txint(irq, dev_id);
+ if (sts & UARTSR1_TDRE && !sport->lpuart_dma_tx_use)
+ lpuart_txint(sport);
return IRQ_HANDLED;
}
@@ -948,10 +986,10 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
rxcount = rxcount >> UARTWATER_RXCNT_OFF;
if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
- lpuart32_rxint(irq, dev_id);
+ lpuart32_rxint(sport);
if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
- lpuart_txint(irq, dev_id);
+ lpuart32_txint(sport);
lpuart32_write(&sport->port, sts, UARTSTAT);
return IRQ_HANDLED;
@@ -982,6 +1020,13 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
unsigned char sr = readb(sport->port.membase + UARTSR1);
if (sr & (UARTSR1_PE | UARTSR1_FE)) {
+ unsigned char cr2;
+
+ /* Disable receiver during this operation... */
+ cr2 = readb(sport->port.membase + UARTCR2);
+ cr2 &= ~UARTCR2_RE;
+ writeb(cr2, sport->port.membase + UARTCR2);
+
/* Read DR to clear the error flags */
readb(sport->port.membase + UARTDR);
@@ -989,6 +1034,25 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.parity++;
else if (sr & UARTSR1_FE)
sport->port.icount.frame++;
+ /*
+ * At this point the parity/framing error is
+ * cleared. However, since the DMA already read
+ * the data register and we had to read it
+ * again after reading the status register to
+ * properly clear the flags, the FIFO actually
+ * underflowed, so the FIFO needs to be
+ * flushed as well.
+ */
+ if (readb(sport->port.membase + UARTSFIFO) &
+ UARTSFIFO_RXUF) {
+ writeb(UARTSFIFO_RXUF,
+ sport->port.membase + UARTSFIFO);
+ writeb(UARTCFIFO_RXFLUSH,
+ sport->port.membase + UARTCFIFO);
+ }
+
+ cr2 |= UARTCR2_RE;
+ writeb(cr2, sport->port.membase + UARTCR2);
}
}
@@ -1097,12 +1161,11 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
if (sport->rx_dma_rng_buf_len < 16)
sport->rx_dma_rng_buf_len = 16;
- ring->buf = kmalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
+ ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
if (!ring->buf)
return -ENOMEM;
sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
- sg_set_buf(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
if (!nent) {
@@ -1340,6 +1403,17 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(cr2_saved, sport->port.membase + UARTCR2);
}
+static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
+{
+ unsigned char cr2;
+
+ lpuart_setup_watermark(sport);
+
+ cr2 = readb(sport->port.membase + UARTCR2);
+ cr2 |= UARTCR2_RIE | UARTCR2_RE | UARTCR2_TE;
+ writeb(cr2, sport->port.membase + UARTCR2);
+}
+
static void lpuart32_setup_watermark(struct lpuart_port *sport)
{
unsigned long val, ctrl;
@@ -1365,109 +1439,90 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
}
-static void rx_dma_timer_init(struct lpuart_port *sport)
-{
- timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
- sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
- add_timer(&sport->lpuart_timer);
-}
-
-static int lpuart_startup(struct uart_port *port)
+static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
{
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned long flags;
- unsigned char temp;
-
- /* determine FIFO size and enable FIFO mode */
- temp = readb(sport->port.membase + UARTPFIFO);
-
- sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
- UARTPFIFO_FIFOSIZE_MASK) + 1);
+ u32 temp;
- sport->port.fifosize = sport->txfifo_size;
+ lpuart32_setup_watermark(sport);
- sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
- UARTPFIFO_FIFOSIZE_MASK) + 1);
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+ temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+}
- spin_lock_irqsave(&sport->port.lock, flags);
+static void rx_dma_timer_init(struct lpuart_port *sport)
+{
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+}
- lpuart_setup_watermark(sport);
+static void lpuart_tx_dma_startup(struct lpuart_port *sport)
+{
+ u32 uartbaud;
- temp = readb(sport->port.membase + UARTCR2);
- temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
- writeb(temp, sport->port.membase + UARTCR2);
+ if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
+ init_waitqueue_head(&sport->dma_wait);
+ sport->lpuart_dma_tx_use = true;
+ if (lpuart_is_32(sport)) {
+ uartbaud = lpuart32_read(&sport->port, UARTBAUD);
+ lpuart32_write(&sport->port,
+ uartbaud | UARTBAUD_TDMAE, UARTBAUD);
+ } else {
+ writeb(readb(sport->port.membase + UARTCR5) |
+ UARTCR5_TDMAS, sport->port.membase + UARTCR5);
+ }
+ } else {
+ sport->lpuart_dma_tx_use = false;
+ }
+}
+static void lpuart_rx_dma_startup(struct lpuart_port *sport)
+{
if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
/* set Rx DMA timeout */
sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
if (!sport->dma_rx_timeout)
- sport->dma_rx_timeout = 1;
+ sport->dma_rx_timeout = 1;
sport->lpuart_dma_rx_use = true;
rx_dma_timer_init(sport);
} else {
sport->lpuart_dma_rx_use = false;
}
-
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
- init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
- temp = readb(port->membase + UARTCR5);
- writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
- } else {
- sport->lpuart_dma_tx_use = false;
- }
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-
- return 0;
}
-static int lpuart32_startup(struct uart_port *port)
+static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned long temp;
-
- /* determine FIFO size */
- temp = lpuart32_read(&sport->port, UARTFIFO);
+ unsigned char temp;
- sport->txfifo_size = 0x1 << (((temp >> UARTFIFO_TXSIZE_OFF) &
- UARTFIFO_FIFOSIZE_MASK) - 1);
+ /* determine FIFO size and enable FIFO mode */
+ temp = readb(sport->port.membase + UARTPFIFO);
+ sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
+ UARTPFIFO_FIFOSIZE_MASK);
sport->port.fifosize = sport->txfifo_size;
- sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
- UARTFIFO_FIFOSIZE_MASK) - 1);
+ sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
+ UARTPFIFO_FIFOSIZE_MASK);
spin_lock_irqsave(&sport->port.lock, flags);
- lpuart32_setup_watermark(sport);
+ lpuart_setup_watermark_enable(sport);
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ lpuart_rx_dma_startup(sport);
+ lpuart_tx_dma_startup(sport);
- if (sport->dma_rx_chan && !lpuart_start_rx_dma(sport)) {
- /* set Rx DMA timeout */
- sport->dma_rx_timeout = msecs_to_jiffies(DMA_RX_TIMEOUT);
- if (!sport->dma_rx_timeout)
- sport->dma_rx_timeout = 1;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- sport->lpuart_dma_rx_use = true;
- rx_dma_timer_init(sport);
- } else {
- sport->lpuart_dma_rx_use = false;
- }
+ return 0;
+}
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(port)) {
- init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
- temp = lpuart32_read(&sport->port, UARTBAUD);
- lpuart32_write(&sport->port, temp | UARTBAUD_TDMAE, UARTBAUD);
- } else {
- sport->lpuart_dma_tx_use = false;
- }
+static void lpuart32_configure(struct lpuart_port *sport)
+{
+ unsigned long temp;
if (sport->lpuart_dma_rx_use) {
/* RXWATER must be 0 */
@@ -1481,27 +1536,40 @@ static int lpuart32_startup(struct uart_port *port)
if (!sport->lpuart_dma_tx_use)
temp |= UARTCTRL_TIE;
lpuart32_write(&sport->port, temp, UARTCTRL);
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
- return 0;
}
-static void lpuart_shutdown(struct uart_port *port)
+static int lpuart32_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned char temp;
unsigned long flags;
+ unsigned long temp;
- spin_lock_irqsave(&port->lock, flags);
+ /* determine FIFO size */
+ temp = lpuart32_read(&sport->port, UARTFIFO);
- /* disable Rx/Tx and interrupts */
- temp = readb(port->membase + UARTCR2);
- temp &= ~(UARTCR2_TE | UARTCR2_RE |
- UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
- writeb(temp, port->membase + UARTCR2);
+ sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
+ UARTFIFO_FIFOSIZE_MASK);
+ sport->port.fifosize = sport->txfifo_size;
+
+ sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
+ UARTFIFO_FIFOSIZE_MASK);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ lpuart32_setup_watermark_enable(sport);
- spin_unlock_irqrestore(&port->lock, flags);
+ lpuart_rx_dma_startup(sport);
+ lpuart_tx_dma_startup(sport);
+
+ lpuart32_configure(sport);
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return 0;
+}
+
+static void lpuart_dma_shutdown(struct lpuart_port *sport)
+{
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
@@ -1513,11 +1581,28 @@ static void lpuart_shutdown(struct uart_port *port)
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
-
- lpuart_stop_tx(port);
}
}
+static void lpuart_shutdown(struct uart_port *port)
+{
+ struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+ unsigned char temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* disable Rx/Tx and interrupts */
+ temp = readb(port->membase + UARTCR2);
+ temp &= ~(UARTCR2_TE | UARTCR2_RE |
+ UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
+ writeb(temp, port->membase + UARTCR2);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ lpuart_dma_shutdown(sport);
+}
+
static void lpuart32_shutdown(struct uart_port *port)
{
struct lpuart_port *sport =
@@ -1535,20 +1620,7 @@ static void lpuart32_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
- if (sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
- lpuart_dma_rx_free(&sport->port);
- }
-
- if (sport->lpuart_dma_tx_use) {
- if (wait_event_interruptible(sport->dma_wait,
- !sport->dma_tx_in_progress)) {
- sport->dma_tx_in_progress = false;
- dmaengine_terminate_all(sport->dma_tx_chan);
- }
-
- lpuart32_stop_tx(port);
- }
+ lpuart_dma_shutdown(sport);
}
static void
@@ -1602,21 +1674,18 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
if (sport->port.rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
- if (termios->c_cflag & CRTSCTS) {
- modem |= (UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
- } else {
- termios->c_cflag &= ~CRTSCTS;
+ if (termios->c_cflag & CRTSCTS)
+ modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
+ else
modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
- }
- if (termios->c_cflag & CSTOPB)
- termios->c_cflag &= ~CSTOPB;
+ termios->c_cflag &= ~CSTOPB;
/* parity must be enabled when CS7 to match 8-bits format */
if ((termios->c_cflag & CSIZE) == CS7)
termios->c_cflag |= PARENB;
- if ((termios->c_cflag & PARENB)) {
+ if (termios->c_cflag & PARENB) {
if (termios->c_cflag & CMSPAR) {
cr1 &= ~UARTCR1_PE;
if (termios->c_cflag & PARODD)
@@ -1655,7 +1724,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
+ sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
sport->port.read_status_mask |= UARTSR1_FE;
@@ -1677,8 +1746,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
- while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
- barrier();
+ lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
/* disable transmit and receive */
writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
@@ -1769,7 +1837,7 @@ lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
tmp |= UARTBAUD_BOTHEDGE;
tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
- tmp |= (((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT);
+ tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
tmp &= ~UARTBAUD_SBR_MASK;
tmp |= sbr & UARTBAUD_SBR_MASK;
@@ -1822,7 +1890,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
}
if (termios->c_cflag & CRTSCTS) {
- modem |= (UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+ modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
} else {
termios->c_cflag &= ~CRTSCTS;
modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
@@ -1871,7 +1939,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= (UARTSTAT_FE | UARTSTAT_PE);
+ sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
sport->port.read_status_mask |= UARTSTAT_FE;
@@ -1893,8 +1961,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
- while (!(lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_TC))
- barrier();
+ lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
@@ -2009,17 +2076,13 @@ static struct lpuart_port *lpuart_ports[UART_NR];
#ifdef CONFIG_SERIAL_FSL_LPUART_CONSOLE
static void lpuart_console_putchar(struct uart_port *port, int ch)
{
- while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
- barrier();
-
+ lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TDRE);
writeb(ch, port->membase + UARTDR);
}
static void lpuart32_console_putchar(struct uart_port *port, int ch)
{
- while (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE))
- barrier();
-
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
lpuart32_write(port, ch, UARTDATA);
}
@@ -2038,15 +2101,14 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
/* first save CR2 and then disable interrupts */
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
- cr2 |= (UARTCR2_TE | UARTCR2_RE);
+ cr2 |= UARTCR2_TE | UARTCR2_RE;
cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
writeb(cr2, sport->port.membase + UARTCR2);
uart_console_write(&sport->port, s, count, lpuart_console_putchar);
/* wait for transmitter finish complete and restore CR2 */
- while (!(readb(sport->port.membase + UARTSR1) & UARTSR1_TC))
- barrier();
+ lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
writeb(old_cr2, sport->port.membase + UARTCR2);
@@ -2069,15 +2131,14 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
/* first save CR2 and then disable interrupts */
cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
- cr |= (UARTCTRL_TE | UARTCTRL_RE);
+ cr |= UARTCTRL_TE | UARTCTRL_RE;
cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
lpuart32_write(&sport->port, cr, UARTCTRL);
uart_console_write(&sport->port, s, count, lpuart32_console_putchar);
/* wait for transmitter finish complete and restore CR2 */
- while (!(lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_TC))
- barrier();
+ lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
lpuart32_write(&sport->port, old_cr, UARTCTRL);
@@ -2288,6 +2349,7 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
@@ -2320,8 +2382,6 @@ static int lpuart_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
- pdev->dev.coherent_dma_mask = 0;
-
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
@@ -2346,10 +2406,8 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.type = PORT_LPUART;
sport->devtype = sdata->devtype;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot obtain irq\n");
+ if (ret < 0)
return ret;
- }
sport->port.irq = ret;
sport->port.iotype = sdata->iotype;
if (lpuart_is_32(sport))
@@ -2514,22 +2572,14 @@ static int lpuart_resume(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
- unsigned long temp;
if (sport->port.suspended && !irq_wake)
lpuart_enable_clks(sport);
- if (lpuart_is_32(sport)) {
- lpuart32_setup_watermark(sport);
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
- lpuart32_write(&sport->port, temp, UARTCTRL);
- } else {
- lpuart_setup_watermark(sport);
- temp = readb(sport->port.membase + UARTCR2);
- temp |= (UARTCR2_RIE | UARTCR2_TIE | UARTCR2_RE | UARTCR2_TE);
- writeb(temp, sport->port.membase + UARTCR2);
- }
+ if (lpuart_is_32(sport))
+ lpuart32_setup_watermark_enable(sport);
+ else
+ lpuart_setup_watermark_enable(sport);
if (sport->lpuart_dma_rx_use) {
if (irq_wake) {
@@ -2540,36 +2590,10 @@ static int lpuart_resume(struct device *dev)
}
}
- if (sport->dma_tx_chan && !lpuart_dma_tx_request(&sport->port)) {
- init_waitqueue_head(&sport->dma_wait);
- sport->lpuart_dma_tx_use = true;
- if (lpuart_is_32(sport)) {
- temp = lpuart32_read(&sport->port, UARTBAUD);
- lpuart32_write(&sport->port,
- temp | UARTBAUD_TDMAE, UARTBAUD);
- } else {
- writeb(readb(sport->port.membase + UARTCR5) |
- UARTCR5_TDMAS, sport->port.membase + UARTCR5);
- }
- } else {
- sport->lpuart_dma_tx_use = false;
- }
+ lpuart_tx_dma_startup(sport);
- if (lpuart_is_32(sport)) {
- if (sport->lpuart_dma_rx_use) {
- /* RXWATER must be 0 */
- temp = lpuart32_read(&sport->port, UARTWATER);
- temp &= ~(UARTWATER_WATER_MASK <<
- UARTWATER_RXWATER_OFF);
- lpuart32_write(&sport->port, temp, UARTWATER);
- }
- temp = lpuart32_read(&sport->port, UARTCTRL);
- if (!sport->lpuart_dma_rx_use)
- temp |= UARTCTRL_RIE;
- if (!sport->lpuart_dma_tx_use)
- temp |= UARTCTRL_TIE;
- lpuart32_write(&sport->port, temp, UARTCTRL);
- }
+ if (lpuart_is_32(sport))
+ lpuart32_configure(sport);
uart_resume_port(&lpuart_reg, &sport->port);
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index ad374f7c476d..624f3d541c68 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -207,8 +207,6 @@ static int get_port_memory(struct icom_port *icom_port)
return -ENOMEM;
}
- memset(icom_port->statStg, 0, 4096);
-
/* FODs: Frame Out Descriptor Queue, this is a FIFO queue that
indicates that frames are to be transmitted
*/
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 57d6e6ba556e..87c58f9f6390 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -402,12 +402,6 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
-/* called with port.lock taken and irqs caller dependent */
-static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2)
-{
- *ucr2 |= UCR2_CTSC;
-}
-
/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
@@ -445,7 +439,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
return;
ucr1 = imx_uart_readl(sport, UCR1);
- imx_uart_writel(sport, ucr1 & ~UCR1_TXMPTYEN, UCR1);
+ imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
/* in rs485 mode disable transmitter if shifter is empty */
if (port->rs485.flags & SER_RS485_ENABLED &&
@@ -523,7 +517,7 @@ static inline void imx_uart_transmit_buffer(struct imx_port *sport)
* and the TX IRQ is disabled.
**/
ucr1 = imx_uart_readl(sport, UCR1);
- ucr1 &= ~UCR1_TXMPTYEN;
+ ucr1 &= ~UCR1_TRDYEN;
if (sport->dma_is_txing) {
ucr1 |= UCR1_TXDMAEN;
imx_uart_writel(sport, ucr1, UCR1);
@@ -685,7 +679,7 @@ static void imx_uart_start_tx(struct uart_port *port)
if (!sport->dma_is_enabled) {
ucr1 = imx_uart_readl(sport, UCR1);
- imx_uart_writel(sport, ucr1 | UCR1_TXMPTYEN, UCR1);
+ imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1);
}
if (sport->dma_is_enabled) {
@@ -694,7 +688,7 @@ static void imx_uart_start_tx(struct uart_port *port)
* disable TX DMA to let TX interrupt to send X-char */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~UCR1_TXDMAEN;
- ucr1 |= UCR1_TXMPTYEN;
+ ucr1 |= UCR1_TRDYEN;
imx_uart_writel(sport, ucr1, UCR1);
return;
}
@@ -880,7 +874,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
usr1 &= ~USR1_RRDY;
if ((ucr2 & UCR2_ATEN) == 0)
usr1 &= ~USR1_AGTIM;
- if ((ucr1 & UCR1_TXMPTYEN) == 0)
+ if ((ucr1 & UCR1_TRDYEN) == 0)
usr1 &= ~USR1_TRDY;
if ((ucr4 & UCR4_TCEN) == 0)
usr2 &= ~USR2_TXDC;
@@ -969,10 +963,22 @@ static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (!(port->rs485.flags & SER_RS485_ENABLED)) {
u32 ucr2;
+ /*
+ * Turn off autoRTS if RTS is lowered and restore autoRTS
+ * setting if RTS is raised.
+ */
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
- if (mctrl & TIOCM_RTS)
- ucr2 |= UCR2_CTS | UCR2_CTSC;
+ if (mctrl & TIOCM_RTS) {
+ ucr2 |= UCR2_CTS;
+ /*
+ * UCR2_IRTS is unset if and only if the port is
+ * configured for CRTSCTS, so we use inverted UCR2_IRTS
+ * to get the state to restore to.
+ */
+ if (!(ucr2 & UCR2_IRTS))
+ ucr2 |= UCR2_CTSC;
+ }
imx_uart_writel(sport, ucr2, UCR2);
}
@@ -1468,7 +1474,7 @@ static void imx_uart_shutdown(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
ucr1 = imx_uart_readl(sport, UCR1);
- ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
+ ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
imx_uart_writel(sport, ucr1, UCR1);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1535,11 +1541,11 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
- u32 ucr2, old_ucr1, old_ucr2, ufcr;
+ u32 ucr2, old_ucr2, ufcr;
unsigned int baud, quot;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
unsigned long div;
- unsigned long num, denom;
+ unsigned long num, denom, old_ubir, old_ubmr;
uint64_t tdiv64;
/*
@@ -1587,8 +1593,14 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
else
imx_uart_rts_inactive(sport, &ucr2);
- } else if (termios->c_cflag & CRTSCTS)
- imx_uart_rts_auto(sport, &ucr2);
+ } else if (termios->c_cflag & CRTSCTS) {
+ /*
+ * Only let receiver control RTS output if we were not requested
+ * to have RTS inactive (which then should take precedence).
+ */
+ if (ucr2 & UCR2_CTS)
+ ucr2 |= UCR2_CTSC;
+ }
if (termios->c_cflag & CRTSCTS)
ucr2 &= ~UCR2_IRTS;
@@ -1631,21 +1643,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
*/
uart_update_timeout(port, termios->c_cflag, baud);
- /*
- * disable interrupts and drain transmitter
- */
- old_ucr1 = imx_uart_readl(sport, UCR1);
- imx_uart_writel(sport,
- old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
- UCR1);
- imx_uart_writel(sport, old_ucr2 & ~UCR2_ATEN, UCR2);
-
- while (!(imx_uart_readl(sport, USR2) & USR2_TXDC))
- barrier();
-
- /* then, disable everything */
- imx_uart_writel(sport, old_ucr2 & ~(UCR2_TXEN | UCR2_RXEN | UCR2_ATEN), UCR2);
-
/* custom-baudrate handling */
div = sport->port.uartclk / (baud * 16);
if (baud == 38400 && quot != div)
@@ -1673,15 +1670,26 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
imx_uart_writel(sport, ufcr, UFCR);
- imx_uart_writel(sport, num, UBIR);
- imx_uart_writel(sport, denom, UBMR);
+ /*
+ * The two registers below should always both be written, and in this
+ * particular order. One consequence is that we need to check whether either
+ * of them changes and then update both. The check for change is needed
+ * because even writing the same values seems to "restart" the
+ * transmission/receiving logic in the hardware, which leads to data
+ * breakage even when the rate doesn't in fact change. E.g., the user switches
+ * RTS/CTS handshake and suddenly gets broken bytes.
+ */
+ old_ubir = imx_uart_readl(sport, UBIR);
+ old_ubmr = imx_uart_readl(sport, UBMR);
+ if (old_ubir != num || old_ubmr != denom) {
+ imx_uart_writel(sport, num, UBIR);
+ imx_uart_writel(sport, denom, UBMR);
+ }
if (!imx_uart_is_imx1(sport))
imx_uart_writel(sport, sport->port.uartclk / div / 1000,
IMX21_ONEMS);
- imx_uart_writel(sport, old_ucr1, UCR1);
-
imx_uart_writel(sport, ucr2, UCR2);
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
@@ -1770,7 +1778,7 @@ static int imx_uart_poll_init(struct uart_port *port)
ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
- ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN | UCR1_RRDYEN);
+ ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN);
ucr2 |= UCR2_RXEN;
ucr2 &= ~UCR2_ATEN;
@@ -1930,7 +1938,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
if (imx_uart_is_imx1(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
- ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
+ ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
@@ -2286,7 +2294,7 @@ static int imx_uart_probe(struct platform_device *pdev)
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
- UCR1_TXMPTYEN | UCR1_RTSDEN);
+ UCR1_TRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
deleted file mode 100644
index d8a1cdd6a53d..000000000000
--- a/drivers/tty/serial/ioc3_serial.c
+++ /dev/null
@@ -1,2195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-/*
- * This file contains a module version of the ioc3 serial driver. This
- * includes all the support functions needed (support functions, etc.)
- * and the serial driver itself.
- */
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/circ_buf.h>
-#include <linux/serial_reg.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/serial_core.h>
-#include <linux/ioc3.h>
-#include <linux/slab.h>
-
-/*
- * Interesting things about the ioc3
- */
-
-#define LOGICAL_PORTS 2 /* rs232(0) and rs422(1) */
-#define PORTS_PER_CARD 2
-#define LOGICAL_PORTS_PER_CARD (PORTS_PER_CARD * LOGICAL_PORTS)
-#define MAX_CARDS 8
-#define MAX_LOGICAL_PORTS (LOGICAL_PORTS_PER_CARD * MAX_CARDS)
-
-/* determine given the sio_ir what port it applies to */
-#define GET_PORT_FROM_SIO_IR(_x) (_x & SIO_IR_SA) ? 0 : 1
-
-
-/*
- * we have 2 logical ports (rs232, rs422) for each physical port
- * evens are rs232, odds are rs422
- */
-#define GET_PHYSICAL_PORT(_x) ((_x) >> 1)
-#define GET_LOGICAL_PORT(_x) ((_x) & 1)
-#define IS_PHYSICAL_PORT(_x) !((_x) & 1)
-#define IS_RS232(_x) !((_x) & 1)
-
-static unsigned int Num_of_ioc3_cards;
-static unsigned int Submodule_slot;
-
-/* defining this will get you LOTS of great debug info */
-//#define DEBUG_INTERRUPTS
-#define DPRINT_CONFIG(_x...) ;
-//#define DPRINT_CONFIG(_x...) printk _x
-#define NOT_PROGRESS() ;
-//#define NOT_PROGRESS() printk("%s : fails %d\n", __func__, __LINE__)
-
-/* number of characters we want to transmit to the lower level at a time */
-#define MAX_CHARS 256
-#define FIFO_SIZE (MAX_CHARS-1) /* it's a uchar */
-
-/* Device name we're using */
-#define DEVICE_NAME "ttySIOC"
-#define DEVICE_MAJOR 204
-#define DEVICE_MINOR 116
-
-/* flags for next_char_state */
-#define NCS_BREAK 0x1
-#define NCS_PARITY 0x2
-#define NCS_FRAMING 0x4
-#define NCS_OVERRUN 0x8
-
-/* cause we need SOME parameters ... */
-#define MIN_BAUD_SUPPORTED 1200
-#define MAX_BAUD_SUPPORTED 115200
-
-/* protocol types supported */
-#define PROTO_RS232 0
-#define PROTO_RS422 1
-
-/* Notification types */
-#define N_DATA_READY 0x01
-#define N_OUTPUT_LOWAT 0x02
-#define N_BREAK 0x04
-#define N_PARITY_ERROR 0x08
-#define N_FRAMING_ERROR 0x10
-#define N_OVERRUN_ERROR 0x20
-#define N_DDCD 0x40
-#define N_DCTS 0x80
-
-#define N_ALL_INPUT (N_DATA_READY | N_BREAK \
- | N_PARITY_ERROR | N_FRAMING_ERROR \
- | N_OVERRUN_ERROR | N_DDCD | N_DCTS)
-
-#define N_ALL_OUTPUT N_OUTPUT_LOWAT
-
-#define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR \
- | N_OVERRUN_ERROR)
-
-#define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK \
- | N_PARITY_ERROR | N_FRAMING_ERROR \
- | N_OVERRUN_ERROR | N_DDCD | N_DCTS)
-
-#define SER_CLK_SPEED(prediv) ((22000000 << 1) / prediv)
-#define SER_DIVISOR(x, clk) (((clk) + (x) * 8) / ((x) * 16))
-#define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div))
-
-/* Some masks */
-#define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \
- | UART_LCR_WLEN7 | UART_LCR_WLEN8)
-#define LCR_MASK_STOP_BITS (UART_LCR_STOP)
-
-#define PENDING(_a, _p) (readl(&(_p)->vma->sio_ir) & (_a)->ic_enable)
-
-#define RING_BUF_SIZE 4096
-#define BUF_SIZE_BIT SBBR_L_SIZE
-#define PROD_CONS_MASK PROD_CONS_PTR_4K
-
-#define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
-
-/* driver specific - one per card */
-struct ioc3_card {
- struct {
- /* uart ports are allocated here */
- struct uart_port icp_uart_port[LOGICAL_PORTS];
- /* the ioc3_port used for this port */
- struct ioc3_port *icp_port;
- } ic_port[PORTS_PER_CARD];
- /* currently enabled interrupts */
- uint32_t ic_enable;
-};
-
-/* Local port info for each IOC3 serial port */
-struct ioc3_port {
- /* handy reference material */
- struct uart_port *ip_port;
- struct ioc3_card *ip_card;
- struct ioc3_driver_data *ip_idd;
- struct ioc3_submodule *ip_is;
-
- /* pci mem addresses for this port */
- struct ioc3_serialregs __iomem *ip_serial_regs;
- struct ioc3_uartregs __iomem *ip_uart_regs;
-
- /* Ring buffer page for this port */
- dma_addr_t ip_dma_ringbuf;
- /* vaddr of ring buffer */
- struct ring_buffer *ip_cpu_ringbuf;
-
- /* Rings for this port */
- struct ring *ip_inring;
- struct ring *ip_outring;
-
- /* Hook to port specific values */
- struct port_hooks *ip_hooks;
-
- spinlock_t ip_lock;
-
- /* Various rx/tx parameters */
- int ip_baud;
- int ip_tx_lowat;
- int ip_rx_timeout;
-
- /* Copy of notification bits */
- int ip_notify;
-
- /* Shadow copies of various registers so we don't need to PIO
- * read them constantly
- */
- uint32_t ip_sscr;
- uint32_t ip_tx_prod;
- uint32_t ip_rx_cons;
- unsigned char ip_flags;
-};
-
-/* tx low water mark. We need to notify the driver whenever tx is getting
- * close to empty so it can refill the tx buffer and keep things going.
- * Let's assume that if we interrupt 1 ms before the tx goes idle, we'll
- * have no trouble getting in more chars in time (I certainly hope so).
- */
-#define TX_LOWAT_LATENCY 1000
-#define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
-#define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
-
-/* Flags per port */
-#define INPUT_HIGH 0x01
- /* used to signify that we have turned off the rx_high
- * temporarily - we need to drain the fifo and don't
- * want to get blasted with interrupts.
- */
-#define DCD_ON 0x02
- /* DCD state is on */
-#define LOWAT_WRITTEN 0x04
-#define READ_ABORTED 0x08
- /* the read was aborted - used to avaoid infinate looping
- * in the interrupt handler
- */
-#define INPUT_ENABLE 0x10
-
-/* Since each port has different register offsets and bitmasks
- * for everything, we'll store those that we need in tables so we
- * don't have to be constantly checking the port we are dealing with.
- */
-struct port_hooks {
- uint32_t intr_delta_dcd;
- uint32_t intr_delta_cts;
- uint32_t intr_tx_mt;
- uint32_t intr_rx_timer;
- uint32_t intr_rx_high;
- uint32_t intr_tx_explicit;
- uint32_t intr_clear;
- uint32_t intr_all;
- char rs422_select_pin;
-};
-
-static struct port_hooks hooks_array[PORTS_PER_CARD] = {
- /* values for port A */
- {
- .intr_delta_dcd = SIO_IR_SA_DELTA_DCD,
- .intr_delta_cts = SIO_IR_SA_DELTA_CTS,
- .intr_tx_mt = SIO_IR_SA_TX_MT,
- .intr_rx_timer = SIO_IR_SA_RX_TIMER,
- .intr_rx_high = SIO_IR_SA_RX_HIGH,
- .intr_tx_explicit = SIO_IR_SA_TX_EXPLICIT,
- .intr_clear = (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL
- | SIO_IR_SA_RX_HIGH
- | SIO_IR_SA_RX_TIMER
- | SIO_IR_SA_DELTA_DCD
- | SIO_IR_SA_DELTA_CTS
- | SIO_IR_SA_INT
- | SIO_IR_SA_TX_EXPLICIT
- | SIO_IR_SA_MEMERR),
- .intr_all = SIO_IR_SA,
- .rs422_select_pin = GPPR_UARTA_MODESEL_PIN,
- },
-
- /* values for port B */
- {
- .intr_delta_dcd = SIO_IR_SB_DELTA_DCD,
- .intr_delta_cts = SIO_IR_SB_DELTA_CTS,
- .intr_tx_mt = SIO_IR_SB_TX_MT,
- .intr_rx_timer = SIO_IR_SB_RX_TIMER,
- .intr_rx_high = SIO_IR_SB_RX_HIGH,
- .intr_tx_explicit = SIO_IR_SB_TX_EXPLICIT,
- .intr_clear = (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL
- | SIO_IR_SB_RX_HIGH
- | SIO_IR_SB_RX_TIMER
- | SIO_IR_SB_DELTA_DCD
- | SIO_IR_SB_DELTA_CTS
- | SIO_IR_SB_INT
- | SIO_IR_SB_TX_EXPLICIT
- | SIO_IR_SB_MEMERR),
- .intr_all = SIO_IR_SB,
- .rs422_select_pin = GPPR_UARTB_MODESEL_PIN,
- }
-};
-
-struct ring_entry {
- union {
- struct {
- uint32_t alldata;
- uint32_t allsc;
- } all;
- struct {
- char data[4]; /* data bytes */
- char sc[4]; /* status/control */
- } s;
- } u;
-};
-
-/* Test the valid bits in any of the 4 sc chars using "allsc" member */
-#define RING_ANY_VALID \
- ((uint32_t)(RXSB_MODEM_VALID | RXSB_DATA_VALID) * 0x01010101)
-
-#define ring_sc u.s.sc
-#define ring_data u.s.data
-#define ring_allsc u.all.allsc
-
-/* Number of entries per ring buffer. */
-#define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
-
-/* An individual ring */
-struct ring {
- struct ring_entry entries[ENTRIES_PER_RING];
-};
-
-/* The whole enchilada */
-struct ring_buffer {
- struct ring TX_A;
- struct ring RX_A;
- struct ring TX_B;
- struct ring RX_B;
-};
-
-/* Get a ring from a port struct */
-#define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh)
-
-/* for Infinite loop detection */
-#define MAXITER 10000000
-
-
-/**
- * set_baud - Baud rate setting code
- * @port: port to set
- * @baud: baud rate to use
- */
-static int set_baud(struct ioc3_port *port, int baud)
-{
- int divisor;
- int actual_baud;
- int diff;
- int lcr, prediv;
- struct ioc3_uartregs __iomem *uart;
-
- for (prediv = 6; prediv < 64; prediv++) {
- divisor = SER_DIVISOR(baud, SER_CLK_SPEED(prediv));
- if (!divisor)
- continue; /* invalid divisor */
- actual_baud = DIVISOR_TO_BAUD(divisor, SER_CLK_SPEED(prediv));
-
- diff = actual_baud - baud;
- if (diff < 0)
- diff = -diff;
-
- /* if we're within 1% we've found a match */
- if (diff * 100 <= actual_baud)
- break;
- }
-
- /* if the above loop completed, we didn't match
- * the baud rate. give up.
- */
- if (prediv == 64) {
- NOT_PROGRESS();
- return 1;
- }
-
- uart = port->ip_uart_regs;
- lcr = readb(&uart->iu_lcr);
-
- writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
- writeb((unsigned char)divisor, &uart->iu_dll);
- writeb((unsigned char)(divisor >> 8), &uart->iu_dlm);
- writeb((unsigned char)prediv, &uart->iu_scr);
- writeb((unsigned char)lcr, &uart->iu_lcr);
-
- return 0;
-}
-
-/**
- * get_ioc3_port - given a uart port, return the control structure
- * @the_port: uart port to find
- */
-static struct ioc3_port *get_ioc3_port(struct uart_port *the_port)
-{
- struct ioc3_driver_data *idd = dev_get_drvdata(the_port->dev);
- struct ioc3_card *card_ptr = idd->data[Submodule_slot];
- int ii, jj;
-
- if (!card_ptr) {
- NOT_PROGRESS();
- return NULL;
- }
- for (ii = 0; ii < PORTS_PER_CARD; ii++) {
- for (jj = 0; jj < LOGICAL_PORTS; jj++) {
- if (the_port == &card_ptr->ic_port[ii].icp_uart_port[jj])
- return card_ptr->ic_port[ii].icp_port;
- }
- }
- NOT_PROGRESS();
- return NULL;
-}
-
-/**
- * port_init - Initialize the sio and ioc3 hardware for a given port
- * called per port from attach...
- * @port: port to initialize
- */
-static inline int port_init(struct ioc3_port *port)
-{
- uint32_t sio_cr;
- struct port_hooks *hooks = port->ip_hooks;
- struct ioc3_uartregs __iomem *uart;
- int reset_loop_counter = 0xfffff;
- struct ioc3_driver_data *idd = port->ip_idd;
-
- /* Idle the IOC3 serial interface */
- writel(SSCR_RESET, &port->ip_serial_regs->sscr);
-
- /* Wait until any pending bus activity for this port has ceased */
- do {
- sio_cr = readl(&idd->vma->sio_cr);
- if (reset_loop_counter-- <= 0) {
- printk(KERN_WARNING
- "IOC3 unable to come out of reset"
- " scr 0x%x\n", sio_cr);
- return -1;
- }
- } while (!(sio_cr & SIO_CR_ARB_DIAG_IDLE) &&
- (((sio_cr &= SIO_CR_ARB_DIAG) == SIO_CR_ARB_DIAG_TXA)
- || sio_cr == SIO_CR_ARB_DIAG_TXB
- || sio_cr == SIO_CR_ARB_DIAG_RXA
- || sio_cr == SIO_CR_ARB_DIAG_RXB));
-
- /* Finish reset sequence */
- writel(0, &port->ip_serial_regs->sscr);
-
- /* Once RESET is done, reload cached tx_prod and rx_cons values
- * and set rings to empty by making prod == cons
- */
- port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
- writel(port->ip_tx_prod, &port->ip_serial_regs->stpir);
- port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
- writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir);
-
- /* Disable interrupts for this 16550 */
- uart = port->ip_uart_regs;
- writeb(0, &uart->iu_lcr);
- writeb(0, &uart->iu_ier);
-
- /* Set the default baud */
- set_baud(port, port->ip_baud);
-
- /* Set line control to 8 bits no parity */
- writeb(UART_LCR_WLEN8 | 0, &uart->iu_lcr);
- /* UART_LCR_STOP == 1 stop */
-
- /* Enable the FIFOs */
- writeb(UART_FCR_ENABLE_FIFO, &uart->iu_fcr);
- /* then reset 16550 FIFOs */
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
- &uart->iu_fcr);
-
- /* Clear modem control register */
- writeb(0, &uart->iu_mcr);
-
- /* Clear deltas in modem status register */
- writel(0, &port->ip_serial_regs->shadow);
-
- /* Only do this once per port pair */
- if (port->ip_hooks == &hooks_array[0]) {
- unsigned long ring_pci_addr;
- uint32_t __iomem *sbbr_l, *sbbr_h;
-
- sbbr_l = &idd->vma->sbbr_l;
- sbbr_h = &idd->vma->sbbr_h;
- ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
- DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n",
- __func__, (void *)ring_pci_addr));
-
- writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h);
- writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l);
- }
-
- /* Set the receive timeout value to 10 msec */
- writel(SRTR_HZ / 100, &port->ip_serial_regs->srtr);
-
- /* Set rx threshold, enable DMA */
- /* Set high water mark at 3/4 of full ring */
- port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
-
- /* uart experiences pauses at high baud rate reducing actual
- * throughput by 10% or so unless we enable high speed polling
- * XXX when this hardware bug is resolved we should revert to
- * normal polling speed
- */
- port->ip_sscr |= SSCR_HIGH_SPD;
-
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Disable and clear all serial related interrupt bits */
- port->ip_card->ic_enable &= ~hooks->intr_clear;
- ioc3_disable(port->ip_is, idd, hooks->intr_clear);
- ioc3_ack(port->ip_is, idd, hooks->intr_clear);
- return 0;
-}
-
-/**
- * enable_intrs - enable interrupts
- * @port: port to enable
- * @mask: mask to use
- */
-static void enable_intrs(struct ioc3_port *port, uint32_t mask)
-{
- if ((port->ip_card->ic_enable & mask) != mask) {
- port->ip_card->ic_enable |= mask;
- ioc3_enable(port->ip_is, port->ip_idd, mask);
- }
-}
-
-/**
- * local_open - local open a port
- * @port: port to open
- */
-static inline int local_open(struct ioc3_port *port)
-{
- int spiniter = 0;
-
- port->ip_flags = INPUT_ENABLE;
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & SSCR_DMA_EN) {
- writel(port->ip_sscr | SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
- while ((readl(&port->ip_serial_regs->sscr)
- & SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER) {
- NOT_PROGRESS();
- return -1;
- }
- }
- }
-
- /* Reset the input fifo. If the uart received chars while the port
- * was closed and DMA is not enabled, the uart may have a bunch of
- * chars hanging around in its rx fifo which will not be discarded
- * by rclr in the upper layer. We must get rid of them here.
- */
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
- &port->ip_uart_regs->iu_fcr);
-
- writeb(UART_LCR_WLEN8, &port->ip_uart_regs->iu_lcr);
- /* UART_LCR_STOP == 1 stop */
-
- /* Re-enable DMA, set default threshold to intr whenever there is
- * data available.
- */
- port->ip_sscr &= ~SSCR_RX_THRESHOLD;
- port->ip_sscr |= 1; /* default threshold */
-
- /* Plug in the new sscr. This implicitly clears the DMA_PAUSE
- * flag if it was set above
- */
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- port->ip_tx_lowat = 1;
- return 0;
-}
-
-/**
- * set_rx_timeout - Set rx timeout and threshold values.
- * @port: port to use
- * @timeout: timeout value in ticks
- */
-static inline int set_rx_timeout(struct ioc3_port *port, int timeout)
-{
- int threshold;
-
- port->ip_rx_timeout = timeout;
-
- /* Timeout is in ticks. Let's figure out how many chars we
- * can receive at the current baud rate in that interval
- * and set the rx threshold to that amount. There are 4 chars
- * per ring entry, so we'll divide the number of chars that will
- * arrive in timeout by 4.
- * So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
- */
- threshold = timeout * port->ip_baud / 4000;
- if (threshold == 0)
- threshold = 1; /* otherwise we'll intr all the time! */
-
- if ((unsigned)threshold > (unsigned)SSCR_RX_THRESHOLD)
- return 1;
-
- port->ip_sscr &= ~SSCR_RX_THRESHOLD;
- port->ip_sscr |= threshold;
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Now set the rx timeout to the given value
- * again timeout * SRTR_HZ / HZ
- */
- timeout = timeout * SRTR_HZ / 100;
- if (timeout > SRTR_CNT)
- timeout = SRTR_CNT;
- writel(timeout, &port->ip_serial_regs->srtr);
- return 0;
-}
-
-/**
- * config_port - config the hardware
- * @port: port to config
- * @baud: baud rate for the port
- * @byte_size: data size
- * @stop_bits: number of stop bits
- * @parenb: parity enable ?
- * @parodd: odd parity ?
- */
-static inline int
-config_port(struct ioc3_port *port,
- int baud, int byte_size, int stop_bits, int parenb, int parodd)
-{
- char lcr, sizebits;
- int spiniter = 0;
-
- DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d "
- "parodd %d\n",
- __func__, ((struct uart_port *)port->ip_port)->line,
- baud, byte_size, stop_bits, parenb, parodd));
-
- if (set_baud(port, baud))
- return 1;
-
- switch (byte_size) {
- case 5:
- sizebits = UART_LCR_WLEN5;
- break;
- case 6:
- sizebits = UART_LCR_WLEN6;
- break;
- case 7:
- sizebits = UART_LCR_WLEN7;
- break;
- case 8:
- sizebits = UART_LCR_WLEN8;
- break;
- default:
- return 1;
- }
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & SSCR_DMA_EN) {
- writel(port->ip_sscr | SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
- while ((readl(&port->ip_serial_regs->sscr)
- & SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER)
- return -1;
- }
- }
-
- /* Clear relevant fields in lcr */
- lcr = readb(&port->ip_uart_regs->iu_lcr);
- lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
- UART_LCR_PARITY | LCR_MASK_STOP_BITS);
-
- /* Set byte size in lcr */
- lcr |= sizebits;
-
- /* Set parity */
- if (parenb) {
- lcr |= UART_LCR_PARITY;
- if (!parodd)
- lcr |= UART_LCR_EPAR;
- }
-
- /* Set stop bits */
- if (stop_bits)
- lcr |= UART_LCR_STOP /* 2 stop bits */ ;
-
- writeb(lcr, &port->ip_uart_regs->iu_lcr);
-
- /* Re-enable the DMA interface if necessary */
- if (port->ip_sscr & SSCR_DMA_EN) {
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- port->ip_baud = baud;
-
- /* When we get within this number of ring entries of filling the
- * entire ring on tx, place an EXPLICIT intr to generate a lowat
- * notification when output has drained.
- */
- port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
- if (port->ip_tx_lowat == 0)
- port->ip_tx_lowat = 1;
-
- set_rx_timeout(port, 2);
- return 0;
-}
-
-/**
- * do_write - Write bytes to the port. Returns the number of bytes
- * actually written. Called from transmit_chars
- * @port: port to use
- * @buf: the stuff to write
- * @len: how many bytes in 'buf'
- */
-static inline int do_write(struct ioc3_port *port, char *buf, int len)
-{
- int prod_ptr, cons_ptr, total = 0;
- struct ring *outring;
- struct ring_entry *entry;
- struct port_hooks *hooks = port->ip_hooks;
-
- BUG_ON(!(len >= 0));
-
- prod_ptr = port->ip_tx_prod;
- cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
- outring = port->ip_outring;
-
- /* Maintain a 1-entry red-zone. The ring buffer is full when
- * (cons - prod) % ring_size is 1. Rather than do this subtraction
- * in the body of the loop, I'll do it now.
- */
- cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;
-
- /* Stuff the bytes into the output */
- while ((prod_ptr != cons_ptr) && (len > 0)) {
- int xx;
-
- /* Get 4 bytes (one ring entry) at a time */
- entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);
-
- /* Invalidate all entries */
- entry->ring_allsc = 0;
-
- /* Copy in some bytes */
- for (xx = 0; (xx < 4) && (len > 0); xx++) {
- entry->ring_data[xx] = *buf++;
- entry->ring_sc[xx] = TXCB_VALID;
- len--;
- total++;
- }
-
- /* If we are within some small threshold of filling up the
- * entire ring buffer, we must place an EXPLICIT intr here
- * to generate a lowat interrupt in case we subsequently
- * really do fill up the ring and the caller goes to sleep.
- * No need to place more than one though.
- */
- if (!(port->ip_flags & LOWAT_WRITTEN) &&
- ((cons_ptr - prod_ptr) & PROD_CONS_MASK)
- <= port->ip_tx_lowat * (int)sizeof(struct ring_entry)) {
- port->ip_flags |= LOWAT_WRITTEN;
- entry->ring_sc[0] |= TXCB_INT_WHEN_DONE;
- }
-
- /* Go on to next entry */
- prod_ptr += sizeof(struct ring_entry);
- prod_ptr &= PROD_CONS_MASK;
- }
-
- /* If we sent something, start DMA if necessary */
- if (total > 0 && !(port->ip_sscr & SSCR_DMA_EN)) {
- port->ip_sscr |= SSCR_DMA_EN;
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
-
- /* Store the new producer pointer. If tx is disabled, we stuff the
- * data into the ring buffer, but we don't actually start tx.
- */
- if (!uart_tx_stopped(port->ip_port)) {
- writel(prod_ptr, &port->ip_serial_regs->stpir);
-
- /* If we are now transmitting, enable tx_mt interrupt so we
- * can disable DMA if necessary when the tx finishes.
- */
- if (total > 0)
- enable_intrs(port, hooks->intr_tx_mt);
- }
- port->ip_tx_prod = prod_ptr;
-
- return total;
-}
-
-/**
- * disable_intrs - disable interrupts
- * @port: port to enable
- * @mask: mask to use
- */
-static inline void disable_intrs(struct ioc3_port *port, uint32_t mask)
-{
- if (port->ip_card->ic_enable & mask) {
- ioc3_disable(port->ip_is, port->ip_idd, mask);
- port->ip_card->ic_enable &= ~mask;
- }
-}
-
-/**
- * set_notification - Modify event notification
- * @port: port to use
- * @mask: events mask
- * @set_on: set ?
- */
-static int set_notification(struct ioc3_port *port, int mask, int set_on)
-{
- struct port_hooks *hooks = port->ip_hooks;
- uint32_t intrbits, sscrbits;
-
- BUG_ON(!mask);
-
- intrbits = sscrbits = 0;
-
- if (mask & N_DATA_READY)
- intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
- if (mask & N_OUTPUT_LOWAT)
- intrbits |= hooks->intr_tx_explicit;
- if (mask & N_DDCD) {
- intrbits |= hooks->intr_delta_dcd;
- sscrbits |= SSCR_RX_RING_DCD;
- }
- if (mask & N_DCTS)
- intrbits |= hooks->intr_delta_cts;
-
- if (set_on) {
- enable_intrs(port, intrbits);
- port->ip_notify |= mask;
- port->ip_sscr |= sscrbits;
- } else {
- disable_intrs(port, intrbits);
- port->ip_notify &= ~mask;
- port->ip_sscr &= ~sscrbits;
- }
-
- /* We require DMA if either DATA_READY or DDCD notification is
- * currently requested. If neither of these is requested and
- * there is currently no tx in progress, DMA may be disabled.
- */
- if (port->ip_notify & (N_DATA_READY | N_DDCD))
- port->ip_sscr |= SSCR_DMA_EN;
- else if (!(port->ip_card->ic_enable & hooks->intr_tx_mt))
- port->ip_sscr &= ~SSCR_DMA_EN;
-
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- return 0;
-}
-
-/**
- * set_mcr - set the modem control register (MCR)
- * @the_port: port to use
- * @mask1: mcr mask
- * @mask2: shadow mask
- */
-static inline int set_mcr(struct uart_port *the_port,
- int mask1, int mask2)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
- uint32_t shadow;
- int spiniter = 0;
- char mcr;
-
- if (!port)
- return -1;
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & SSCR_DMA_EN) {
- writel(port->ip_sscr | SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
- while ((readl(&port->ip_serial_regs->sscr)
- & SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER)
- return -1;
- }
- }
- shadow = readl(&port->ip_serial_regs->shadow);
- mcr = (shadow & 0xff000000) >> 24;
-
- /* Set new value */
- mcr |= mask1;
- shadow |= mask2;
- writeb(mcr, &port->ip_uart_regs->iu_mcr);
- writel(shadow, &port->ip_serial_regs->shadow);
-
- /* Re-enable the DMA interface if necessary */
- if (port->ip_sscr & SSCR_DMA_EN) {
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- return 0;
-}
-
-/**
- * ioc3_set_proto - set the protocol for the port
- * @port: port to use
- * @proto: protocol to use
- */
-static int ioc3_set_proto(struct ioc3_port *port, int proto)
-{
- struct port_hooks *hooks = port->ip_hooks;
-
- switch (proto) {
- default:
- case PROTO_RS232:
- /* Clear the appropriate GIO pin */
- DPRINT_CONFIG(("%s: rs232\n", __func__));
- writel(0, (&port->ip_idd->vma->gppr[0]
- + hooks->rs422_select_pin));
- break;
-
- case PROTO_RS422:
- /* Set the appropriate GIO pin */
- DPRINT_CONFIG(("%s: rs422\n", __func__));
- writel(1, (&port->ip_idd->vma->gppr[0]
- + hooks->rs422_select_pin));
- break;
- }
- return 0;
-}
-
-/**
- * transmit_chars - upper level write, called with the_port->lock held
- * @the_port: port to write
- */
-static void transmit_chars(struct uart_port *the_port)
-{
- int xmit_count, tail, head;
- int result;
- char *start;
- struct tty_struct *tty;
- struct ioc3_port *port = get_ioc3_port(the_port);
- struct uart_state *state;
-
- if (!the_port)
- return;
- if (!port)
- return;
-
- state = the_port->state;
- tty = state->port.tty;
-
- if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) {
- /* Nothing to do or hw stopped */
- set_notification(port, N_ALL_OUTPUT, 0);
- return;
- }
-
- head = state->xmit.head;
- tail = state->xmit.tail;
- start = (char *)&state->xmit.buf[tail];
-
- /* write out all the data or until the end of the buffer */
- xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail);
- if (xmit_count > 0) {
- result = do_write(port, start, xmit_count);
- if (result > 0) {
- /* booking */
- xmit_count -= result;
- the_port->icount.tx += result;
- /* advance the pointers */
- tail += result;
- tail &= UART_XMIT_SIZE - 1;
- state->xmit.tail = tail;
- start = (char *)&state->xmit.buf[tail];
- }
- }
- if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(the_port);
-
- if (uart_circ_empty(&state->xmit)) {
- set_notification(port, N_OUTPUT_LOWAT, 0);
- } else {
- set_notification(port, N_OUTPUT_LOWAT, 1);
- }
-}
-
-/**
- * ioc3_change_speed - change the speed of the port
- * @the_port: port to change
- * @new_termios: new termios settings
- * @old_termios: old termios settings
- */
-static void
-ioc3_change_speed(struct uart_port *the_port,
- struct ktermios *new_termios, struct ktermios *old_termios)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
- unsigned int cflag, iflag;
- int baud;
- int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
- struct uart_state *state = the_port->state;
-
- cflag = new_termios->c_cflag;
- iflag = new_termios->c_iflag;
-
- switch (cflag & CSIZE) {
- case CS5:
- new_data = 5;
- break;
- case CS6:
- new_data = 6;
- break;
- case CS7:
- new_data = 7;
- break;
- case CS8:
- new_data = 8;
- break;
- default:
-		/* because we always need a default ... */
- new_data = 5;
- break;
- }
- if (cflag & CSTOPB) {
- new_stop = 1;
- }
- if (cflag & PARENB) {
- new_parity_enable = 1;
- if (cflag & PARODD)
- new_parity = 1;
- }
- baud = uart_get_baud_rate(the_port, new_termios, old_termios,
- MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
- DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud,
- the_port->line));
-
- if (!the_port->fifosize)
- the_port->fifosize = FIFO_SIZE;
- uart_update_timeout(the_port, cflag, baud);
-
- the_port->ignore_status_mask = N_ALL_INPUT;
-
- state->port.low_latency = 1;
-
- if (iflag & IGNPAR)
- the_port->ignore_status_mask &= ~(N_PARITY_ERROR
- | N_FRAMING_ERROR);
- if (iflag & IGNBRK) {
- the_port->ignore_status_mask &= ~N_BREAK;
- if (iflag & IGNPAR)
- the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
- }
- if (!(cflag & CREAD)) {
- /* ignore everything */
- the_port->ignore_status_mask &= ~N_DATA_READY;
- }
-
- if (cflag & CRTSCTS) {
- /* enable hardware flow control */
- port->ip_sscr |= SSCR_HFC_EN;
- }
- else {
- /* disable hardware flow control */
- port->ip_sscr &= ~SSCR_HFC_EN;
- }
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Set the configuration and proper notification call */
- DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o "
- "config_port(baud %d data %d stop %d penable %d "
- " parity %d), notification 0x%x\n",
- __func__, (void *)port, the_port->line, cflag, baud,
- new_data, new_stop, new_parity_enable, new_parity,
- the_port->ignore_status_mask));
-
- if ((config_port(port, baud, /* baud */
- new_data, /* byte size */
- new_stop, /* stop bits */
- new_parity_enable, /* set parity */
- new_parity)) >= 0) { /* parity 1==odd */
- set_notification(port, the_port->ignore_status_mask, 1);
- }
-}
-
-/**
- * ic3_startup_local - Start up the serial port - returns >= 0 if no errors
- * @the_port: Port to operate on
- */
-static inline int ic3_startup_local(struct uart_port *the_port)
-{
- struct ioc3_port *port;
-
- if (!the_port) {
- NOT_PROGRESS();
- return -1;
- }
-
- port = get_ioc3_port(the_port);
- if (!port) {
- NOT_PROGRESS();
- return -1;
- }
-
- local_open(port);
-
- /* set the protocol */
- ioc3_set_proto(port, IS_RS232(the_port->line) ? PROTO_RS232 :
- PROTO_RS422);
- return 0;
-}
-
-/*
- * ioc3_cb_output_lowat - called when the output low water mark is hit
- * @port: port to output
- */
-static void ioc3_cb_output_lowat(struct ioc3_port *port)
-{
- unsigned long pflags;
-
- /* the_port->lock is set on the call here */
- if (port->ip_port) {
- spin_lock_irqsave(&port->ip_port->lock, pflags);
- transmit_chars(port->ip_port);
- spin_unlock_irqrestore(&port->ip_port->lock, pflags);
- }
-}
-
-/*
- * ioc3_cb_post_ncs - update error counters for receive status events
- * @the_port: port to use
- * @ncs: NCS_* event mask
- */
-static void ioc3_cb_post_ncs(struct uart_port *the_port, int ncs)
-{
- struct uart_icount *icount;
-
- icount = &the_port->icount;
-
- if (ncs & NCS_BREAK)
- icount->brk++;
- if (ncs & NCS_FRAMING)
- icount->frame++;
- if (ncs & NCS_OVERRUN)
- icount->overrun++;
- if (ncs & NCS_PARITY)
- icount->parity++;
-}
-
-/**
- * do_read - Read in bytes from the port. Return the number of bytes
- * actually read.
- * @the_port: port to use
- * @buf: place to put the stuff we read
- * @len: how big 'buf' is
- */
-
-static inline int do_read(struct uart_port *the_port, char *buf, int len)
-{
- int prod_ptr, cons_ptr, total;
- struct ioc3_port *port = get_ioc3_port(the_port);
- struct ring *inring;
- struct ring_entry *entry;
- struct port_hooks *hooks;
- int byte_num;
- char *sc;
- int loop_counter;
-
- BUG_ON(!(len >= 0));
- BUG_ON(!port);
- hooks = port->ip_hooks;
-
- /* There is a nasty timing issue in the IOC3. When the rx_timer
- * expires or the rx_high condition arises, we take an interrupt.
- * At some point while servicing the interrupt, we read bytes from
- * the ring buffer and re-arm the rx_timer. However the rx_timer is
- * not started until the first byte is received *after* it is armed,
- * and any bytes pending in the rx construction buffers are not drained
- * to memory until either there are 4 bytes available or the rx_timer
- * expires. This leads to a potential situation where data is left
- * in the construction buffers forever - 1 to 3 bytes were received
- * after the interrupt was generated but before the rx_timer was
- * re-armed. At that point as long as no subsequent bytes are received
- * the timer will never be started and the bytes will remain in the
- * construction buffer forever. The solution is to execute a DRAIN
- * command after rearming the timer. This way any bytes received before
- * the DRAIN will be drained to memory, and any bytes received after
- * the DRAIN will start the TIMER and be drained when it expires.
- * Luckily, this only needs to be done when the DMA buffer is empty
- * since there is no requirement that this function return all
- * available data as long as it returns some.
- */
- /* Re-arm the timer */
-
- writel(port->ip_rx_cons | SRCIR_ARM, &port->ip_serial_regs->srcir);
-
- prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
- cons_ptr = port->ip_rx_cons;
-
- if (prod_ptr == cons_ptr) {
- int reset_dma = 0;
-
- /* Input buffer appears empty, do a flush. */
-
- /* DMA must be enabled for this to work. */
- if (!(port->ip_sscr & SSCR_DMA_EN)) {
- port->ip_sscr |= SSCR_DMA_EN;
- reset_dma = 1;
- }
-
- /* Potential race condition: we must reload the srpir after
- * issuing the drain command, otherwise we could think the rx
- * buffer is empty, then take a very long interrupt, and when
- * we come back it's full and we wait forever for the drain to
- * complete.
- */
- writel(port->ip_sscr | SSCR_RX_DRAIN,
- &port->ip_serial_regs->sscr);
- prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
-
- /* We must not wait for the DRAIN to complete unless there are
- * at least 8 bytes (2 ring entries) available to receive the
- * data otherwise the DRAIN will never complete and we'll
- * deadlock here.
- * In fact, to make things easier, I'll just ignore the flush if
- * there is any data at all now available.
- */
- if (prod_ptr == cons_ptr) {
- loop_counter = 0;
- while (readl(&port->ip_serial_regs->sscr) &
- SSCR_RX_DRAIN) {
- loop_counter++;
- if (loop_counter > MAXITER)
- return -1;
- }
-
- /* SIGH. We have to reload the prod_ptr *again* since
- * the drain may have caused it to change
- */
- prod_ptr = readl(&port->ip_serial_regs->srpir)
- & PROD_CONS_MASK;
- }
- if (reset_dma) {
- port->ip_sscr &= ~SSCR_DMA_EN;
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- }
- inring = port->ip_inring;
- port->ip_flags &= ~READ_ABORTED;
-
- total = 0;
- loop_counter = 0xfffff; /* to avoid hangs */
-
- /* Grab bytes from the hardware */
- while ((prod_ptr != cons_ptr) && (len > 0)) {
- entry = (struct ring_entry *)((caddr_t) inring + cons_ptr);
-
- if (loop_counter-- <= 0) {
- printk(KERN_WARNING "IOC3 serial: "
- "possible hang condition/"
- "port stuck on read (line %d).\n",
- the_port->line);
- break;
- }
-
- /* According to the producer pointer, this ring entry
- * must contain some data. But if the PIO happened faster
- * than the DMA, the data may not be available yet, so let's
- * wait until it arrives.
- */
- if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
- /* Indicate the read is aborted so we don't disable
- * the interrupt thinking that the consumer is
- * congested.
- */
- port->ip_flags |= READ_ABORTED;
- len = 0;
- break;
- }
-
- /* Load the bytes/status out of the ring entry */
- for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
- sc = &(entry->ring_sc[byte_num]);
-
- /* Check for change in modem state or overrun */
- if ((*sc & RXSB_MODEM_VALID)
- && (port->ip_notify & N_DDCD)) {
- /* Notify upper layer if DCD dropped */
- if ((port->ip_flags & DCD_ON)
- && !(*sc & RXSB_DCD)) {
- /* If we have already copied some data,
- * return it. We'll pick up the carrier
- * drop on the next pass. That way we
- * don't throw away the data that has
- * already been copied back to
- * the caller's buffer.
- */
- if (total > 0) {
- len = 0;
- break;
- }
- port->ip_flags &= ~DCD_ON;
-
- /* Turn off this notification so the
- * carrier drop protocol won't see it
- * again when it does a read.
- */
- *sc &= ~RXSB_MODEM_VALID;
-
- /* To keep things consistent, we need
- * to update the consumer pointer so
- * the next reader won't come in and
- * try to read the same ring entries
- * again. This must be done here before
- * the dcd change.
- */
-
- if ((entry->ring_allsc & RING_ANY_VALID)
- == 0) {
- cons_ptr += (int)sizeof
- (struct ring_entry);
- cons_ptr &= PROD_CONS_MASK;
- }
- writel(cons_ptr,
- &port->ip_serial_regs->srcir);
- port->ip_rx_cons = cons_ptr;
-
- /* Notify upper layer of carrier drop */
- if ((port->ip_notify & N_DDCD)
- && port->ip_port) {
- uart_handle_dcd_change
- (port->ip_port, 0);
- wake_up_interruptible
- (&the_port->state->
- port.delta_msr_wait);
- }
-
- /* If we had any data to return, we
- * would have returned it above.
- */
- return 0;
- }
- }
- if (*sc & RXSB_MODEM_VALID) {
- /* Notify that an input overrun occurred */
- if ((*sc & RXSB_OVERRUN)
- && (port->ip_notify & N_OVERRUN_ERROR)) {
- ioc3_cb_post_ncs(the_port, NCS_OVERRUN);
- }
- /* Don't look at this byte again */
- *sc &= ~RXSB_MODEM_VALID;
- }
-
- /* Check for valid data or RX errors */
- if ((*sc & RXSB_DATA_VALID) &&
- ((*sc & (RXSB_PAR_ERR
- | RXSB_FRAME_ERR | RXSB_BREAK))
- && (port->ip_notify & (N_PARITY_ERROR
- | N_FRAMING_ERROR
- | N_BREAK)))) {
- /* There is an error condition on the next byte.
- * If we have already transferred some bytes,
- * we'll stop here. Otherwise if this is the
- * first byte to be read, we'll just transfer
- * it alone after notifying the
- * upper layer of its status.
- */
- if (total > 0) {
- len = 0;
- break;
- } else {
- if ((*sc & RXSB_PAR_ERR) &&
- (port->
- ip_notify & N_PARITY_ERROR)) {
- ioc3_cb_post_ncs(the_port,
- NCS_PARITY);
- }
- if ((*sc & RXSB_FRAME_ERR) &&
- (port->
- ip_notify & N_FRAMING_ERROR)) {
- ioc3_cb_post_ncs(the_port,
- NCS_FRAMING);
- }
- if ((*sc & RXSB_BREAK)
- && (port->ip_notify & N_BREAK)) {
- ioc3_cb_post_ncs
- (the_port, NCS_BREAK);
- }
- len = 1;
- }
- }
- if (*sc & RXSB_DATA_VALID) {
- *sc &= ~RXSB_DATA_VALID;
- *buf = entry->ring_data[byte_num];
- buf++;
- len--;
- total++;
- }
- }
-
- /* If we used up this entry entirely, go on to the next one,
- * otherwise we must have run out of buffer space, so
- * leave the consumer pointer here for the next read in case
- * there are still unread bytes in this entry.
- */
- if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
- cons_ptr += (int)sizeof(struct ring_entry);
- cons_ptr &= PROD_CONS_MASK;
- }
- }
-
- /* Update consumer pointer and re-arm rx timer interrupt */
- writel(cons_ptr, &port->ip_serial_regs->srcir);
- port->ip_rx_cons = cons_ptr;
-
- /* If we have now dipped below the rx high water mark and we have
- * rx_high interrupt turned off, we can now turn it back on again.
- */
- if ((port->ip_flags & INPUT_HIGH) && (((prod_ptr - cons_ptr)
- & PROD_CONS_MASK) <
- ((port->
- ip_sscr &
- SSCR_RX_THRESHOLD)
- << PROD_CONS_PTR_OFF))) {
- port->ip_flags &= ~INPUT_HIGH;
- enable_intrs(port, hooks->intr_rx_high);
- }
- return total;
-}
-
-/**
- * receive_chars - upper level read.
- * @the_port: port to read from
- */
-static int receive_chars(struct uart_port *the_port)
-{
- unsigned char ch[MAX_CHARS];
- int read_count = 0, read_room, flip = 0;
- struct uart_state *state = the_port->state;
- struct ioc3_port *port = get_ioc3_port(the_port);
- unsigned long pflags;
-
- /* Make sure all the pointers are "good" ones */
- if (!state)
- return 0;
-
- if (!(port->ip_flags & INPUT_ENABLE))
- return 0;
-
- spin_lock_irqsave(&the_port->lock, pflags);
-
- read_count = do_read(the_port, ch, MAX_CHARS);
- if (read_count > 0) {
- flip = 1;
- read_room = tty_insert_flip_string(&state->port, ch,
- read_count);
- the_port->icount.rx += read_count;
- }
- spin_unlock_irqrestore(&the_port->lock, pflags);
-
- if (flip)
- tty_flip_buffer_push(&state->port);
-
- return read_count;
-}
-
-/**
- * ioc3uart_intr_one - lowest level (per port) interrupt handler.
- * @is : submodule
- * @idd: driver data
- * @pending: interrupts to handle
- */
-
-static inline int
-ioc3uart_intr_one(struct ioc3_submodule *is,
- struct ioc3_driver_data *idd,
- unsigned int pending)
-{
- int port_num = GET_PORT_FROM_SIO_IR(pending);
- struct port_hooks *hooks;
- unsigned int rx_high_rd_aborted = 0;
- unsigned long flags;
- struct uart_port *the_port;
- struct ioc3_port *port;
- int loop_counter;
- struct ioc3_card *card_ptr;
- unsigned int sio_ir;
-
- card_ptr = idd->data[is->id];
- port = card_ptr->ic_port[port_num].icp_port;
- hooks = port->ip_hooks;
-
- /* Possible race condition here: The tx_mt interrupt bit may be
- * cleared without the intervention of the interrupt handler,
- * e.g. by a write. If the top level interrupt handler reads a
- * tx_mt, then some other processor does a write, starting up
- * output, then we come in here, see the tx_mt and stop DMA, the
- * output started by the other processor will hang. Thus we can
- * only rely on tx_mt being legitimate if it is read while the
- * port lock is held. Therefore this bit must be ignored in the
- * passed in interrupt mask which was read by the top level
- * interrupt handler since the port lock was not held at the time
- * it was read. We can only rely on this bit being accurate if it
- * is read while the port lock is held. So we'll clear it for now,
- * and reload it later once we have the port lock.
- */
-
- sio_ir = pending & ~(hooks->intr_tx_mt);
- spin_lock_irqsave(&port->ip_lock, flags);
-
- loop_counter = MAXITER; /* to avoid hangs */
-
- do {
- uint32_t shadow;
-
- if (loop_counter-- <= 0) {
- printk(KERN_WARNING "IOC3 serial: "
- "possible hang condition/"
- "port stuck on interrupt (line %d).\n",
- ((struct uart_port *)port->ip_port)->line);
- break;
- }
- /* Handle a DCD change */
- if (sio_ir & hooks->intr_delta_dcd) {
- ioc3_ack(is, idd, hooks->intr_delta_dcd);
- shadow = readl(&port->ip_serial_regs->shadow);
-
- if ((port->ip_notify & N_DDCD)
- && (shadow & SHADOW_DCD)
- && (port->ip_port)) {
- the_port = port->ip_port;
- uart_handle_dcd_change(the_port,
- shadow & SHADOW_DCD);
- wake_up_interruptible
- (&the_port->state->port.delta_msr_wait);
- } else if ((port->ip_notify & N_DDCD)
- && !(shadow & SHADOW_DCD)) {
- /* Flag delta DCD/no DCD */
- uart_handle_dcd_change(port->ip_port,
- shadow & SHADOW_DCD);
- port->ip_flags |= DCD_ON;
- }
- }
-
- /* Handle a CTS change */
- if (sio_ir & hooks->intr_delta_cts) {
- ioc3_ack(is, idd, hooks->intr_delta_cts);
- shadow = readl(&port->ip_serial_regs->shadow);
-
- if ((port->ip_notify & N_DCTS) && (port->ip_port)) {
- the_port = port->ip_port;
- uart_handle_cts_change(the_port, shadow
- & SHADOW_CTS);
- wake_up_interruptible
- (&the_port->state->port.delta_msr_wait);
- }
- }
-
- /* rx timeout interrupt. Must be some data available. Put this
- * before the check for rx_high since servicing this condition
- * may cause that condition to clear.
- */
- if (sio_ir & hooks->intr_rx_timer) {
- ioc3_ack(is, idd, hooks->intr_rx_timer);
- if ((port->ip_notify & N_DATA_READY)
- && (port->ip_port)) {
- receive_chars(port->ip_port);
- }
- }
-
- /* rx high interrupt. Must be after rx_timer. */
- else if (sio_ir & hooks->intr_rx_high) {
- /* Data available, notify upper layer */
- if ((port->ip_notify & N_DATA_READY) && port->ip_port) {
- receive_chars(port->ip_port);
- }
-
- /* We can't ACK this interrupt. If receive_chars didn't
- * cause the condition to clear, we'll have to disable
- * the interrupt until the data is drained.
- * If the read was aborted, don't disable the interrupt
- * as this may cause us to hang indefinitely. An
- * aborted read generally means that this interrupt
- * hasn't been delivered to the cpu yet anyway, even
- * though we see it as asserted when we read the sio_ir.
- */
- if ((sio_ir = PENDING(card_ptr, idd))
- & hooks->intr_rx_high) {
- if (port->ip_flags & READ_ABORTED) {
- rx_high_rd_aborted++;
- }
- else {
- card_ptr->ic_enable &= ~hooks->intr_rx_high;
- port->ip_flags |= INPUT_HIGH;
- }
- }
- }
-
- /* We got a low water interrupt: notify upper layer to
- * send more data. Must come before tx_mt since servicing
- * this condition may cause that condition to clear.
- */
- if (sio_ir & hooks->intr_tx_explicit) {
- port->ip_flags &= ~LOWAT_WRITTEN;
- ioc3_ack(is, idd, hooks->intr_tx_explicit);
- if (port->ip_notify & N_OUTPUT_LOWAT)
- ioc3_cb_output_lowat(port);
- }
-
- /* Handle tx_mt. Must come after tx_explicit. */
- else if (sio_ir & hooks->intr_tx_mt) {
- /* If we are expecting a lowat notification
- * and we get to this point it probably means that for
- * some reason the tx_explicit didn't work as expected
- * (that can legitimately happen if the output buffer is
- * filled up in just the right way).
- * So send the notification now.
- */
- if (port->ip_notify & N_OUTPUT_LOWAT) {
- ioc3_cb_output_lowat(port);
-
- /* We need to reload the sio_ir since the lowat
- * call may have caused another write to occur,
- * clearing the tx_mt condition.
- */
- sio_ir = PENDING(card_ptr, idd);
- }
-
- /* If the tx_mt condition still persists even after the
- * lowat call, we've got some work to do.
- */
- if (sio_ir & hooks->intr_tx_mt) {
- /* If we are not currently expecting DMA input,
- * and the transmitter has just gone idle,
- * there is no longer any reason for DMA, so
- * disable it.
- */
- if (!(port->ip_notify
- & (N_DATA_READY | N_DDCD))) {
- BUG_ON(!(port->ip_sscr
- & SSCR_DMA_EN));
- port->ip_sscr &= ~SSCR_DMA_EN;
- writel(port->ip_sscr,
- &port->ip_serial_regs->sscr);
- }
- /* Prevent infinite tx_mt interrupt */
- card_ptr->ic_enable &= ~hooks->intr_tx_mt;
- }
- }
- sio_ir = PENDING(card_ptr, idd);
-
- /* if the read was aborted and only hooks->intr_rx_high,
- * clear hooks->intr_rx_high, so we do not loop forever.
- */
-
- if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
- sio_ir &= ~hooks->intr_rx_high;
- }
- } while (sio_ir & hooks->intr_all);
-
- spin_unlock_irqrestore(&port->ip_lock, flags);
- ioc3_enable(is, idd, card_ptr->ic_enable);
- return 0;
-}
-
-/**
- * ioc3uart_intr - field all serial interrupts
- * @is : submodule
- * @idd: driver data
- * @pending: interrupts to handle
- *
- */
-
-static int ioc3uart_intr(struct ioc3_submodule *is,
- struct ioc3_driver_data *idd,
- unsigned int pending)
-{
- int ret = 0;
-
- /*
- * The upper level interrupt handler sends interrupts for both ports
- * here. So we need to call for each port with its interrupts.
- */
-
- if (pending & SIO_IR_SA)
- ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SA);
- if (pending & SIO_IR_SB)
- ret |= ioc3uart_intr_one(is, idd, pending & SIO_IR_SB);
-
- return ret;
-}
-
-/**
- * ic3_type - return a description string for the port
- * @the_port: Port to operate on
- *
- */
-static const char *ic3_type(struct uart_port *the_port)
-{
- if (IS_RS232(the_port->line))
- return "SGI IOC3 Serial [rs232]";
- else
- return "SGI IOC3 Serial [rs422]";
-}
-
-/**
- * ic3_tx_empty - Is the transmitter empty?
- * @port: Port to operate on
- *
- */
-static unsigned int ic3_tx_empty(struct uart_port *the_port)
-{
- unsigned int ret = 0;
- struct ioc3_port *port = get_ioc3_port(the_port);
-
- if (readl(&port->ip_serial_regs->shadow) & SHADOW_TEMT)
- ret = TIOCSER_TEMT;
- return ret;
-}
-
-/**
- * ic3_stop_tx - stop the transmitter
- * @port: Port to operate on
- *
- */
-static void ic3_stop_tx(struct uart_port *the_port)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
-
- if (port)
- set_notification(port, N_OUTPUT_LOWAT, 0);
-}
-
-/**
- * ic3_stop_rx - stop the receiver
- * @port: Port to operate on
- *
- */
-static void ic3_stop_rx(struct uart_port *the_port)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
-
- if (port)
- port->ip_flags &= ~INPUT_ENABLE;
-}
-
-/**
- * null_void_function
- * @port: Port to operate on
- *
- */
-static void null_void_function(struct uart_port *the_port)
-{
-}
-
-/**
- * ic3_shutdown - shut down the port - disable notifications and wake waiters
- * @port: port to shut down
- *
- */
-static void ic3_shutdown(struct uart_port *the_port)
-{
- unsigned long port_flags;
- struct ioc3_port *port;
- struct uart_state *state;
-
- port = get_ioc3_port(the_port);
- if (!port)
- return;
-
- state = the_port->state;
- wake_up_interruptible(&state->port.delta_msr_wait);
-
- spin_lock_irqsave(&the_port->lock, port_flags);
- set_notification(port, N_ALL, 0);
- spin_unlock_irqrestore(&the_port->lock, port_flags);
-}
-
-/**
- * ic3_set_mctrl - set control lines (dtr, rts, etc)
- * @port: Port to operate on
- * @mctrl: Lines to set/unset
- *
- */
-static void ic3_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
-{
- unsigned char mcr = 0;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- set_mcr(the_port, mcr, SHADOW_DTR);
-}
-
-/**
- * ic3_get_mctrl - get control line info
- * @port: port to operate on
- *
- */
-static unsigned int ic3_get_mctrl(struct uart_port *the_port)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
- uint32_t shadow;
- unsigned int ret = 0;
-
- if (!port)
- return 0;
-
- shadow = readl(&port->ip_serial_regs->shadow);
- if (shadow & SHADOW_DCD)
- ret |= TIOCM_CD;
- if (shadow & SHADOW_DR)
- ret |= TIOCM_DSR;
- if (shadow & SHADOW_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-/**
- * ic3_start_tx - Start transmitter. Called with the_port->lock held
- * @port: Port to operate on
- *
- */
-static void ic3_start_tx(struct uart_port *the_port)
-{
- struct ioc3_port *port = get_ioc3_port(the_port);
-
- if (port) {
- set_notification(port, N_OUTPUT_LOWAT, 1);
- enable_intrs(port, port->ip_hooks->intr_tx_mt);
- }
-}
-
-/**
- * ic3_break_ctl - handle breaks
- * @port: Port to operate on
- * @break_state: Break state
- *
- */
-static void ic3_break_ctl(struct uart_port *the_port, int break_state)
-{
-}
-
-/**
- * ic3_startup - start up the serial port; returns 0 on success, -ENODEV on error
- * @port: Port to operate on
- *
- */
-static int ic3_startup(struct uart_port *the_port)
-{
- int retval;
- struct ioc3_port *port;
- struct ioc3_card *card_ptr;
- unsigned long port_flags;
-
- if (!the_port) {
- NOT_PROGRESS();
- return -ENODEV;
- }
- port = get_ioc3_port(the_port);
- if (!port) {
- NOT_PROGRESS();
- return -ENODEV;
- }
- card_ptr = port->ip_card;
- port->ip_port = the_port;
-
- if (!card_ptr) {
- NOT_PROGRESS();
- return -ENODEV;
- }
-
- /* Start up the serial port */
- spin_lock_irqsave(&the_port->lock, port_flags);
- retval = ic3_startup_local(the_port);
- spin_unlock_irqrestore(&the_port->lock, port_flags);
- return retval;
-}
-
-/**
- * ic3_set_termios - set termios settings
- * @the_port: port to operate on
- * @termios: New settings
- * @old_termios: Old settings
- *
- */
-static void
-ic3_set_termios(struct uart_port *the_port,
- struct ktermios *termios, struct ktermios *old_termios)
-{
- unsigned long port_flags;
-
- spin_lock_irqsave(&the_port->lock, port_flags);
- ioc3_change_speed(the_port, termios, old_termios);
- spin_unlock_irqrestore(&the_port->lock, port_flags);
-}
-
-/**
- * ic3_request_port - allocate resources for port - no op....
- * @port: port to operate on
- *
- */
-static int ic3_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/* Associate the uart functions above - given to serial core */
-static const struct uart_ops ioc3_ops = {
- .tx_empty = ic3_tx_empty,
- .set_mctrl = ic3_set_mctrl,
- .get_mctrl = ic3_get_mctrl,
- .stop_tx = ic3_stop_tx,
- .start_tx = ic3_start_tx,
- .stop_rx = ic3_stop_rx,
- .break_ctl = ic3_break_ctl,
- .startup = ic3_startup,
- .shutdown = ic3_shutdown,
- .set_termios = ic3_set_termios,
- .type = ic3_type,
- .release_port = null_void_function,
- .request_port = ic3_request_port,
-};
-
-/*
- * Boot-time initialization code
- */
-
-static struct uart_driver ioc3_uart = {
- .owner = THIS_MODULE,
- .driver_name = "ioc3_serial",
- .dev_name = DEVICE_NAME,
- .major = DEVICE_MAJOR,
- .minor = DEVICE_MINOR,
- .nr = MAX_LOGICAL_PORTS
-};
-
-/**
- * ioc3_serial_core_attach - register with serial core
- * This is done during pci probing
- * @is: submodule struct for this
- * @idd: handle for this card
- */
-static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
- struct ioc3_driver_data *idd)
-{
- struct ioc3_port *port;
- struct uart_port *the_port;
- struct ioc3_card *card_ptr = idd->data[is->id];
- int ii, phys_port;
- struct pci_dev *pdev = idd->pdev;
-
- DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n",
- __func__, pdev, (void *)card_ptr));
-
- if (!card_ptr)
- return -ENODEV;
-
- /* once around for each logical port on this card */
- for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) {
- phys_port = GET_PHYSICAL_PORT(ii);
- the_port = &card_ptr->ic_port[phys_port].
- icp_uart_port[GET_LOGICAL_PORT(ii)];
- port = card_ptr->ic_port[phys_port].icp_port;
- port->ip_port = the_port;
-
- DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n",
- __func__, (void *)the_port, (void *)port,
- phys_port, ii));
-
- /* membase, iobase and mapbase just need to be non-0 */
- the_port->membase = (unsigned char __iomem *)1;
- the_port->iobase = (pdev->bus->number << 16) | ii;
- the_port->line = (Num_of_ioc3_cards << 2) | ii;
- the_port->mapbase = 1;
- the_port->type = PORT_16550A;
- the_port->fifosize = FIFO_SIZE;
- the_port->ops = &ioc3_ops;
- the_port->irq = idd->irq_io;
- the_port->dev = &pdev->dev;
-
- if (uart_add_one_port(&ioc3_uart, the_port) < 0) {
- printk(KERN_WARNING
- "%s: unable to add port %d bus %d\n",
- __func__, the_port->line, pdev->bus->number);
- } else {
- DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n",
- the_port->line, the_port->irq, pdev->bus->number));
- }
-
- /* all ports are rs232 for now */
- if (IS_PHYSICAL_PORT(ii))
- ioc3_set_proto(port, PROTO_RS232);
- }
- return 0;
-}
-
-/**
- * ioc3uart_remove - register detach function
- * @is: submodule struct for this submodule
- * @idd: ioc3 driver data for this submodule
- */
-
-static int ioc3uart_remove(struct ioc3_submodule *is,
- struct ioc3_driver_data *idd)
-{
- struct ioc3_card *card_ptr = idd->data[is->id];
- struct uart_port *the_port;
- struct ioc3_port *port;
- int ii;
-
- if (card_ptr) {
- for (ii = 0; ii < LOGICAL_PORTS_PER_CARD; ii++) {
- the_port = &card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].
- icp_uart_port[GET_LOGICAL_PORT(ii)];
- if (the_port)
- uart_remove_one_port(&ioc3_uart, the_port);
- port = card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].icp_port;
- if (port && IS_PHYSICAL_PORT(ii)
- && (GET_PHYSICAL_PORT(ii) == 0)) {
- pci_free_consistent(port->ip_idd->pdev,
- TOTAL_RING_BUF_SIZE,
- (void *)port->ip_cpu_ringbuf,
- port->ip_dma_ringbuf);
- kfree(port);
- card_ptr->ic_port[GET_PHYSICAL_PORT(ii)].
- icp_port = NULL;
- }
- }
- kfree(card_ptr);
- idd->data[is->id] = NULL;
- }
- return 0;
-}
-
-/**
- * ioc3uart_probe - card probe function called from shim driver
- * @is: submodule struct for this submodule
- * @idd: ioc3 driver data for this card
- */
-
-static int
-ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
-{
- struct pci_dev *pdev = idd->pdev;
- struct ioc3_card *card_ptr;
- int ret = 0;
- struct ioc3_port *port;
- struct ioc3_port *ports[PORTS_PER_CARD];
- int phys_port;
- int cnt;
-
- DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
-
- card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL);
- if (!card_ptr) {
- printk(KERN_WARNING "ioc3_attach_one"
- ": unable to get memory for the IOC3\n");
- return -ENOMEM;
- }
- idd->data[is->id] = card_ptr;
- Submodule_slot = is->id;
-
- writel(((UARTA_BASE >> 3) << SIO_CR_SER_A_BASE_SHIFT) |
- ((UARTB_BASE >> 3) << SIO_CR_SER_B_BASE_SHIFT) |
- (0xf << SIO_CR_CMD_PULSE_SHIFT), &idd->vma->sio_cr);
-
- pci_write_config_dword(pdev, PCI_LAT, 0xff00);
-
- /* Enable serial port mode select generic PIO pins as outputs */
- ioc3_gpcr_set(idd, GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL);
-
- /* Create port structures for each port */
- for (phys_port = 0; phys_port < PORTS_PER_CARD; phys_port++) {
- port = kzalloc(sizeof(struct ioc3_port), GFP_KERNEL);
- if (!port) {
- printk(KERN_WARNING
- "IOC3 serial memory not available for port\n");
- ret = -ENOMEM;
- goto out4;
- }
- spin_lock_init(&port->ip_lock);
-
- /* we need to remember the previous ones, to point back to
- * them farther down - setting up the ring buffers.
- */
- ports[phys_port] = port;
-
- /* init to something useful */
- card_ptr->ic_port[phys_port].icp_port = port;
- port->ip_is = is;
- port->ip_idd = idd;
- port->ip_baud = 9600;
- port->ip_card = card_ptr;
- port->ip_hooks = &hooks_array[phys_port];
-
- /* Setup each port */
- if (phys_port == 0) {
- port->ip_serial_regs = &idd->vma->port_a;
- port->ip_uart_regs = &idd->vma->sregs.uarta;
-
- DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p "
- "ip_uart_regs 0x%p\n",
- __func__,
- (void *)port->ip_serial_regs,
- (void *)port->ip_uart_regs));
-
- /* setup ring buffers */
- port->ip_cpu_ringbuf = pci_alloc_consistent(pdev,
- TOTAL_RING_BUF_SIZE, &port->ip_dma_ringbuf);
-
- BUG_ON(!((((int64_t) port->ip_dma_ringbuf) &
- (TOTAL_RING_BUF_SIZE - 1)) == 0));
- port->ip_inring = RING(port, RX_A);
- port->ip_outring = RING(port, TX_A);
- DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p "
- "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
- "ip_outring 0x%p\n",
- __func__,
- (void *)port->ip_cpu_ringbuf,
- (void *)port->ip_dma_ringbuf,
- (void *)port->ip_inring,
- (void *)port->ip_outring));
- }
- else {
- port->ip_serial_regs = &idd->vma->port_b;
- port->ip_uart_regs = &idd->vma->sregs.uartb;
-
- DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p "
- "ip_uart_regs 0x%p\n",
- __func__,
- (void *)port->ip_serial_regs,
- (void *)port->ip_uart_regs));
-
- /* share the ring buffers */
- port->ip_dma_ringbuf =
- ports[phys_port - 1]->ip_dma_ringbuf;
- port->ip_cpu_ringbuf =
- ports[phys_port - 1]->ip_cpu_ringbuf;
- port->ip_inring = RING(port, RX_B);
- port->ip_outring = RING(port, TX_B);
- DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p "
- "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
- "ip_outring 0x%p\n",
- __func__,
- (void *)port->ip_cpu_ringbuf,
- (void *)port->ip_dma_ringbuf,
- (void *)port->ip_inring,
- (void *)port->ip_outring));
- }
-
- DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p",
- __func__,
- phys_port, (void *)port, (void *)card_ptr));
- DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
- (void *)port->ip_serial_regs,
- (void *)port->ip_uart_regs));
-
- /* Initialize the hardware for IOC3 */
- port_init(port);
-
- DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p "
- "outring 0x%p\n",
- __func__,
- phys_port, (void *)port,
- (void *)port->ip_inring,
- (void *)port->ip_outring));
-
- }
-
- /* register port with the serial core */
-
- ret = ioc3_serial_core_attach(is, idd);
- if (ret)
- goto out4;
-
- Num_of_ioc3_cards++;
-
- return ret;
-
- /* error exits that give back resources */
-out4:
- for (cnt = 0; cnt < phys_port; cnt++)
- kfree(ports[cnt]);
-
- kfree(card_ptr);
- return ret;
-}
-
-static struct ioc3_submodule ioc3uart_ops = {
- .name = "IOC3uart",
- .probe = ioc3uart_probe,
- .remove = ioc3uart_remove,
- /* call .intr for both ports initially */
- .irq_mask = SIO_IR_SA | SIO_IR_SB,
- .intr = ioc3uart_intr,
- .owner = THIS_MODULE,
-};
-
-/**
- * ioc3uart_init - module init entry point
- */
-static int __init ioc3uart_init(void)
-{
- int ret;
-
- /* register with serial core */
- if ((ret = uart_register_driver(&ioc3_uart)) < 0) {
- printk(KERN_WARNING
- "%s: Couldn't register IOC3 uart serial driver\n",
- __func__);
- return ret;
- }
- ret = ioc3_register_submodule(&ioc3uart_ops);
- if (ret)
- uart_unregister_driver(&ioc3_uart);
- return ret;
-}
-
-static void __exit ioc3uart_exit(void)
-{
- ioc3_unregister_submodule(&ioc3uart_ops);
- uart_unregister_driver(&ioc3_uart);
-}
-
-module_init(ioc3uart_init);
-module_exit(ioc3uart_exit);
-
-MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
-MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC3 card");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
deleted file mode 100644
index db5b979e5a0c..000000000000
--- a/drivers/tty/serial/ioc4_serial.c
+++ /dev/null
@@ -1,2955 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-
-/*
- * This file contains a module version of the ioc4 serial driver. It
- * includes all of the required support functions as well as the serial
- * driver itself.
- */
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/circ_buf.h>
-#include <linux/serial_reg.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ioc4.h>
-#include <linux/serial_core.h>
-#include <linux/slab.h>
-
-/*
- * interesting things about the ioc4
- */
-
-#define IOC4_NUM_SERIAL_PORTS 4 /* max ports per card */
-#define IOC4_NUM_CARDS 8 /* max cards per partition */
-
-#define GET_SIO_IR(_n) (_n == 0) ? (IOC4_SIO_IR_S0) : \
- (_n == 1) ? (IOC4_SIO_IR_S1) : \
- (_n == 2) ? (IOC4_SIO_IR_S2) : \
- (IOC4_SIO_IR_S3)
-
-#define GET_OTHER_IR(_n) (_n == 0) ? (IOC4_OTHER_IR_S0_MEMERR) : \
- (_n == 1) ? (IOC4_OTHER_IR_S1_MEMERR) : \
- (_n == 2) ? (IOC4_OTHER_IR_S2_MEMERR) : \
- (IOC4_OTHER_IR_S3_MEMERR)
-
-
-/*
- * All IOC4 registers are 32 bits wide.
- */
-
-/*
- * PCI Memory Space Map
- */
-#define IOC4_PCI_ERR_ADDR_L 0x000 /* Low Error Address */
-#define IOC4_PCI_ERR_ADDR_VLD (0x1 << 0)
-#define IOC4_PCI_ERR_ADDR_MST_ID_MSK (0xf << 1)
-#define IOC4_PCI_ERR_ADDR_MST_NUM_MSK (0xe << 1)
-#define IOC4_PCI_ERR_ADDR_MST_TYP_MSK (0x1 << 1)
-#define IOC4_PCI_ERR_ADDR_MUL_ERR (0x1 << 5)
-#define IOC4_PCI_ERR_ADDR_ADDR_MSK (0x3ffffff << 6)
-
-/* Interrupt types */
-#define IOC4_SIO_INTR_TYPE 0
-#define IOC4_OTHER_INTR_TYPE 1
-#define IOC4_NUM_INTR_TYPES 2
-
-/* Bitmasks for IOC4_SIO_IR, IOC4_SIO_IEC, and IOC4_SIO_IES */
-#define IOC4_SIO_IR_S0_TX_MT 0x00000001 /* Serial port 0 TX empty */
-#define IOC4_SIO_IR_S0_RX_FULL 0x00000002 /* Port 0 RX buf full */
-#define IOC4_SIO_IR_S0_RX_HIGH 0x00000004 /* Port 0 RX hiwat */
-#define IOC4_SIO_IR_S0_RX_TIMER 0x00000008 /* Port 0 RX timeout */
-#define IOC4_SIO_IR_S0_DELTA_DCD 0x00000010 /* Port 0 delta DCD */
-#define IOC4_SIO_IR_S0_DELTA_CTS 0x00000020 /* Port 0 delta CTS */
-#define IOC4_SIO_IR_S0_INT 0x00000040 /* Port 0 pass-thru intr */
-#define IOC4_SIO_IR_S0_TX_EXPLICIT 0x00000080 /* Port 0 explicit TX thru */
-#define IOC4_SIO_IR_S1_TX_MT 0x00000100 /* Serial port 1 */
-#define IOC4_SIO_IR_S1_RX_FULL 0x00000200 /* */
-#define IOC4_SIO_IR_S1_RX_HIGH 0x00000400 /* */
-#define IOC4_SIO_IR_S1_RX_TIMER 0x00000800 /* */
-#define IOC4_SIO_IR_S1_DELTA_DCD 0x00001000 /* */
-#define IOC4_SIO_IR_S1_DELTA_CTS 0x00002000 /* */
-#define IOC4_SIO_IR_S1_INT 0x00004000 /* */
-#define IOC4_SIO_IR_S1_TX_EXPLICIT 0x00008000 /* */
-#define IOC4_SIO_IR_S2_TX_MT 0x00010000 /* Serial port 2 */
-#define IOC4_SIO_IR_S2_RX_FULL 0x00020000 /* */
-#define IOC4_SIO_IR_S2_RX_HIGH 0x00040000 /* */
-#define IOC4_SIO_IR_S2_RX_TIMER 0x00080000 /* */
-#define IOC4_SIO_IR_S2_DELTA_DCD 0x00100000 /* */
-#define IOC4_SIO_IR_S2_DELTA_CTS 0x00200000 /* */
-#define IOC4_SIO_IR_S2_INT 0x00400000 /* */
-#define IOC4_SIO_IR_S2_TX_EXPLICIT 0x00800000 /* */
-#define IOC4_SIO_IR_S3_TX_MT 0x01000000 /* Serial port 3 */
-#define IOC4_SIO_IR_S3_RX_FULL 0x02000000 /* */
-#define IOC4_SIO_IR_S3_RX_HIGH 0x04000000 /* */
-#define IOC4_SIO_IR_S3_RX_TIMER 0x08000000 /* */
-#define IOC4_SIO_IR_S3_DELTA_DCD 0x10000000 /* */
-#define IOC4_SIO_IR_S3_DELTA_CTS 0x20000000 /* */
-#define IOC4_SIO_IR_S3_INT 0x40000000 /* */
-#define IOC4_SIO_IR_S3_TX_EXPLICIT 0x80000000 /* */
-
-/* Per device interrupt masks */
-#define IOC4_SIO_IR_S0 (IOC4_SIO_IR_S0_TX_MT | \
- IOC4_SIO_IR_S0_RX_FULL | \
- IOC4_SIO_IR_S0_RX_HIGH | \
- IOC4_SIO_IR_S0_RX_TIMER | \
- IOC4_SIO_IR_S0_DELTA_DCD | \
- IOC4_SIO_IR_S0_DELTA_CTS | \
- IOC4_SIO_IR_S0_INT | \
- IOC4_SIO_IR_S0_TX_EXPLICIT)
-#define IOC4_SIO_IR_S1 (IOC4_SIO_IR_S1_TX_MT | \
- IOC4_SIO_IR_S1_RX_FULL | \
- IOC4_SIO_IR_S1_RX_HIGH | \
- IOC4_SIO_IR_S1_RX_TIMER | \
- IOC4_SIO_IR_S1_DELTA_DCD | \
- IOC4_SIO_IR_S1_DELTA_CTS | \
- IOC4_SIO_IR_S1_INT | \
- IOC4_SIO_IR_S1_TX_EXPLICIT)
-#define IOC4_SIO_IR_S2 (IOC4_SIO_IR_S2_TX_MT | \
- IOC4_SIO_IR_S2_RX_FULL | \
- IOC4_SIO_IR_S2_RX_HIGH | \
- IOC4_SIO_IR_S2_RX_TIMER | \
- IOC4_SIO_IR_S2_DELTA_DCD | \
- IOC4_SIO_IR_S2_DELTA_CTS | \
- IOC4_SIO_IR_S2_INT | \
- IOC4_SIO_IR_S2_TX_EXPLICIT)
-#define IOC4_SIO_IR_S3 (IOC4_SIO_IR_S3_TX_MT | \
- IOC4_SIO_IR_S3_RX_FULL | \
- IOC4_SIO_IR_S3_RX_HIGH | \
- IOC4_SIO_IR_S3_RX_TIMER | \
- IOC4_SIO_IR_S3_DELTA_DCD | \
- IOC4_SIO_IR_S3_DELTA_CTS | \
- IOC4_SIO_IR_S3_INT | \
- IOC4_SIO_IR_S3_TX_EXPLICIT)
-
-/* Bitmasks for IOC4_OTHER_IR, IOC4_OTHER_IEC, and IOC4_OTHER_IES */
-#define IOC4_OTHER_IR_ATA_INT 0x00000001 /* ATAPI intr pass-thru */
-#define IOC4_OTHER_IR_ATA_MEMERR 0x00000002 /* ATAPI DMA PCI error */
-#define IOC4_OTHER_IR_S0_MEMERR 0x00000004 /* Port 0 PCI error */
-#define IOC4_OTHER_IR_S1_MEMERR 0x00000008 /* Port 1 PCI error */
-#define IOC4_OTHER_IR_S2_MEMERR 0x00000010 /* Port 2 PCI error */
-#define IOC4_OTHER_IR_S3_MEMERR 0x00000020 /* Port 3 PCI error */
-#define IOC4_OTHER_IR_KBD_INT 0x00000040 /* Keyboard/mouse */
-#define IOC4_OTHER_IR_RESERVED 0x007fff80 /* Reserved */
-#define IOC4_OTHER_IR_RT_INT 0x00800000 /* INT_OUT section output */
-#define IOC4_OTHER_IR_GEN_INT 0xff000000 /* Generic pins */
-
-#define IOC4_OTHER_IR_SER_MEMERR (IOC4_OTHER_IR_S0_MEMERR | IOC4_OTHER_IR_S1_MEMERR | \
- IOC4_OTHER_IR_S2_MEMERR | IOC4_OTHER_IR_S3_MEMERR)
-
-/* Bitmasks for IOC4_SIO_CR */
-#define IOC4_SIO_CR_CMD_PULSE_SHIFT 0 /* byte bus strobe shift */
-#define IOC4_SIO_CR_ARB_DIAG_TX0 0x00000000
-#define IOC4_SIO_CR_ARB_DIAG_RX0 0x00000010
-#define IOC4_SIO_CR_ARB_DIAG_TX1 0x00000020
-#define IOC4_SIO_CR_ARB_DIAG_RX1 0x00000030
-#define IOC4_SIO_CR_ARB_DIAG_TX2 0x00000040
-#define IOC4_SIO_CR_ARB_DIAG_RX2 0x00000050
-#define IOC4_SIO_CR_ARB_DIAG_TX3 0x00000060
-#define IOC4_SIO_CR_ARB_DIAG_RX3 0x00000070
-#define IOC4_SIO_CR_SIO_DIAG_IDLE 0x00000080 /* 0 -> active request among
- serial ports (ro) */
-/* Defs for some of the generic I/O pins */
-#define IOC4_GPCR_UART0_MODESEL 0x10 /* Pin is output to port 0
- mode sel */
-#define IOC4_GPCR_UART1_MODESEL 0x20 /* Pin is output to port 1
- mode sel */
-#define IOC4_GPCR_UART2_MODESEL 0x40 /* Pin is output to port 2
- mode sel */
-#define IOC4_GPCR_UART3_MODESEL 0x80 /* Pin is output to port 3
- mode sel */
-
-#define IOC4_GPPR_UART0_MODESEL_PIN 4 /* GIO pin controlling
- uart 0 mode select */
-#define IOC4_GPPR_UART1_MODESEL_PIN 5 /* GIO pin controlling
- uart 1 mode select */
-#define IOC4_GPPR_UART2_MODESEL_PIN 6 /* GIO pin controlling
- uart 2 mode select */
-#define IOC4_GPPR_UART3_MODESEL_PIN 7 /* GIO pin controlling
- uart 3 mode select */
-
-/* Bitmasks for serial RX status byte */
-#define IOC4_RXSB_OVERRUN 0x01 /* Char(s) lost */
-#define IOC4_RXSB_PAR_ERR 0x02 /* Parity error */
-#define IOC4_RXSB_FRAME_ERR 0x04 /* Framing error */
-#define IOC4_RXSB_BREAK 0x08 /* Break character */
-#define IOC4_RXSB_CTS 0x10 /* State of CTS */
-#define IOC4_RXSB_DCD 0x20 /* State of DCD */
-#define IOC4_RXSB_MODEM_VALID 0x40 /* DCD, CTS, and OVERRUN are valid */
-#define IOC4_RXSB_DATA_VALID 0x80 /* Data byte, FRAME_ERR PAR_ERR
- * & BREAK valid */
-
-/* Bitmasks for serial TX control byte */
-#define IOC4_TXCB_INT_WHEN_DONE 0x20 /* Interrupt after this byte is sent */
-#define IOC4_TXCB_INVALID 0x00 /* Byte is invalid */
-#define IOC4_TXCB_VALID 0x40 /* Byte is valid */
-#define IOC4_TXCB_MCR 0x80 /* Data<7:0> to modem control reg */
-#define IOC4_TXCB_DELAY 0xc0 /* Delay data<7:0> mSec */
-
-/* Bitmasks for IOC4_SBBR_L */
-#define IOC4_SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
-
-/* Bitmasks for IOC4_SSCR_<3:0> */
-#define IOC4_SSCR_RX_THRESHOLD 0x000001ff /* Hiwater mark */
-#define IOC4_SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
-#define IOC4_SSCR_HFC_EN 0x00020000 /* Hardware flow control enabled */
-#define IOC4_SSCR_RX_RING_DCD 0x00040000 /* Post RX record on delta-DCD */
-#define IOC4_SSCR_RX_RING_CTS 0x00080000 /* Post RX record on delta-CTS */
-#define IOC4_SSCR_DIAG 0x00200000 /* Bypass clock divider for sim */
-#define IOC4_SSCR_RX_DRAIN 0x08000000 /* Drain RX buffer to memory */
-#define IOC4_SSCR_DMA_EN 0x10000000 /* Enable ring buffer DMA */
-#define IOC4_SSCR_DMA_PAUSE 0x20000000 /* Pause DMA */
-#define IOC4_SSCR_PAUSE_STATE 0x40000000 /* Sets when PAUSE takes effect */
-#define IOC4_SSCR_RESET 0x80000000 /* Reset DMA channels */
-
-/* All producer/consumer pointers are the same bitfield */
-#define IOC4_PROD_CONS_PTR_4K 0x00000ff8 /* For 4K buffers */
-#define IOC4_PROD_CONS_PTR_1K 0x000003f8 /* For 1K buffers */
-#define IOC4_PROD_CONS_PTR_OFF 3
-
-/* Bitmasks for IOC4_SRCIR_<3:0> */
-#define IOC4_SRCIR_ARM 0x80000000 /* Arm RX timer */
-
-/* Bitmasks for IOC4_SHADOW_<3:0> */
-#define IOC4_SHADOW_DR 0x00000001 /* Data ready */
-#define IOC4_SHADOW_OE 0x00000002 /* Overrun error */
-#define IOC4_SHADOW_PE 0x00000004 /* Parity error */
-#define IOC4_SHADOW_FE 0x00000008 /* Framing error */
-#define IOC4_SHADOW_BI 0x00000010 /* Break interrupt */
-#define IOC4_SHADOW_THRE 0x00000020 /* Xmit holding register empty */
-#define IOC4_SHADOW_TEMT 0x00000040 /* Xmit shift register empty */
-#define IOC4_SHADOW_RFCE 0x00000080 /* Char in RX fifo has an error */
-#define IOC4_SHADOW_DCTS 0x00010000 /* Delta clear to send */
-#define IOC4_SHADOW_DDCD 0x00080000 /* Delta data carrier detect */
-#define IOC4_SHADOW_CTS 0x00100000 /* Clear to send */
-#define IOC4_SHADOW_DCD 0x00800000 /* Data carrier detect */
-#define IOC4_SHADOW_DTR 0x01000000 /* Data terminal ready */
-#define IOC4_SHADOW_RTS 0x02000000 /* Request to send */
-#define IOC4_SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
-#define IOC4_SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
-#define IOC4_SHADOW_LOOP 0x10000000 /* Loopback enabled */
-
-/* Bitmasks for IOC4_SRTR_<3:0> */
-#define IOC4_SRTR_CNT 0x00000fff /* Reload value for RX timer */
-#define IOC4_SRTR_CNT_VAL 0x0fff0000 /* Current value of RX timer */
-#define IOC4_SRTR_CNT_VAL_SHIFT 16
-#define IOC4_SRTR_HZ 16000 /* SRTR clock frequency */
-
-/* Serial port register map used for DMA and PIO serial I/O */
-struct ioc4_serialregs {
- uint32_t sscr;
- uint32_t stpir;
- uint32_t stcir;
- uint32_t srpir;
- uint32_t srcir;
- uint32_t srtr;
- uint32_t shadow;
-};
-
-/* IOC4 UART register map */
-struct ioc4_uartregs {
- char i4u_lcr;
- union {
- char iir; /* read only */
- char fcr; /* write only */
- } u3;
- union {
- char ier; /* DLAB == 0 */
- char dlm; /* DLAB == 1 */
- } u2;
- union {
- char rbr; /* read only, DLAB == 0 */
- char thr; /* write only, DLAB == 0 */
- char dll; /* DLAB == 1 */
- } u1;
- char i4u_scr;
- char i4u_msr;
- char i4u_lsr;
- char i4u_mcr;
-};
-
-/* short names */
-#define i4u_dll u1.dll
-#define i4u_ier u2.ier
-#define i4u_dlm u2.dlm
-#define i4u_fcr u3.fcr
-
-/* Serial port registers used for DMA serial I/O */
-struct ioc4_serial {
- uint32_t sbbr01_l;
- uint32_t sbbr01_h;
- uint32_t sbbr23_l;
- uint32_t sbbr23_h;
-
- struct ioc4_serialregs port_0;
- struct ioc4_serialregs port_1;
- struct ioc4_serialregs port_2;
- struct ioc4_serialregs port_3;
- struct ioc4_uartregs uart_0;
- struct ioc4_uartregs uart_1;
- struct ioc4_uartregs uart_2;
- struct ioc4_uartregs uart_3;
-};
-
-/* UART clock speed */
-#define IOC4_SER_XIN_CLK_66 66666667
-#define IOC4_SER_XIN_CLK_33 33333333
-
-#define IOC4_W_IES 0
-#define IOC4_W_IEC 1
-
-typedef void ioc4_intr_func_f(void *, uint32_t);
-typedef ioc4_intr_func_f *ioc4_intr_func_t;
-
-static unsigned int Num_of_ioc4_cards;
-
-/* defining this will get you LOTS of great debug info */
-//#define DEBUG_INTERRUPTS
-#define DPRINT_CONFIG(_x...) ;
-//#define DPRINT_CONFIG(_x...) printk _x
-
-/* number of characters left in xmit buffer before we ask for more */
-#define WAKEUP_CHARS 256
-
-/* number of characters we want to transmit to the lower level at a time */
-#define IOC4_MAX_CHARS 256
-#define IOC4_FIFO_CHARS 255
-
-/* Device name we're using */
-#define DEVICE_NAME_RS232 "ttyIOC"
-#define DEVICE_NAME_RS422 "ttyAIOC"
-#define DEVICE_MAJOR 204
-#define DEVICE_MINOR_RS232 50
-#define DEVICE_MINOR_RS422 84
-
-
-/* register offsets */
-#define IOC4_SERIAL_OFFSET 0x300
-
-/* flags for next_char_state */
-#define NCS_BREAK 0x1
-#define NCS_PARITY 0x2
-#define NCS_FRAMING 0x4
-#define NCS_OVERRUN 0x8
-
-/* because we need SOME parameters ... */
-#define MIN_BAUD_SUPPORTED 1200
-#define MAX_BAUD_SUPPORTED 115200
-
-/* protocol types supported */
-#define PROTO_RS232 3
-#define PROTO_RS422 7
-
-/* Notification types */
-#define N_DATA_READY 0x01
-#define N_OUTPUT_LOWAT 0x02
-#define N_BREAK 0x04
-#define N_PARITY_ERROR 0x08
-#define N_FRAMING_ERROR 0x10
-#define N_OVERRUN_ERROR 0x20
-#define N_DDCD 0x40
-#define N_DCTS 0x80
-
-#define N_ALL_INPUT (N_DATA_READY | N_BREAK | \
- N_PARITY_ERROR | N_FRAMING_ERROR | \
- N_OVERRUN_ERROR | N_DDCD | N_DCTS)
-
-#define N_ALL_OUTPUT N_OUTPUT_LOWAT
-
-#define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR | N_OVERRUN_ERROR)
-
-#define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK | \
- N_PARITY_ERROR | N_FRAMING_ERROR | \
- N_OVERRUN_ERROR | N_DDCD | N_DCTS)
-
-#define SER_DIVISOR(_x, clk) (((clk) + (_x) * 8) / ((_x) * 16))
-#define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div))
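-/* Illustrative worked example: with the 66.67 MHz clock,
- * SER_DIVISOR(9600, IOC4_SER_XIN_CLK_66) rounds to 434 and
- * DIVISOR_TO_BAUD(434, IOC4_SER_XIN_CLK_66) comes back to roughly 9600;
- * the "+ (_x) * 8" term gives round-to-nearest division, so the rate
- * error stays well inside the 1% tolerance that set_baud() enforces.
- */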
-
-/* Some masks */
-#define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \
- | UART_LCR_WLEN7 | UART_LCR_WLEN8)
-#define LCR_MASK_STOP_BITS (UART_LCR_STOP)
-
-#define PENDING(_p) (readl(&(_p)->ip_mem->sio_ir.raw) & _p->ip_ienb)
-#define READ_SIO_IR(_p) readl(&(_p)->ip_mem->sio_ir.raw)
-
-/* Default to 4k buffers */
-#ifdef IOC4_1K_BUFFERS
-#define RING_BUF_SIZE 1024
-#define IOC4_BUF_SIZE_BIT 0
-#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_1K
-#else
-#define RING_BUF_SIZE 4096
-#define IOC4_BUF_SIZE_BIT IOC4_SBBR_L_SIZE
-#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_4K
-#endif
-
-#define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
-
-/*
- * This is the entry saved by the driver - one per card
- */
-
-#define UART_PORT_MIN 0
-#define UART_PORT_RS232 UART_PORT_MIN
-#define UART_PORT_RS422 1
-#define UART_PORT_COUNT 2 /* one for each mode */
-
-struct ioc4_control {
- int ic_irq;
- struct {
- /* uart ports are allocated here - 1 for rs232, 1 for rs422 */
- struct uart_port icp_uart_port[UART_PORT_COUNT];
- /* Handy reference material */
- struct ioc4_port *icp_port;
- } ic_port[IOC4_NUM_SERIAL_PORTS];
- struct ioc4_soft *ic_soft;
-};
-
-/*
- * per-IOC4 data structure
- */
-#define MAX_IOC4_INTR_ENTS (8 * sizeof(uint32_t))
-struct ioc4_soft {
- struct ioc4_misc_regs __iomem *is_ioc4_misc_addr;
- struct ioc4_serial __iomem *is_ioc4_serial_addr;
-
- /* Each interrupt type has an entry in the array */
- struct ioc4_intr_type {
-
- /*
- * Each in-use entry in this array contains at least
- * one nonzero bit in sd_bits; no two entries in this
- * array have overlapping sd_bits values.
- */
- struct ioc4_intr_info {
- uint32_t sd_bits;
- ioc4_intr_func_f *sd_intr;
- void *sd_info;
- } is_intr_info[MAX_IOC4_INTR_ENTS];
-
- /* Number of entries active in the above array */
- atomic_t is_num_intrs;
- } is_intr_type[IOC4_NUM_INTR_TYPES];
-
- /* is_ir_lock must be held while
- * modifying sio_ie values, so
- * we can be sure that sio_ie is
- * not changing when we read it
- * along with sio_ir.
- */
- spinlock_t is_ir_lock; /* SIO_IE[SC] mod lock */
-};
-
-/* Local port info for each IOC4 serial port */
-struct ioc4_port {
- struct uart_port *ip_port; /* current active port ptr */
- /* Ptrs for all ports */
- struct uart_port *ip_all_ports[UART_PORT_COUNT];
- /* Back ptrs for this port */
- struct ioc4_control *ip_control;
- struct pci_dev *ip_pdev;
- struct ioc4_soft *ip_ioc4_soft;
-
- /* pci mem addresses */
- struct ioc4_misc_regs __iomem *ip_mem;
- struct ioc4_serial __iomem *ip_serial;
- struct ioc4_serialregs __iomem *ip_serial_regs;
- struct ioc4_uartregs __iomem *ip_uart_regs;
-
- /* Ring buffer page for this port */
- dma_addr_t ip_dma_ringbuf;
- /* vaddr of ring buffer */
- struct ring_buffer *ip_cpu_ringbuf;
-
- /* Rings for this port */
- struct ring *ip_inring;
- struct ring *ip_outring;
-
- /* Hook to port specific values */
- struct hooks *ip_hooks;
-
- spinlock_t ip_lock;
-
- /* Various rx/tx parameters */
- int ip_baud;
- int ip_tx_lowat;
- int ip_rx_timeout;
-
- /* Copy of notification bits */
- int ip_notify;
-
- /* Shadow copies of various registers so we don't need to PIO
- * read them constantly
- */
- uint32_t ip_ienb; /* Enabled interrupts */
- uint32_t ip_sscr;
- uint32_t ip_tx_prod;
- uint32_t ip_rx_cons;
- int ip_pci_bus_speed;
- unsigned char ip_flags;
-};
-
-/* tx low water mark. We need to notify the driver whenever tx is getting
- * close to empty so it can refill the tx buffer and keep things going.
- * Let's assume that if we interrupt 1 ms before the tx goes idle, we'll
- * have no trouble getting in more chars in time (I certainly hope so).
- */
-#define TX_LOWAT_LATENCY 1000
-#define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
-#define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
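-/* Illustrative: TX_LOWAT_LATENCY is in microseconds, so TX_LOWAT_HZ is
- * 1000 and TX_LOWAT_CHARS(115200) = 11520 / 1000 = 11 characters, i.e.
- * roughly the last millisecond's worth of output (at ~10 bits per
- * character) left in the transmit ring when the low-water interrupt
- * is requested.
- */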
-
-/* Flags per port */
-#define INPUT_HIGH 0x01
-#define DCD_ON 0x02
-#define LOWAT_WRITTEN 0x04
-#define READ_ABORTED 0x08
-#define PORT_ACTIVE 0x10
-#define PORT_INACTIVE 0 /* This is the value when "off" */
-
-
-/* Since each port has different register offsets and bitmasks
- * for everything, we'll store those that we need in tables so we
- * don't have to be constantly checking the port we are dealing with.
- */
-struct hooks {
- uint32_t intr_delta_dcd;
- uint32_t intr_delta_cts;
- uint32_t intr_tx_mt;
- uint32_t intr_rx_timer;
- uint32_t intr_rx_high;
- uint32_t intr_tx_explicit;
- uint32_t intr_dma_error;
- uint32_t intr_clear;
- uint32_t intr_all;
- int rs422_select_pin;
-};
-
-static struct hooks hooks_array[IOC4_NUM_SERIAL_PORTS] = {
- /* Values for port 0 */
- {
- IOC4_SIO_IR_S0_DELTA_DCD, IOC4_SIO_IR_S0_DELTA_CTS,
- IOC4_SIO_IR_S0_TX_MT, IOC4_SIO_IR_S0_RX_TIMER,
- IOC4_SIO_IR_S0_RX_HIGH, IOC4_SIO_IR_S0_TX_EXPLICIT,
- IOC4_OTHER_IR_S0_MEMERR,
- (IOC4_SIO_IR_S0_TX_MT | IOC4_SIO_IR_S0_RX_FULL |
- IOC4_SIO_IR_S0_RX_HIGH | IOC4_SIO_IR_S0_RX_TIMER |
- IOC4_SIO_IR_S0_DELTA_DCD | IOC4_SIO_IR_S0_DELTA_CTS |
- IOC4_SIO_IR_S0_INT | IOC4_SIO_IR_S0_TX_EXPLICIT),
- IOC4_SIO_IR_S0, IOC4_GPPR_UART0_MODESEL_PIN,
- },
-
- /* Values for port 1 */
- {
- IOC4_SIO_IR_S1_DELTA_DCD, IOC4_SIO_IR_S1_DELTA_CTS,
- IOC4_SIO_IR_S1_TX_MT, IOC4_SIO_IR_S1_RX_TIMER,
- IOC4_SIO_IR_S1_RX_HIGH, IOC4_SIO_IR_S1_TX_EXPLICIT,
- IOC4_OTHER_IR_S1_MEMERR,
- (IOC4_SIO_IR_S1_TX_MT | IOC4_SIO_IR_S1_RX_FULL |
- IOC4_SIO_IR_S1_RX_HIGH | IOC4_SIO_IR_S1_RX_TIMER |
- IOC4_SIO_IR_S1_DELTA_DCD | IOC4_SIO_IR_S1_DELTA_CTS |
- IOC4_SIO_IR_S1_INT | IOC4_SIO_IR_S1_TX_EXPLICIT),
- IOC4_SIO_IR_S1, IOC4_GPPR_UART1_MODESEL_PIN,
- },
-
- /* Values for port 2 */
- {
- IOC4_SIO_IR_S2_DELTA_DCD, IOC4_SIO_IR_S2_DELTA_CTS,
- IOC4_SIO_IR_S2_TX_MT, IOC4_SIO_IR_S2_RX_TIMER,
- IOC4_SIO_IR_S2_RX_HIGH, IOC4_SIO_IR_S2_TX_EXPLICIT,
- IOC4_OTHER_IR_S2_MEMERR,
- (IOC4_SIO_IR_S2_TX_MT | IOC4_SIO_IR_S2_RX_FULL |
- IOC4_SIO_IR_S2_RX_HIGH | IOC4_SIO_IR_S2_RX_TIMER |
- IOC4_SIO_IR_S2_DELTA_DCD | IOC4_SIO_IR_S2_DELTA_CTS |
- IOC4_SIO_IR_S2_INT | IOC4_SIO_IR_S2_TX_EXPLICIT),
- IOC4_SIO_IR_S2, IOC4_GPPR_UART2_MODESEL_PIN,
- },
-
- /* Values for port 3 */
- {
- IOC4_SIO_IR_S3_DELTA_DCD, IOC4_SIO_IR_S3_DELTA_CTS,
- IOC4_SIO_IR_S3_TX_MT, IOC4_SIO_IR_S3_RX_TIMER,
- IOC4_SIO_IR_S3_RX_HIGH, IOC4_SIO_IR_S3_TX_EXPLICIT,
- IOC4_OTHER_IR_S3_MEMERR,
- (IOC4_SIO_IR_S3_TX_MT | IOC4_SIO_IR_S3_RX_FULL |
- IOC4_SIO_IR_S3_RX_HIGH | IOC4_SIO_IR_S3_RX_TIMER |
- IOC4_SIO_IR_S3_DELTA_DCD | IOC4_SIO_IR_S3_DELTA_CTS |
- IOC4_SIO_IR_S3_INT | IOC4_SIO_IR_S3_TX_EXPLICIT),
- IOC4_SIO_IR_S3, IOC4_GPPR_UART3_MODESEL_PIN,
- }
-};
-
-/* A ring buffer entry */
-struct ring_entry {
- union {
- struct {
- uint32_t alldata;
- uint32_t allsc;
- } all;
- struct {
- char data[4]; /* data bytes */
- char sc[4]; /* status/control */
- } s;
- } u;
-};
-
-/* Test the valid bits in any of the 4 sc chars using "allsc" member */
-#define RING_ANY_VALID \
- ((uint32_t)(IOC4_RXSB_MODEM_VALID | IOC4_RXSB_DATA_VALID) * 0x01010101)
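-/* Illustrative: multiplying the per-byte mask (0x40 | 0x80 == 0xc0) by
- * 0x01010101 replicates it into every byte lane, giving 0xc0c0c0c0, so
- * a single 32-bit test of ring_allsc checks the status/control byte of
- * all four characters in the entry at once.
- */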
-
-#define ring_sc u.s.sc
-#define ring_data u.s.data
-#define ring_allsc u.all.allsc
-
-/* Number of entries per ring buffer. */
-#define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
-
-/* An individual ring */
-struct ring {
- struct ring_entry entries[ENTRIES_PER_RING];
-};
-
-/* The whole enchilada */
-struct ring_buffer {
- struct ring TX_0_OR_2;
- struct ring RX_0_OR_2;
- struct ring TX_1_OR_3;
- struct ring RX_1_OR_3;
-};
-
-/* Get a ring from a port struct */
-#define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh)
-
-/* Infinite loop detection.
- */
-#define MAXITER 10000000
-
-/* Prototypes */
-static void receive_chars(struct uart_port *);
-static void handle_intr(void *arg, uint32_t sio_ir);
-
-/*
- * port_is_active - determines if this port is currently active
- * @port: ptr to soft struct for this port
- * @uart_port: uart port to test for
- */
-static inline int port_is_active(struct ioc4_port *port,
- struct uart_port *uart_port)
-{
- if (port) {
- if ((port->ip_flags & PORT_ACTIVE)
- && (port->ip_port == uart_port))
- return 1;
- }
- return 0;
-}
-
-
-/**
- * write_ireg - write the interrupt regs
- * @ioc4_soft: ptr to soft struct for this port
- * @val: value to write
- * @which: which register
- * @type: which ireg set
- */
-static inline void
-write_ireg(struct ioc4_soft *ioc4_soft, uint32_t val, int which, int type)
-{
- struct ioc4_misc_regs __iomem *mem = ioc4_soft->is_ioc4_misc_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc4_soft->is_ir_lock, flags);
-
- switch (type) {
- case IOC4_SIO_INTR_TYPE:
- switch (which) {
- case IOC4_W_IES:
- writel(val, &mem->sio_ies.raw);
- break;
-
- case IOC4_W_IEC:
- writel(val, &mem->sio_iec.raw);
- break;
- }
- break;
-
- case IOC4_OTHER_INTR_TYPE:
- switch (which) {
- case IOC4_W_IES:
- writel(val, &mem->other_ies.raw);
- break;
-
- case IOC4_W_IEC:
- writel(val, &mem->other_iec.raw);
- break;
- }
- break;
-
- default:
- break;
- }
- spin_unlock_irqrestore(&ioc4_soft->is_ir_lock, flags);
-}
-
-/**
- * set_baud - Baud rate setting code
- * @port: port to set
- * @baud: baud rate to use
- */
-static int set_baud(struct ioc4_port *port, int baud)
-{
- int actual_baud;
- int diff;
- int lcr;
- unsigned short divisor;
- struct ioc4_uartregs __iomem *uart;
-
- divisor = SER_DIVISOR(baud, port->ip_pci_bus_speed);
- if (!divisor)
- return 1;
- actual_baud = DIVISOR_TO_BAUD(divisor, port->ip_pci_bus_speed);
-
- diff = actual_baud - baud;
- if (diff < 0)
- diff = -diff;
-
-	/* If the resulting rate is off by more than 1%, reject this divisor */
- if (diff * 100 > actual_baud)
- return 1;
-
- uart = port->ip_uart_regs;
- lcr = readb(&uart->i4u_lcr);
- writeb(lcr | UART_LCR_DLAB, &uart->i4u_lcr);
- writeb((unsigned char)divisor, &uart->i4u_dll);
- writeb((unsigned char)(divisor >> 8), &uart->i4u_dlm);
- writeb(lcr, &uart->i4u_lcr);
- return 0;
-}
-
-
-/**
- * get_ioc4_port - given a uart port, return the control structure
- * @port: uart port
- * @set: set this port as current
- */
-static struct ioc4_port *get_ioc4_port(struct uart_port *the_port, int set)
-{
- struct ioc4_driver_data *idd = dev_get_drvdata(the_port->dev);
- struct ioc4_control *control = idd->idd_serial_data;
- struct ioc4_port *port;
- int port_num, port_type;
-
- if (control) {
- for ( port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS;
- port_num++ ) {
- port = control->ic_port[port_num].icp_port;
- if (!port)
- continue;
- for (port_type = UART_PORT_MIN;
- port_type < UART_PORT_COUNT;
- port_type++) {
- if (the_port == port->ip_all_ports
- [port_type]) {
- /* set local copy */
- if (set) {
- port->ip_port = the_port;
- }
- return port;
- }
- }
- }
- }
- return NULL;
-}
-
-/* The IOC4 hardware provides no atomic way to determine if interrupts
- * are pending since two reads are required to do so. The handler must
- * read the SIO_IR and the SIO_IES, and take the logical and of the
- * two. When this value is zero, all interrupts have been serviced and
- * the handler may return.
- *
- * This has the unfortunate "hole" that, if some other CPU or
- * some other thread or some higher level interrupt manages to
- * modify SIO_IE between our reads of SIO_IR and SIO_IE, we may
- * think we have observed SIO_IR&SIO_IE==0 when in fact this
- * condition never really occurred.
- *
- * To solve this, we use a simple spinlock that must be held
- * whenever modifying SIO_IE; holding this lock while observing
- * both SIO_IR and SIO_IE guarantees that we do not falsely
- * conclude that no enabled interrupts are pending.
- */
-
-static inline uint32_t
-pending_intrs(struct ioc4_soft *soft, int type)
-{
- struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
- unsigned long flag;
- uint32_t intrs = 0;
-
- BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
- || (type == IOC4_OTHER_INTR_TYPE)));
-
- spin_lock_irqsave(&soft->is_ir_lock, flag);
-
- switch (type) {
- case IOC4_SIO_INTR_TYPE:
- intrs = readl(&mem->sio_ir.raw) & readl(&mem->sio_ies.raw);
- break;
-
- case IOC4_OTHER_INTR_TYPE:
- intrs = readl(&mem->other_ir.raw) & readl(&mem->other_ies.raw);
-
-		/* Don't process any ATA interrupts */
- intrs &= ~(IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
- break;
-
- default:
- break;
- }
- spin_unlock_irqrestore(&soft->is_ir_lock, flag);
- return intrs;
-}
-
-/**
- * port_init - Initialize the sio and ioc4 hardware for a given port
- * called per port from attach...
- * @port: port to initialize
- */
-static inline int port_init(struct ioc4_port *port)
-{
- uint32_t sio_cr;
- struct hooks *hooks = port->ip_hooks;
- struct ioc4_uartregs __iomem *uart;
-
- /* Idle the IOC4 serial interface */
- writel(IOC4_SSCR_RESET, &port->ip_serial_regs->sscr);
-
- /* Wait until any pending bus activity for this port has ceased */
- do
- sio_cr = readl(&port->ip_mem->sio_cr.raw);
- while (!(sio_cr & IOC4_SIO_CR_SIO_DIAG_IDLE));
-
- /* Finish reset sequence */
- writel(0, &port->ip_serial_regs->sscr);
-
- /* Once RESET is done, reload cached tx_prod and rx_cons values
- * and set rings to empty by making prod == cons
- */
- port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
- writel(port->ip_tx_prod, &port->ip_serial_regs->stpir);
- port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
- writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
-
- /* Disable interrupts for this 16550 */
- uart = port->ip_uart_regs;
- writeb(0, &uart->i4u_lcr);
- writeb(0, &uart->i4u_ier);
-
- /* Set the default baud */
- set_baud(port, port->ip_baud);
-
- /* Set line control to 8 bits no parity */
- writeb(UART_LCR_WLEN8 | 0, &uart->i4u_lcr);
- /* UART_LCR_STOP == 1 stop */
-
- /* Enable the FIFOs */
- writeb(UART_FCR_ENABLE_FIFO, &uart->i4u_fcr);
- /* then reset 16550 FIFOs */
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
- &uart->i4u_fcr);
-
- /* Clear modem control register */
- writeb(0, &uart->i4u_mcr);
-
- /* Clear deltas in modem status register */
- readb(&uart->i4u_msr);
-
- /* Only do this once per port pair */
- if (port->ip_hooks == &hooks_array[0]
- || port->ip_hooks == &hooks_array[2]) {
- unsigned long ring_pci_addr;
- uint32_t __iomem *sbbr_l;
- uint32_t __iomem *sbbr_h;
-
- if (port->ip_hooks == &hooks_array[0]) {
- sbbr_l = &port->ip_serial->sbbr01_l;
- sbbr_h = &port->ip_serial->sbbr01_h;
- } else {
- sbbr_l = &port->ip_serial->sbbr23_l;
- sbbr_h = &port->ip_serial->sbbr23_h;
- }
-
- ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
- DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n",
- __func__, ring_pci_addr));
-
- writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h);
- writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l);
- }
-
- /* Set the receive timeout value to 10 msec */
- writel(IOC4_SRTR_HZ / 100, &port->ip_serial_regs->srtr);
-
- /* Set rx threshold, enable DMA */
- /* Set high water mark at 3/4 of full ring */
- port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Disable and clear all serial related interrupt bits */
- write_ireg(port->ip_ioc4_soft, hooks->intr_clear,
- IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
- port->ip_ienb &= ~hooks->intr_clear;
- writel(hooks->intr_clear, &port->ip_mem->sio_ir.raw);
- return 0;
-}
-
-/**
- * handle_dma_error_intr - service any pending DMA error interrupts for the
- * given port - 2nd level called via sd_intr
- * @arg: handler arg
- * @other_ir: ioc4regs
- */
-static void handle_dma_error_intr(void *arg, uint32_t other_ir)
-{
- struct ioc4_port *port = (struct ioc4_port *)arg;
- struct hooks *hooks = port->ip_hooks;
- unsigned long flags;
-
- spin_lock_irqsave(&port->ip_lock, flags);
-
- /* ACK the interrupt */
- writel(hooks->intr_dma_error, &port->ip_mem->other_ir.raw);
-
- if (readl(&port->ip_mem->pci_err_addr_l.raw) & IOC4_PCI_ERR_ADDR_VLD) {
- printk(KERN_ERR
- "PCI error address is 0x%llx, "
- "master is serial port %c %s\n",
- (((uint64_t)readl(&port->ip_mem->pci_err_addr_h)
- << 32)
- | readl(&port->ip_mem->pci_err_addr_l.raw))
- & IOC4_PCI_ERR_ADDR_ADDR_MSK, '1' +
- ((char)(readl(&port->ip_mem->pci_err_addr_l.raw) &
- IOC4_PCI_ERR_ADDR_MST_NUM_MSK) >> 1),
- (readl(&port->ip_mem->pci_err_addr_l.raw)
- & IOC4_PCI_ERR_ADDR_MST_TYP_MSK)
- ? "RX" : "TX");
-
- if (readl(&port->ip_mem->pci_err_addr_l.raw)
- & IOC4_PCI_ERR_ADDR_MUL_ERR) {
- printk(KERN_ERR
- "Multiple errors occurred\n");
- }
- }
- spin_unlock_irqrestore(&port->ip_lock, flags);
-
- /* Re-enable DMA error interrupts */
- write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error, IOC4_W_IES,
- IOC4_OTHER_INTR_TYPE);
-}
-
-/**
- * intr_connect - interrupt connect function
- * @soft: soft struct for this card
- * @type: interrupt type
- * @intrbits: bit pattern to set
- * @intr: handler function
- * @info: handler arg
- */
-static void
-intr_connect(struct ioc4_soft *soft, int type,
- uint32_t intrbits, ioc4_intr_func_f * intr, void *info)
-{
- int i;
- struct ioc4_intr_info *intr_ptr;
-
- BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
- || (type == IOC4_OTHER_INTR_TYPE)));
-
-	i = atomic_inc_return(&soft->is_intr_type[type].is_num_intrs) - 1;
- BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
-
- /* Save off the lower level interrupt handler */
- intr_ptr = &soft->is_intr_type[type].is_intr_info[i];
- intr_ptr->sd_bits = intrbits;
- intr_ptr->sd_intr = intr;
- intr_ptr->sd_info = info;
-}
-
-/**
- * ioc4_intr - Top level IOC4 interrupt handler.
- * @irq: irq value
- * @arg: handler arg
- */
-
-static irqreturn_t ioc4_intr(int irq, void *arg)
-{
- struct ioc4_soft *soft;
- uint32_t this_ir, this_mir;
- int xx, num_intrs = 0;
- int intr_type;
- int handled = 0;
- struct ioc4_intr_info *intr_info;
-
- soft = arg;
- for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
- num_intrs = (int)atomic_read(
- &soft->is_intr_type[intr_type].is_num_intrs);
-
- this_mir = this_ir = pending_intrs(soft, intr_type);
-
- /* Farm out the interrupt to the various drivers depending on
- * which interrupt bits are set.
- */
- for (xx = 0; xx < num_intrs; xx++) {
- intr_info = &soft->is_intr_type[intr_type].is_intr_info[xx];
- this_mir = this_ir & intr_info->sd_bits;
- if (this_mir) {
- /* Disable owned interrupts, call handler */
- handled++;
- write_ireg(soft, intr_info->sd_bits, IOC4_W_IEC,
- intr_type);
- intr_info->sd_intr(intr_info->sd_info, this_mir);
- this_ir &= ~this_mir;
- }
- }
- }
-#ifdef DEBUG_INTERRUPTS
- {
- struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
- unsigned long flag;
-
- spin_lock_irqsave(&soft->is_ir_lock, flag);
- printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x "
- "other_ir 0x%x other_ies 0x%x mask 0x%x\n",
- __func__, __LINE__,
- (void *)mem, readl(&mem->sio_ir.raw),
- readl(&mem->sio_ies.raw),
- readl(&mem->other_ir.raw),
- readl(&mem->other_ies.raw),
- IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
- spin_unlock_irqrestore(&soft->is_ir_lock, flag);
- }
-#endif
- return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/**
- * ioc4_attach_local - Device initialization.
- * Called at *_attach() time for each
- * IOC4 with serial ports in the system.
- * @idd: Master module data for this IOC4
- */
-static inline int ioc4_attach_local(struct ioc4_driver_data *idd)
-{
- struct ioc4_port *port;
- struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS];
- int port_number;
- uint16_t ioc4_revid_min = 62;
- uint16_t ioc4_revid;
- struct pci_dev *pdev = idd->idd_pdev;
- struct ioc4_control* control = idd->idd_serial_data;
- struct ioc4_soft *soft = control->ic_soft;
- void __iomem *ioc4_misc = idd->idd_misc_regs;
- void __iomem *ioc4_serial = soft->is_ioc4_serial_addr;
-
- /* IOC4 firmware must be at least rev 62 */
- pci_read_config_word(pdev, PCI_COMMAND_SPECIAL, &ioc4_revid);
-
- printk(KERN_INFO "IOC4 firmware revision %d\n", ioc4_revid);
- if (ioc4_revid < ioc4_revid_min) {
- printk(KERN_WARNING
- "IOC4 serial not supported on firmware rev %d, "
- "please upgrade to rev %d or higher\n",
- ioc4_revid, ioc4_revid_min);
- return -EPERM;
- }
- BUG_ON(ioc4_misc == NULL);
- BUG_ON(ioc4_serial == NULL);
-
- /* Create port structures for each port */
- for (port_number = 0; port_number < IOC4_NUM_SERIAL_PORTS;
- port_number++) {
- port = kzalloc(sizeof(struct ioc4_port), GFP_KERNEL);
- if (!port) {
- printk(KERN_WARNING
- "IOC4 serial memory not available for port\n");
- goto free;
- }
- spin_lock_init(&port->ip_lock);
-
- /* we need to remember the previous ones, to point back to
- * them farther down - setting up the ring buffers.
- */
- ports[port_number] = port;
-
- /* Allocate buffers and jumpstart the hardware. */
- control->ic_port[port_number].icp_port = port;
- port->ip_ioc4_soft = soft;
- port->ip_pdev = pdev;
- port->ip_ienb = 0;
- /* Use baud rate calculations based on detected PCI
- * bus speed. Simply test whether the PCI clock is
- * running closer to 66MHz or 33MHz.
- */
- if (idd->count_period/IOC4_EXTINT_COUNT_DIVISOR < 20) {
- port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_66;
- } else {
- port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_33;
- }
- port->ip_baud = 9600;
- port->ip_control = control;
- port->ip_mem = ioc4_misc;
- port->ip_serial = ioc4_serial;
-
- /* point to the right hook */
- port->ip_hooks = &hooks_array[port_number];
-
- /* Get direct hooks to the serial regs and uart regs
- * for this port
- */
- switch (port_number) {
- case 0:
- port->ip_serial_regs = &(port->ip_serial->port_0);
- port->ip_uart_regs = &(port->ip_serial->uart_0);
- break;
- case 1:
- port->ip_serial_regs = &(port->ip_serial->port_1);
- port->ip_uart_regs = &(port->ip_serial->uart_1);
- break;
- case 2:
- port->ip_serial_regs = &(port->ip_serial->port_2);
- port->ip_uart_regs = &(port->ip_serial->uart_2);
- break;
- default:
- case 3:
- port->ip_serial_regs = &(port->ip_serial->port_3);
- port->ip_uart_regs = &(port->ip_serial->uart_3);
- break;
- }
-
-		/* ring buffers are allocated one per pair of ports */
- if (port_number && (port_number & 1)) {
- /* odd use the evens buffer */
- port->ip_dma_ringbuf =
- ports[port_number - 1]->ip_dma_ringbuf;
- port->ip_cpu_ringbuf =
- ports[port_number - 1]->ip_cpu_ringbuf;
- port->ip_inring = RING(port, RX_1_OR_3);
- port->ip_outring = RING(port, TX_1_OR_3);
-
- } else {
- if (port->ip_dma_ringbuf == 0) {
- port->ip_cpu_ringbuf = pci_alloc_consistent
- (pdev, TOTAL_RING_BUF_SIZE,
- &port->ip_dma_ringbuf);
-
- }
- BUG_ON(!((((int64_t)port->ip_dma_ringbuf) &
- (TOTAL_RING_BUF_SIZE - 1)) == 0));
- DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p "
- "ip_dma_ringbuf 0x%p\n",
- __func__,
- (void *)port->ip_cpu_ringbuf,
- (void *)port->ip_dma_ringbuf));
- port->ip_inring = RING(port, RX_0_OR_2);
- port->ip_outring = RING(port, TX_0_OR_2);
- }
- DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p",
- __func__,
- port_number, (void *)port, (void *)control));
- DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
- (void *)port->ip_serial_regs,
- (void *)port->ip_uart_regs));
-
- /* Initialize the hardware for IOC4 */
- port_init(port);
-
- DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p "
- "outring 0x%p\n",
- __func__,
- port_number, (void *)port,
- (void *)port->ip_inring,
- (void *)port->ip_outring));
-
- /* Attach interrupt handlers */
- intr_connect(soft, IOC4_SIO_INTR_TYPE,
- GET_SIO_IR(port_number),
- handle_intr, port);
-
- intr_connect(soft, IOC4_OTHER_INTR_TYPE,
- GET_OTHER_IR(port_number),
- handle_dma_error_intr, port);
- }
- return 0;
-
-free:
- while (port_number)
- kfree(ports[--port_number]);
- return -ENOMEM;
-}
-
-/**
- * enable_intrs - enable interrupts
- * @port: port to enable
- * @mask: mask to use
- */
-static void enable_intrs(struct ioc4_port *port, uint32_t mask)
-{
- struct hooks *hooks = port->ip_hooks;
-
- if ((port->ip_ienb & mask) != mask) {
- write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IES,
- IOC4_SIO_INTR_TYPE);
- port->ip_ienb |= mask;
- }
-
- if (port->ip_ienb)
- write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
- IOC4_W_IES, IOC4_OTHER_INTR_TYPE);
-}
-
-/**
- * local_open - local open a port
- * @port: port to open
- */
-static inline int local_open(struct ioc4_port *port)
-{
- int spiniter = 0;
-
- port->ip_flags = PORT_ACTIVE;
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
- writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
-		while ((readl(&port->ip_serial_regs->sscr)
- & IOC4_SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER) {
- port->ip_flags = PORT_INACTIVE;
- return -1;
- }
- }
- }
-
- /* Reset the input fifo. If the uart received chars while the port
- * was closed and DMA is not enabled, the uart may have a bunch of
- * chars hanging around in its rx fifo which will not be discarded
- * by rclr in the upper layer. We must get rid of them here.
- */
- writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
- &port->ip_uart_regs->i4u_fcr);
-
- writeb(UART_LCR_WLEN8, &port->ip_uart_regs->i4u_lcr);
- /* UART_LCR_STOP == 1 stop */
-
- /* Re-enable DMA, set default threshold to intr whenever there is
- * data available.
- */
- port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
- port->ip_sscr |= 1; /* default threshold */
-
- /* Plug in the new sscr. This implicitly clears the DMA_PAUSE
- * flag if it was set above
- */
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- port->ip_tx_lowat = 1;
- return 0;
-}
-
-/**
- * set_rx_timeout - Set rx timeout and threshold values.
- * @port: port to use
- * @timeout: timeout value in ticks
- */
-static inline int set_rx_timeout(struct ioc4_port *port, int timeout)
-{
- int threshold;
-
- port->ip_rx_timeout = timeout;
-
- /* Timeout is in ticks. Let's figure out how many chars we
- * can receive at the current baud rate in that interval
- * and set the rx threshold to that amount. There are 4 chars
- * per ring entry, so we'll divide the number of chars that will
- * arrive in timeout by 4.
- * So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
- */
- threshold = timeout * port->ip_baud / 4000;
- if (threshold == 0)
- threshold = 1; /* otherwise we'll intr all the time! */
-
- if ((unsigned)threshold > (unsigned)IOC4_SSCR_RX_THRESHOLD)
- return 1;
-
- port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
- port->ip_sscr |= threshold;
-
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Now set the rx timeout to the given value
- * again timeout * IOC4_SRTR_HZ / HZ
- */
- timeout = timeout * IOC4_SRTR_HZ / 100;
- if (timeout > IOC4_SRTR_CNT)
- timeout = IOC4_SRTR_CNT;
-
- writel(timeout, &port->ip_serial_regs->srtr);
- return 0;
-}
-
-/**
- * config_port - config the hardware
- * @port: port to config
- * @baud: baud rate for the port
- * @byte_size: data size
- * @stop_bits: number of stop bits
- * @parenb: parity enable ?
- * @parodd: odd parity ?
- */
-static inline int
-config_port(struct ioc4_port *port,
- int baud, int byte_size, int stop_bits, int parenb, int parodd)
-{
- char lcr, sizebits;
- int spiniter = 0;
-
- DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n",
- __func__, baud, byte_size, stop_bits, parenb, parodd));
-
- if (set_baud(port, baud))
- return 1;
-
- switch (byte_size) {
- case 5:
- sizebits = UART_LCR_WLEN5;
- break;
- case 6:
- sizebits = UART_LCR_WLEN6;
- break;
- case 7:
- sizebits = UART_LCR_WLEN7;
- break;
- case 8:
- sizebits = UART_LCR_WLEN8;
- break;
- default:
- return 1;
- }
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
- writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
-		while ((readl(&port->ip_serial_regs->sscr)
- & IOC4_SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER)
- return -1;
- }
- }
-
- /* Clear relevant fields in lcr */
- lcr = readb(&port->ip_uart_regs->i4u_lcr);
- lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
- UART_LCR_PARITY | LCR_MASK_STOP_BITS);
-
- /* Set byte size in lcr */
- lcr |= sizebits;
-
- /* Set parity */
- if (parenb) {
- lcr |= UART_LCR_PARITY;
- if (!parodd)
- lcr |= UART_LCR_EPAR;
- }
-
- /* Set stop bits */
- if (stop_bits)
-		lcr |= UART_LCR_STOP;	/* 2 stop bits */
-
- writeb(lcr, &port->ip_uart_regs->i4u_lcr);
-
- /* Re-enable the DMA interface if necessary */
- if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- port->ip_baud = baud;
-
- /* When we get within this number of ring entries of filling the
- * entire ring on tx, place an EXPLICIT intr to generate a lowat
- * notification when output has drained.
- */
- port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
- if (port->ip_tx_lowat == 0)
- port->ip_tx_lowat = 1;
-
- set_rx_timeout(port, 2);
-
- return 0;
-}
-
-/**
- * do_write - Write bytes to the port. Returns the number of bytes
- * actually written. Called from transmit_chars
- * @port: port to use
- * @buf: the stuff to write
- * @len: how many bytes in 'buf'
- */
-static inline int do_write(struct ioc4_port *port, char *buf, int len)
-{
- int prod_ptr, cons_ptr, total = 0;
- struct ring *outring;
- struct ring_entry *entry;
- struct hooks *hooks = port->ip_hooks;
-
- BUG_ON(!(len >= 0));
-
- prod_ptr = port->ip_tx_prod;
- cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
- outring = port->ip_outring;
-
- /* Maintain a 1-entry red-zone. The ring buffer is full when
- * (cons - prod) % ring_size is 1. Rather than do this subtraction
- * in the body of the loop, I'll do it now.
- */
- cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;
-
- /* Stuff the bytes into the output */
- while ((prod_ptr != cons_ptr) && (len > 0)) {
- int xx;
-
- /* Get 4 bytes (one ring entry) at a time */
- entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);
-
- /* Invalidate all entries */
- entry->ring_allsc = 0;
-
- /* Copy in some bytes */
- for (xx = 0; (xx < 4) && (len > 0); xx++) {
- entry->ring_data[xx] = *buf++;
- entry->ring_sc[xx] = IOC4_TXCB_VALID;
- len--;
- total++;
- }
-
- /* If we are within some small threshold of filling up the
- * entire ring buffer, we must place an EXPLICIT intr here
- * to generate a lowat interrupt in case we subsequently
- * really do fill up the ring and the caller goes to sleep.
- * No need to place more than one though.
- */
- if (!(port->ip_flags & LOWAT_WRITTEN) &&
- ((cons_ptr - prod_ptr) & PROD_CONS_MASK)
- <= port->ip_tx_lowat
- * (int)sizeof(struct ring_entry)) {
- port->ip_flags |= LOWAT_WRITTEN;
- entry->ring_sc[0] |= IOC4_TXCB_INT_WHEN_DONE;
- }
-
- /* Go on to next entry */
- prod_ptr += sizeof(struct ring_entry);
- prod_ptr &= PROD_CONS_MASK;
- }
-
- /* If we sent something, start DMA if necessary */
- if (total > 0 && !(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
- port->ip_sscr |= IOC4_SSCR_DMA_EN;
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
-
- /* Store the new producer pointer. If tx is disabled, we stuff the
- * data into the ring buffer, but we don't actually start tx.
- */
- if (!uart_tx_stopped(port->ip_port)) {
- writel(prod_ptr, &port->ip_serial_regs->stpir);
-
- /* If we are now transmitting, enable tx_mt interrupt so we
- * can disable DMA if necessary when the tx finishes.
- */
- if (total > 0)
- enable_intrs(port, hooks->intr_tx_mt);
- }
- port->ip_tx_prod = prod_ptr;
- return total;
-}
-
-/**
- * disable_intrs - disable interrupts
- * @port: port to enable
- * @mask: mask to use
- */
-static void disable_intrs(struct ioc4_port *port, uint32_t mask)
-{
- struct hooks *hooks = port->ip_hooks;
-
- if (port->ip_ienb & mask) {
- write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IEC,
- IOC4_SIO_INTR_TYPE);
- port->ip_ienb &= ~mask;
- }
-
- if (!port->ip_ienb)
- write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
- IOC4_W_IEC, IOC4_OTHER_INTR_TYPE);
-}
-
-/**
- * set_notification - Modify event notification
- * @port: port to use
- * @mask: events mask
- * @set_on: set ?
- */
-static int set_notification(struct ioc4_port *port, int mask, int set_on)
-{
- struct hooks *hooks = port->ip_hooks;
- uint32_t intrbits, sscrbits;
-
- BUG_ON(!mask);
-
- intrbits = sscrbits = 0;
-
- if (mask & N_DATA_READY)
- intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
- if (mask & N_OUTPUT_LOWAT)
- intrbits |= hooks->intr_tx_explicit;
- if (mask & N_DDCD) {
- intrbits |= hooks->intr_delta_dcd;
- sscrbits |= IOC4_SSCR_RX_RING_DCD;
- }
- if (mask & N_DCTS)
- intrbits |= hooks->intr_delta_cts;
-
- if (set_on) {
- enable_intrs(port, intrbits);
- port->ip_notify |= mask;
- port->ip_sscr |= sscrbits;
- } else {
- disable_intrs(port, intrbits);
- port->ip_notify &= ~mask;
- port->ip_sscr &= ~sscrbits;
- }
-
- /* We require DMA if either DATA_READY or DDCD notification is
- * currently requested. If neither of these is requested and
- * there is currently no tx in progress, DMA may be disabled.
- */
- if (port->ip_notify & (N_DATA_READY | N_DDCD))
- port->ip_sscr |= IOC4_SSCR_DMA_EN;
- else if (!(port->ip_ienb & hooks->intr_tx_mt))
- port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
-
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- return 0;
-}
-
-/**
- * set_mcr - set the modem control register (MCR)
- * @the_port: port to use
- * @mask1: mcr mask
- * @mask2: shadow mask
- */
-static inline int set_mcr(struct uart_port *the_port,
- int mask1, int mask2)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- uint32_t shadow;
- int spiniter = 0;
- char mcr;
-
- if (!port)
- return -1;
-
- /* Pause the DMA interface if necessary */
- if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
- writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
- &port->ip_serial_regs->sscr);
- while ((readl(&port->ip_serial_regs->sscr)
- & IOC4_SSCR_PAUSE_STATE) == 0) {
- spiniter++;
- if (spiniter > MAXITER)
- return -1;
- }
- }
- shadow = readl(&port->ip_serial_regs->shadow);
- mcr = (shadow & 0xff000000) >> 24;
-
- /* Set new value */
- mcr |= mask1;
- shadow |= mask2;
-
- writeb(mcr, &port->ip_uart_regs->i4u_mcr);
- writel(shadow, &port->ip_serial_regs->shadow);
-
- /* Re-enable the DMA interface if necessary */
- if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- return 0;
-}
-
-/**
- * ioc4_set_proto - set the protocol for the port
- * @port: port to use
- * @proto: protocol to use
- */
-static int ioc4_set_proto(struct ioc4_port *port, int proto)
-{
- struct hooks *hooks = port->ip_hooks;
-
- switch (proto) {
- case PROTO_RS232:
- /* Clear the appropriate GIO pin */
- writel(0, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
- break;
-
- case PROTO_RS422:
- /* Set the appropriate GIO pin */
- writel(1, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
- break;
-
- default:
- return 1;
- }
- return 0;
-}
-
-/**
- * transmit_chars - upper level write, called with ip_lock
- * @the_port: port to write
- */
-static void transmit_chars(struct uart_port *the_port)
-{
- int xmit_count, tail, head;
- int result;
- char *start;
- struct tty_struct *tty;
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- struct uart_state *state;
-
- if (!the_port)
- return;
- if (!port)
- return;
-
- state = the_port->state;
- tty = state->port.tty;
-
- if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) {
- /* Nothing to do or hw stopped */
- set_notification(port, N_ALL_OUTPUT, 0);
- return;
- }
-
- head = state->xmit.head;
- tail = state->xmit.tail;
- start = (char *)&state->xmit.buf[tail];
-
- /* write out all the data or until the end of the buffer */
- xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail);
- if (xmit_count > 0) {
- result = do_write(port, start, xmit_count);
- if (result > 0) {
-			/* bookkeeping */
- xmit_count -= result;
- the_port->icount.tx += result;
- /* advance the pointers */
- tail += result;
- tail &= UART_XMIT_SIZE - 1;
- state->xmit.tail = tail;
- start = (char *)&state->xmit.buf[tail];
- }
- }
- if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(the_port);
-
- if (uart_circ_empty(&state->xmit)) {
- set_notification(port, N_OUTPUT_LOWAT, 0);
- } else {
- set_notification(port, N_OUTPUT_LOWAT, 1);
- }
-}
-
-/**
- * ioc4_change_speed - change the speed of the port
- * @the_port: port to change
- * @new_termios: new termios settings
- * @old_termios: old termios settings
- */
-static void
-ioc4_change_speed(struct uart_port *the_port,
- struct ktermios *new_termios, struct ktermios *old_termios)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- int baud, bits;
- unsigned cflag, iflag;
- int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
- struct uart_state *state = the_port->state;
-
- cflag = new_termios->c_cflag;
- iflag = new_termios->c_iflag;
-
- switch (cflag & CSIZE) {
- case CS5:
- new_data = 5;
- bits = 7;
- break;
- case CS6:
- new_data = 6;
- bits = 8;
- break;
- case CS7:
- new_data = 7;
- bits = 9;
- break;
- case CS8:
- new_data = 8;
- bits = 10;
- break;
- default:
-		/* because we always need a default */
- new_data = 5;
- bits = 7;
- break;
- }
- if (cflag & CSTOPB) {
- bits++;
- new_stop = 1;
- }
- if (cflag & PARENB) {
- bits++;
- new_parity_enable = 1;
- if (cflag & PARODD)
- new_parity = 1;
- }
- baud = uart_get_baud_rate(the_port, new_termios, old_termios,
- MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
- DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud));
-
- /* default is 9600 */
- if (!baud)
- baud = 9600;
-
- if (!the_port->fifosize)
- the_port->fifosize = IOC4_FIFO_CHARS;
- the_port->timeout = ((the_port->fifosize * HZ * bits) / (baud / 10));
- the_port->timeout += HZ / 50; /* Add .02 seconds of slop */
-
- the_port->ignore_status_mask = N_ALL_INPUT;
-
- state->port.low_latency = 1;
-
- if (iflag & IGNPAR)
- the_port->ignore_status_mask &= ~(N_PARITY_ERROR
- | N_FRAMING_ERROR);
- if (iflag & IGNBRK) {
- the_port->ignore_status_mask &= ~N_BREAK;
- if (iflag & IGNPAR)
- the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
- }
- if (!(cflag & CREAD)) {
- /* ignore everything */
- the_port->ignore_status_mask &= ~N_DATA_READY;
- }
-
- if (cflag & CRTSCTS) {
- port->ip_sscr |= IOC4_SSCR_HFC_EN;
- }
- else {
- port->ip_sscr &= ~IOC4_SSCR_HFC_EN;
- }
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
-
- /* Set the configuration and proper notification call */
- DPRINT_CONFIG(("%s : port 0x%p cflag 0%o "
- "config_port(baud %d data %d stop %d p enable %d parity %d),"
- " notification 0x%x\n",
- __func__, (void *)port, cflag, baud, new_data, new_stop,
- new_parity_enable, new_parity, the_port->ignore_status_mask));
-
- if ((config_port(port, baud, /* baud */
- new_data, /* byte size */
- new_stop, /* stop bits */
- new_parity_enable, /* set parity */
- new_parity)) >= 0) { /* parity 1==odd */
- set_notification(port, the_port->ignore_status_mask, 1);
- }
-}
-
-/**
- * ic4_startup_local - Start up the serial port - returns >= 0 if no errors
- * @the_port: Port to operate on
- */
-static inline int ic4_startup_local(struct uart_port *the_port)
-{
- struct ioc4_port *port;
- struct uart_state *state;
-
- if (!the_port)
- return -1;
-
- port = get_ioc4_port(the_port, 0);
- if (!port)
- return -1;
-
- state = the_port->state;
-
- local_open(port);
-
- /* set the protocol - mapbase has the port type */
- ioc4_set_proto(port, the_port->mapbase);
-
- /* set the speed of the serial port */
- ioc4_change_speed(the_port, &state->port.tty->termios,
- (struct ktermios *)0);
-
- return 0;
-}
-
-/*
- * ioc4_cb_output_lowat - called when the output low water mark is hit
- * @the_port: port to output
- */
-static void ioc4_cb_output_lowat(struct uart_port *the_port)
-{
- unsigned long pflags;
-
- /* ip_lock is set on the call here */
- if (the_port) {
- spin_lock_irqsave(&the_port->lock, pflags);
- transmit_chars(the_port);
- spin_unlock_irqrestore(&the_port->lock, pflags);
- }
-}
-
-/**
- * handle_intr - service any interrupts for the given port - 2nd level
- * called via sd_intr
- * @arg: handler arg
- * @sio_ir: ioc4regs
- */
-static void handle_intr(void *arg, uint32_t sio_ir)
-{
- struct ioc4_port *port = (struct ioc4_port *)arg;
- struct hooks *hooks = port->ip_hooks;
- unsigned int rx_high_rd_aborted = 0;
- unsigned long flags;
- struct uart_port *the_port;
- int loop_counter;
-
-	/* Possible race condition here: the tx_mt interrupt bit may be
-	 * cleared without the intervention of the interrupt handler,
-	 * e.g. by a write.  If the top level interrupt handler reads
-	 * tx_mt, then some other processor does a write (starting up
-	 * output), and then we come in here, see the tx_mt and stop DMA,
-	 * the output started by the other processor will hang.  Thus we
-	 * can only rely on tx_mt being legitimate if it is read while
-	 * the port lock is held.  Therefore this bit must be ignored in
-	 * the interrupt mask passed in, since that was read by the top
-	 * level handler without holding the port lock.  Clear it for now
-	 * and reload it later, once we hold the port lock.
-	 */
- sio_ir &= ~(hooks->intr_tx_mt);
-
- spin_lock_irqsave(&port->ip_lock, flags);
-
- loop_counter = MAXITER; /* to avoid hangs */
-
- do {
- uint32_t shadow;
-
- if ( loop_counter-- <= 0 ) {
- printk(KERN_WARNING "IOC4 serial: "
- "possible hang condition/"
- "port stuck on interrupt.\n");
- break;
- }
-
- /* Handle a DCD change */
- if (sio_ir & hooks->intr_delta_dcd) {
- /* ACK the interrupt */
- writel(hooks->intr_delta_dcd,
- &port->ip_mem->sio_ir.raw);
-
- shadow = readl(&port->ip_serial_regs->shadow);
-
- if ((port->ip_notify & N_DDCD)
- && (shadow & IOC4_SHADOW_DCD)
- && (port->ip_port)) {
- the_port = port->ip_port;
- the_port->icount.dcd = 1;
- wake_up_interruptible
- (&the_port->state->port.delta_msr_wait);
- } else if ((port->ip_notify & N_DDCD)
- && !(shadow & IOC4_SHADOW_DCD)) {
- /* Flag delta DCD/no DCD */
- port->ip_flags |= DCD_ON;
- }
- }
-
- /* Handle a CTS change */
- if (sio_ir & hooks->intr_delta_cts) {
- /* ACK the interrupt */
- writel(hooks->intr_delta_cts,
- &port->ip_mem->sio_ir.raw);
-
- shadow = readl(&port->ip_serial_regs->shadow);
-
- if ((port->ip_notify & N_DCTS)
- && (port->ip_port)) {
- the_port = port->ip_port;
- the_port->icount.cts =
- (shadow & IOC4_SHADOW_CTS) ? 1 : 0;
- wake_up_interruptible
- (&the_port->state->port.delta_msr_wait);
- }
- }
-
- /* rx timeout interrupt. Must be some data available. Put this
- * before the check for rx_high since servicing this condition
- * may cause that condition to clear.
- */
- if (sio_ir & hooks->intr_rx_timer) {
- /* ACK the interrupt */
- writel(hooks->intr_rx_timer,
- &port->ip_mem->sio_ir.raw);
-
- if ((port->ip_notify & N_DATA_READY)
- && (port->ip_port)) {
- /* ip_lock is set on call here */
- receive_chars(port->ip_port);
- }
- }
-
- /* rx high interrupt. Must be after rx_timer. */
- else if (sio_ir & hooks->intr_rx_high) {
- /* Data available, notify upper layer */
- if ((port->ip_notify & N_DATA_READY)
- && port->ip_port) {
- /* ip_lock is set on call here */
- receive_chars(port->ip_port);
- }
-
- /* We can't ACK this interrupt. If receive_chars didn't
- * cause the condition to clear, we'll have to disable
- * the interrupt until the data is drained.
- * If the read was aborted, don't disable the interrupt
- * as this may cause us to hang indefinitely. An
- * aborted read generally means that this interrupt
- * hasn't been delivered to the cpu yet anyway, even
- * though we see it as asserted when we read the sio_ir.
- */
- if ((sio_ir = PENDING(port)) & hooks->intr_rx_high) {
- if ((port->ip_flags & READ_ABORTED) == 0) {
- port->ip_ienb &= ~hooks->intr_rx_high;
- port->ip_flags |= INPUT_HIGH;
- } else {
- rx_high_rd_aborted++;
- }
- }
- }
-
- /* We got a low water interrupt: notify upper layer to
- * send more data. Must come before tx_mt since servicing
- * this condition may cause that condition to clear.
- */
- if (sio_ir & hooks->intr_tx_explicit) {
- port->ip_flags &= ~LOWAT_WRITTEN;
-
- /* ACK the interrupt */
- writel(hooks->intr_tx_explicit,
- &port->ip_mem->sio_ir.raw);
-
- if (port->ip_notify & N_OUTPUT_LOWAT)
- ioc4_cb_output_lowat(port->ip_port);
- }
-
- /* Handle tx_mt. Must come after tx_explicit. */
- else if (sio_ir & hooks->intr_tx_mt) {
- /* If we are expecting a lowat notification
- * and we get to this point it probably means that for
- * some reason the tx_explicit didn't work as expected
- * (that can legitimately happen if the output buffer is
- * filled up in just the right way).
- * So send the notification now.
- */
- if (port->ip_notify & N_OUTPUT_LOWAT) {
- ioc4_cb_output_lowat(port->ip_port);
-
- /* We need to reload the sio_ir since the lowat
- * call may have caused another write to occur,
- * clearing the tx_mt condition.
- */
- sio_ir = PENDING(port);
- }
-
- /* If the tx_mt condition still persists even after the
- * lowat call, we've got some work to do.
- */
- if (sio_ir & hooks->intr_tx_mt) {
-
- /* If we are not currently expecting DMA input,
- * and the transmitter has just gone idle,
- * there is no longer any reason for DMA, so
- * disable it.
- */
- if (!(port->ip_notify
- & (N_DATA_READY | N_DDCD))) {
- BUG_ON(!(port->ip_sscr
- & IOC4_SSCR_DMA_EN));
- port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
- writel(port->ip_sscr,
- &port->ip_serial_regs->sscr);
- }
-
- /* Prevent infinite tx_mt interrupt */
- port->ip_ienb &= ~hooks->intr_tx_mt;
- }
- }
- sio_ir = PENDING(port);
-
- /* if the read was aborted and only hooks->intr_rx_high,
- * clear hooks->intr_rx_high, so we do not loop forever.
- */
-
- if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
- sio_ir &= ~hooks->intr_rx_high;
- }
- } while (sio_ir & hooks->intr_all);
-
- spin_unlock_irqrestore(&port->ip_lock, flags);
-
- /* Re-enable interrupts before returning from interrupt handler.
- * Getting interrupted here is okay. It'll just v() our semaphore, and
- * we'll come through the loop again.
- */
-
- write_ireg(port->ip_ioc4_soft, port->ip_ienb, IOC4_W_IES,
- IOC4_SIO_INTR_TYPE);
-}
-
-/*
- * ioc4_cb_post_ncs - called for some basic errors
- * @port: port to use
- * @ncs: event
- */
-static void ioc4_cb_post_ncs(struct uart_port *the_port, int ncs)
-{
- struct uart_icount *icount;
-
- icount = &the_port->icount;
-
- if (ncs & NCS_BREAK)
- icount->brk++;
- if (ncs & NCS_FRAMING)
- icount->frame++;
- if (ncs & NCS_OVERRUN)
- icount->overrun++;
- if (ncs & NCS_PARITY)
- icount->parity++;
-}
-
-/**
- * do_read - Read in bytes from the port. Return the number of bytes
- * actually read.
- * @the_port: port to use
- * @buf: place to put the stuff we read
- * @len: how big 'buf' is
- */
-
-static inline int do_read(struct uart_port *the_port, unsigned char *buf,
- int len)
-{
- int prod_ptr, cons_ptr, total;
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- struct ring *inring;
- struct ring_entry *entry;
- struct hooks *hooks;
- int byte_num;
- char *sc;
- int loop_counter;
-
- BUG_ON(!(len >= 0));
- BUG_ON(!port);
- hooks = port->ip_hooks;
-
- /* There is a nasty timing issue in the IOC4. When the rx_timer
- * expires or the rx_high condition arises, we take an interrupt.
- * At some point while servicing the interrupt, we read bytes from
- * the ring buffer and re-arm the rx_timer. However the rx_timer is
- * not started until the first byte is received *after* it is armed,
- * and any bytes pending in the rx construction buffers are not drained
- * to memory until either there are 4 bytes available or the rx_timer
- * expires. This leads to a potential situation where data is left
- * in the construction buffers forever - 1 to 3 bytes were received
- * after the interrupt was generated but before the rx_timer was
- * re-armed. At that point as long as no subsequent bytes are received
- * the timer will never be started and the bytes will remain in the
- * construction buffer forever. The solution is to execute a DRAIN
- * command after rearming the timer. This way any bytes received before
- * the DRAIN will be drained to memory, and any bytes received after
- * the DRAIN will start the TIMER and be drained when it expires.
- * Luckily, this only needs to be done when the DMA buffer is empty
- * since there is no requirement that this function return all
- * available data as long as it returns some.
- */
- /* Re-arm the timer */
- writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
-
- prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
- cons_ptr = port->ip_rx_cons;
-
- if (prod_ptr == cons_ptr) {
- int reset_dma = 0;
-
- /* Input buffer appears empty, do a flush. */
-
- /* DMA must be enabled for this to work. */
- if (!(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
- port->ip_sscr |= IOC4_SSCR_DMA_EN;
- reset_dma = 1;
- }
-
- /* Potential race condition: we must reload the srpir after
- * issuing the drain command, otherwise we could think the rx
- * buffer is empty, then take a very long interrupt, and when
- * we come back it's full and we wait forever for the drain to
- * complete.
- */
- writel(port->ip_sscr | IOC4_SSCR_RX_DRAIN,
- &port->ip_serial_regs->sscr);
- prod_ptr = readl(&port->ip_serial_regs->srpir)
- & PROD_CONS_MASK;
-
- /* We must not wait for the DRAIN to complete unless there are
- * at least 8 bytes (2 ring entries) available to receive the
- * data otherwise the DRAIN will never complete and we'll
- * deadlock here.
- * In fact, to make things easier, I'll just ignore the flush if
- * there is any data at all now available.
- */
- if (prod_ptr == cons_ptr) {
- loop_counter = 0;
- while (readl(&port->ip_serial_regs->sscr) &
- IOC4_SSCR_RX_DRAIN) {
- loop_counter++;
- if (loop_counter > MAXITER)
- return -1;
- }
-
- /* SIGH. We have to reload the prod_ptr *again* since
- * the drain may have caused it to change
- */
- prod_ptr = readl(&port->ip_serial_regs->srpir)
- & PROD_CONS_MASK;
- }
- if (reset_dma) {
- port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
- writel(port->ip_sscr, &port->ip_serial_regs->sscr);
- }
- }
- inring = port->ip_inring;
- port->ip_flags &= ~READ_ABORTED;
-
- total = 0;
- loop_counter = 0xfffff; /* to avoid hangs */
-
- /* Grab bytes from the hardware */
- while ((prod_ptr != cons_ptr) && (len > 0)) {
- entry = (struct ring_entry *)((caddr_t)inring + cons_ptr);
-
- if ( loop_counter-- <= 0 ) {
- printk(KERN_WARNING "IOC4 serial: "
- "possible hang condition/"
- "port stuck on read.\n");
- break;
- }
-
- /* According to the producer pointer, this ring entry
- * must contain some data. But if the PIO happened faster
- * than the DMA, the data may not be available yet, so let's
- * wait until it arrives.
- */
- if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
- /* Indicate the read is aborted so we don't disable
- * the interrupt thinking that the consumer is
- * congested.
- */
- port->ip_flags |= READ_ABORTED;
- len = 0;
- break;
- }
-
- /* Load the bytes/status out of the ring entry */
- for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
- sc = &(entry->ring_sc[byte_num]);
-
- /* Check for change in modem state or overrun */
- if ((*sc & IOC4_RXSB_MODEM_VALID)
- && (port->ip_notify & N_DDCD)) {
- /* Notify upper layer if DCD dropped */
-
- if ((port->ip_flags & DCD_ON)
- && !(*sc & IOC4_RXSB_DCD)) {
-
- /* If we have already copied some data,
- * return it. We'll pick up the carrier
- * drop on the next pass. That way we
- * don't throw away the data that has
- * already been copied back to
- * the caller's buffer.
- */
- if (total > 0) {
- len = 0;
- break;
- }
- port->ip_flags &= ~DCD_ON;
-
- /* Turn off this notification so the
- * carrier drop protocol won't see it
- * again when it does a read.
- */
- *sc &= ~IOC4_RXSB_MODEM_VALID;
-
- /* To keep things consistent, we need
- * to update the consumer pointer so
- * the next reader won't come in and
- * try to read the same ring entries
- * again. This must be done here before
- * the dcd change.
- */
-
- if ((entry->ring_allsc & RING_ANY_VALID)
- == 0) {
- cons_ptr += (int)sizeof
- (struct ring_entry);
- cons_ptr &= PROD_CONS_MASK;
- }
- writel(cons_ptr,
- &port->ip_serial_regs->srcir);
- port->ip_rx_cons = cons_ptr;
-
- /* Notify upper layer of carrier drop */
- if ((port->ip_notify & N_DDCD)
- && port->ip_port) {
- the_port->icount.dcd = 0;
- wake_up_interruptible
- (&the_port->state->
- port.delta_msr_wait);
- }
-
- /* If we had any data to return, we
- * would have returned it above.
- */
- return 0;
- }
- }
- if (*sc & IOC4_RXSB_MODEM_VALID) {
- /* Notify that an input overrun occurred */
- if ((*sc & IOC4_RXSB_OVERRUN)
- && (port->ip_notify & N_OVERRUN_ERROR)) {
- ioc4_cb_post_ncs(the_port, NCS_OVERRUN);
- }
- /* Don't look at this byte again */
- *sc &= ~IOC4_RXSB_MODEM_VALID;
- }
-
- /* Check for valid data or RX errors */
- if ((*sc & IOC4_RXSB_DATA_VALID) &&
- ((*sc & (IOC4_RXSB_PAR_ERR
- | IOC4_RXSB_FRAME_ERR
- | IOC4_RXSB_BREAK))
- && (port->ip_notify & (N_PARITY_ERROR
- | N_FRAMING_ERROR
- | N_BREAK)))) {
- /* There is an error condition on the next byte.
- * If we have already transferred some bytes,
- * we'll stop here. Otherwise if this is the
- * first byte to be read, we'll just transfer
- * it alone after notifying the
- * upper layer of its status.
- */
- if (total > 0) {
- len = 0;
- break;
- } else {
- if ((*sc & IOC4_RXSB_PAR_ERR) &&
- (port->ip_notify & N_PARITY_ERROR)) {
- ioc4_cb_post_ncs(the_port,
- NCS_PARITY);
- }
- if ((*sc & IOC4_RXSB_FRAME_ERR) &&
- (port->ip_notify & N_FRAMING_ERROR)){
- ioc4_cb_post_ncs(the_port,
- NCS_FRAMING);
- }
- if ((*sc & IOC4_RXSB_BREAK)
- && (port->ip_notify & N_BREAK)) {
- ioc4_cb_post_ncs
- (the_port,
- NCS_BREAK);
- }
- len = 1;
- }
- }
- if (*sc & IOC4_RXSB_DATA_VALID) {
- *sc &= ~IOC4_RXSB_DATA_VALID;
- *buf = entry->ring_data[byte_num];
- buf++;
- len--;
- total++;
- }
- }
-
- /* If we used up this entry entirely, go on to the next one,
- * otherwise we must have run out of buffer space, so
- * leave the consumer pointer here for the next read in case
- * there are still unread bytes in this entry.
- */
- if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
- cons_ptr += (int)sizeof(struct ring_entry);
- cons_ptr &= PROD_CONS_MASK;
- }
- }
-
- /* Update consumer pointer and re-arm rx timer interrupt */
- writel(cons_ptr, &port->ip_serial_regs->srcir);
- port->ip_rx_cons = cons_ptr;
-
- /* If we have now dipped below the rx high water mark and we have
- * rx_high interrupt turned off, we can now turn it back on again.
- */
- if ((port->ip_flags & INPUT_HIGH) && (((prod_ptr - cons_ptr)
- & PROD_CONS_MASK) < ((port->ip_sscr &
- IOC4_SSCR_RX_THRESHOLD)
- << IOC4_PROD_CONS_PTR_OFF))) {
- port->ip_flags &= ~INPUT_HIGH;
- enable_intrs(port, hooks->intr_rx_high);
- }
- return total;
-}
-
-/**
- * receive_chars - upper level read. Called with ip_lock.
- * @the_port: port to read from
- */
-static void receive_chars(struct uart_port *the_port)
-{
- unsigned char ch[IOC4_MAX_CHARS];
- int read_count, request_count = IOC4_MAX_CHARS;
- struct uart_icount *icount;
- struct uart_state *state = the_port->state;
- unsigned long pflags;
-
- /* Make sure all the pointers are "good" ones */
- if (!state)
- return;
-
- spin_lock_irqsave(&the_port->lock, pflags);
-
- request_count = tty_buffer_request_room(&state->port, IOC4_MAX_CHARS);
-
- if (request_count > 0) {
- icount = &the_port->icount;
- read_count = do_read(the_port, ch, request_count);
- if (read_count > 0) {
- tty_insert_flip_string(&state->port, ch, read_count);
- icount->rx += read_count;
- }
- }
-
- spin_unlock_irqrestore(&the_port->lock, pflags);
-
- tty_flip_buffer_push(&state->port);
-}
-
-/**
- * ic4_type - return a description of this port's type
- * @the_port: Port to query; mapbase selects rs232 vs rs422
- *
- */
-static const char *ic4_type(struct uart_port *the_port)
-{
- if (the_port->mapbase == PROTO_RS232)
- return "SGI IOC4 Serial [rs232]";
- else
- return "SGI IOC4 Serial [rs422]";
-}
-
-/**
- * ic4_tx_empty - Is the transmitter empty?
- * @port: Port to operate on
- *
- */
-static unsigned int ic4_tx_empty(struct uart_port *the_port)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- unsigned int ret = 0;
-
- if (port_is_active(port, the_port)) {
- if (readl(&port->ip_serial_regs->shadow) & IOC4_SHADOW_TEMT)
- ret = TIOCSER_TEMT;
- }
- return ret;
-}
-
-/**
- * ic4_stop_tx - stop the transmitter
- * @port: Port to operate on
- *
- */
-static void ic4_stop_tx(struct uart_port *the_port)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
-
- if (port_is_active(port, the_port))
- set_notification(port, N_OUTPUT_LOWAT, 0);
-}
-
-/**
- * null_void_function - placeholder for port operations that need no action
- * @the_port: Port to operate on (unused)
- *
- */
-static void null_void_function(struct uart_port *the_port)
-{
-}
-
-/**
- * ic4_shutdown - shut down the port - disable notifications and mark inactive
- * @port: Port to shut down
- *
- */
-static void ic4_shutdown(struct uart_port *the_port)
-{
- unsigned long port_flags;
- struct ioc4_port *port;
- struct uart_state *state;
-
- port = get_ioc4_port(the_port, 0);
- if (!port)
- return;
-
- state = the_port->state;
- port->ip_port = NULL;
-
- wake_up_interruptible(&state->port.delta_msr_wait);
-
- if (state->port.tty)
- set_bit(TTY_IO_ERROR, &state->port.tty->flags);
-
- spin_lock_irqsave(&the_port->lock, port_flags);
- set_notification(port, N_ALL, 0);
- port->ip_flags = PORT_INACTIVE;
- spin_unlock_irqrestore(&the_port->lock, port_flags);
-}
-
-/**
- * ic4_set_mctrl - set control lines (dtr, rts, etc)
- * @port: Port to operate on
- * @mctrl: Lines to set/unset
- *
- */
-static void ic4_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
-{
- unsigned char mcr = 0;
- struct ioc4_port *port;
-
- port = get_ioc4_port(the_port, 0);
- if (!port_is_active(port, the_port))
- return;
-
- if (mctrl & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (mctrl & TIOCM_DTR)
- mcr |= UART_MCR_DTR;
- if (mctrl & TIOCM_OUT1)
- mcr |= UART_MCR_OUT1;
- if (mctrl & TIOCM_OUT2)
- mcr |= UART_MCR_OUT2;
- if (mctrl & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
-
- set_mcr(the_port, mcr, IOC4_SHADOW_DTR);
-}
-
-/**
- * ic4_get_mctrl - get control line info
- * @port: port to operate on
- *
- */
-static unsigned int ic4_get_mctrl(struct uart_port *the_port)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
- uint32_t shadow;
- unsigned int ret = 0;
-
- if (!port_is_active(port, the_port))
- return 0;
-
- shadow = readl(&port->ip_serial_regs->shadow);
- if (shadow & IOC4_SHADOW_DCD)
- ret |= TIOCM_CAR;
- if (shadow & IOC4_SHADOW_DR)
- ret |= TIOCM_DSR;
- if (shadow & IOC4_SHADOW_CTS)
- ret |= TIOCM_CTS;
- return ret;
-}
-
-/**
- * ic4_start_tx - Start transmitter, flush any output
- * @port: Port to operate on
- *
- */
-static void ic4_start_tx(struct uart_port *the_port)
-{
- struct ioc4_port *port = get_ioc4_port(the_port, 0);
-
- if (port_is_active(port, the_port)) {
- set_notification(port, N_OUTPUT_LOWAT, 1);
- enable_intrs(port, port->ip_hooks->intr_tx_mt);
- }
-}
-
-/**
- * ic4_break_ctl - handle breaks
- * @port: Port to operate on
- * @break_state: Break state
- *
- */
-static void ic4_break_ctl(struct uart_port *the_port, int break_state)
-{
-}
-
-/**
- * ic4_startup - Start up the serial port
- * @port: Port to operate on
- *
- */
-static int ic4_startup(struct uart_port *the_port)
-{
- int retval;
- struct ioc4_port *port;
- struct ioc4_control *control;
- struct uart_state *state;
- unsigned long port_flags;
-
- if (!the_port)
- return -ENODEV;
- port = get_ioc4_port(the_port, 1);
- if (!port)
- return -ENODEV;
- state = the_port->state;
-
- control = port->ip_control;
- if (!control) {
- port->ip_port = NULL;
- return -ENODEV;
- }
-
- /* Start up the serial port */
- spin_lock_irqsave(&the_port->lock, port_flags);
- retval = ic4_startup_local(the_port);
- spin_unlock_irqrestore(&the_port->lock, port_flags);
- return retval;
-}
-
-/**
- * ic4_set_termios - set the termios settings
- * @the_port: port to operate on
- * @termios: new settings
- * @old_termios: old settings
- *
- */
-static void
-ic4_set_termios(struct uart_port *the_port,
- struct ktermios *termios, struct ktermios *old_termios)
-{
- unsigned long port_flags;
-
- spin_lock_irqsave(&the_port->lock, port_flags);
- ioc4_change_speed(the_port, termios, old_termios);
- spin_unlock_irqrestore(&the_port->lock, port_flags);
-}
-
-/**
- * ic4_request_port - allocate resources for port - no op....
- * @port: port to operate on
- *
- */
-static int ic4_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/* Associate the uart functions above - given to serial core */
-
-static const struct uart_ops ioc4_ops = {
- .tx_empty = ic4_tx_empty,
- .set_mctrl = ic4_set_mctrl,
- .get_mctrl = ic4_get_mctrl,
- .stop_tx = ic4_stop_tx,
- .start_tx = ic4_start_tx,
- .stop_rx = null_void_function,
- .break_ctl = ic4_break_ctl,
- .startup = ic4_startup,
- .shutdown = ic4_shutdown,
- .set_termios = ic4_set_termios,
- .type = ic4_type,
- .release_port = null_void_function,
- .request_port = ic4_request_port,
-};
-
-/*
- * Boot-time initialization code
- */
-
-static struct uart_driver ioc4_uart_rs232 = {
- .owner = THIS_MODULE,
- .driver_name = "ioc4_serial_rs232",
- .dev_name = DEVICE_NAME_RS232,
- .major = DEVICE_MAJOR,
- .minor = DEVICE_MINOR_RS232,
- .nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
-};
-
-static struct uart_driver ioc4_uart_rs422 = {
- .owner = THIS_MODULE,
- .driver_name = "ioc4_serial_rs422",
- .dev_name = DEVICE_NAME_RS422,
- .major = DEVICE_MAJOR,
- .minor = DEVICE_MINOR_RS422,
- .nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
-};
-
-
-/**
- * ioc4_serial_remove_one - detach function
- *
- * @idd: IOC4 master module data for this IOC4
- */
-
-static int ioc4_serial_remove_one(struct ioc4_driver_data *idd)
-{
- int port_num, port_type;
- struct ioc4_control *control;
- struct uart_port *the_port;
- struct ioc4_port *port;
- struct ioc4_soft *soft;
-
- /* If serial driver did not attach, don't try to detach */
- control = idd->idd_serial_data;
- if (!control)
- return 0;
-
- for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
- for (port_type = UART_PORT_MIN;
- port_type < UART_PORT_COUNT;
- port_type++) {
- the_port = &control->ic_port[port_num].icp_uart_port
- [port_type];
- if (the_port) {
- switch (port_type) {
- case UART_PORT_RS422:
- uart_remove_one_port(&ioc4_uart_rs422,
- the_port);
- break;
- default:
- case UART_PORT_RS232:
- uart_remove_one_port(&ioc4_uart_rs232,
- the_port);
- break;
- }
- }
- }
- port = control->ic_port[port_num].icp_port;
- /* we allocate in pairs */
- if (!(port_num & 1) && port) {
- pci_free_consistent(port->ip_pdev,
- TOTAL_RING_BUF_SIZE,
- port->ip_cpu_ringbuf,
- port->ip_dma_ringbuf);
- kfree(port);
- }
- }
- soft = control->ic_soft;
- if (soft) {
- free_irq(control->ic_irq, soft);
- if (soft->is_ioc4_serial_addr) {
- iounmap(soft->is_ioc4_serial_addr);
- release_mem_region((unsigned long)
- soft->is_ioc4_serial_addr,
- sizeof(struct ioc4_serial));
- }
- kfree(soft);
- }
- kfree(control);
- idd->idd_serial_data = NULL;
-
- return 0;
-}
-
-
-/**
- * ioc4_serial_core_attach - register with serial core
- *		This is done during pci probing
- * @pdev: handle for this card
- * @port_type: protocol to register (PROTO_RS232 or PROTO_RS422)
- */
-static inline int
-ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
-{
- struct ioc4_port *port;
- struct uart_port *the_port;
- struct ioc4_driver_data *idd = pci_get_drvdata(pdev);
- struct ioc4_control *control = idd->idd_serial_data;
- int port_num;
- int port_type_idx;
- struct uart_driver *u_driver;
-
-
- DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n",
- __func__, pdev, (void *)control));
-
- if (!control)
- return -ENODEV;
-
- port_type_idx = (port_type == PROTO_RS232) ? UART_PORT_RS232
- : UART_PORT_RS422;
-
- u_driver = (port_type == PROTO_RS232) ? &ioc4_uart_rs232
- : &ioc4_uart_rs422;
-
- /* once around for each port on this card */
- for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
- the_port = &control->ic_port[port_num].icp_uart_port
- [port_type_idx];
- port = control->ic_port[port_num].icp_port;
- port->ip_all_ports[port_type_idx] = the_port;
-
- DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n",
- __func__, (void *)the_port,
- (void *)port,
- port_type == PROTO_RS232 ? "rs232" : "rs422"));
-
- /* membase, iobase and mapbase just need to be non-0 */
- the_port->membase = (unsigned char __iomem *)1;
- the_port->iobase = (pdev->bus->number << 16) | port_num;
- the_port->line = (Num_of_ioc4_cards << 2) | port_num;
- the_port->mapbase = port_type;
- the_port->type = PORT_16550A;
- the_port->fifosize = IOC4_FIFO_CHARS;
- the_port->ops = &ioc4_ops;
- the_port->irq = control->ic_irq;
- the_port->dev = &pdev->dev;
- spin_lock_init(&the_port->lock);
- if (uart_add_one_port(u_driver, the_port) < 0) {
- printk(KERN_WARNING
- "%s: unable to add port %d bus %d\n",
- __func__, the_port->line, pdev->bus->number);
- } else {
- DPRINT_CONFIG(
- ("IOC4 serial port %d irq = %d, bus %d\n",
- the_port->line, the_port->irq, pdev->bus->number));
- }
- }
- return 0;
-}
-
-/**
- * ioc4_serial_attach_one - register attach function
- * called per card found from IOC4 master module.
- * @idd: Master module data for this IOC4
- */
-static int
-ioc4_serial_attach_one(struct ioc4_driver_data *idd)
-{
- unsigned long tmp_addr1;
- struct ioc4_serial __iomem *serial;
- struct ioc4_soft *soft;
- struct ioc4_control *control;
- int ret = 0;
-
-
- DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev,
- idd->idd_pci_id));
-
- /* PCI-RT does not bring out serial connections.
- * Do not attach to this particular IOC4.
- */
- if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
- return 0;
-
- /* request serial registers */
- tmp_addr1 = idd->idd_bar0 + IOC4_SERIAL_OFFSET;
-
- if (!request_mem_region(tmp_addr1, sizeof(struct ioc4_serial),
- "sioc4_uart")) {
- printk(KERN_WARNING
- "ioc4 (%p): unable to get request region for "
- "uart space\n", (void *)idd->idd_pdev);
- ret = -ENODEV;
- goto out1;
- }
- serial = ioremap(tmp_addr1, sizeof(struct ioc4_serial));
- if (!serial) {
- printk(KERN_WARNING
- "ioc4 (%p) : unable to remap ioc4 serial register\n",
- (void *)idd->idd_pdev);
- ret = -ENODEV;
- goto out2;
- }
- DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n",
- __func__, (void *)idd->idd_misc_regs,
- (void *)serial));
-
- /* Get memory for the new card */
- control = kzalloc(sizeof(struct ioc4_control), GFP_KERNEL);
-
- if (!control) {
- printk(KERN_WARNING "ioc4_attach_one"
- ": unable to get memory for the IOC4\n");
- ret = -ENOMEM;
- goto out2;
- }
- idd->idd_serial_data = control;
-
- /* Allocate the soft structure */
- soft = kzalloc(sizeof(struct ioc4_soft), GFP_KERNEL);
- if (!soft) {
- printk(KERN_WARNING
- "ioc4 (%p): unable to get memory for the soft struct\n",
- (void *)idd->idd_pdev);
- ret = -ENOMEM;
- goto out3;
- }
-
- spin_lock_init(&soft->is_ir_lock);
- soft->is_ioc4_misc_addr = idd->idd_misc_regs;
- soft->is_ioc4_serial_addr = serial;
-
- /* Init the IOC4 */
- writel(0xf << IOC4_SIO_CR_CMD_PULSE_SHIFT,
- &idd->idd_misc_regs->sio_cr.raw);
-
- /* Enable serial port mode select generic PIO pins as outputs */
- writel(IOC4_GPCR_UART0_MODESEL | IOC4_GPCR_UART1_MODESEL
- | IOC4_GPCR_UART2_MODESEL | IOC4_GPCR_UART3_MODESEL,
- &idd->idd_misc_regs->gpcr_s.raw);
-
- /* Clear and disable all serial interrupts */
- write_ireg(soft, ~0, IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
- writel(~0, &idd->idd_misc_regs->sio_ir.raw);
- write_ireg(soft, IOC4_OTHER_IR_SER_MEMERR, IOC4_W_IEC,
- IOC4_OTHER_INTR_TYPE);
- writel(IOC4_OTHER_IR_SER_MEMERR, &idd->idd_misc_regs->other_ir.raw);
- control->ic_soft = soft;
-
- /* Hook up interrupt handler */
- if (!request_irq(idd->idd_pdev->irq, ioc4_intr, IRQF_SHARED,
- "sgi-ioc4serial", soft)) {
- control->ic_irq = idd->idd_pdev->irq;
- } else {
- printk(KERN_WARNING
- "%s : request_irq fails for IRQ 0x%x\n ",
- __func__, idd->idd_pdev->irq);
- }
- ret = ioc4_attach_local(idd);
- if (ret)
- goto out4;
-
- /* register port with the serial core - 1 rs232, 1 rs422 */
-
- ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS232);
- if (ret)
- goto out4;
-
- ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS422);
- if (ret)
- goto out5;
-
- Num_of_ioc4_cards++;
-
- return ret;
-
- /* error exits that give back resources */
-out5:
- ioc4_serial_remove_one(idd);
- return ret;
-out4:
- kfree(soft);
-out3:
- kfree(control);
-out2:
- if (serial)
- iounmap(serial);
- release_mem_region(tmp_addr1, sizeof(struct ioc4_serial));
-out1:
-
- return ret;
-}
-
-
-static struct ioc4_submodule ioc4_serial_submodule = {
- .is_name = "IOC4_serial",
- .is_owner = THIS_MODULE,
- .is_probe = ioc4_serial_attach_one,
- .is_remove = ioc4_serial_remove_one,
-};
-
-/**
- * ioc4_serial_init - module init
- */
-static int __init ioc4_serial_init(void)
-{
- int ret;
-
- /* register with serial core */
- if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) {
- printk(KERN_WARNING
- "%s: Couldn't register rs232 IOC4 serial driver\n",
- __func__);
- goto out;
- }
- if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
- printk(KERN_WARNING
- "%s: Couldn't register rs422 IOC4 serial driver\n",
- __func__);
- goto out_uart_rs232;
- }
-
- /* register with IOC4 main module */
- ret = ioc4_register_submodule(&ioc4_serial_submodule);
- if (ret)
- goto out_uart_rs422;
- return 0;
-
-out_uart_rs422:
- uart_unregister_driver(&ioc4_uart_rs422);
-out_uart_rs232:
- uart_unregister_driver(&ioc4_uart_rs232);
-out:
- return ret;
-}
-
-static void __exit ioc4_serial_exit(void)
-{
- ioc4_unregister_submodule(&ioc4_serial_submodule);
- uart_unregister_driver(&ioc4_uart_rs232);
- uart_unregister_driver(&ioc4_uart_rs422);
-}
-
-late_initcall(ioc4_serial_init); /* Call only after tty init is done */
-module_exit(ioc4_serial_exit);
-
-MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
-MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC4 Base-IO Card");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index bfe5e9e034ec..c7d51b51898f 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -277,10 +277,14 @@ static void kgdboc_pre_exp_handler(void)
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
+
+ atomic_inc(&ignore_console_lock_warning);
}
static void kgdboc_post_exp_handler(void)
{
+ atomic_dec(&ignore_console_lock_warning);
+
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
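
The kgdboc hunk above brackets the debugger window with a matched increment and decrement of ignore_console_lock_warning, so console-lock checks stay quiet while kgdb owns the console. A minimal userspace sketch of that balanced-counter idea (all names below are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for ignore_console_lock_warning. */
static atomic_int suppress_warnings = 0;

static void debugger_enter(void) { atomic_fetch_add(&suppress_warnings, 1); }
static void debugger_leave(void) { atomic_fetch_sub(&suppress_warnings, 1); }

static void lock_check(const char *what)
{
	/* Only warn when no debugger window is open. */
	if (atomic_load(&suppress_warnings) == 0)
		fprintf(stderr, "warning: %s called without lock\n", what);
}

int main(void)
{
	lock_check("console_write");	/* warns */
	debugger_enter();
	lock_check("console_write");	/* silent inside the window */
	debugger_leave();
	lock_check("console_write");	/* warns again */
	return 0;
}
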
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 9de9f0f239a1..fcbea43dc334 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -57,6 +57,7 @@
#define ASC_IRNCR_TIR 0x1
#define ASC_IRNCR_RIR 0x2
#define ASC_IRNCR_EIR 0x4
+#define ASC_IRNCR_MASK GENMASK(2, 0)
#define ASCOPT_CSIZE 0x3
#define TXFIFO_FL 1
@@ -99,7 +100,12 @@
static void lqasc_tx_chars(struct uart_port *port);
static struct ltq_uart_port *lqasc_port[MAXPORTS];
static struct uart_driver lqasc_reg;
-static DEFINE_SPINLOCK(ltq_asc_lock);
+
+struct ltq_soc_data {
+ int (*fetch_irq)(struct device *dev, struct ltq_uart_port *ltq_port);
+ int (*request_irq)(struct uart_port *port);
+ void (*free_irq)(struct uart_port *port);
+};
struct ltq_uart_port {
struct uart_port port;
@@ -110,6 +116,10 @@ struct ltq_uart_port {
unsigned int tx_irq;
unsigned int rx_irq;
unsigned int err_irq;
+ unsigned int common_irq;
+ spinlock_t lock; /* exclusive access for multi core */
+
+ const struct ltq_soc_data *soc;
};
static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
@@ -135,9 +145,11 @@ static void
lqasc_start_tx(struct uart_port *port)
{
unsigned long flags;
- spin_lock_irqsave(&ltq_asc_lock, flags);
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ spin_lock_irqsave(&ltq_port->lock, flags);
lqasc_tx_chars(port);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
return;
}
@@ -245,9 +257,11 @@ lqasc_tx_int(int irq, void *_port)
{
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
- spin_lock_irqsave(&ltq_asc_lock, flags);
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ spin_lock_irqsave(&ltq_port->lock, flags);
__raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
lqasc_start_tx(port);
return IRQ_HANDLED;
}
@@ -257,11 +271,13 @@ lqasc_err_int(int irq, void *_port)
{
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
- spin_lock_irqsave(&ltq_asc_lock, flags);
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ spin_lock_irqsave(&ltq_port->lock, flags);
/* clear any pending interrupts */
asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
return IRQ_HANDLED;
}
@@ -270,10 +286,37 @@ lqasc_rx_int(int irq, void *_port)
{
unsigned long flags;
struct uart_port *port = (struct uart_port *)_port;
- spin_lock_irqsave(&ltq_asc_lock, flags);
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ spin_lock_irqsave(&ltq_port->lock, flags);
__raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
lqasc_rx_chars(port);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lqasc_irq(int irq, void *p)
+{
+ unsigned long flags;
+ u32 stat;
+ struct uart_port *port = p;
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ spin_lock_irqsave(&ltq_port->lock, flags);
+ stat = readl(port->membase + LTQ_ASC_IRNCR);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
+ if (!(stat & ASC_IRNCR_MASK))
+ return IRQ_NONE;
+
+ if (stat & ASC_IRNCR_TIR)
+ lqasc_tx_int(irq, p);
+
+ if (stat & ASC_IRNCR_RIR)
+ lqasc_rx_int(irq, p);
+
+ if (stat & ASC_IRNCR_EIR)
+ lqasc_err_int(irq, p);
+
return IRQ_HANDLED;
}
@@ -307,11 +350,13 @@ lqasc_startup(struct uart_port *port)
{
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
+ unsigned long flags;
if (!IS_ERR(ltq_port->clk))
clk_prepare_enable(ltq_port->clk);
port->uartclk = clk_get_rate(ltq_port->freqclk);
+ spin_lock_irqsave(&ltq_port->lock, flags);
asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
@@ -331,35 +376,14 @@ lqasc_startup(struct uart_port *port)
asc_update_bits(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN |
ASCCON_ROEN, port->membase + LTQ_ASC_CON);
- retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
- 0, "asc_tx", port);
- if (retval) {
- pr_err("failed to request lqasc_tx_int\n");
- return retval;
- }
-
- retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
- 0, "asc_rx", port);
- if (retval) {
- pr_err("failed to request lqasc_rx_int\n");
- goto err1;
- }
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
- retval = request_irq(ltq_port->err_irq, lqasc_err_int,
- 0, "asc_err", port);
- if (retval) {
- pr_err("failed to request lqasc_err_int\n");
- goto err2;
- }
+ retval = ltq_port->soc->request_irq(port);
+ if (retval)
+ return retval;
__raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
port->membase + LTQ_ASC_IRNREN);
- return 0;
-
-err2:
- free_irq(ltq_port->rx_irq, port);
-err1:
- free_irq(ltq_port->tx_irq, port);
return retval;
}
@@ -367,15 +391,17 @@ static void
lqasc_shutdown(struct uart_port *port)
{
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
- free_irq(ltq_port->tx_irq, port);
- free_irq(ltq_port->rx_irq, port);
- free_irq(ltq_port->err_irq, port);
+ unsigned long flags;
+
+ ltq_port->soc->free_irq(port);
+ spin_lock_irqsave(&ltq_port->lock, flags);
__raw_writel(0, port->membase + LTQ_ASC_CON);
asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
port->membase + LTQ_ASC_RXFCON);
asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
if (!IS_ERR(ltq_port->clk))
clk_disable_unprepare(ltq_port->clk);
}
@@ -390,6 +416,7 @@ lqasc_set_termios(struct uart_port *port,
unsigned int baud;
unsigned int con = 0;
unsigned long flags;
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
cflag = new->c_cflag;
iflag = new->c_iflag;
@@ -443,7 +470,7 @@ lqasc_set_termios(struct uart_port *port,
/* set error signals - framing, parity and overrun, enable receiver */
con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN;
- spin_lock_irqsave(&ltq_asc_lock, flags);
+ spin_lock_irqsave(&ltq_port->lock, flags);
/* set up CON */
asc_update_bits(0, con, port->membase + LTQ_ASC_CON);
@@ -471,7 +498,7 @@ lqasc_set_termios(struct uart_port *port,
/* enable rx */
__raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(new))
@@ -589,17 +616,14 @@ lqasc_console_putchar(struct uart_port *port, int ch)
static void lqasc_serial_port_write(struct uart_port *port, const char *s,
u_int count)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ltq_asc_lock, flags);
uart_console_write(port, s, count, lqasc_console_putchar);
- spin_unlock_irqrestore(&ltq_asc_lock, flags);
}
static void
lqasc_console_write(struct console *co, const char *s, u_int count)
{
struct ltq_uart_port *ltq_port;
+ unsigned long flags;
if (co->index >= MAXPORTS)
return;
@@ -608,7 +632,9 @@ lqasc_console_write(struct console *co, const char *s, u_int count)
if (!ltq_port)
return;
+ spin_lock_irqsave(&ltq_port->lock, flags);
lqasc_serial_port_write(&ltq_port->port, s, count);
+ spin_unlock_irqrestore(&ltq_port->lock, flags);
}
static int __init
@@ -677,7 +703,8 @@ lqasc_serial_early_console_setup(struct earlycon_device *device,
device->con->write = lqasc_serial_early_console_write;
return 0;
}
-OF_EARLYCON_DECLARE(lantiq, DRVNAME, lqasc_serial_early_console_setup);
+OF_EARLYCON_DECLARE(lantiq, "lantiq,asc", lqasc_serial_early_console_setup);
+OF_EARLYCON_DECLARE(lantiq, "intel,lgm-asc", lqasc_serial_early_console_setup);
static struct uart_driver lqasc_reg = {
.owner = THIS_MODULE,
@@ -689,24 +716,134 @@ static struct uart_driver lqasc_reg = {
.cons = &lqasc_console,
};
+static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port)
+{
+ struct uart_port *port = &ltq_port->port;
+ struct resource irqres[3];
+ int ret;
+
+ ret = of_irq_to_resource_table(dev->of_node, irqres, 3);
+ if (ret != 3) {
+ dev_err(dev,
+ "failed to get IRQs for serial port\n");
+ return -ENODEV;
+ }
+ ltq_port->tx_irq = irqres[0].start;
+ ltq_port->rx_irq = irqres[1].start;
+ ltq_port->err_irq = irqres[2].start;
+ port->irq = irqres[0].start;
+
+ return 0;
+}
+
+static int request_irq_lantiq(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+ int retval;
+
+ retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
+ 0, "asc_tx", port);
+ if (retval) {
+ dev_err(port->dev, "failed to request asc_tx\n");
+ return retval;
+ }
+
+ retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
+ 0, "asc_rx", port);
+ if (retval) {
+ dev_err(port->dev, "failed to request asc_rx\n");
+ goto err1;
+ }
+
+ retval = request_irq(ltq_port->err_irq, lqasc_err_int,
+ 0, "asc_err", port);
+ if (retval) {
+ dev_err(port->dev, "failed to request asc_err\n");
+ goto err2;
+ }
+ return 0;
+
+err2:
+ free_irq(ltq_port->rx_irq, port);
+err1:
+ free_irq(ltq_port->tx_irq, port);
+ return retval;
+}
+
+static void free_irq_lantiq(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ free_irq(ltq_port->tx_irq, port);
+ free_irq(ltq_port->rx_irq, port);
+ free_irq(ltq_port->err_irq, port);
+}
+
+static int fetch_irq_intel(struct device *dev, struct ltq_uart_port *ltq_port)
+{
+ struct uart_port *port = &ltq_port->port;
+ int ret;
+
+ ret = of_irq_get(dev->of_node, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to fetch IRQ for serial port\n");
+ return ret;
+ }
+ ltq_port->common_irq = ret;
+ port->irq = ret;
+
+ return 0;
+}
+
+static int request_irq_intel(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+ int retval;
+
+ retval = request_irq(ltq_port->common_irq, lqasc_irq, 0,
+ "asc_irq", port);
+ if (retval)
+ dev_err(port->dev, "failed to request asc_irq\n");
+
+ return retval;
+}
+
+static void free_irq_intel(struct uart_port *port)
+{
+ struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
+
+ free_irq(ltq_port->common_irq, port);
+}
+
static int __init
lqasc_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
struct uart_port *port;
- struct resource *mmres, irqres[3];
+ struct resource *mmres;
int line;
int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ret = of_irq_to_resource_table(node, irqres, 3);
- if (!mmres || (ret != 3)) {
+ if (!mmres) {
dev_err(&pdev->dev,
- "failed to get memory/irq for serial port\n");
+ "failed to get memory for serial port\n");
return -ENODEV;
}
+ ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+ GFP_KERNEL);
+ if (!ltq_port)
+ return -ENOMEM;
+
+ port = &ltq_port->port;
+
+ ltq_port->soc = of_device_get_match_data(&pdev->dev);
+ ret = ltq_port->soc->fetch_irq(&pdev->dev, ltq_port);
+ if (ret)
+ return ret;
+
/* get serial id */
line = of_alias_get_id(node, "serial");
if (line < 0) {
@@ -727,13 +864,6 @@ lqasc_probe(struct platform_device *pdev)
return -EBUSY;
}
- ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
- GFP_KERNEL);
- if (!ltq_port)
- return -ENOMEM;
-
- port = &ltq_port->port;
-
port->iotype = SERIAL_IO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &lqasc_pops;
@@ -742,7 +872,6 @@ lqasc_probe(struct platform_device *pdev)
port->line = line;
port->dev = &pdev->dev;
/* unused, just to be backward-compatible */
- port->irq = irqres[0].start;
port->mapbase = mmres->start;
if (IS_ENABLED(CONFIG_LANTIQ) && !IS_ENABLED(CONFIG_COMMON_CLK))
@@ -762,10 +891,7 @@ lqasc_probe(struct platform_device *pdev)
else
ltq_port->clk = devm_clk_get(&pdev->dev, "asc");
- ltq_port->tx_irq = irqres[0].start;
- ltq_port->rx_irq = irqres[1].start;
- ltq_port->err_irq = irqres[2].start;
-
+ spin_lock_init(&ltq_port->lock);
lqasc_port[line] = ltq_port;
platform_set_drvdata(pdev, ltq_port);
@@ -774,8 +900,21 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static const struct ltq_soc_data soc_data_lantiq = {
+ .fetch_irq = fetch_irq_lantiq,
+ .request_irq = request_irq_lantiq,
+ .free_irq = free_irq_lantiq,
+};
+
+static const struct ltq_soc_data soc_data_intel = {
+ .fetch_irq = fetch_irq_intel,
+ .request_irq = request_irq_intel,
+ .free_irq = free_irq_intel,
+};
+
static const struct of_device_id ltq_asc_match[] = {
- { .compatible = DRVNAME },
+ { .compatible = "lantiq,asc", .data = &soc_data_lantiq },
+ { .compatible = "intel,lgm-asc", .data = &soc_data_intel },
{},
};
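
The lantiq conversion above replaces fixed IRQ handling with a per-SoC ops table selected through of_device_get_match_data(), so "lantiq,asc" keeps its three dedicated IRQs while "intel,lgm-asc" uses one shared line dispatched in lqasc_irq(). A minimal sketch of that match-data/ops-table pattern, with made-up helper names and no kernel dependencies:

#include <stdio.h>
#include <string.h>

/* Each compatible string maps to a set of IRQ helpers picked at probe time. */
struct soc_ops {
	const char *compatible;
	int  (*fetch_irq)(void);
	int  (*request_irq)(void);
	void (*free_irq)(void);
};

static int  fetch_three(void)   { puts("fetch tx/rx/err IRQs");   return 0; }
static int  request_three(void) { puts("request tx/rx/err IRQs"); return 0; }
static void free_three(void)    { puts("free tx/rx/err IRQs"); }

static int  fetch_one(void)     { puts("fetch single IRQ");   return 0; }
static int  request_one(void)   { puts("request single IRQ"); return 0; }
static void free_one(void)      { puts("free single IRQ"); }

static const struct soc_ops soc_table[] = {
	{ "lantiq,asc",    fetch_three, request_three, free_three },
	{ "intel,lgm-asc", fetch_one,   request_one,   free_one   },
};

static const struct soc_ops *match_soc(const char *compatible)
{
	for (size_t i = 0; i < sizeof(soc_table) / sizeof(soc_table[0]); i++)
		if (!strcmp(soc_table[i].compatible, compatible))
			return &soc_table[i];
	return NULL;
}

int main(void)
{
	const struct soc_ops *soc = match_soc("intel,lgm-asc");

	if (soc && !soc->fetch_irq() && !soc->request_irq())
		soc->free_irq();
	return 0;
}
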
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index f4e27d0ad947..9a836dcac157 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -25,8 +25,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/of.h>
-#include <mach/platform.h>
-#include <mach/hardware.h>
+#include <linux/sizes.h>
+#include <linux/soc/nxp/lpc32xx-misc.h>
/*
* High Speed UART register offsets
@@ -81,6 +81,8 @@
#define LPC32XX_HSU_TX_TL8B (0x2 << 0)
#define LPC32XX_HSU_TX_TL16B (0x3 << 0)
+#define LPC32XX_MAIN_OSC_FREQ 13000000
+
#define MODNAME "lpc32xx_hsuart"
struct lpc32xx_hsuart_port {
@@ -151,8 +153,6 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
local_irq_restore(flags);
}
-static void lpc32xx_loopback_set(resource_size_t mapbase, int state);
-
static int __init lpc32xx_hsuart_console_setup(struct console *co,
char *options)
{
@@ -439,35 +439,6 @@ static void serial_lpc32xx_break_ctl(struct uart_port *port,
spin_unlock_irqrestore(&port->lock, flags);
}
-/* LPC3250 Errata HSUART.1: Hang workaround via loopback mode on inactivity */
-static void lpc32xx_loopback_set(resource_size_t mapbase, int state)
-{
- int bit;
- u32 tmp;
-
- switch (mapbase) {
- case LPC32XX_HS_UART1_BASE:
- bit = 0;
- break;
- case LPC32XX_HS_UART2_BASE:
- bit = 1;
- break;
- case LPC32XX_HS_UART7_BASE:
- bit = 6;
- break;
- default:
- WARN(1, "lpc32xx_hs: Warning: Unknown port at %08x\n", mapbase);
- return;
- }
-
- tmp = readl(LPC32XX_UARTCTL_CLOOP);
- if (state)
- tmp |= (1 << bit);
- else
- tmp &= ~(1 << bit);
- writel(tmp, LPC32XX_UARTCTL_CLOOP);
-}
-
/* port->lock is not held. */
static int serial_lpc32xx_startup(struct uart_port *port)
{
@@ -687,11 +658,8 @@ static int serial_hs_lpc32xx_probe(struct platform_device *pdev)
p->port.membase = NULL;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Error getting irq for HS UART port %d\n",
- uarts_registered);
+ if (ret < 0)
return ret;
- }
p->port.irq = ret;
p->port.iotype = UPIO_MEM32;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index e6c48a99bd85..8434bd5a8ec7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -689,7 +689,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
* tail.
*/
uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT,
- one->rx_buf[rxlen], flag);
+ one->rx_buf[rxlen-1], flag);
} else {
if (unlikely(rxlen >= port->fifosize)) {
@@ -955,17 +955,43 @@ static void max310x_set_termios(struct uart_port *port,
/* Configure flow control */
max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]);
max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
- if (termios->c_cflag & CRTSCTS)
+
+ /* Disable transmitter before enabling AutoCTS or auto transmitter
+ * flow control
+ */
+ if (termios->c_cflag & CRTSCTS || termios->c_iflag & IXOFF) {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TXDIS_BIT,
+ MAX310X_MODE1_TXDIS_BIT);
+ }
+
+ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
+
+ if (termios->c_cflag & CRTSCTS) {
+ /* Enable AUTORTS and AUTOCTS */
+ port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
flow |= MAX310X_FLOWCTRL_AUTOCTS_BIT |
MAX310X_FLOWCTRL_AUTORTS_BIT;
+ }
if (termios->c_iflag & IXON)
flow |= MAX310X_FLOWCTRL_SWFLOW3_BIT |
MAX310X_FLOWCTRL_SWFLOWEN_BIT;
- if (termios->c_iflag & IXOFF)
+ if (termios->c_iflag & IXOFF) {
+ port->status |= UPSTAT_AUTOXOFF;
flow |= MAX310X_FLOWCTRL_SWFLOW1_BIT |
MAX310X_FLOWCTRL_SWFLOWEN_BIT;
+ }
max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow);
+ /* Enable transmitter after disabling AutoCTS and auto transmitter
+ * flow control
+ */
+ if (!(termios->c_cflag & CRTSCTS) && !(termios->c_iflag & IXOFF)) {
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TXDIS_BIT,
+ 0);
+ }
+
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 0xffff,
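
The max310x termios hunk above parks the transmitter, derives the serial-core UPSTAT_* bits from the termios flags, and only re-enables TX when no automatic flow control is in use. A condensed sketch of just the flag-to-status mapping, using illustrative constants rather than the driver's register definitions:

#include <stdio.h>

/* Illustrative flag and status values; not the kernel's definitions. */
#define FLAG_CRTSCTS    (1u << 0)	/* hardware RTS/CTS requested */
#define FLAG_IXOFF      (1u << 1)	/* software XON/XOFF on input */

#define UPSTAT_AUTORTS  (1u << 0)
#define UPSTAT_AUTOCTS  (1u << 1)
#define UPSTAT_AUTOXOFF (1u << 2)

static unsigned int flow_status(unsigned int flags)
{
	unsigned int status = 0;

	/* Hardware flow control first, then software XOFF, as in the hunk. */
	if (flags & FLAG_CRTSCTS)
		status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	if (flags & FLAG_IXOFF)
		status |= UPSTAT_AUTOXOFF;
	return status;
}

int main(void)
{
	printf("CRTSCTS : %#x\n", flow_status(FLAG_CRTSCTS));
	printf("IXOFF   : %#x\n", flow_status(FLAG_IXOFF));
	printf("both    : %#x\n", flow_status(FLAG_CRTSCTS | FLAG_IXOFF));
	return 0;
}
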
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 7e7b1559fa36..c12a12556339 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -884,10 +884,8 @@ static int mvebu_uart_probe(struct platform_device *pdev)
if (platform_irq_count(pdev) == 1) {
/* Old bindings: no name on the single unnamed UART0 IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to get UART IRQ\n");
+ if (irq < 0)
return irq;
- }
mvuart->irq[UART_IRQ_SUM] = irq;
} else {
@@ -897,18 +895,14 @@ static int mvebu_uart_probe(struct platform_device *pdev)
* uart-sum of UART0 port.
*/
irq = platform_get_irq_byname(pdev, "uart-rx");
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to get 'uart-rx' IRQ\n");
+ if (irq < 0)
return irq;
- }
mvuart->irq[UART_RX_IRQ] = irq;
irq = platform_get_irq_byname(pdev, "uart-tx");
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to get 'uart-tx' IRQ\n");
+ if (irq < 0)
return irq;
- }
mvuart->irq[UART_TX_IRQ] = irq;
}
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4c188f4079b3..e34525970682 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -969,10 +969,8 @@ err_out:
}
-#define RTS_AT_AUART() IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(s->gpios, \
- UART_GPIO_RTS))
-#define CTS_AT_AUART() IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(s->gpios, \
- UART_GPIO_CTS))
+#define RTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_RTS)
+#define CTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_CTS)
static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *termios,
struct ktermios *old)
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
deleted file mode 100644
index b3556863491f..000000000000
--- a/drivers/tty/serial/netx-serial.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/platform_device.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <mach/netx-regs.h>
-
-/* We've been assigned a range on the "Low-density serial ports" major */
-#define SERIAL_NX_MAJOR 204
-#define MINOR_START 170
-
-enum uart_regs {
- UART_DR = 0x00,
- UART_SR = 0x04,
- UART_LINE_CR = 0x08,
- UART_BAUDDIV_MSB = 0x0c,
- UART_BAUDDIV_LSB = 0x10,
- UART_CR = 0x14,
- UART_FR = 0x18,
- UART_IIR = 0x1c,
- UART_ILPR = 0x20,
- UART_RTS_CR = 0x24,
- UART_RTS_LEAD = 0x28,
- UART_RTS_TRAIL = 0x2c,
- UART_DRV_ENABLE = 0x30,
- UART_BRM_CR = 0x34,
- UART_RXFIFO_IRQLEVEL = 0x38,
- UART_TXFIFO_IRQLEVEL = 0x3c,
-};
-
-#define SR_FE (1<<0)
-#define SR_PE (1<<1)
-#define SR_BE (1<<2)
-#define SR_OE (1<<3)
-
-#define LINE_CR_BRK (1<<0)
-#define LINE_CR_PEN (1<<1)
-#define LINE_CR_EPS (1<<2)
-#define LINE_CR_STP2 (1<<3)
-#define LINE_CR_FEN (1<<4)
-#define LINE_CR_5BIT (0<<5)
-#define LINE_CR_6BIT (1<<5)
-#define LINE_CR_7BIT (2<<5)
-#define LINE_CR_8BIT (3<<5)
-#define LINE_CR_BITS_MASK (3<<5)
-
-#define CR_UART_EN (1<<0)
-#define CR_SIREN (1<<1)
-#define CR_SIRLP (1<<2)
-#define CR_MSIE (1<<3)
-#define CR_RIE (1<<4)
-#define CR_TIE (1<<5)
-#define CR_RTIE (1<<6)
-#define CR_LBE (1<<7)
-
-#define FR_CTS (1<<0)
-#define FR_DSR (1<<1)
-#define FR_DCD (1<<2)
-#define FR_BUSY (1<<3)
-#define FR_RXFE (1<<4)
-#define FR_TXFF (1<<5)
-#define FR_RXFF (1<<6)
-#define FR_TXFE (1<<7)
-
-#define IIR_MIS (1<<0)
-#define IIR_RIS (1<<1)
-#define IIR_TIS (1<<2)
-#define IIR_RTIS (1<<3)
-#define IIR_MASK 0xf
-
-#define RTS_CR_AUTO (1<<0)
-#define RTS_CR_RTS (1<<1)
-#define RTS_CR_COUNT (1<<2)
-#define RTS_CR_MOD2 (1<<3)
-#define RTS_CR_RTS_POL (1<<4)
-#define RTS_CR_CTS_CTR (1<<5)
-#define RTS_CR_CTS_POL (1<<6)
-#define RTS_CR_STICK (1<<7)
-
-#define UART_PORT_SIZE 0x40
-#define DRIVER_NAME "netx-uart"
-
-struct netx_port {
- struct uart_port port;
-};
-
-static void netx_stop_tx(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val & ~CR_TIE, port->membase + UART_CR);
-}
-
-static void netx_stop_rx(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val & ~CR_RIE, port->membase + UART_CR);
-}
-
-static void netx_enable_ms(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val | CR_MSIE, port->membase + UART_CR);
-}
-
-static inline void netx_transmit_buffer(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- if (port->x_char) {
- writel(port->x_char, port->membase + UART_DR);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
- netx_stop_tx(port);
- return;
- }
-
- do {
- /* send xmit->buf[xmit->tail]
- * out the port here */
- writel(xmit->buf[xmit->tail], port->membase + UART_DR);
- xmit->tail = (xmit->tail + 1) &
- (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (!(readl(port->membase + UART_FR) & FR_TXFF));
-
- if (uart_circ_empty(xmit))
- netx_stop_tx(port);
-}
-
-static void netx_start_tx(struct uart_port *port)
-{
- writel(
- readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR);
-
- if (!(readl(port->membase + UART_FR) & FR_TXFF))
- netx_transmit_buffer(port);
-}
-
-static unsigned int netx_tx_empty(struct uart_port *port)
-{
- return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT;
-}
-
-static void netx_txint(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- netx_stop_tx(port);
- return;
- }
-
- netx_transmit_buffer(port);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-}
-
-static void netx_rxint(struct uart_port *port, unsigned long *flags)
-{
- unsigned char rx, flg, status;
-
- while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
- rx = readl(port->membase + UART_DR);
- flg = TTY_NORMAL;
- port->icount.rx++;
- status = readl(port->membase + UART_SR);
- if (status & SR_BE) {
- writel(0, port->membase + UART_SR);
- if (uart_handle_break(port))
- continue;
- }
-
- if (unlikely(status & (SR_FE | SR_PE | SR_OE))) {
-
- if (status & SR_PE)
- port->icount.parity++;
- else if (status & SR_FE)
- port->icount.frame++;
- if (status & SR_OE)
- port->icount.overrun++;
-
- status &= port->read_status_mask;
-
- if (status & SR_BE)
- flg = TTY_BREAK;
- else if (status & SR_PE)
- flg = TTY_PARITY;
- else if (status & SR_FE)
- flg = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(port, rx))
- continue;
-
- uart_insert_char(port, status, SR_OE, rx, flg);
- }
-
- spin_unlock_irqrestore(&port->lock, *flags);
- tty_flip_buffer_push(&port->state->port);
- spin_lock_irqsave(&port->lock, *flags);
-}
-
-static irqreturn_t netx_int(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- unsigned long flags;
- unsigned char status;
-
- spin_lock_irqsave(&port->lock,flags);
-
- status = readl(port->membase + UART_IIR) & IIR_MASK;
- while (status) {
- if (status & IIR_RIS)
- netx_rxint(port, &flags);
- if (status & IIR_TIS)
- netx_txint(port);
- if (status & IIR_MIS) {
- if (readl(port->membase + UART_FR) & FR_CTS)
- uart_handle_cts_change(port, 1);
- else
- uart_handle_cts_change(port, 0);
- }
- writel(0, port->membase + UART_IIR);
- status = readl(port->membase + UART_IIR) & IIR_MASK;
- }
-
- spin_unlock_irqrestore(&port->lock,flags);
- return IRQ_HANDLED;
-}
-
-static unsigned int netx_get_mctrl(struct uart_port *port)
-{
- unsigned int ret = TIOCM_DSR | TIOCM_CAR;
-
- if (readl(port->membase + UART_FR) & FR_CTS)
- ret |= TIOCM_CTS;
-
- return ret;
-}
-
-static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- unsigned int val;
-
- /* FIXME: Locking needed ? */
- if (mctrl & TIOCM_RTS) {
- val = readl(port->membase + UART_RTS_CR);
- writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
- }
-}
-
-static void netx_break_ctl(struct uart_port *port, int break_state)
-{
- unsigned int line_cr;
- spin_lock_irq(&port->lock);
-
- line_cr = readl(port->membase + UART_LINE_CR);
- if (break_state != 0)
- line_cr |= LINE_CR_BRK;
- else
- line_cr &= ~LINE_CR_BRK;
- writel(line_cr, port->membase + UART_LINE_CR);
-
- spin_unlock_irq(&port->lock);
-}
-
-static int netx_startup(struct uart_port *port)
-{
- int ret;
-
- ret = request_irq(port->irq, netx_int, 0,
- DRIVER_NAME, port);
- if (ret) {
- dev_err(port->dev, "unable to grab irq%d\n",port->irq);
- goto exit;
- }
-
- writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN,
- port->membase + UART_LINE_CR);
-
- writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN,
- port->membase + UART_CR);
-
-exit:
- return ret;
-}
-
-static void netx_shutdown(struct uart_port *port)
-{
- writel(0, port->membase + UART_CR) ;
-
- free_irq(port->irq, port);
-}
-
-static void
-netx_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- unsigned int baud, quot;
- unsigned char old_cr;
- unsigned char line_cr = LINE_CR_FEN;
- unsigned char rts_cr = 0;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- line_cr |= LINE_CR_5BIT;
- break;
- case CS6:
- line_cr |= LINE_CR_6BIT;
- break;
- case CS7:
- line_cr |= LINE_CR_7BIT;
- break;
- case CS8:
- line_cr |= LINE_CR_8BIT;
- break;
- }
-
- if (termios->c_cflag & CSTOPB)
- line_cr |= LINE_CR_STP2;
-
- if (termios->c_cflag & PARENB) {
- line_cr |= LINE_CR_PEN;
- if (!(termios->c_cflag & PARODD))
- line_cr |= LINE_CR_EPS;
- }
-
- if (termios->c_cflag & CRTSCTS)
- rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;
-
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = baud * 4096;
- quot /= 1000;
- quot *= 256;
- quot /= 100000;
-
- spin_lock_irq(&port->lock);
-
- uart_update_timeout(port, termios->c_cflag, baud);
-
- old_cr = readl(port->membase + UART_CR);
-
- /* disable interrupts */
- writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
- port->membase + UART_CR);
-
- /* drain transmitter */
- while (readl(port->membase + UART_FR) & FR_BUSY);
-
- /* disable UART */
- writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);
-
- /* modem status interrupts */
- old_cr &= ~CR_MSIE;
- if (UART_ENABLE_MS(port, termios->c_cflag))
- old_cr |= CR_MSIE;
-
- writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
- writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
- writel(line_cr, port->membase + UART_LINE_CR);
-
- writel(rts_cr, port->membase + UART_RTS_CR);
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= SR_PE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= SR_BE;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= SR_PE;
- }
-
- port->read_status_mask = 0;
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= SR_BE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= SR_PE | SR_FE;
-
- writel(old_cr, port->membase + UART_CR);
-
- spin_unlock_irq(&port->lock);
-}
-
-static const char *netx_type(struct uart_port *port)
-{
- return port->type == PORT_NETX ? "NETX" : NULL;
-}
-
-static void netx_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, UART_PORT_SIZE);
-}
-
-static int netx_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, UART_PORT_SIZE,
- DRIVER_NAME) != NULL ? 0 : -EBUSY;
-}
-
-static void netx_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0)
- port->type = PORT_NETX;
-}
-
-static int
-netx_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- int ret = 0;
-
- if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX)
- ret = -EINVAL;
-
- return ret;
-}
-
-static struct uart_ops netx_pops = {
- .tx_empty = netx_tx_empty,
- .set_mctrl = netx_set_mctrl,
- .get_mctrl = netx_get_mctrl,
- .stop_tx = netx_stop_tx,
- .start_tx = netx_start_tx,
- .stop_rx = netx_stop_rx,
- .enable_ms = netx_enable_ms,
- .break_ctl = netx_break_ctl,
- .startup = netx_startup,
- .shutdown = netx_shutdown,
- .set_termios = netx_set_termios,
- .type = netx_type,
- .release_port = netx_release_port,
- .request_port = netx_request_port,
- .config_port = netx_config_port,
- .verify_port = netx_verify_port,
-};
-
-static struct netx_port netx_ports[] = {
- {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART0),
- .mapbase = NETX_PA_UART0,
- .irq = NETX_IRQ_UART0,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 0,
- },
- }, {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART1),
- .mapbase = NETX_PA_UART1,
- .irq = NETX_IRQ_UART1,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 1,
- },
- }, {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART2),
- .mapbase = NETX_PA_UART2,
- .irq = NETX_IRQ_UART2,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 2,
- },
- }
-};
-
-#ifdef CONFIG_SERIAL_NETX_CONSOLE
-
-static void netx_console_putchar(struct uart_port *port, int ch)
-{
- while (readl(port->membase + UART_FR) & FR_BUSY);
- writel(ch, port->membase + UART_DR);
-}
-
-static void
-netx_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_port *port = &netx_ports[co->index].port;
- unsigned char cr_save;
-
- cr_save = readl(port->membase + UART_CR);
- writel(cr_save | CR_UART_EN, port->membase + UART_CR);
-
- uart_console_write(port, s, count, netx_console_putchar);
-
- while (readl(port->membase + UART_FR) & FR_BUSY);
- writel(cr_save, port->membase + UART_CR);
-}
-
-static void __init
-netx_console_get_options(struct uart_port *port, int *baud,
- int *parity, int *bits, int *flow)
-{
- unsigned char line_cr;
-
- *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
- readl(port->membase + UART_BAUDDIV_LSB);
- *baud *= 1000;
- *baud /= 4096;
- *baud *= 1000;
- *baud /= 256;
- *baud *= 100;
-
- line_cr = readl(port->membase + UART_LINE_CR);
- *parity = 'n';
- if (line_cr & LINE_CR_PEN) {
- if (line_cr & LINE_CR_EPS)
- *parity = 'e';
- else
- *parity = 'o';
- }
-
- switch (line_cr & LINE_CR_BITS_MASK) {
- case LINE_CR_8BIT:
- *bits = 8;
- break;
- case LINE_CR_7BIT:
- *bits = 7;
- break;
- case LINE_CR_6BIT:
- *bits = 6;
- break;
- case LINE_CR_5BIT:
- *bits = 5;
- break;
- }
-
- if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
- *flow = 'r';
-}
-
-static int __init
-netx_console_setup(struct console *co, char *options)
-{
- struct netx_port *sport;
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- /*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports))
- co->index = 0;
- sport = &netx_ports[co->index];
-
- if (options) {
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- } else {
- /* if the UART is enabled, assume it has been correctly setup
- * by the bootloader and get the options
- */
- if (readl(sport->port.membase + UART_CR) & CR_UART_EN) {
- netx_console_get_options(&sport->port, &baud,
- &parity, &bits, &flow);
- }
-
- }
-
- return uart_set_options(&sport->port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver netx_reg;
-static struct console netx_console = {
- .name = "ttyNX",
- .write = netx_console_write,
- .device = uart_console_device,
- .setup = netx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &netx_reg,
-};
-
-static int __init netx_console_init(void)
-{
- register_console(&netx_console);
- return 0;
-}
-console_initcall(netx_console_init);
-
-#define NETX_CONSOLE &netx_console
-#else
-#define NETX_CONSOLE NULL
-#endif
-
-static struct uart_driver netx_reg = {
- .owner = THIS_MODULE,
- .driver_name = DRIVER_NAME,
- .dev_name = "ttyNX",
- .major = SERIAL_NX_MAJOR,
- .minor = MINOR_START,
- .nr = ARRAY_SIZE(netx_ports),
- .cons = NETX_CONSOLE,
-};
-
-static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_suspend_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static int serial_netx_resume(struct platform_device *pdev)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_resume_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static int serial_netx_probe(struct platform_device *pdev)
-{
- struct uart_port *port = &netx_ports[pdev->id].port;
-
- dev_info(&pdev->dev, "initialising\n");
-
- port->dev = &pdev->dev;
-
- writel(1, port->membase + UART_RXFIFO_IRQLEVEL);
- uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port);
- platform_set_drvdata(pdev, &netx_ports[pdev->id]);
-
- return 0;
-}
-
-static int serial_netx_remove(struct platform_device *pdev)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_remove_one_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static struct platform_driver serial_netx_driver = {
- .probe = serial_netx_probe,
- .remove = serial_netx_remove,
-
- .suspend = serial_netx_suspend,
- .resume = serial_netx_resume,
-
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-static int __init netx_serial_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Serial: NetX driver\n");
-
- ret = uart_register_driver(&netx_reg);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&serial_netx_driver);
- if (ret != 0)
- uart_unregister_driver(&netx_reg);
-
- return 0;
-}
-
-static void __exit netx_serial_exit(void)
-{
- platform_driver_unregister(&serial_netx_driver);
- uart_unregister_driver(&netx_reg);
-}
-
-module_init(netx_serial_init);
-module_exit(netx_serial_exit);
-
-MODULE_AUTHOR("Sascha Hauer");
-MODULE_DESCRIPTION("NetX serial port driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 29a6dc6a8d23..03963af77b15 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -662,10 +662,8 @@ static int owl_uart_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not get irq\n");
+ if (irq < 0)
return irq;
- }
if (owl_uart_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 35e5f9c5d5be..14c6306bc462 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -198,10 +198,8 @@ static int qcom_geni_serial_request_port(struct uart_port *uport)
{
struct platform_device *pdev = to_platform_device(uport->dev);
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- uport->membase = devm_ioremap_resource(&pdev->dev, res);
+ uport->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(uport->membase))
return PTR_ERR(uport->membase);
port->se.base = uport->membase;
@@ -920,12 +918,13 @@ static unsigned long get_clk_cfg(unsigned long clk_freq)
return 0;
}
-static unsigned long get_clk_div_rate(unsigned int baud, unsigned int *clk_div)
+static unsigned long get_clk_div_rate(unsigned int baud,
+ unsigned int sampling_rate, unsigned int *clk_div)
{
unsigned long ser_clk;
unsigned long desired_clk;
- desired_clk = baud * UART_OVERSAMPLING;
+ desired_clk = baud * sampling_rate;
ser_clk = get_clk_cfg(desired_clk);
if (!ser_clk) {
pr_err("%s: Can't find matching DFS entry for baud %d\n",
@@ -951,12 +950,20 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
u32 ser_clk_cfg;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
unsigned long clk_rate;
+ u32 ver, sampling_rate;
qcom_geni_serial_stop_rx(uport);
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->baud = baud;
- clk_rate = get_clk_div_rate(baud, &clk_div);
+
+ sampling_rate = UART_OVERSAMPLING;
+ /* Sampling rate is halved for IP versions >= 2.5 */
+ ver = geni_se_get_qup_hw_version(&port->se);
+ if (GENI_SE_VERSION_MAJOR(ver) >= 2 && GENI_SE_VERSION_MINOR(ver) >= 5)
+ sampling_rate /= 2;
+
+ clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
if (!clk_rate)
goto out_restart_rx;
@@ -1291,10 +1298,8 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+ if (irq < 0)
return irq;
- }
uport->irq = irq;
uport->private_data = drv;
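
The qcom_geni_serial change threads the sampling rate into get_clk_div_rate() because GENI blocks at IP revision 2.5 and later sample at half the legacy rate. A worked example of the requested source-clock arithmetic, assuming the driver's 32x oversampling constant (values below are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int baud = 115200;
	unsigned int oversampling = 32;		/* UART_OVERSAMPLING */

	unsigned long legacy = (unsigned long)baud * oversampling;
	unsigned long halved = (unsigned long)baud * (oversampling / 2);

	printf("legacy IP : %lu Hz source clock wanted\n", legacy);	/* 3686400 */
	printf("IP >= 2.5 : %lu Hz source clock wanted\n", halved);	/* 1843200 */
	return 0;
}
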
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index 284623eefaeb..c1b0d7662ef9 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -735,10 +735,8 @@ static int rda_uart_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not get irq\n");
+ if (irq < 0)
return irq;
- }
if (rda_uart_ports[pdev->id]) {
dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 68a24a14f6b7..d2b77aae42ae 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -961,7 +961,6 @@ static int sccnxp_probe(struct platform_device *pdev)
if (!s->poll) {
s->irq = platform_get_irq(pdev, 0);
if (s->irq < 0) {
- dev_err(&pdev->dev, "Missing irq resource data\n");
ret = -ENXIO;
goto err_out;
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d5269aaaf9b2..2f599515c133 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -4,7 +4,7 @@
*
* High-speed serial driver for NVIDIA Tegra SoCs
*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2019, NVIDIA CORPORATION. All rights reserved.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
*/
@@ -62,7 +62,7 @@
#define TEGRA_UART_TX_TRIG_4B 0x20
#define TEGRA_UART_TX_TRIG_1B 0x30
-#define TEGRA_UART_MAXIMUM 5
+#define TEGRA_UART_MAXIMUM 8
/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD 115200
@@ -72,6 +72,8 @@
#define TEGRA_TX_PIO 1
#define TEGRA_TX_DMA 2
+#define TEGRA_UART_FCR_IIR_FIFO_EN 0x40
+
/**
* tegra_uart_chip_data: SOC specific data.
*
@@ -84,6 +86,17 @@ struct tegra_uart_chip_data {
bool tx_fifo_full_status;
bool allow_txfifo_reset_fifo_mode;
bool support_clk_src_div;
+ bool fifo_mode_enable_status;
+ int uart_max_port;
+ int max_dma_burst_bytes;
+ int error_tolerance_low_range;
+ int error_tolerance_high_range;
+};
+
+struct tegra_baud_tolerance {
+ u32 lower_range_baud;
+ u32 upper_range_baud;
+ s32 tolerance;
};
struct tegra_uart_port {
@@ -122,10 +135,18 @@ struct tegra_uart_port {
dma_cookie_t rx_cookie;
unsigned int tx_bytes_requested;
unsigned int rx_bytes_requested;
+ struct tegra_baud_tolerance *baud_tolerance;
+ int n_adjustable_baud_rates;
+ int required_rate;
+ int configured_rate;
+ bool use_rx_pio;
+ bool use_tx_pio;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
+static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
+ bool dma_to_memory);
static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
unsigned long reg)
@@ -192,16 +213,34 @@ static void set_dtr(struct tegra_uart_port *tup, bool active)
}
}
+static void set_loopbk(struct tegra_uart_port *tup, bool active)
+{
+ unsigned long mcr = tup->mcr_shadow;
+
+ if (active)
+ mcr |= UART_MCR_LOOP;
+ else
+ mcr &= ~UART_MCR_LOOP;
+
+ if (mcr != tup->mcr_shadow) {
+ tegra_uart_write(tup, mcr, UART_MCR);
+ tup->mcr_shadow = mcr;
+ }
+}
+
static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- int dtr_enable;
+ int enable;
tup->rts_active = !!(mctrl & TIOCM_RTS);
set_rts(tup, tup->rts_active);
- dtr_enable = !!(mctrl & TIOCM_DTR);
- set_dtr(tup, dtr_enable);
+ enable = !!(mctrl & TIOCM_DTR);
+ set_dtr(tup, enable);
+
+ enable = !!(mctrl & TIOCM_LOOP);
+ set_loopbk(tup, enable);
}
static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
@@ -243,9 +282,28 @@ static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
tup->current_baud));
}
+static int tegra_uart_wait_fifo_mode_enabled(struct tegra_uart_port *tup)
+{
+ unsigned long iir;
+ unsigned int tmout = 100;
+
+ do {
+ iir = tegra_uart_read(tup, UART_IIR);
+ if (iir & TEGRA_UART_FCR_IIR_FIFO_EN)
+ return 0;
+ udelay(1);
+ } while (--tmout);
+
+ return -ETIMEDOUT;
+}
+
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
unsigned long fcr = tup->fcr_shadow;
+ unsigned int lsr, tmout = 10000;
+
+ if (tup->rts_active)
+ set_rts(tup, false);
if (tup->cdata->allow_txfifo_reset_fifo_mode) {
fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
@@ -258,6 +316,8 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
tegra_uart_write(tup, fcr, UART_FCR);
fcr |= UART_FCR_ENABLE_FIFO;
tegra_uart_write(tup, fcr, UART_FCR);
+ if (tup->cdata->fifo_mode_enable_status)
+ tegra_uart_wait_fifo_mode_enabled(tup);
}
/* Dummy read to ensure the write is posted */
@@ -269,6 +329,47 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
* to propagate, otherwise data could be lost.
*/
tegra_uart_wait_cycle_time(tup, 32);
+
+ do {
+ lsr = tegra_uart_read(tup, UART_LSR);
+ if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+ break;
+ udelay(1);
+ } while (--tmout);
+
+ if (tup->rts_active)
+ set_rts(tup, true);
+}
+
+static long tegra_get_tolerance_rate(struct tegra_uart_port *tup,
+ unsigned int baud, long rate)
+{
+ int i;
+
+ for (i = 0; i < tup->n_adjustable_baud_rates; ++i) {
+ if (baud >= tup->baud_tolerance[i].lower_range_baud &&
+ baud <= tup->baud_tolerance[i].upper_range_baud)
+ return (rate + (rate *
+ tup->baud_tolerance[i].tolerance) / 10000);
+ }
+
+ return rate;
+}
+
+static int tegra_check_rate_in_range(struct tegra_uart_port *tup)
+{
+ long diff;
+
+ diff = ((long)(tup->configured_rate - tup->required_rate) * 10000)
+ / tup->required_rate;
+ if (diff < (tup->cdata->error_tolerance_low_range * 100) ||
+ diff > (tup->cdata->error_tolerance_high_range * 100)) {
+ dev_err(tup->uport.dev,
+ "configured baud rate is out of range by %ld", diff);
+ return -EIO;
+ }
+
+ return 0;
}
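
The two helpers above express the baud-rate adjustment in 1/10000 steps of the requested clock and then compare the achieved rate against a per-SoC window given in percent, scaled by 100 into the same 0.01% units. A worked numeric example of that arithmetic, with made-up tolerance values:

#include <stdio.h>

int main(void)
{
	long required   = 115200L * 16;		/* 1843200 Hz requested */
	long tolerance  = 200;			/* +2.00% adjustment, in 0.01% steps */
	long adjusted   = required + (required * tolerance) / 10000;

	long configured = 1862000;		/* what the clock tree actually gave */
	long diff = (configured - required) * 10000 / required;

	int low  = -2 * 100;			/* error_tolerance_low_range  = -2% */
	int high =  2 * 100;			/* error_tolerance_high_range =  2% */

	printf("adjusted request : %ld Hz\n", adjusted);		/* 1880064 */
	printf("deviation        : %ld (0.01%% units)\n", diff);
	printf("in range         : %s\n",
	       (diff >= low && diff <= high) ? "yes" : "no");
	return 0;
}
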
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
@@ -276,6 +377,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
unsigned long rate;
unsigned int divisor;
unsigned long lcr;
+ unsigned long flags;
int ret;
if (tup->current_baud == baud)
@@ -283,18 +385,28 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
if (tup->cdata->support_clk_src_div) {
rate = baud * 16;
+ tup->required_rate = rate;
+
+ if (tup->n_adjustable_baud_rates)
+ rate = tegra_get_tolerance_rate(tup, baud, rate);
+
ret = clk_set_rate(tup->uart_clk, rate);
if (ret < 0) {
dev_err(tup->uport.dev,
"clk_set_rate() failed for rate %lu\n", rate);
return ret;
}
+ tup->configured_rate = clk_get_rate(tup->uart_clk);
divisor = 1;
+ ret = tegra_check_rate_in_range(tup);
+ if (ret < 0)
+ return ret;
} else {
rate = clk_get_rate(tup->uart_clk);
divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
}
+ spin_lock_irqsave(&tup->uport.lock, flags);
lcr = tup->lcr_shadow;
lcr |= UART_LCR_DLAB;
tegra_uart_write(tup, lcr, UART_LCR);
@@ -307,6 +419,7 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
+ spin_unlock_irqrestore(&tup->uport.lock, flags);
tup->current_baud = baud;
@@ -336,13 +449,21 @@ static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
tup->uport.icount.frame++;
dev_err(tup->uport.dev, "Got frame errors\n");
} else if (lsr & UART_LSR_BI) {
- dev_err(tup->uport.dev, "Got Break\n");
- tup->uport.icount.brk++;
- /* If FIFO read error without any data, reset Rx FIFO */
+ /*
+ * Break error
+ * If FIFO read error without any data, reset Rx FIFO
+ */
if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
+ if (tup->uport.ignore_status_mask & UART_LSR_BI)
+ return TTY_BREAK;
+ flag = TTY_BREAK;
+ tup->uport.icount.brk++;
+ dev_dbg(tup->uport.dev, "Got Break\n");
}
+ uart_insert_char(&tup->uport, lsr, UART_LSR_OE, 0, flag);
}
+
return flag;
}
@@ -440,12 +561,15 @@ static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
unsigned long count;
struct circ_buf *xmit = &tup->uport.state->xmit;
+ if (!tup->current_baud)
+ return;
+
tail = (unsigned long)&xmit->buf[xmit->tail];
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!count)
return;
- if (count < TEGRA_UART_MIN_DMA)
+ if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA)
tegra_uart_start_pio_tx(tup, count);
else if (BYTES_TO_ALIGN(tail) > 0)
tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
@@ -521,11 +645,17 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
break;
flag = tegra_uart_decode_rx_error(tup, lsr);
+ if (flag != TTY_NORMAL)
+ continue;
+
ch = (unsigned char) tegra_uart_read(tup, UART_RX);
tup->uport.icount.rx++;
if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
tty_insert_flip_char(tty, ch, flag);
+
+ if (tup->uport.ignore_status_mask & UART_LSR_DR)
+ continue;
} while (1);
}
@@ -544,6 +674,10 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
dev_err(tup->uport.dev, "No tty port\n");
return;
}
+
+ if (tup->uport.ignore_status_mask & UART_LSR_DR)
+ return;
+
dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
copied = tty_insert_flip_string(tty,
@@ -668,6 +802,18 @@ static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
+static void do_handle_rx_pio(struct tegra_uart_port *tup)
+{
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &tup->uport.state->port;
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+}
+
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
struct tegra_uart_port *tup = data;
@@ -681,7 +827,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
while (1) {
iir = tegra_uart_read(tup, UART_IIR);
if (iir & UART_IIR_NO_INT) {
- if (is_rx_int) {
+ if (!tup->use_rx_pio && is_rx_int) {
tegra_uart_handle_rx_dma(tup);
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
@@ -709,7 +855,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
case 4: /* End of data */
case 6: /* Rx timeout */
case 2: /* Receive */
- if (!is_rx_int) {
+ if (!tup->use_rx_pio && !is_rx_int) {
is_rx_int = true;
/* Disable Rx interrupts */
ier = tup->ier_shadow;
@@ -719,6 +865,8 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
UART_IER_RTOIE | TEGRA_UART_IER_EORD);
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
+ } else {
+ do_handle_rx_pio(tup);
}
break;
@@ -737,6 +885,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct tty_port *port = &tup->uport.state->port;
struct dma_tx_state state;
unsigned long ier;
@@ -754,9 +903,13 @@ static void tegra_uart_stop_rx(struct uart_port *u)
tup->ier_shadow = ier;
tegra_uart_write(tup, ier, UART_IER);
tup->rx_in_progress = 0;
- dmaengine_terminate_all(tup->rx_dma_chan);
- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
- tegra_uart_rx_buffer_push(tup, state.residue);
+ if (tup->rx_dma_chan && !tup->use_rx_pio) {
+ dmaengine_terminate_all(tup->rx_dma_chan);
+ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+ tegra_uart_rx_buffer_push(tup, state.residue);
+ } else {
+ tegra_uart_handle_rx_pio(tup, port);
+ }
}
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
@@ -804,6 +957,14 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
tup->current_baud = 0;
spin_unlock_irqrestore(&tup->uport.lock, flags);
+ tup->rx_in_progress = 0;
+ tup->tx_in_progress = 0;
+
+ if (!tup->use_rx_pio)
+ tegra_uart_dma_channel_free(tup, true);
+ if (!tup->use_tx_pio)
+ tegra_uart_dma_channel_free(tup, false);
+
clk_disable_unprepare(tup->uart_clk);
}
@@ -846,35 +1007,60 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* programmed in the DMA registers.
*/
tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
- tup->fcr_shadow |= UART_FCR_R_TRIG_01;
+
+ if (tup->use_rx_pio) {
+ tup->fcr_shadow |= UART_FCR_R_TRIG_11;
+ } else {
+ if (tup->cdata->max_dma_burst_bytes == 8)
+ tup->fcr_shadow |= UART_FCR_R_TRIG_10;
+ else
+ tup->fcr_shadow |= UART_FCR_R_TRIG_01;
+ }
+
tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
- /*
- * For all tegra devices (up to t210), there is a hardware issue that
- * requires software to wait for 3 UART clock periods after enabling
- * the TX fifo, otherwise data could be lost.
- */
- tegra_uart_wait_cycle_time(tup, 3);
+ if (tup->cdata->fifo_mode_enable_status) {
+ ret = tegra_uart_wait_fifo_mode_enabled(tup);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "FIFO mode not enabled\n");
+ return ret;
+ }
+ } else {
+ /*
+ * For all tegra devices (up to t210), there is a hardware
+ * issue that requires software to wait for 3 UART clock
+ * periods after enabling the TX fifo, otherwise data could
+ * be lost.
+ */
+ tegra_uart_wait_cycle_time(tup, 3);
+ }
/*
* Initialize the UART with default configuration
* (115200, N, 8, 1) so that the receive DMA buffer may be
* enqueued
*/
- tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
- tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
- tup->fcr_shadow |= UART_FCR_DMA_SELECT;
- tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
-
- ret = tegra_uart_start_rx_dma(tup);
+ ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
if (ret < 0) {
- dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
+ dev_err(tup->uport.dev, "Failed to set baud rate\n");
return ret;
}
+ if (!tup->use_rx_pio) {
+ tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
+ tup->fcr_shadow |= UART_FCR_DMA_SELECT;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+
+ ret = tegra_uart_start_rx_dma(tup);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
+ return ret;
+ }
+ } else {
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+ }
tup->rx_in_progress = 1;
/*
@@ -895,7 +1081,12 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
* both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
* then the EORD.
*/
- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
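+ /* PIO mode has no EORD DMA event, so enable the plain RX data interrupt instead */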
+ if (!tup->use_rx_pio)
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
+ TEGRA_UART_IER_EORD;
+ else
+ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
+
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
return 0;
}
@@ -952,7 +1143,7 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
}
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- dma_sconfig.src_maxburst = 4;
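+ /* Use the per-chip maximum DMA burst size rather than a hard-coded value */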
+ dma_sconfig.src_maxburst = tup->cdata->max_dma_burst_bytes;
tup->rx_dma_chan = dma_chan;
tup->rx_dma_buf_virt = dma_buf;
tup->rx_dma_buf_phys = dma_phys;
@@ -990,16 +1181,22 @@ static int tegra_uart_startup(struct uart_port *u)
struct tegra_uart_port *tup = to_tegra_uport(u);
int ret;
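+ /* Only allocate DMA channels for directions that are not in PIO mode */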
- ret = tegra_uart_dma_channel_allocate(tup, false);
- if (ret < 0) {
- dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
- return ret;
+ if (!tup->use_tx_pio) {
+ ret = tegra_uart_dma_channel_allocate(tup, false);
+ if (ret < 0) {
+ dev_err(u->dev, "Tx Dma allocation failed, err = %d\n",
+ ret);
+ return ret;
+ }
}
- ret = tegra_uart_dma_channel_allocate(tup, true);
- if (ret < 0) {
- dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
- goto fail_rx_dma;
+ if (!tup->use_rx_pio) {
+ ret = tegra_uart_dma_channel_allocate(tup, true);
+ if (ret < 0) {
+ dev_err(u->dev, "Rx Dma allocation failed, err = %d\n",
+ ret);
+ goto fail_rx_dma;
+ }
}
ret = tegra_uart_hw_init(tup);
@@ -1017,9 +1214,11 @@ static int tegra_uart_startup(struct uart_port *u)
return 0;
fail_hw_init:
- tegra_uart_dma_channel_free(tup, true);
+ if (!tup->use_rx_pio)
+ tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
- tegra_uart_dma_channel_free(tup, false);
+ if (!tup->use_tx_pio)
+ tegra_uart_dma_channel_free(tup, false);
return ret;
}
@@ -1041,12 +1240,6 @@ static void tegra_uart_shutdown(struct uart_port *u)
struct tegra_uart_port *tup = to_tegra_uport(u);
tegra_uart_hw_deinit(tup);
-
- tup->rx_in_progress = 0;
- tup->tx_in_progress = 0;
-
- tegra_uart_dma_channel_free(tup, true);
- tegra_uart_dma_channel_free(tup, false);
free_irq(u->irq, tup);
}
@@ -1071,6 +1264,7 @@ static void tegra_uart_set_termios(struct uart_port *u,
struct clk *parent_clk = clk_get_parent(tup->uart_clk);
unsigned long parent_clk_rate = clk_get_rate(parent_clk);
int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
+ int ret;
max_divider *= 16;
spin_lock_irqsave(&u->lock, flags);
@@ -1143,7 +1337,11 @@ static void tegra_uart_set_termios(struct uart_port *u,
parent_clk_rate/max_divider,
parent_clk_rate/16);
spin_unlock_irqrestore(&u->lock, flags);
- tegra_set_baudrate(tup, baud);
+ ret = tegra_set_baudrate(tup, baud);
+ if (ret < 0) {
+ dev_err(tup->uport.dev, "Failed to set baud rate\n");
+ return;
+ }
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
spin_lock_irqsave(&u->lock, flags);
@@ -1172,6 +1370,13 @@ static void tegra_uart_set_termios(struct uart_port *u,
tegra_uart_write(tup, tup->ier_shadow, UART_IER);
tegra_uart_read(tup, UART_IER);
+ tup->uport.ignore_status_mask = 0;
+ /* Ignore all characters if CREAD is not set */
+ if ((termios->c_cflag & CREAD) == 0)
+ tup->uport.ignore_status_mask |= UART_LSR_DR;
+ if (termios->c_iflag & IGNBRK)
+ tup->uport.ignore_status_mask |= UART_LSR_BI;
+
spin_unlock_irqrestore(&u->lock, flags);
}
@@ -1211,6 +1416,11 @@ static int tegra_uart_parse_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
int port;
+ int ret;
+ int index;
+ u32 pval;
+ int count;
+ int n_entries;
port = of_alias_get_id(np, "serial");
if (port < 0) {
@@ -1221,6 +1431,54 @@ static int tegra_uart_parse_dt(struct platform_device *pdev,
tup->enable_modem_interrupt = of_property_read_bool(np,
"nvidia,enable-modem-interrupt");
+
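+ /* Fall back to PIO for any direction missing from the "dma-names" property */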
+ index = of_property_match_string(np, "dma-names", "rx");
+ if (index < 0) {
+ tup->use_rx_pio = true;
+ dev_info(&pdev->dev, "RX in PIO mode\n");
+ }
+ index = of_property_match_string(np, "dma-names", "tx");
+ if (index < 0) {
+ tup->use_tx_pio = true;
+ dev_info(&pdev->dev, "TX in PIO mode\n");
+ }
+
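+ /* Each "nvidia,adjust-baud-rates" entry is a <lower-baud upper-baud tolerance> triplet */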
+ n_entries = of_property_count_u32_elems(np, "nvidia,adjust-baud-rates");
+ if (n_entries > 0) {
+ tup->n_adjustable_baud_rates = n_entries / 3;
+ tup->baud_tolerance =
+ devm_kzalloc(&pdev->dev, (tup->n_adjustable_baud_rates) *
+ sizeof(*tup->baud_tolerance), GFP_KERNEL);
+ if (!tup->baud_tolerance)
+ return -ENOMEM;
+ for (count = 0, index = 0; count < n_entries; count += 3, index++) {
+ ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates",
+ count, &pval);
+ if (!ret)
+ tup->baud_tolerance[index].lower_range_baud = pval;
+ ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates",
+ count + 1, &pval);
+ if (!ret)
+ tup->baud_tolerance[index].upper_range_baud = pval;
+ ret = of_property_read_u32_index(np, "nvidia,adjust-baud-rates",
+ count + 2, &pval);
+ if (!ret)
+ tup->baud_tolerance[index].tolerance = (s32)pval;
+ }
+ } else {
+ tup->n_adjustable_baud_rates = 0;
+ }
+
return 0;
}
@@ -1228,12 +1486,44 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = {
.tx_fifo_full_status = false,
.allow_txfifo_reset_fifo_mode = true,
.support_clk_src_div = false,
+ .fifo_mode_enable_status = false,
+ .uart_max_port = 5,
+ .max_dma_burst_bytes = 4,
+ .error_tolerance_low_range = 0,
+ .error_tolerance_high_range = 4,
};
static struct tegra_uart_chip_data tegra30_uart_chip_data = {
.tx_fifo_full_status = true,
.allow_txfifo_reset_fifo_mode = false,
.support_clk_src_div = true,
+ .fifo_mode_enable_status = false,
+ .uart_max_port = 5,
+ .max_dma_burst_bytes = 4,
+ .error_tolerance_low_range = 0,
+ .error_tolerance_high_range = 4,
+};
+
+static struct tegra_uart_chip_data tegra186_uart_chip_data = {
+ .tx_fifo_full_status = true,
+ .allow_txfifo_reset_fifo_mode = false,
+ .support_clk_src_div = true,
+ .fifo_mode_enable_status = true,
+ .uart_max_port = 8,
+ .max_dma_burst_bytes = 8,
+ .error_tolerance_low_range = 0,
+ .error_tolerance_high_range = 4,
+};
+
+static struct tegra_uart_chip_data tegra194_uart_chip_data = {
+ .tx_fifo_full_status = true,
+ .allow_txfifo_reset_fifo_mode = false,
+ .support_clk_src_div = true,
+ .fifo_mode_enable_status = true,
+ .uart_max_port = 8,
+ .max_dma_burst_bytes = 8,
+ .error_tolerance_low_range = -2,
+ .error_tolerance_high_range = 2,
};
static const struct of_device_id tegra_uart_of_match[] = {
@@ -1244,6 +1534,12 @@ static const struct of_device_id tegra_uart_of_match[] = {
.compatible = "nvidia,tegra20-hsuart",
.data = &tegra20_uart_chip_data,
}, {
+ .compatible = "nvidia,tegra186-hsuart",
+ .data = &tegra186_uart_chip_data,
+ }, {
+ .compatible = "nvidia,tegra194-hsuart",
+ .data = &tegra194_uart_chip_data,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
@@ -1307,10 +1603,8 @@ static int tegra_uart_probe(struct platform_device *pdev)
u->iotype = UPIO_MEM32;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Couldn't get IRQ\n");
+ if (ret < 0)
return ret;
- }
u->irq = ret;
u->regshift = 2;
ret = uart_add_one_port(&tegra_uart_driver, u);
@@ -1365,11 +1659,22 @@ static struct platform_driver tegra_uart_platform_driver = {
static int __init tegra_uart_init(void)
{
int ret;
+ struct device_node *node;
+ const struct of_device_id *match = NULL;
+ const struct tegra_uart_chip_data *cdata = NULL;
+
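+ /* Size the uart_driver's port count from the matched chip data before registering */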
+ node = of_find_matching_node(NULL, tegra_uart_of_match);
+ if (node)
+ match = of_match_node(tegra_uart_of_match, node);
+ if (match)
+ cdata = match->data;
+ if (cdata)
+ tegra_uart_driver.nr = cdata->uart_max_port;
ret = uart_register_driver(&tegra_uart_driver);
if (ret < 0) {
pr_err("Could not register %s driver\n",
- tegra_uart_driver.driver_name);
+ tegra_uart_driver.driver_name);
return ret;
}
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
deleted file mode 100644
index b461d791188c..000000000000
--- a/drivers/tty/serial/serial_ks8695.c
+++ /dev/null
@@ -1,698 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for KS8695 serial ports
- *
- * Based on drivers/serial/serial_amba.c, by Kam Lee.
- *
- * Copyright 2002-2005 Micrel Inc.
- */
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-#include <mach/regs-uart.h>
-#include <mach/regs-irq.h>
-
-#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/serial_core.h>
-
-
-#define SERIAL_KS8695_MAJOR 204
-#define SERIAL_KS8695_MINOR 16
-#define SERIAL_KS8695_DEVNAME "ttyAM"
-
-#define SERIAL_KS8695_NR 1
-
-/*
- * Access macros for the KS8695 UART
- */
-#define UART_GET_CHAR(p) (__raw_readl((p)->membase + KS8695_URRB) & 0xFF)
-#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + KS8695_URTH)
-#define UART_GET_FCR(p) __raw_readl((p)->membase + KS8695_URFC)
-#define UART_PUT_FCR(p, c) __raw_writel((c), (p)->membase + KS8695_URFC)
-#define UART_GET_MSR(p) __raw_readl((p)->membase + KS8695_URMS)
-#define UART_GET_LSR(p) __raw_readl((p)->membase + KS8695_URLS)
-#define UART_GET_LCR(p) __raw_readl((p)->membase + KS8695_URLC)
-#define UART_PUT_LCR(p, c) __raw_writel((c), (p)->membase + KS8695_URLC)
-#define UART_GET_MCR(p) __raw_readl((p)->membase + KS8695_URMC)
-#define UART_PUT_MCR(p, c) __raw_writel((c), (p)->membase + KS8695_URMC)
-#define UART_GET_BRDR(p) __raw_readl((p)->membase + KS8695_URBD)
-#define UART_PUT_BRDR(p, c) __raw_writel((c), (p)->membase + KS8695_URBD)
-
-#define KS8695_CLR_TX_INT() __raw_writel(1 << KS8695_IRQ_UART_TX, KS8695_IRQ_VA + KS8695_INTST)
-
-#define UART_DUMMY_LSR_RX 0x100
-#define UART_PORT_SIZE (KS8695_USR - KS8695_URRB + 4)
-
-static inline int tx_enabled(struct uart_port *port)
-{
- return port->unused[0] & 1;
-}
-
-static inline int rx_enabled(struct uart_port *port)
-{
- return port->unused[0] & 2;
-}
-
-static inline int ms_enabled(struct uart_port *port)
-{
- return port->unused[0] & 4;
-}
-
-static inline void ms_enable(struct uart_port *port, int enabled)
-{
- if(enabled)
- port->unused[0] |= 4;
- else
- port->unused[0] &= ~4;
-}
-
-static inline void rx_enable(struct uart_port *port, int enabled)
-{
- if(enabled)
- port->unused[0] |= 2;
- else
- port->unused[0] &= ~2;
-}
-
-static inline void tx_enable(struct uart_port *port, int enabled)
-{
- if(enabled)
- port->unused[0] |= 1;
- else
- port->unused[0] &= ~1;
-}
-
-
-#ifdef SUPPORT_SYSRQ
-static struct console ks8695_console;
-#endif
-
-static void ks8695uart_stop_tx(struct uart_port *port)
-{
- if (tx_enabled(port)) {
- /* use disable_irq_nosync() and not disable_irq() to avoid self
- * imposed deadlock by not waiting for irq handler to end,
- * since this ks8695uart_stop_tx() is called from interrupt context.
- */
- disable_irq_nosync(KS8695_IRQ_UART_TX);
- tx_enable(port, 0);
- }
-}
-
-static void ks8695uart_start_tx(struct uart_port *port)
-{
- if (!tx_enabled(port)) {
- enable_irq(KS8695_IRQ_UART_TX);
- tx_enable(port, 1);
- }
-}
-
-static void ks8695uart_stop_rx(struct uart_port *port)
-{
- if (rx_enabled(port)) {
- disable_irq(KS8695_IRQ_UART_RX);
- rx_enable(port, 0);
- }
-}
-
-static void ks8695uart_enable_ms(struct uart_port *port)
-{
- if (!ms_enabled(port)) {
- enable_irq(KS8695_IRQ_UART_MODEM_STATUS);
- ms_enable(port,1);
- }
-}
-
-static void ks8695uart_disable_ms(struct uart_port *port)
-{
- if (ms_enabled(port)) {
- disable_irq(KS8695_IRQ_UART_MODEM_STATUS);
- ms_enable(port,0);
- }
-}
-
-static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- unsigned int status, ch, lsr, flg, max_count = 256;
-
- status = UART_GET_LSR(port); /* clears pending LSR interrupts */
- while ((status & URLS_URDR) && max_count--) {
- ch = UART_GET_CHAR(port);
- flg = TTY_NORMAL;
-
- port->icount.rx++;
-
- /*
- * Note that the error handling code is
- * out of the main execution path
- */
- lsr = UART_GET_LSR(port) | UART_DUMMY_LSR_RX;
- if (unlikely(lsr & (URLS_URBI | URLS_URPE | URLS_URFE | URLS_URROE))) {
- if (lsr & URLS_URBI) {
- lsr &= ~(URLS_URFE | URLS_URPE);
- port->icount.brk++;
- if (uart_handle_break(port))
- goto ignore_char;
- }
- if (lsr & URLS_URPE)
- port->icount.parity++;
- if (lsr & URLS_URFE)
- port->icount.frame++;
- if (lsr & URLS_URROE)
- port->icount.overrun++;
-
- lsr &= port->read_status_mask;
-
- if (lsr & URLS_URBI)
- flg = TTY_BREAK;
- else if (lsr & URLS_URPE)
- flg = TTY_PARITY;
- else if (lsr & URLS_URFE)
- flg = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, URLS_URROE, ch, flg);
-
-ignore_char:
- status = UART_GET_LSR(port);
- }
- tty_flip_buffer_push(&port->state->port);
-
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- struct circ_buf *xmit = &port->state->xmit;
- unsigned int count;
-
- if (port->x_char) {
- KS8695_CLR_TX_INT();
- UART_PUT_CHAR(port, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return IRQ_HANDLED;
- }
-
- if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
- ks8695uart_stop_tx(port);
- return IRQ_HANDLED;
- }
-
- count = 16; /* fifo size */
- while (!uart_circ_empty(xmit) && (count-- > 0)) {
- KS8695_CLR_TX_INT();
- UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
-
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- ks8695uart_stop_tx(port);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- unsigned int status;
-
- /*
- * clear modem interrupt by reading MSR
- */
- status = UART_GET_MSR(port);
-
- if (status & URMS_URDDCD)
- uart_handle_dcd_change(port, status & URMS_URDDCD);
-
- if (status & URMS_URDDST)
- port->icount.dsr++;
-
- if (status & URMS_URDCTS)
- uart_handle_cts_change(port, status & URMS_URDCTS);
-
- if (status & URMS_URTERI)
- port->icount.rng++;
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-
- return IRQ_HANDLED;
-}
-
-static unsigned int ks8695uart_tx_empty(struct uart_port *port)
-{
- return (UART_GET_LSR(port) & URLS_URTE) ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int ks8695uart_get_mctrl(struct uart_port *port)
-{
- unsigned int result = 0;
- unsigned int status;
-
- status = UART_GET_MSR(port);
- if (status & URMS_URDCD)
- result |= TIOCM_CAR;
- if (status & URMS_URDSR)
- result |= TIOCM_DSR;
- if (status & URMS_URCTS)
- result |= TIOCM_CTS;
- if (status & URMS_URRI)
- result |= TIOCM_RI;
-
- return result;
-}
-
-static void ks8695uart_set_mctrl(struct uart_port *port, u_int mctrl)
-{
- unsigned int mcr;
-
- mcr = UART_GET_MCR(port);
- if (mctrl & TIOCM_RTS)
- mcr |= URMC_URRTS;
- else
- mcr &= ~URMC_URRTS;
-
- if (mctrl & TIOCM_DTR)
- mcr |= URMC_URDTR;
- else
- mcr &= ~URMC_URDTR;
-
- UART_PUT_MCR(port, mcr);
-}
-
-static void ks8695uart_break_ctl(struct uart_port *port, int break_state)
-{
- unsigned int lcr;
-
- lcr = UART_GET_LCR(port);
-
- if (break_state == -1)
- lcr |= URLC_URSBC;
- else
- lcr &= ~URLC_URSBC;
-
- UART_PUT_LCR(port, lcr);
-}
-
-static int ks8695uart_startup(struct uart_port *port)
-{
- int retval;
-
- irq_modify_status(KS8695_IRQ_UART_TX, IRQ_NOREQUEST, IRQ_NOAUTOEN);
- tx_enable(port, 0);
- rx_enable(port, 1);
- ms_enable(port, 1);
-
- /*
- * Allocate the IRQ
- */
- retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, 0, "UART TX", port);
- if (retval)
- goto err_tx;
-
- retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, 0, "UART RX", port);
- if (retval)
- goto err_rx;
-
- retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, 0, "UART LineStatus", port);
- if (retval)
- goto err_ls;
-
- retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, 0, "UART ModemStatus", port);
- if (retval)
- goto err_ms;
-
- return 0;
-
-err_ms:
- free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
-err_ls:
- free_irq(KS8695_IRQ_UART_RX, port);
-err_rx:
- free_irq(KS8695_IRQ_UART_TX, port);
-err_tx:
- return retval;
-}
-
-static void ks8695uart_shutdown(struct uart_port *port)
-{
- /*
- * Free the interrupt
- */
- free_irq(KS8695_IRQ_UART_RX, port);
- free_irq(KS8695_IRQ_UART_TX, port);
- free_irq(KS8695_IRQ_UART_MODEM_STATUS, port);
- free_irq(KS8695_IRQ_UART_LINE_STATUS, port);
-
- /* disable break condition and fifos */
- UART_PUT_LCR(port, UART_GET_LCR(port) & ~URLC_URSBC);
- UART_PUT_FCR(port, UART_GET_FCR(port) & ~URFC_URFE);
-}
-
-static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old)
-{
- unsigned int lcr, fcr = 0;
- unsigned long flags;
- unsigned int baud, quot;
-
- /*
- * Ask the core to calculate the divisor for us.
- */
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- lcr = URCL_5;
- break;
- case CS6:
- lcr = URCL_6;
- break;
- case CS7:
- lcr = URCL_7;
- break;
- default:
- lcr = URCL_8;
- break;
- }
-
- /* stop bits */
- if (termios->c_cflag & CSTOPB)
- lcr |= URLC_URSB;
-
- /* parity */
- if (termios->c_cflag & PARENB) {
- if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */
- if (termios->c_cflag & PARODD)
- lcr |= URPE_MARK;
- else
- lcr |= URPE_SPACE;
- }
- else if (termios->c_cflag & PARODD)
- lcr |= URPE_ODD;
- else
- lcr |= URPE_EVEN;
- }
-
- if (port->fifosize > 1)
- fcr = URFC_URFRT_8 | URFC_URTFR | URFC_URRFR | URFC_URFE;
-
- spin_lock_irqsave(&port->lock, flags);
-
- /*
- * Update the per-port timeout.
- */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- port->read_status_mask = URLS_URROE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= (URLS_URFE | URLS_URPE);
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= URLS_URBI;
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= (URLS_URFE | URLS_URPE);
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= URLS_URBI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= URLS_URROE;
- }
-
- /*
- * Ignore all characters if CREAD is not set.
- */
- if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= UART_DUMMY_LSR_RX;
-
- /* first, disable everything */
- if (UART_ENABLE_MS(port, termios->c_cflag))
- ks8695uart_enable_ms(port);
- else
- ks8695uart_disable_ms(port);
-
- /* Set baud rate */
- UART_PUT_BRDR(port, quot);
-
- UART_PUT_LCR(port, lcr);
- UART_PUT_FCR(port, fcr);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static const char *ks8695uart_type(struct uart_port *port)
-{
- return port->type == PORT_KS8695 ? "KS8695" : NULL;
-}
-
-/*
- * Release the memory region(s) being used by 'port'
- */
-static void ks8695uart_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, UART_PORT_SIZE);
-}
-
-/*
- * Request the memory region(s) being used by 'port'
- */
-static int ks8695uart_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, UART_PORT_SIZE,
- "serial_ks8695") != NULL ? 0 : -EBUSY;
-}
-
-/*
- * Configure/autoconfigure the port.
- */
-static void ks8695uart_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE) {
- port->type = PORT_KS8695;
- ks8695uart_request_port(port);
- }
-}
-
-/*
- * verify the new serial_struct (for TIOCSSERIAL).
- */
-static int ks8695uart_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- int ret = 0;
-
- if (ser->type != PORT_UNKNOWN && ser->type != PORT_KS8695)
- ret = -EINVAL;
- if (ser->irq != port->irq)
- ret = -EINVAL;
- if (ser->baud_base < 9600)
- ret = -EINVAL;
- return ret;
-}
-
-static struct uart_ops ks8695uart_pops = {
- .tx_empty = ks8695uart_tx_empty,
- .set_mctrl = ks8695uart_set_mctrl,
- .get_mctrl = ks8695uart_get_mctrl,
- .stop_tx = ks8695uart_stop_tx,
- .start_tx = ks8695uart_start_tx,
- .stop_rx = ks8695uart_stop_rx,
- .enable_ms = ks8695uart_enable_ms,
- .break_ctl = ks8695uart_break_ctl,
- .startup = ks8695uart_startup,
- .shutdown = ks8695uart_shutdown,
- .set_termios = ks8695uart_set_termios,
- .type = ks8695uart_type,
- .release_port = ks8695uart_release_port,
- .request_port = ks8695uart_request_port,
- .config_port = ks8695uart_config_port,
- .verify_port = ks8695uart_verify_port,
-};
-
-static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
- {
- .membase = KS8695_UART_VA,
- .mapbase = KS8695_UART_PA,
- .iotype = SERIAL_IO_MEM,
- .irq = KS8695_IRQ_UART_TX,
- .uartclk = KS8695_CLOCK_RATE * 16,
- .fifosize = 16,
- .ops = &ks8695uart_pops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- }
-};
-
-#ifdef CONFIG_SERIAL_KS8695_CONSOLE
-static void ks8695_console_putchar(struct uart_port *port, int ch)
-{
- while (!(UART_GET_LSR(port) & URLS_URTHRE))
- barrier();
-
- UART_PUT_CHAR(port, ch);
-}
-
-static void ks8695_console_write(struct console *co, const char *s, u_int count)
-{
- struct uart_port *port = ks8695uart_ports + co->index;
-
- uart_console_write(port, s, count, ks8695_console_putchar);
-}
-
-static void __init ks8695_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits)
-{
- unsigned int lcr;
-
- lcr = UART_GET_LCR(port);
-
- switch (lcr & URLC_PARITY) {
- case URPE_ODD:
- *parity = 'o';
- break;
- case URPE_EVEN:
- *parity = 'e';
- break;
- default:
- *parity = 'n';
- }
-
- switch (lcr & URLC_URCL) {
- case URCL_5:
- *bits = 5;
- break;
- case URCL_6:
- *bits = 6;
- break;
- case URCL_7:
- *bits = 7;
- break;
- default:
- *bits = 8;
- }
-
- *baud = port->uartclk / (UART_GET_BRDR(port) & 0x0FFF);
- *baud /= 16;
- *baud &= 0xFFFFFFF0;
-}
-
-static int __init ks8695_console_setup(struct console *co, char *options)
-{
- struct uart_port *port;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- /*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- port = uart_get_console(ks8695uart_ports, SERIAL_KS8695_NR, co);
-
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- ks8695_console_get_options(port, &baud, &parity, &bits);
-
- return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver ks8695_reg;
-
-static struct console ks8695_console = {
- .name = SERIAL_KS8695_DEVNAME,
- .write = ks8695_console_write,
- .device = uart_console_device,
- .setup = ks8695_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &ks8695_reg,
-};
-
-static int __init ks8695_console_init(void)
-{
- add_preferred_console(SERIAL_KS8695_DEVNAME, 0, NULL);
- register_console(&ks8695_console);
- return 0;
-}
-
-console_initcall(ks8695_console_init);
-
-#define KS8695_CONSOLE &ks8695_console
-#else
-#define KS8695_CONSOLE NULL
-#endif
-
-static struct uart_driver ks8695_reg = {
- .owner = THIS_MODULE,
- .driver_name = "serial_ks8695",
- .dev_name = SERIAL_KS8695_DEVNAME,
- .major = SERIAL_KS8695_MAJOR,
- .minor = SERIAL_KS8695_MINOR,
- .nr = SERIAL_KS8695_NR,
- .cons = KS8695_CONSOLE,
-};
-
-static int __init ks8695uart_init(void)
-{
- int i, ret;
-
- printk(KERN_INFO "Serial: Micrel KS8695 UART driver\n");
-
- ret = uart_register_driver(&ks8695_reg);
- if (ret)
- return ret;
-
- for (i = 0; i < SERIAL_KS8695_NR; i++)
- uart_add_one_port(&ks8695_reg, &ks8695uart_ports[0]);
-
- return 0;
-}
-
-static void __exit ks8695uart_exit(void)
-{
- int i;
-
- for (i = 0; i < SERIAL_KS8695_NR; i++)
- uart_remove_one_port(&ks8695_reg, &ks8695uart_ports[0]);
- uart_unregister_driver(&ks8695_reg);
-}
-
-module_init(ks8695uart_init);
-module_exit(ks8695uart_exit);
-
-MODULE_DESCRIPTION("KS8695 serial port driver");
-MODULE_AUTHOR("Micrel Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 2b400189be91..d9074303c88e 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -27,16 +27,21 @@ struct mctrl_gpios {
static const struct {
const char *name;
unsigned int mctrl;
- bool dir_out;
+ enum gpiod_flags flags;
} mctrl_gpios_desc[UART_GPIO_MAX] = {
- { "cts", TIOCM_CTS, false, },
- { "dsr", TIOCM_DSR, false, },
- { "dcd", TIOCM_CD, false, },
- { "rng", TIOCM_RNG, false, },
- { "rts", TIOCM_RTS, true, },
- { "dtr", TIOCM_DTR, true, },
+ { "cts", TIOCM_CTS, GPIOD_IN, },
+ { "dsr", TIOCM_DSR, GPIOD_IN, },
+ { "dcd", TIOCM_CD, GPIOD_IN, },
+ { "rng", TIOCM_RNG, GPIOD_IN, },
+ { "rts", TIOCM_RTS, GPIOD_OUT_LOW, },
+ { "dtr", TIOCM_DTR, GPIOD_OUT_LOW, },
};
+static bool mctrl_gpio_flags_is_dir_out(unsigned int idx)
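+/* Direction is now encoded in gpiod_flags; test the DIR_OUT bit instead of a separate dir_out field */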
+{
+ return mctrl_gpios_desc[idx].flags & GPIOD_FLAGS_BIT_DIR_OUT;
+}
+
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
@@ -48,7 +53,7 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
return;
for (i = 0; i < UART_GPIO_MAX; i++)
- if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
+ if (gpios->gpio[i] && mctrl_gpio_flags_is_dir_out(i)) {
desc_array[count] = gpios->gpio[i];
__assign_bit(count, values,
mctrl & mctrl_gpios_desc[i].mctrl);
@@ -73,7 +78,7 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
return *mctrl;
for (i = 0; i < UART_GPIO_MAX; i++) {
- if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) {
+ if (gpios->gpio[i] && !mctrl_gpio_flags_is_dir_out(i)) {
if (gpiod_get_value(gpios->gpio[i]))
*mctrl |= mctrl_gpios_desc[i].mctrl;
else
@@ -94,7 +99,7 @@ mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
return *mctrl;
for (i = 0; i < UART_GPIO_MAX; i++) {
- if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
+ if (gpios->gpio[i] && mctrl_gpio_flags_is_dir_out(i)) {
if (gpiod_get_value(gpios->gpio[i]))
*mctrl |= mctrl_gpios_desc[i].mctrl;
else
@@ -116,7 +121,6 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
return ERR_PTR(-ENOMEM);
for (i = 0; i < UART_GPIO_MAX; i++) {
- enum gpiod_flags flags;
char *gpio_str;
bool present;
@@ -131,15 +135,11 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
if (!present)
continue;
- if (mctrl_gpios_desc[i].dir_out)
- flags = GPIOD_OUT_LOW;
- else
- flags = GPIOD_IN;
-
gpios->gpio[i] =
devm_gpiod_get_index_optional(dev,
mctrl_gpios_desc[i].name,
- idx, flags);
+ idx,
+ mctrl_gpios_desc[i].flags);
if (IS_ERR(gpios->gpio[i]))
return ERR_CAST(gpios->gpio[i]);
@@ -200,7 +200,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
for (i = 0; i < UART_GPIO_MAX; ++i) {
int ret;
- if (!gpios->gpio[i] || mctrl_gpios_desc[i].dir_out)
+ if (!gpios->gpio[i] || mctrl_gpio_flags_is_dir_out(i))
continue;
ret = gpiod_to_irq(gpios->gpio[i]);
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index b7d3cca48ede..1b2ff503b2c2 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -114,19 +114,19 @@ static inline
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline
struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index d18c680aa64b..4e754a4850e6 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1092,9 +1092,8 @@ static void rx_fifo_timer_fn(struct timer_list *t)
scif_set_rtrg(port, 1);
}
-static ssize_t rx_trigger_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t rx_fifo_trigger_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
@@ -1102,10 +1101,9 @@ static ssize_t rx_trigger_show(struct device *dev,
return sprintf(buf, "%d\n", sci->rx_trigger);
}
-static ssize_t rx_trigger_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t rx_fifo_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
@@ -1123,7 +1121,7 @@ static ssize_t rx_trigger_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(rx_fifo_trigger, 0644, rx_trigger_show, rx_trigger_store);
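+/* DEVICE_ATTR_RW() expects <name>_show/<name>_store helpers, hence the renames above */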
+static DEVICE_ATTR_RW(rx_fifo_trigger);
static ssize_t rx_fifo_timeout_show(struct device *dev,
struct device_attribute *attr,
@@ -2101,12 +2099,12 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
if (s->autorts) {
if (sci_get_cts(port))
mctrl |= TIOCM_CTS;
- } else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
+ } else if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS)) {
mctrl |= TIOCM_CTS;
}
- if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
+ if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR))
mctrl |= TIOCM_DSR;
- if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
+ if (!mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD))
mctrl |= TIOCM_CAR;
return mctrl;
@@ -3152,14 +3150,10 @@ static int sci_remove(struct platform_device *dev)
sci_cleanup_single(port);
- if (port->port.fifosize > 1) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
- }
- if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_timeout.attr);
- }
+ if (port->port.fifosize > 1)
+ device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
+ device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);
return 0;
}
@@ -3287,14 +3281,12 @@ static int sci_probe_single(struct platform_device *dev,
return ret;
sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
- if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
+ if (IS_ERR(sciport->gpios))
return PTR_ERR(sciport->gpios);
if (sciport->has_rtscts) {
- if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
- UART_GPIO_CTS)) ||
- !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
- UART_GPIO_RTS))) {
+ if (mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_CTS) ||
+ mctrl_gpio_to_gpiod(sciport->gpios, UART_GPIO_RTS)) {
dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
return -EINVAL;
}
@@ -3347,19 +3339,17 @@ static int sci_probe(struct platform_device *dev)
return ret;
if (sp->port.fifosize > 1) {
- ret = sysfs_create_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
+ ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_trigger);
if (ret)
return ret;
}
if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
sp->port.type == PORT_HSCIF) {
- ret = sysfs_create_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_timeout.attr);
+ ret = device_create_file(&dev->dev, &dev_attr_rx_fifo_timeout);
if (ret) {
if (sp->port.fifosize > 1) {
- sysfs_remove_file(&dev->dev.kobj,
- &dev_attr_rx_fifo_trigger.attr);
+ device_remove_file(&dev->dev,
+ &dev_attr_rx_fifo_trigger);
}
return ret;
}
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index be4687814353..d5f81b98e4d7 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -896,10 +896,8 @@ static int sifive_serial_probe(struct platform_device *pdev)
int irq, id, r;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "could not acquire interrupt\n");
+ if (irq < 0)
return -EPROBE_DEFER;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, mem);
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
deleted file mode 100644
index 283493358a62..000000000000
--- a/drivers/tty/serial/sn_console.c
+++ /dev/null
@@ -1,1036 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * C-Brick Serial Port (and console) driver for SGI Altix machines.
- *
- * This driver is NOT suitable for talking to the l1-controller for
- * anything other than 'console activities' --- please use the l1
- * driver for that.
- *
- *
- * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
- *
- * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/sysrq.h>
-#include <linux/circ_buf.h>
-#include <linux/serial_reg.h>
-#include <linux/delay.h> /* for mdelay */
-#include <linux/miscdevice.h>
-#include <linux/serial_core.h>
-
-#include <asm/io.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn_sal.h>
-
-/* number of characters we can transmit to the SAL console at a time */
-#define SN_SAL_MAX_CHARS 120
-
-/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
- * avoid losing chars, (always has to be a power of 2) */
-#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
-
-#define SN_SAL_UART_FIFO_DEPTH 16
-#define SN_SAL_UART_FIFO_SPEED_CPS (9600/10)
-
-/* sn_transmit_chars() calling args */
-#define TRANSMIT_BUFFERED 0
-#define TRANSMIT_RAW 1
-
-/* To use dynamic numbers only and not use the assigned major and minor,
- * define the following.. */
- /* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */
-#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
-
-/* Device name we're using */
-#define DEVICE_NAME "ttySG"
-#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
-/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
-#define DEVICE_MAJOR 204
-#define DEVICE_MINOR 40
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static char sysrq_serial_str[] = "\eSYS";
-static char *sysrq_serial_ptr = sysrq_serial_str;
-static unsigned long sysrq_requested;
-#endif /* CONFIG_MAGIC_SYSRQ */
-
-/*
- * Port definition - this kinda drives it all
- */
-struct sn_cons_port {
- struct timer_list sc_timer;
- struct uart_port sc_port;
- struct sn_sal_ops {
- int (*sal_puts_raw) (const char *s, int len);
- int (*sal_puts) (const char *s, int len);
- int (*sal_getc) (void);
- int (*sal_input_pending) (void);
- void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
- } *sc_ops;
- unsigned long sc_interrupt_timeout;
- int sc_is_asynch;
-};
-
-static struct sn_cons_port sal_console_port;
-static int sn_process_input;
-
-/* Only used if USE_DYNAMIC_MINOR is set to 1 */
-static struct miscdevice misc; /* used with misc_register for dynamic */
-
-extern void early_sn_setup(void);
-
-#undef DEBUG
-#ifdef DEBUG
-static int sn_debug_printf(const char *fmt, ...);
-#define DPRINTF(x...) sn_debug_printf(x)
-#else
-#define DPRINTF(x...) do { } while (0)
-#endif
-
-/* Prototypes */
-static int snt_hw_puts_raw(const char *, int);
-static int snt_hw_puts_buffered(const char *, int);
-static int snt_poll_getc(void);
-static int snt_poll_input_pending(void);
-static int snt_intr_getc(void);
-static int snt_intr_input_pending(void);
-static void sn_transmit_chars(struct sn_cons_port *, int);
-
-/* A table for polling:
- */
-static struct sn_sal_ops poll_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_raw,
- .sal_getc = snt_poll_getc,
- .sal_input_pending = snt_poll_input_pending
-};
-
-/* A table for interrupts enabled */
-static struct sn_sal_ops intr_ops = {
- .sal_puts_raw = snt_hw_puts_raw,
- .sal_puts = snt_hw_puts_buffered,
- .sal_getc = snt_intr_getc,
- .sal_input_pending = snt_intr_input_pending,
- .sal_wakeup_transmit = sn_transmit_chars
-};
-
-/* the console does output in two distinctly different ways:
- * synchronous (raw) and asynchronous (buffered). initially, early_printk
- * does synchronous output. any data written goes directly to the SAL
- * to be output (incidentally, it is internally buffered by the SAL)
- * after interrupts and timers are initialized and available for use,
- * the console init code switches to asynchronous output. this is
- * also the earliest opportunity to begin polling for console input.
- * after console initialization, console output and tty (serial port)
- * output is buffered and sent to the SAL asynchronously (either by
- * timer callback or by UART interrupt) */
-
-/* routines for running the console in polling mode */
-
-/**
- * snt_poll_getc - Get a character from the console in polling mode
- *
- */
-static int snt_poll_getc(void)
-{
- int ch;
-
- ia64_sn_console_getc(&ch);
- return ch;
-}
-
-/**
- * snt_poll_input_pending - Check if any input is waiting - polling mode.
- *
- */
-static int snt_poll_input_pending(void)
-{
- int status, input;
-
- status = ia64_sn_console_check(&input);
- return !status && input;
-}
-
-/* routines for an interrupt driven console (normal) */
-
-/**
- * snt_intr_getc - Get a character from the console, interrupt mode
- *
- */
-static int snt_intr_getc(void)
-{
- return ia64_sn_console_readc();
-}
-
-/**
- * snt_intr_input_pending - Check if input is pending, interrupt mode
- *
- */
-static int snt_intr_input_pending(void)
-{
- return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
-}
-
-/* these functions are polled and interrupt */
-
-/**
- * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int snt_hw_puts_raw(const char *s, int len)
-{
- /* this will call the PROM and not return until this is done */
- return ia64_sn_console_putb(s, len);
-}
-
-/**
- * snt_hw_puts_buffered - Send string to console, polled or interrupt mode
- * @s: String
- * @len: Length
- *
- */
-static int snt_hw_puts_buffered(const char *s, int len)
-{
- /* queue data to the PROM */
- return ia64_sn_console_xmit_chars((char *)s, len);
-}
-
-/* uart interface structs
- * These functions are associated with the uart_port that the serial core
- * infrastructure calls.
- *
- * Note: Due to how the console works, many routines are no-ops.
- */
-
-/**
- * snp_type - What type of console are we?
- * @port: Port to operate with (we ignore since we only have one port)
- *
- */
-static const char *snp_type(struct uart_port *port)
-{
- return ("SGI SN L1");
-}
-
-/**
- * snp_tx_empty - Is the transmitter empty? We pretend we're always empty
- * @port: Port to operate on (we ignore since we only have one port)
- *
- */
-static unsigned int snp_tx_empty(struct uart_port *port)
-{
- return 1;
-}
-
-/**
- * snp_stop_tx - stop the transmitter - no-op for us
- * @port: Port to operat eon - we ignore - no-op function
- *
- */
-static void snp_stop_tx(struct uart_port *port)
-{
-}
-
-/**
- * snp_release_port - Free i/o and resources for port - no-op for us
- * @port: Port to operate on - we ignore - no-op function
- *
- */
-static void snp_release_port(struct uart_port *port)
-{
-}
-
-/**
- * snp_shutdown - shut down the port - free irq and disable - no-op for us
- * @port: Port to shut down - we ignore
- *
- */
-static void snp_shutdown(struct uart_port *port)
-{
-}
-
-/**
- * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
- * @port: Port to operate on - we ignore
- * @mctrl: Lines to set/unset - we ignore
- *
- */
-static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-}
-
-/**
- * snp_get_mctrl - get contorl line info, we just return a static value
- * @port: port to operate on - we only have one port so we ignore this
- *
- */
-static unsigned int snp_get_mctrl(struct uart_port *port)
-{
- return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
-}
-
-/**
- * snp_stop_rx - Stop the receiver - we ignor ethis
- * @port: Port to operate on - we ignore
- *
- */
-static void snp_stop_rx(struct uart_port *port)
-{
-}
-
-/**
- * snp_start_tx - Start transmitter
- * @port: Port to operate on
- *
- */
-static void snp_start_tx(struct uart_port *port)
-{
- if (sal_console_port.sc_ops->sal_wakeup_transmit)
- sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
- TRANSMIT_BUFFERED);
-
-}
-
-/**
- * snp_break_ctl - handle breaks - ignored by us
- * @port: Port to operate on
- * @break_state: Break state
- *
- */
-static void snp_break_ctl(struct uart_port *port, int break_state)
-{
-}
-
-/**
- * snp_startup - Start up the serial port - always return 0 (We're always on)
- * @port: Port to operate on
- *
- */
-static int snp_startup(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_set_termios - set termios stuff - we ignore these
- * @port: port to operate on
- * @termios: New settings
- * @termios: Old
- *
- */
-static void
-snp_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
-}
-
-/**
- * snp_request_port - allocate resources for port - ignored by us
- * @port: port to operate on
- *
- */
-static int snp_request_port(struct uart_port *port)
-{
- return 0;
-}
-
-/**
- * snp_config_port - allocate resources, set up - we ignore, we're always on
- * @port: Port to operate on
- * @flags: flags used for port setup
- *
- */
-static void snp_config_port(struct uart_port *port, int flags)
-{
-}
-
-/* Associate the uart functions above - given to serial core */
-
-static const struct uart_ops sn_console_ops = {
- .tx_empty = snp_tx_empty,
- .set_mctrl = snp_set_mctrl,
- .get_mctrl = snp_get_mctrl,
- .stop_tx = snp_stop_tx,
- .start_tx = snp_start_tx,
- .stop_rx = snp_stop_rx,
- .break_ctl = snp_break_ctl,
- .startup = snp_startup,
- .shutdown = snp_shutdown,
- .set_termios = snp_set_termios,
- .pm = NULL,
- .type = snp_type,
- .release_port = snp_release_port,
- .request_port = snp_request_port,
- .config_port = snp_config_port,
- .verify_port = NULL,
-};
-
-/* End of uart struct functions and defines */
-
-#ifdef DEBUG
-
-/**
- * sn_debug_printf - close to hardware debugging printf
- * @fmt: printf format
- *
- * This is as "close to the metal" as we can get, used when the driver
- * itself may be broken.
- *
- */
-static int sn_debug_printf(const char *fmt, ...)
-{
- static char printk_buf[1024];
- int printed_len;
- va_list args;
-
- va_start(args, fmt);
- printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-
- if (!sal_console_port.sc_ops) {
- sal_console_port.sc_ops = &poll_ops;
- early_sn_setup();
- }
- sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);
-
- va_end(args);
- return printed_len;
-}
-#endif /* DEBUG */
-
-/*
- * Interrupt handling routines.
- */
-
-/**
- * sn_receive_chars - Grab characters, pass them to tty layer
- * @port: Port to operate on
- * @flags: irq flags
- *
- * Note: If we're not registered with the serial core infrastructure yet,
- * we don't try to send characters to it...
- *
- */
-static void
-sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
-{
- struct tty_port *tport = NULL;
- int ch;
-
- if (!port) {
- printk(KERN_ERR "sn_receive_chars - port NULL so can't receive\n");
- return;
- }
-
- if (!port->sc_ops) {
- printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receive\n");
- return;
- }
-
- if (port->sc_port.state) {
- /* The serial_core stuffs are initialized, use them */
- tport = &port->sc_port.state->port;
- }
-
- while (port->sc_ops->sal_input_pending()) {
- ch = port->sc_ops->sal_getc();
- if (ch < 0) {
- printk(KERN_ERR "sn_console: An error occurred while "
- "obtaining data from the console (0x%0x)\n", ch);
- break;
- }
-#ifdef CONFIG_MAGIC_SYSRQ
- if (sysrq_requested) {
- unsigned long sysrq_timeout = sysrq_requested + HZ*5;
-
- sysrq_requested = 0;
- if (ch && time_before(jiffies, sysrq_timeout)) {
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- handle_sysrq(ch);
- spin_lock_irqsave(&port->sc_port.lock, flags);
- /* ignore actual sysrq command char */
- continue;
- }
- }
- if (ch == *sysrq_serial_ptr) {
- if (!(*++sysrq_serial_ptr)) {
- sysrq_requested = jiffies;
- sysrq_serial_ptr = sysrq_serial_str;
- }
- /*
- * ignore the whole sysrq string except for the
- * leading escape
- */
- if (ch != '\e')
- continue;
- }
- else
- sysrq_serial_ptr = sysrq_serial_str;
-#endif /* CONFIG_MAGIC_SYSRQ */
-
- /* record the character to pass up to the tty layer */
- if (tport) {
- if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
- break;
- }
- port->sc_port.icount.rx++;
- }
-
- if (tport)
- tty_flip_buffer_push(tport);
-}
-
-/**
- * sn_transmit_chars - grab characters from serial core, send off
- * @port: Port to operate on
- * @raw: Transmit raw or buffered
- *
- * Note: If we're early, before we're registered with serial core, the
- * writes are going through sn_sal_console_write because that's how
- * register_console has been set up. We currently could have asynch
- * polls calling this function due to sn_sal_switch_to_asynch but we can
- * ignore them until we register with the serial core stuffs.
- *
- */
-static void sn_transmit_chars(struct sn_cons_port *port, int raw)
-{
- int xmit_count, tail, head, loops, ii;
- int result;
- char *start;
- struct circ_buf *xmit;
-
- if (!port)
- return;
-
- BUG_ON(!port->sc_is_asynch);
-
- if (port->sc_port.state) {
- /* We're initialized, using serial core infrastructure */
- xmit = &port->sc_port.state->xmit;
- } else {
- /* Probably sn_sal_switch_to_asynch has been run but serial core isn't
- * initialized yet. Just return. Writes are going through
- * sn_sal_console_write (due to register_console) at this time.
- */
- return;
- }
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
- /* Nothing to do. */
- ia64_sn_console_intr_disable(SAL_CONSOLE_INTR_XMIT);
- return;
- }
-
- head = xmit->head;
- tail = xmit->tail;
- start = &xmit->buf[tail];
-
- /* twice around gets the tail to the end of the buffer and
- * then to the head, if needed */
- loops = (head < tail) ? 2 : 1;
-
- for (ii = 0; ii < loops; ii++) {
- xmit_count = (head < tail) ?
- (UART_XMIT_SIZE - tail) : (head - tail);
-
- if (xmit_count > 0) {
- if (raw == TRANSMIT_RAW)
- result =
- port->sc_ops->sal_puts_raw(start,
- xmit_count);
- else
- result =
- port->sc_ops->sal_puts(start, xmit_count);
-#ifdef DEBUG
- if (!result)
- DPRINTF("`");
-#endif
- if (result > 0) {
- xmit_count -= result;
- port->sc_port.icount.tx += result;
- tail += result;
- tail &= UART_XMIT_SIZE - 1;
- xmit->tail = tail;
- start = &xmit->buf[tail];
- }
- }
- }
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&port->sc_port);
-
- if (uart_circ_empty(xmit))
- snp_stop_tx(&port->sc_port); /* no-op for us */
-}
-
-/**
- * sn_sal_interrupt - Handle console interrupts
- * @irq: irq #, useful for debug statements
- * @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
- *
- */
-static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
-{
- struct sn_cons_port *port = (struct sn_cons_port *)dev_id;
- unsigned long flags;
- int status = ia64_sn_console_intr_status();
-
- if (!port)
- return IRQ_NONE;
-
- spin_lock_irqsave(&port->sc_port.lock, flags);
- if (status & SAL_CONSOLE_INTR_RECV) {
- sn_receive_chars(port, flags);
- }
- if (status & SAL_CONSOLE_INTR_XMIT) {
- sn_transmit_chars(port, TRANSMIT_BUFFERED);
- }
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- return IRQ_HANDLED;
-}
-
-/**
- * sn_sal_timer_poll - this function handles polled console mode
- * @data: A pointer to our sn_cons_port (which contains the uart port)
- *
- * data is the pointer that init_timer will store for us. This function is
- * associated with init_timer to see if there is any console traffic.
- * Obviously not used in interrupt mode
- *
- */
-static void sn_sal_timer_poll(struct timer_list *t)
-{
- struct sn_cons_port *port = from_timer(port, t, sc_timer);
- unsigned long flags;
-
- if (!port)
- return;
-
- if (!port->sc_port.irq) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- if (sn_process_input)
- sn_receive_chars(port, flags);
- sn_transmit_chars(port, TRANSMIT_RAW);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- mod_timer(&port->sc_timer,
- jiffies + port->sc_interrupt_timeout);
- }
-}
-
-/*
- * Boot-time initialization code
- */
-
-/**
- * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * So this is used by sn_sal_serial_console_init (early on, before we're
- * registered with serial core). It's also used by sn_sal_init
- * right after we've registered with serial core. The later only happens
- * if we didn't already come through here via sn_sal_serial_console_init.
- *
- */
-static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
-{
- unsigned long flags;
-
- if (!port)
- return;
-
- DPRINTF("sn_console: about to switch to asynchronous console\n");
-
- /* without early_printk, we may be invoked late enough to race
- * with other cpus doing console IO at this point, however
- * console interrupts will never be enabled */
- spin_lock_irqsave(&port->sc_port.lock, flags);
-
- /* early_printk invocation may have done this for us */
- if (!port->sc_ops)
- port->sc_ops = &poll_ops;
-
- /* we can't turn on the console interrupt (as request_irq
- * calls kmalloc, which isn't set up yet), so we rely on a
- * timer to poll for input and push data from the console
- * buffer.
- */
- timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
-
- if (IS_RUNNING_ON_SIMULATOR())
- port->sc_interrupt_timeout = 6;
- else {
- /* 960cps / 16 char FIFO = 60HZ
- * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
- port->sc_interrupt_timeout =
- HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
- }
- mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);
-
- port->sc_is_asynch = 1;
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-}
-
-/**
- * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
- * @port: Our sn_cons_port (which contains the uart port)
- *
- * In sn_sal_init, after we're registered with serial core and
- * the port is added, this function is called to switch us to interrupt
- * mode. We were previously in asynch/polling mode (using init_timer).
- *
- * We attempt to switch to interrupt mode here by calling
- * request_irq. If that works out, we enable receive interrupts.
- */
-static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
-{
- unsigned long flags;
-
- if (port) {
- DPRINTF("sn_console: switching to interrupt driven console\n");
-
- if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
- IRQF_SHARED,
- "SAL console driver", port) >= 0) {
- spin_lock_irqsave(&port->sc_port.lock, flags);
- port->sc_port.irq = SGI_UART_VECTOR;
- port->sc_ops = &intr_ops;
- irq_set_handler(port->sc_port.irq, handle_level_irq);
-
- /* turn on receive interrupts */
- ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- }
- else {
- printk(KERN_INFO
- "sn_console: console proceeding in polled mode\n");
- }
- }
-}
-
-/*
- * Kernel console definitions
- */
-
-static void sn_sal_console_write(struct console *, const char *, unsigned);
-static int sn_sal_console_setup(struct console *, char *);
-static struct uart_driver sal_console_uart;
-extern struct tty_driver *uart_console_device(struct console *, int *);
-
-static struct console sal_console = {
- .name = DEVICE_NAME,
- .write = sn_sal_console_write,
- .device = uart_console_device,
- .setup = sn_sal_console_setup,
- .index = -1, /* unspecified */
- .data = &sal_console_uart,
-};
-
-#define SAL_CONSOLE &sal_console
-
-static struct uart_driver sal_console_uart = {
- .owner = THIS_MODULE,
- .driver_name = "sn_console",
- .dev_name = DEVICE_NAME,
- .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
- .minor = 0,
- .nr = 1, /* one port */
- .cons = SAL_CONSOLE,
-};
-
-/**
- * sn_sal_init - When the kernel loads us, get us rolling w/ serial core
- *
- * Before this is called, we've been printing kernel messages in a special
- * early mode not making use of the serial core infrastructure. When our
- * driver is loaded for real, we register the driver and port with serial
- * core and try to enable interrupt driven mode.
- *
- */
-static int __init sn_sal_init(void)
-{
- int retval;
-
- if (!ia64_platform_is("sn2"))
- return 0;
-
- printk(KERN_INFO "sn_console: Console driver init\n");
-
- if (USE_DYNAMIC_MINOR == 1) {
- misc.minor = MISC_DYNAMIC_MINOR;
- misc.name = DEVICE_NAME_DYNAMIC;
- retval = misc_register(&misc);
- if (retval != 0) {
- printk(KERN_WARNING "Failed to register console "
- "device using misc_register.\n");
- return -ENODEV;
- }
- sal_console_uart.major = MISC_MAJOR;
- sal_console_uart.minor = misc.minor;
- } else {
- sal_console_uart.major = DEVICE_MAJOR;
- sal_console_uart.minor = DEVICE_MINOR;
- }
-
- /* We register the driver and the port before switching to interrupts
- * or async above so the proper uart structures are populated */
-
- if (uart_register_driver(&sal_console_uart) < 0) {
- printk
- ("ERROR sn_sal_init failed uart_register_driver, line %d\n",
- __LINE__);
- return -ENODEV;
- }
-
- spin_lock_init(&sal_console_port.sc_port.lock);
-
- /* Setup the port struct with the minimum needed */
- sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */
- sal_console_port.sc_port.type = PORT_16550A;
- sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
- sal_console_port.sc_port.ops = &sn_console_ops;
- sal_console_port.sc_port.line = 0;
-
- if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
- /* error - not sure what I'd do - so I'll do nothing */
- printk(KERN_ERR "%s: unable to add port\n", __func__);
- }
-
- /* when this driver is compiled in, the console initialization
- * will have already switched us into asynchronous operation
- * before we get here through the initcalls */
- if (!sal_console_port.sc_is_asynch) {
- sn_sal_switch_to_asynch(&sal_console_port);
- }
-
- /* at this point (device_init) we can try to turn on interrupts */
- if (!IS_RUNNING_ON_SIMULATOR()) {
- sn_sal_switch_to_interrupts(&sal_console_port);
- }
- sn_process_input = 1;
- return 0;
-}
-device_initcall(sn_sal_init);
-
-/**
- * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
- * @puts_raw : puts function to do the writing
- * @s: input string
- * @count: length
- *
- * We need a \r ahead of every \n for direct writes through
- * ia64_sn_console_putb (what sal_puts_raw below actually does).
- *
- */
-
-static void puts_raw_fixed(int (*puts_raw) (const char *s, int len),
- const char *s, int count)
-{
- const char *s1;
-
- /* Output '\r' before each '\n' */
- while ((s1 = memchr(s, '\n', count)) != NULL) {
- puts_raw(s, s1 - s);
- puts_raw("\r\n", 2);
- count -= s1 + 1 - s;
- s = s1 + 1;
- }
- puts_raw(s, count);
-}
-
-/**
- * sn_sal_console_write - Print statements before serial core available
- * @console: Console to operate on - we ignore since we have just one
- * @s: String to send
- * @count: length
- *
- * This is referenced in the console struct. It is used for early
- * console printing before we register with serial core and for things
- * such as kdb. The console_lock must be held when we get here.
- *
- * This function has some code for trying to print output even if the lock
- * is held. We try to cover the case where a lock holder could have died.
- * We don't use this special case code if we're not registered with serial
- * core yet. After we're registered with serial core, the only time this
- * function would be used is for high level kernel output like magic sys req,
- * kdb, and printk's.
- */
-static void
-sn_sal_console_write(struct console *co, const char *s, unsigned count)
-{
- unsigned long flags = 0;
- struct sn_cons_port *port = &sal_console_port;
- static int stole_lock = 0;
-
- BUG_ON(!port->sc_is_asynch);
-
- /* We can't look at the xmit buffer if we're not registered with serial core
- * yet. So only do the fancy recovery after registering
- */
- if (!port->sc_port.state) {
- /* Not yet registered with serial core - simple case */
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- return;
- }
-
- /* somebody really wants this output, might be an
- * oops, kdb, panic, etc. make sure they get it. */
- if (!spin_trylock_irqsave(&port->sc_port.lock, flags)) {
- int lhead = port->sc_port.state->xmit.head;
- int ltail = port->sc_port.state->xmit.tail;
- int counter, got_lock = 0;
-
- /*
- * We attempt to determine if someone has died with the
- * lock. We wait ~20 secs after the head and tail ptrs
- * stop moving and assume the lock holder is not functional
- * and plow ahead. If the lock is freed within the time out
- * period we re-get the lock and go ahead normally. We also
- * remember if we have plowed ahead so that we don't have
- * to wait out the time out period again - the asumption
- * is that we will time out again.
- */
-
- for (counter = 0; counter < 150; mdelay(125), counter++) {
- if (stole_lock)
- break;
-
- if (spin_trylock_irqsave(&port->sc_port.lock, flags)) {
- got_lock = 1;
- break;
- } else {
- /* still locked */
- if ((lhead != port->sc_port.state->xmit.head)
- || (ltail !=
- port->sc_port.state->xmit.tail)) {
- lhead =
- port->sc_port.state->xmit.head;
- ltail =
- port->sc_port.state->xmit.tail;
- counter = 0;
- }
- }
- }
- /* flush anything in the serial core xmit buffer, raw */
- sn_transmit_chars(port, 1);
- if (got_lock) {
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
- stole_lock = 0;
- } else {
- /* fell thru */
- stole_lock = 1;
- }
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- } else {
- stole_lock = 0;
- sn_transmit_chars(port, 1);
- spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- }
-}
-
-
-/**
- * sn_sal_console_setup - Set up console for early printing
- * @co: Console to work with
- * @options: Options to set
- *
- * Altix console doesn't do anything with baud rates, etc, anyway.
- *
- * This isn't required since not providing the setup function in the
- * console struct is ok. However, other patches like KDB plop something
- * here so providing it is easier.
- *
- */
-static int sn_sal_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-/**
- * sn_sal_console_write_early - simple early output routine
- * @co - console struct
- * @s - string to print
- * @count - count
- *
- * Simple function to provide early output, before even
- * sn_sal_serial_console_init is called. Referenced in the
- * console struct registerd in sn_serial_console_early_setup.
- *
- */
-static void __init
-sn_sal_console_write_early(struct console *co, const char *s, unsigned count)
-{
- puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count);
-}
-
-/* Used for very early console printing - again, before
- * sn_sal_serial_console_init is run */
-static struct console sal_console_early __initdata = {
- .name = "sn_sal",
- .write = sn_sal_console_write_early,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/**
- * sn_serial_console_early_setup - Sets up early console output support
- *
- * Register a console early on... This is for output before even
- * sn_sal_serial_cosnole_init is called. This function is called from
- * setup.c. This allows us to do really early polled writes. When
- * sn_sal_serial_console_init is called, this console is unregistered
- * and a new one registered.
- */
-int __init sn_serial_console_early_setup(void)
-{
- if (!ia64_platform_is("sn2"))
- return -1;
-
- sal_console_port.sc_ops = &poll_ops;
- spin_lock_init(&sal_console_port.sc_port.lock);
- early_sn_setup(); /* Find SAL entry points */
- register_console(&sal_console_early);
-
- return 0;
-}
-
-/**
- * sn_sal_serial_console_init - Early console output - set up for register
- *
- * This function is called when regular console init happens. Because we
- * support even earlier console output with sn_serial_console_early_setup
- * (called from setup.c directly), this function unregisters the really
- * early console.
- *
- * Note: Even if setup.c doesn't register sal_console_early, unregistering
- * it here doesn't hurt anything.
- *
- */
-static int __init sn_sal_serial_console_init(void)
-{
- if (ia64_platform_is("sn2")) {
- sn_sal_switch_to_asynch(&sal_console_port);
- DPRINTF("sn_sal_serial_console_init : register console\n");
- register_console(&sal_console);
- unregister_console(&sal_console_early);
- }
- return 0;
-}
-
-console_initcall(sn_sal_serial_console_init);
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 73d71a4e6c0c..771d11196523 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -79,6 +79,7 @@
/* control register 1 */
#define SPRD_CTL1 0x001C
#define SPRD_DMA_EN BIT(15)
+#define SPRD_LOOPBACK_EN BIT(14)
#define RX_HW_FLOW_CTL_THLD BIT(6)
#define RX_HW_FLOW_CTL_EN BIT(7)
#define TX_HW_FLOW_CTL_EN BIT(8)
@@ -164,7 +165,14 @@ static unsigned int sprd_get_mctrl(struct uart_port *port)
static void sprd_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* nothing to do */
+ u32 val = serial_in(port, SPRD_CTL1);
+
+ if (mctrl & TIOCM_LOOP)
+ val |= SPRD_LOOPBACK_EN;
+ else
+ val &= ~SPRD_LOOPBACK_EN;
+
+ serial_out(port, SPRD_CTL1, val);
}
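
The new sprd_set_mctrl() above wires the serial core's modem-control path to the controller's internal loopback bit, so the standard TIOCM_LOOP flag now reaches the hardware. A hedged userspace sketch of how that bit gets exercised (device path, helper name and the fallback define are illustrative, not part of this patch):

/* Hedged sketch: enable internal loopback through the serial core. */
#include <fcntl.h>
#include <sys/ioctl.h>

#ifndef TIOCM_LOOP
#define TIOCM_LOOP 0x8000	/* kernel UAPI value; not all libcs expose it */
#endif

static int enable_uart_loopback(const char *path)
{
	int fd = open(path, O_RDWR | O_NOCTTY);
	int mctrl;

	if (fd < 0)
		return -1;

	if (ioctl(fd, TIOCMGET, &mctrl) == 0) {
		mctrl |= TIOCM_LOOP;
		ioctl(fd, TIOCMSET, &mctrl);	/* serial core calls ->set_mctrl() */
	}

	return fd;
}

Internal loopback like this is mainly useful for self-test: bytes written to the port come straight back on the RX side without external wiring.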
static void sprd_stop_rx(struct uart_port *port)
@@ -609,7 +617,7 @@ static inline void sprd_rx(struct uart_port *port)
if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
SPRD_LSR_FE | SPRD_LSR_OE))
- if (handle_lsr_errors(port, &lsr, &flag))
+ if (handle_lsr_errors(port, &flag, &lsr))
continue;
if (uart_handle_sysrq_char(port, ch))
continue;
@@ -975,7 +983,7 @@ static void sprd_console_write(struct console *co, const char *s,
static int __init sprd_console_setup(struct console *co, char *options)
{
- struct uart_port *port;
+ struct sprd_uart_port *sprd_uart_port;
int baud = 115200;
int bits = 8;
int parity = 'n';
@@ -984,15 +992,17 @@ static int __init sprd_console_setup(struct console *co, char *options)
if (co->index >= UART_NR_MAX || co->index < 0)
co->index = 0;
- port = &sprd_port[co->index]->port;
- if (port == NULL) {
+ sprd_uart_port = sprd_port[co->index];
+ if (!sprd_uart_port || !sprd_uart_port->port.membase) {
pr_info("serial port %d not yet initialized\n", co->index);
return -ENODEV;
}
+
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- return uart_set_options(port, co, baud, parity, bits, flow);
+ return uart_set_options(&sprd_uart_port->port, co, baud,
+ parity, bits, flow);
}
static struct uart_driver sprd_uart_driver;
@@ -1006,6 +1016,13 @@ static struct console sprd_console = {
.data = &sprd_uart_driver,
};
+static int __init sprd_serial_console_init(void)
+{
+ register_console(&sprd_console);
+ return 0;
+}
+console_initcall(sprd_serial_console_init);
+
#define SPRD_CONSOLE (&sprd_console)
/* Support for earlycon */
@@ -1094,6 +1111,16 @@ static int sprd_remove(struct platform_device *dev)
return 0;
}
+static bool sprd_uart_is_console(struct uart_port *uport)
+{
+ struct console *cons = sprd_uart_driver.cons;
+
+ if (cons && cons->index >= 0 && cons->index == uport->line)
+ return true;
+
+ return false;
+}
+
static int sprd_clk_init(struct uart_port *uport)
{
struct clk *clk_uart, *clk_parent;
@@ -1120,10 +1147,17 @@ static int sprd_clk_init(struct uart_port *uport)
u->clk = devm_clk_get(uport->dev, "enable");
if (IS_ERR(u->clk)) {
- if (PTR_ERR(u->clk) != -EPROBE_DEFER)
- dev_err(uport->dev, "uart%d can't get enable clock\n",
- uport->line);
- return PTR_ERR(u->clk);
+ if (PTR_ERR(u->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(uport->dev, "uart%d can't get enable clock\n",
+ uport->line);
+
+ /* Keep the console alive even if an error occurred */
+ if (!sprd_uart_is_console(uport))
+ return PTR_ERR(u->clk);
+
+ u->clk = NULL;
}
return 0;
@@ -1173,10 +1207,8 @@ static int sprd_probe(struct platform_device *pdev)
up->mapbase = res->start;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "not provide irq resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
up->irq = irq;
/*
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 24a2261f879a..df90747ee3a8 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -881,13 +882,13 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
switch (state) {
case UART_PM_STATE_ON:
- clk_prepare_enable(stm32port->clk);
+ pm_runtime_get_sync(port->dev);
break;
case UART_PM_STATE_OFF:
spin_lock_irqsave(&port->lock, flags);
stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
spin_unlock_irqrestore(&port->lock, flags);
- clk_disable_unprepare(stm32port->clk);
+ pm_runtime_put_sync(port->dev);
break;
}
}
@@ -927,11 +928,8 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->fifosize = stm32port->info->cfg.fifosize;
ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get event IRQ: %d\n", ret);
- return ret ? ret : -ENODEV;
- }
+ if (ret <= 0)
+ return ret ? : -ENODEV;
port->irq = ret;
port->rs485_config = stm32_config_rs485;
@@ -940,14 +938,8 @@ static int stm32_init_port(struct stm32_port *stm32port,
if (stm32port->info->cfg.has_wakeup) {
stm32port->wakeirq = platform_get_irq(pdev, 1);
- if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO) {
- if (stm32port->wakeirq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Can't get event wake IRQ: %d\n",
- stm32port->wakeirq);
- return stm32port->wakeirq ? stm32port->wakeirq :
- -ENODEV;
- }
+ if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
+ return stm32port->wakeirq ? : -ENODEV;
}
stm32port->fifoen = stm32port->info->cfg.has_fifo;
@@ -1185,6 +1177,11 @@ static int stm32_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &stm32port->port);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
err_wirq:
@@ -1206,6 +1203,9 @@ static int stm32_serial_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
struct stm32_port *stm32_port = to_stm32_port(port);
struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ int err;
+
+ pm_runtime_get_sync(&pdev->dev);
stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
@@ -1234,7 +1234,12 @@ static int stm32_serial_remove(struct platform_device *pdev)
clk_disable_unprepare(stm32_port->clk);
- return uart_remove_one_port(&stm32_usart_driver, port);
+ err = uart_remove_one_port(&stm32_usart_driver, port);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return err;
}
@@ -1337,8 +1342,8 @@ static struct uart_driver stm32_usart_driver = {
.cons = STM32_SERIAL_CONSOLE,
};
-#ifdef CONFIG_PM_SLEEP
-static void stm32_serial_enable_wakeup(struct uart_port *port, bool enable)
+static void __maybe_unused stm32_serial_enable_wakeup(struct uart_port *port,
+ bool enable)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
@@ -1362,7 +1367,7 @@ static void stm32_serial_enable_wakeup(struct uart_port *port, bool enable)
}
}
-static int stm32_serial_suspend(struct device *dev)
+static int __maybe_unused stm32_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
@@ -1373,21 +1378,46 @@ static int stm32_serial_suspend(struct device *dev)
else
stm32_serial_enable_wakeup(port, false);
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
-static int stm32_serial_resume(struct device *dev)
+static int __maybe_unused stm32_serial_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
+ pinctrl_pm_select_default_state(dev);
+
if (device_may_wakeup(dev))
stm32_serial_enable_wakeup(port, false);
return uart_resume_port(&stm32_usart_driver, port);
}
-#endif /* CONFIG_PM_SLEEP */
+
+static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct stm32_port *stm32port = container_of(port,
+ struct stm32_port, port);
+
+ clk_disable_unprepare(stm32port->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct stm32_port *stm32port = container_of(port,
+ struct stm32_port, port);
+
+ return clk_prepare_enable(stm32port->clk);
+}
static const struct dev_pm_ops stm32_serial_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_serial_runtime_suspend,
+ stm32_serial_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(stm32_serial_suspend, stm32_serial_resume)
};
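
The stm32 changes above move clock handling out of the UART ->pm() hook and into runtime-PM callbacks, with probe handing an already-active device over to the PM core. A minimal, hypothetical "foo-uart" sketch of that shape (names are illustrative; this is not the stm32 driver itself):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct foo_port {
	struct clk *clk;
};

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	struct foo_port *p = dev_get_drvdata(dev);

	clk_disable_unprepare(p->clk);		/* gate the clock when idle */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	struct foo_port *p = dev_get_drvdata(dev);

	return clk_prepare_enable(p->clk);	/* ungate before first access */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_port *p;
	int ret;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(p->clk))
		return PTR_ERR(p->clk);

	ret = clk_prepare_enable(p->clk);	/* needed during probe itself */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, p);

	/* Tell the PM core the device is already powered, then let it idle. */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name	= "foo-uart",
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

The get_noresume/set_active/enable/put_sync sequence in probe declares the device active to the PM core, then immediately allows it to runtime-suspend once it goes idle.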
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f145946f659b..da4563aaaf5c 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1194,7 +1194,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = console_port;
- unsigned long flags;
+ unsigned long flags = 0;
unsigned int imr, ctrl;
int locked = 1;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 566728fbaf3c..802c1210558f 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2952,17 +2952,11 @@ void do_SAK(struct tty_struct *tty)
EXPORT_SYMBOL(do_SAK);
-static int dev_match_devt(struct device *dev, const void *data)
-{
- const dev_t *devt = data;
- return dev->devt == *devt;
-}
-
/* Must put_device() after it's unused! */
static struct device *tty_get_device(struct tty_struct *tty)
{
dev_t devt = tty_devnum(tty);
- return class_find_device(tty_class, NULL, &devt, dev_match_devt);
+ return class_find_device_by_devt(tty_class, devt);
}
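
The tty_io.c hunk replaces an open-coded class_find_device() match with the class_find_device_by_devt() helper. The helper returns the matching device with a reference held, so the caller still owes a put_device(); a short hedged sketch (class and function names here are illustrative):

#include <linux/device.h>

static void show_dev_by_devt(struct class *cls, dev_t devt)
{
	struct device *dev = class_find_device_by_devt(cls, devt);

	if (!dev)
		return;

	pr_info("found %s\n", dev_name(dev));
	put_device(dev);	/* class_find_device_by_devt() took a reference */
}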
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 717292c1c0df..60ff236a3d63 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
tsk = waiter->task;
- smp_mb();
- waiter->task = NULL;
+ smp_store_release(&waiter->task, NULL);
wake_up_process(tsk);
put_task_struct(tsk);
}
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task))
break;
if (!timeout)
break;
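
The ldsem change converts an smp_mb() plus plain store into smp_store_release(), paired with an smp_load_acquire() on the reader side: the waiter's fields are published before the wakeup, and the woken task only proceeds once it observes the cleared pointer. A generic, hedged sketch of the pairing (struct and field names are illustrative):

#include <asm/barrier.h>
#include <linux/errno.h>

struct handoff {
	int payload;
	int done;
};

static void producer(struct handoff *h)
{
	h->payload = 42;			/* plain store ... */
	smp_store_release(&h->done, 1);		/* ... published by the release */
}

static int consumer(struct handoff *h)
{
	if (!smp_load_acquire(&h->done))	/* pairs with the store-release */
		return -EAGAIN;

	return h->payload;	/* guaranteed to observe 42 here */
}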
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ec92f36ab5c4..34aa39d1aed9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
- int bind = con_is_bound(con->con);
+ int bind;
+
+ console_lock();
+ bind = con_is_bound(con->con);
+ console_unlock();
return snprintf(buf, PAGE_SIZE, "%i\n", bind);
}
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f32cef94aa82..ebcf1434e296 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -200,10 +200,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
if (!uioinfo->irq) {
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (ret < 0)
goto bad1;
- }
uioinfo->irq = ret;
}
uiomem = &uioinfo->mem[0];
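
The dev_err() removal here (and the similar removals in the sprd and stm32 hunks above) relies on platform_get_irq() now logging its own failure message, so callers only need to propagate the error code, including -EPROBE_DEFER. A hedged sketch of the resulting pattern (hypothetical probe helper, not part of this driver):

#include <linux/platform_device.h>

static int foo_get_irq_index0(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* platform_get_irq() already logged the error */

	/* ... request_irq(irq, ...) would follow here ... */
	return irq;
}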
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 450e2f5c9b43..be8a6905f507 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -71,6 +71,13 @@ static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(reg_br, 0664, reg_show, reg_store);
static DEVICE_ATTR(reg_or, 0664, reg_show, reg_store);
+static struct attribute *uio_fsl_elbc_gpcm_attrs[] = {
+ &dev_attr_reg_br.attr,
+ &dev_attr_reg_or.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(uio_fsl_elbc_gpcm);
+
static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -411,25 +418,12 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
/* store private data */
platform_set_drvdata(pdev, info);
- /* create sysfs files */
- ret = device_create_file(priv->dev, &dev_attr_reg_br);
- if (ret)
- goto out_err3;
- ret = device_create_file(priv->dev, &dev_attr_reg_or);
- if (ret)
- goto out_err4;
-
dev_info(priv->dev,
"eLBC/GPCM device (%s) at 0x%llx, bank %d, irq=%d\n",
priv->name, (unsigned long long)res.start, priv->bank,
irq != NO_IRQ ? irq : -1);
return 0;
-out_err4:
- device_remove_file(priv->dev, &dev_attr_reg_br);
-out_err3:
- platform_set_drvdata(pdev, NULL);
- uio_unregister_device(info);
out_err2:
if (priv->shutdown)
priv->shutdown(info, true);
@@ -448,8 +442,6 @@ static int uio_fsl_elbc_gpcm_remove(struct platform_device *pdev)
struct uio_info *info = platform_get_drvdata(pdev);
struct fsl_elbc_gpcm *priv = info->priv;
- device_remove_file(priv->dev, &dev_attr_reg_or);
- device_remove_file(priv->dev, &dev_attr_reg_br);
platform_set_drvdata(pdev, NULL);
uio_unregister_device(info);
if (priv->shutdown)
@@ -474,6 +466,7 @@ static struct platform_driver uio_fsl_elbc_gpcm_driver = {
.driver = {
.name = "fsl,elbc-gpcm-uio",
.of_match_table = uio_fsl_elbc_gpcm_match,
+ .dev_groups = uio_fsl_elbc_gpcm_groups,
},
.probe = uio_fsl_elbc_gpcm_probe,
.remove = uio_fsl_elbc_gpcm_remove,
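
The conversion above replaces manual device_create_file()/device_remove_file() calls with a static attribute group handed to the driver core through the new dev_groups field, so the files are created when the device is bound and removed when it is unbound, with no error unwinding in probe. A hedged, hypothetical "foo" sketch of the pattern (names are illustrative):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "0\n");
}
static DEVICE_ATTR_RO(reg);

static struct attribute *foo_attrs[] = {
	&dev_attr_reg.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);	/* emits foo_groups[], usable as driver->dev_groups */

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* no sysfs setup needed here any more */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.dev_groups	= foo_groups,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

The cxacru and ueagle-atm hunks further down apply the same idea to struct usb_driver's dev_groups field.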
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 10688d79d180..1303b165055b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -102,12 +102,15 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
+ struct device_node *node = pdev->dev.of_node;
struct uio_pdrv_genirq_platdata *priv;
struct uio_mem *uiomem;
int ret = -EINVAL;
int i;
- if (pdev->dev.of_node) {
+ if (node) {
+ const char *name;
+
/* alloc uioinfo for one device */
uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
GFP_KERNEL);
@@ -115,8 +118,13 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
- uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
- pdev->dev.of_node);
+
+ if (!of_property_read_string(node, "linux,uio-name", &name))
+ uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
+ else
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%pOFn", node);
+
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 6e59d370ef81..275568abc670 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -36,8 +36,7 @@ menuconfig USB_SUPPORT
if USB_SUPPORT
-config USB_COMMON
- tristate
+source "drivers/usb/common/Kconfig"
config USB_ARCH_HAS_HCD
def_bool y
@@ -98,8 +97,6 @@ source "drivers/usb/core/Kconfig"
source "drivers/usb/mon/Kconfig"
-source "drivers/usb/wusbcore/Kconfig"
-
source "drivers/usb/host/Kconfig"
source "drivers/usb/renesas_usbhs/Kconfig"
@@ -114,6 +111,8 @@ source "drivers/usb/usbip/Kconfig"
endif
+source "drivers/usb/cdns3/Kconfig"
+
source "drivers/usb/mtu3/Kconfig"
source "drivers/usb/musb/Kconfig"
@@ -175,36 +174,4 @@ source "drivers/usb/typec/Kconfig"
source "drivers/usb/roles/Kconfig"
-config USB_LED_TRIG
- bool "USB LED Triggers"
- depends on LEDS_CLASS && LEDS_TRIGGERS
- select USB_COMMON
- help
- This option adds LED triggers for USB host and/or gadget activity.
-
- Say Y here if you are working on a system with led-class supported
- LEDs and you want to use them as activity indicators for USB host or
- gadget.
-
-config USB_ULPI_BUS
- tristate "USB ULPI PHY interface support"
- select USB_COMMON
- help
- UTMI+ Low Pin Interface (ULPI) is specification for a commonly used
- USB 2.0 PHY interface. The ULPI specification defines a standard set
- of registers that can be used to detect the vendor and product which
- allows ULPI to be handled as a bus. This module is the driver for that
- bus.
-
- The ULPI interfaces (the buses) are registered by the drivers for USB
- controllers which support ULPI register access and have ULPI PHY
- attached to them. The ULPI PHY drivers themselves are normal PHY
- drivers.
-
- ULPI PHYs provide often functions such as ADP sensing/probing (OTG
- protocol) and USB charger detection.
-
- To compile this driver as a module, choose M here: the module will
- be called ulpi.
-
endif # USB_SUPPORT
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index ecc2de1ffaae..1c1c1d659394 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_USB_ISP1760) += isp1760/
+obj-$(CONFIG_USB_CDNS3) += cdns3/
+
obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_USB_MTU3) += mtu3/
@@ -35,8 +37,6 @@ obj-$(CONFIG_USB_MAX3421_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
-obj-$(CONFIG_USB_WUSB) += wusbcore/
-
obj-$(CONFIG_USB_ACM) += class/
obj-$(CONFIG_USB_PRINTER) += class/
obj-$(CONFIG_USB_WDM) += class/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e57a2be8754a..5d41f85a7445 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -539,6 +539,37 @@ CXACRU_SET_##_action( adsl_config);
CXACRU_ALL_FILES(INIT);
+static struct attribute *cxacru_attrs[] = {
+ &dev_attr_adsl_config.attr,
+ &dev_attr_adsl_state.attr,
+ &dev_attr_adsl_controller_version.attr,
+ &dev_attr_adsl_headend_environment.attr,
+ &dev_attr_adsl_headend.attr,
+ &dev_attr_modulation.attr,
+ &dev_attr_line_startable.attr,
+ &dev_attr_downstream_hec_errors.attr,
+ &dev_attr_upstream_hec_errors.attr,
+ &dev_attr_downstream_fec_errors.attr,
+ &dev_attr_upstream_fec_errors.attr,
+ &dev_attr_downstream_crc_errors.attr,
+ &dev_attr_upstream_crc_errors.attr,
+ &dev_attr_startup_attempts.attr,
+ &dev_attr_downstream_bits_per_frame.attr,
+ &dev_attr_upstream_bits_per_frame.attr,
+ &dev_attr_transmitter_power.attr,
+ &dev_attr_downstream_attenuation.attr,
+ &dev_attr_upstream_attenuation.attr,
+ &dev_attr_downstream_snr_margin.attr,
+ &dev_attr_upstream_snr_margin.attr,
+ &dev_attr_mac_address.attr,
+ &dev_attr_line_status.attr,
+ &dev_attr_link_status.attr,
+ &dev_attr_upstream_rate.attr,
+ &dev_attr_downstream_rate.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cxacru);
+
/* the following three functions are stolen from drivers/usb/core/message.c */
static void cxacru_blocking_completion(struct urb *urb)
{
@@ -736,17 +767,6 @@ static int cxacru_card_status(struct cxacru_data *instance)
return 0;
}
-static void cxacru_remove_device_files(struct usbatm_data *usbatm_instance,
- struct atm_dev *atm_dev)
-{
- struct usb_interface *intf = usbatm_instance->usb_intf;
-
- #define CXACRU_DEVICE_REMOVE_FILE(_name) \
- device_remove_file(&intf->dev, &dev_attr_##_name);
- CXACRU_ALL_FILES(REMOVE);
- #undef CXACRU_DEVICE_REMOVE_FILE
-}
-
static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
struct atm_dev *atm_dev)
{
@@ -765,13 +785,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
return ret;
}
- #define CXACRU_DEVICE_CREATE_FILE(_name) \
- ret = device_create_file(&intf->dev, &dev_attr_##_name); \
- if (unlikely(ret)) \
- goto fail_sysfs;
- CXACRU_ALL_FILES(CREATE);
- #undef CXACRU_DEVICE_CREATE_FILE
-
/* start ADSL */
mutex_lock(&instance->adsl_state_serialize);
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
@@ -804,11 +817,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
if (start_polling)
cxacru_poll_status(&instance->poll_work.work);
return 0;
-
-fail_sysfs:
- usb_err(usbatm_instance, "cxacru_atm_start: device_create_file failed (%d)\n", ret);
- cxacru_remove_device_files(usbatm_instance, atm_dev);
- return ret;
}
static void cxacru_poll_status(struct work_struct *work)
@@ -1332,7 +1340,6 @@ static struct usbatm_driver cxacru_driver = {
.heavy_init = cxacru_heavy_init,
.unbind = cxacru_unbind,
.atm_start = cxacru_atm_start,
- .atm_stop = cxacru_remove_device_files,
.bulk_in = CXACRU_EP_DATA,
.bulk_out = CXACRU_EP_DATA,
.rx_padding = 3,
@@ -1364,7 +1371,8 @@ static struct usb_driver cxacru_usb_driver = {
.name = cxacru_driver_name,
.probe = cxacru_usb_probe,
.disconnect = usbatm_usb_disconnect,
- .id_table = cxacru_usb_ids
+ .id_table = cxacru_usb_ids,
+ .dev_groups = cxacru_groups,
};
module_usb_driver(cxacru_usb_driver);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 8faa51b1a520..8b0ea8c70d73 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2458,7 +2458,7 @@ static int claim_interface(struct usb_device *usb_dev,
return ret;
}
-static struct attribute *attrs[] = {
+static struct attribute *uea_attrs[] = {
&dev_attr_stat_status.attr,
&dev_attr_stat_mflags.attr,
&dev_attr_stat_human_status.attr,
@@ -2479,9 +2479,7 @@ static struct attribute *attrs[] = {
&dev_attr_stat_firmid.attr,
NULL,
};
-static const struct attribute_group attr_grp = {
- .attrs = attrs,
-};
+ATTRIBUTE_GROUPS(uea);
static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
const struct usb_device_id *id)
@@ -2550,18 +2548,12 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
}
}
- ret = sysfs_create_group(&intf->dev.kobj, &attr_grp);
- if (ret < 0)
- goto error;
-
ret = uea_boot(sc);
if (ret < 0)
- goto error_rm_grp;
+ goto error;
return 0;
-error_rm_grp:
- sysfs_remove_group(&intf->dev.kobj, &attr_grp);
error:
kfree(sc);
return ret;
@@ -2571,7 +2563,6 @@ static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct uea_softc *sc = usbatm->driver_data;
- sysfs_remove_group(&intf->dev.kobj, &attr_grp);
uea_stop(sc);
kfree(sc);
}
@@ -2721,6 +2712,7 @@ static struct usb_driver uea_driver = {
.id_table = uea_ids,
.probe = uea_probe,
.disconnect = uea_disconnect,
+ .dev_groups = uea_groups,
};
MODULE_DEVICE_TABLE(usb, uea_ids);
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
new file mode 100644
index 000000000000..d0331613a355
--- /dev/null
+++ b/drivers/usb/cdns3/Kconfig
@@ -0,0 +1,46 @@
+config USB_CDNS3
+ tristate "Cadence USB3 Dual-Role Controller"
+ depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ select USB_ROLE_SWITCH
+ help
+ Say Y here if your system has a Cadence USB3 dual-role controller.
+ It supports dual-role switching, Host-only, and Peripheral-only modes.
+
+ If you choose to build this driver as a dynamically linked module,
+ the module will be called cdns3.ko.
+
+if USB_CDNS3
+
+config USB_CDNS3_GADGET
+ bool "Cadence USB3 device controller"
+ depends on USB_GADGET=y || USB_GADGET=USB_CDNS3
+ help
+ Say Y here to enable device controller functionality of the
+ Cadence USBSS-DEV driver.
+
+ This controller supports FS, HS and SS modes. It doesn't support
+ LS and SSP modes.
+
+config USB_CDNS3_HOST
+ bool "Cadence USB3 host controller"
+ depends on USB=y || USB=USB_CDNS3
+ help
+ Say Y here to enable host controller functionality of the
+ Cadence driver.
+
+ The host controller is compliant with xHCI, so it will use the
+ standard xHCI driver.
+
+config USB_CDNS3_PCI_WRAP
+ tristate "Cadence USB3 support on PCIe-based platforms"
+ depends on USB_PCI && ACPI
+ default USB_CDNS3
+ help
+ If you're using the USBSS Core IP on a PCIe-based platform, please
+ say 'Y' or 'M' here.
+
+ If you choose to build this driver as a module, it will be
+ dynamically linked and the module will be called cdns3-pci.ko.
+
+endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
new file mode 100644
index 000000000000..a703547350bb
--- /dev/null
+++ b/drivers/usb/cdns3/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
+cdns3-y := core.o drd.o
+
+obj-$(CONFIG_USB_CDNS3) += cdns3.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET) += gadget.o ep0.o
+
+ifneq ($(CONFIG_USB_CDNS3_GADGET),)
+cdns3-$(CONFIG_TRACING) += trace.o
+endif
+
+cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
+
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
new file mode 100644
index 000000000000..c41ddb61b857
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS PCI Glue driver
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+struct cdns3_wrap {
+ struct platform_device *plat_dev;
+ struct resource dev_res[6];
+ int devfn;
+};
+
+#define RES_IRQ_HOST_ID 0
+#define RES_IRQ_PERIPHERAL_ID 1
+#define RES_IRQ_OTG_ID 2
+#define RES_HOST_ID 3
+#define RES_DEV_ID 4
+#define RES_DRD_ID 5
+
+#define PCI_BAR_HOST 0
+#define PCI_BAR_DEV 2
+#define PCI_BAR_OTG 0
+
+#define PCI_DEV_FN_HOST_DEVICE 0
+#define PCI_DEV_FN_OTG 1
+
+#define PCI_DRIVER_NAME "cdns3-pci-usbss"
+#define PLAT_DRIVER_NAME "cdns-usb3"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0x0100
+
+static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
+{
+ struct pci_dev *func;
+
+ /*
+ * Get the second function.
+ * It's a little tricky, but this platform has two functions.
+ * The first holds the resources for Host/Device while the second
+ * holds the resources for DRD/OTG.
+ */
+ func = pci_get_device(pdev->vendor, pdev->device, NULL);
+ if (unlikely(!func))
+ return NULL;
+
+ if (func->devfn == pdev->devfn) {
+ func = pci_get_device(pdev->vendor, pdev->device, func);
+ if (unlikely(!func))
+ return NULL;
+ }
+
+ return func;
+}
+
+static int cdns3_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct platform_device_info plat_info;
+ struct cdns3_wrap *wrap;
+ struct resource *res;
+ struct pci_dev *func;
+ int err;
+
+ /*
+ * for GADGET/HOST PCI (devfn) function number is 0,
+ * for OTG PCI (devfn) function number is 1
+ */
+ if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ pdev->devfn != PCI_DEV_FN_OTG))
+ return -EINVAL;
+
+ func = cdns3_get_second_fun(pdev);
+ if (unlikely(!func))
+ return -EINVAL;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_is_enabled(func)) {
+ wrap = pci_get_drvdata(func);
+ } else {
+ wrap = kzalloc(sizeof(*wrap), GFP_KERNEL);
+ if (!wrap) {
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ }
+
+ res = wrap->dev_res;
+
+ if (pdev->devfn == PCI_DEV_FN_HOST_DEVICE) {
+ /* function 0: host(BAR_0) + device(BAR_1).*/
+ dev_dbg(&pdev->dev, "Initialize Device resources\n");
+ res[RES_DEV_ID].start = pci_resource_start(pdev, PCI_BAR_DEV);
+ res[RES_DEV_ID].end = pci_resource_end(pdev, PCI_BAR_DEV);
+ res[RES_DEV_ID].name = "dev";
+ res[RES_DEV_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-DEV physical base addr: %pa\n",
+ &res[RES_DEV_ID].start);
+
+ res[RES_HOST_ID].start = pci_resource_start(pdev, PCI_BAR_HOST);
+ res[RES_HOST_ID].end = pci_resource_end(pdev, PCI_BAR_HOST);
+ res[RES_HOST_ID].name = "xhci";
+ res[RES_HOST_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-XHCI physical base addr: %pa\n",
+ &res[RES_HOST_ID].start);
+
+ /* Interrupt for XHCI */
+ wrap->dev_res[RES_IRQ_HOST_ID].start = pdev->irq;
+ wrap->dev_res[RES_IRQ_HOST_ID].name = "host";
+ wrap->dev_res[RES_IRQ_HOST_ID].flags = IORESOURCE_IRQ;
+
+ /* Interrupt device. It's the same as for HOST. */
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].start = pdev->irq;
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].name = "peripheral";
+ wrap->dev_res[RES_IRQ_PERIPHERAL_ID].flags = IORESOURCE_IRQ;
+ } else {
+ res[RES_DRD_ID].start = pci_resource_start(pdev, PCI_BAR_OTG);
+ res[RES_DRD_ID].end = pci_resource_end(pdev, PCI_BAR_OTG);
+ res[RES_DRD_ID].name = "otg";
+ res[RES_DRD_ID].flags = IORESOURCE_MEM;
+ dev_dbg(&pdev->dev, "USBSS-DRD physical base addr: %pa\n",
+ &res[RES_DRD_ID].start);
+
+ /* Interrupt for OTG/DRD. */
+ wrap->dev_res[RES_IRQ_OTG_ID].start = pdev->irq;
+ wrap->dev_res[RES_IRQ_OTG_ID].name = "otg";
+ wrap->dev_res[RES_IRQ_OTG_ID].flags = IORESOURCE_IRQ;
+ }
+
+ if (pci_is_enabled(func)) {
+ /* set up platform device info */
+ memset(&plat_info, 0, sizeof(plat_info));
+ plat_info.parent = &pdev->dev;
+ plat_info.fwnode = pdev->dev.fwnode;
+ plat_info.name = PLAT_DRIVER_NAME;
+ plat_info.id = pdev->devfn;
+ wrap->devfn = pdev->devfn;
+ plat_info.res = wrap->dev_res;
+ plat_info.num_res = ARRAY_SIZE(wrap->dev_res);
+ plat_info.dma_mask = pdev->dma_mask;
+ /* register platform device */
+ wrap->plat_dev = platform_device_register_full(&plat_info);
+ if (IS_ERR(wrap->plat_dev)) {
+ pci_disable_device(pdev);
+ kfree(wrap);
+ return PTR_ERR(wrap->plat_dev);
+ }
+ }
+
+ pci_set_drvdata(pdev, wrap);
+ return err;
+}
+
+static void cdns3_pci_remove(struct pci_dev *pdev)
+{
+ struct cdns3_wrap *wrap;
+ struct pci_dev *func;
+
+ func = cdns3_get_second_fun(pdev);
+
+ wrap = (struct cdns3_wrap *)pci_get_drvdata(pdev);
+ if (wrap->devfn == pdev->devfn)
+ platform_device_unregister(wrap->plat_dev);
+
+ if (!pci_is_enabled(func))
+ kfree(wrap);
+}
+
+static const struct pci_device_id cdns3_pci_ids[] = {
+ { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { 0, }
+};
+
+static struct pci_driver cdns3_pci_driver = {
+ .name = PCI_DRIVER_NAME,
+ .id_table = cdns3_pci_ids,
+ .probe = cdns3_pci_probe,
+ .remove = cdns3_pci_remove,
+};
+
+module_pci_driver(cdns3_pci_driver);
+MODULE_DEVICE_TABLE(pci, cdns3_pci_ids);
+
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USBSS PCI wrapperr");
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
new file mode 100644
index 000000000000..06f1e105be4e
--- /dev/null
+++ b/drivers/usb/cdns3/core.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+
+#include "gadget.h"
+#include "core.h"
+#include "host-export.h"
+#include "gadget-export.h"
+#include "drd.h"
+
+static int cdns3_idle_init(struct cdns3 *cdns);
+
+static inline
+struct cdns3_role_driver *cdns3_get_current_role_driver(struct cdns3 *cdns)
+{
+ WARN_ON(!cdns->roles[cdns->role]);
+ return cdns->roles[cdns->role];
+}
+
+static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
+{
+ int ret;
+
+ if (WARN_ON(role > USB_ROLE_DEVICE))
+ return 0;
+
+ mutex_lock(&cdns->mutex);
+ cdns->role = role;
+ mutex_unlock(&cdns->mutex);
+
+ if (!cdns->roles[role])
+ return -ENXIO;
+
+ if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE)
+ return 0;
+
+ mutex_lock(&cdns->mutex);
+ ret = cdns->roles[role]->start(cdns);
+ if (!ret)
+ cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE;
+ mutex_unlock(&cdns->mutex);
+
+ return ret;
+}
+
+static void cdns3_role_stop(struct cdns3 *cdns)
+{
+ enum usb_role role = cdns->role;
+
+ if (WARN_ON(role > USB_ROLE_DEVICE))
+ return;
+
+ if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE)
+ return;
+
+ mutex_lock(&cdns->mutex);
+ cdns->roles[role]->stop(cdns);
+ cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE;
+ mutex_unlock(&cdns->mutex);
+}
+
+static void cdns3_exit_roles(struct cdns3 *cdns)
+{
+ cdns3_role_stop(cdns);
+ cdns3_drd_exit(cdns);
+}
+
+static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns);
+
+/**
+ * cdns3_core_init_role - initialize role of operation
+ * @cdns: Pointer to cdns3 structure
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns3_core_init_role(struct cdns3 *cdns)
+{
+ struct device *dev = cdns->dev;
+ enum usb_dr_mode best_dr_mode;
+ enum usb_dr_mode dr_mode;
+ int ret = 0;
+
+ dr_mode = usb_get_dr_mode(dev);
+ cdns->role = USB_ROLE_NONE;
+
+ /*
+ * If the driver can't read the mode via usb_get_dr_mode(), it
+ * chooses the mode according to the kernel configuration. This setting
+ * can be restricted later depending on the strap pin configuration.
+ */
+ if (dr_mode == USB_DR_MODE_UNKNOWN) {
+ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ /*
+ * At this point cdns->dr_mode contains the strap configuration.
+ * The driver tries to update this setting considering the kernel
+ * configuration.
+ */
+ best_dr_mode = cdns->dr_mode;
+
+ ret = cdns3_idle_init(cdns);
+ if (ret)
+ return ret;
+
+ if (dr_mode == USB_DR_MODE_OTG) {
+ best_dr_mode = cdns->dr_mode;
+ } else if (cdns->dr_mode == USB_DR_MODE_OTG) {
+ best_dr_mode = dr_mode;
+ } else if (cdns->dr_mode != dr_mode) {
+ dev_err(dev, "Incorrect DRD configuration\n");
+ return -EINVAL;
+ }
+
+ dr_mode = best_dr_mode;
+
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+ ret = cdns3_host_init(cdns);
+ if (ret) {
+ dev_err(dev, "Host initialization failed with %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
+ ret = cdns3_gadget_init(cdns);
+ if (ret) {
+ dev_err(dev, "Device initialization failed with %d\n",
+ ret);
+ goto err;
+ }
+ }
+
+ cdns->dr_mode = dr_mode;
+
+ ret = cdns3_drd_update_mode(cdns);
+ if (ret)
+ goto err;
+
+ if (cdns->dr_mode != USB_DR_MODE_OTG) {
+ ret = cdns3_hw_role_switch(cdns);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+err:
+ cdns3_exit_roles(cdns);
+ return ret;
+}
+
+/**
+ * cdsn3_hw_role_state_machine - role switch state machine based on hw events.
+ * @cdns: Pointer to controller structure.
+ *
+ * Returns next role to be entered based on hw events.
+ */
+static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns)
+{
+ enum usb_role role;
+ int id, vbus;
+
+ if (cdns->dr_mode != USB_DR_MODE_OTG)
+ goto not_otg;
+
+ id = cdns3_get_id(cdns);
+ vbus = cdns3_get_vbus(cdns);
+
+ /*
+ * Role change state machine
+ * Inputs: ID, VBUS
+ * Previous state: cdns->role
+ * Next state: role
+ */
+ role = cdns->role;
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ /*
+ * The driver treats USB_ROLE_NONE as synonymous with the IDLE state
+ * from the controller specification.
+ */
+ if (!id)
+ role = USB_ROLE_HOST;
+ else if (vbus)
+ role = USB_ROLE_DEVICE;
+ break;
+ case USB_ROLE_HOST: /* from HOST, we can only change to NONE */
+ if (id)
+ role = USB_ROLE_NONE;
+ break;
+ case USB_ROLE_DEVICE: /* from GADGET, we can only change to NONE */
+ if (!vbus)
+ role = USB_ROLE_NONE;
+ break;
+ }
+
+ dev_dbg(cdns->dev, "role %d -> %d\n", cdns->role, role);
+
+ return role;
+
+not_otg:
+ if (cdns3_is_host(cdns))
+ role = USB_ROLE_HOST;
+ if (cdns3_is_device(cdns))
+ role = USB_ROLE_DEVICE;
+
+ return role;
+}
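
The state machine above only ever falls back through USB_ROLE_NONE: with the ID pin grounded it goes to host, with VBUS present (and ID high) it goes to device, and an active role is always dropped to NONE before the other one can be entered. A standalone, host-compilable distillation of those transition rules (names are illustrative, not the driver's types):

#include <assert.h>

enum role { NONE, HOST, DEVICE };

static enum role next_role(enum role prev, int id, int vbus)
{
	switch (prev) {
	case NONE:
		if (!id)
			return HOST;	/* ID grounded: act as host */
		return vbus ? DEVICE : NONE;
	case HOST:			/* from HOST we can only drop to NONE */
		return id ? NONE : HOST;
	case DEVICE:			/* from DEVICE we can only drop to NONE */
		return vbus ? DEVICE : NONE;
	}
	return prev;
}

int main(void)
{
	assert(next_role(NONE, 0, 0) == HOST);
	assert(next_role(NONE, 1, 1) == DEVICE);
	assert(next_role(HOST, 1, 1) == NONE);	/* must idle before switching */
	assert(next_role(DEVICE, 1, 0) == NONE);
	return 0;
}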
+
+static int cdns3_idle_role_start(struct cdns3 *cdns)
+{
+ return 0;
+}
+
+static void cdns3_idle_role_stop(struct cdns3 *cdns)
+{
+ /* Program Lane swap and bring PHY out of RESET */
+ phy_reset(cdns->usb3_phy);
+}
+
+static int cdns3_idle_init(struct cdns3 *cdns)
+{
+ struct cdns3_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = cdns3_idle_role_start;
+ rdrv->stop = cdns3_idle_role_stop;
+ rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->suspend = NULL;
+ rdrv->resume = NULL;
+ rdrv->name = "idle";
+
+ cdns->roles[USB_ROLE_NONE] = rdrv;
+
+ return 0;
+}
+
+/**
+ * cdns3_hw_role_switch - switch roles based on HW state
+ * @cdns: Pointer to controller structure
+ */
+int cdns3_hw_role_switch(struct cdns3 *cdns)
+{
+ enum usb_role real_role, current_role;
+ int ret = 0;
+
+ /* Do nothing if the role is controlled via sysfs. */
+ if (cdns->role_override)
+ return 0;
+
+ pm_runtime_get_sync(cdns->dev);
+
+ current_role = cdns->role;
+ real_role = cdsn3_hw_role_state_machine(cdns);
+
+ /* Do nothing if nothing changed */
+ if (current_role == real_role)
+ goto exit;
+
+ cdns3_role_stop(cdns);
+
+ dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role);
+
+ ret = cdns3_role_start(cdns, real_role);
+ if (ret) {
+ /* Back to current role */
+ dev_err(cdns->dev, "set %d has failed, back to %d\n",
+ real_role, current_role);
+ ret = cdns3_role_start(cdns, current_role);
+ if (ret)
+ dev_err(cdns->dev, "back to %d failed too\n",
+ current_role);
+ }
+exit:
+ pm_runtime_put_sync(cdns->dev);
+ return ret;
+}
+
+/**
+ * cdns3_role_get - get current role of controller.
+ *
+ * @dev: Pointer to device structure
+ *
+ * Returns role
+ */
+static enum usb_role cdns3_role_get(struct device *dev)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+
+ return cdns->role;
+}
+
+/**
+ * cdns3_role_set - set current role of controller.
+ *
+ * @dev: pointer to device object
+ * @role: the requested role to switch to
+ *
+ * Handles the following events:
+ * - Role switch for dual-role devices
+ * - USB_ROLE_DEVICE <--> USB_ROLE_NONE for peripheral-only devices
+ */
+static int cdns3_role_set(struct device *dev, enum usb_role role)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ int ret = 0;
+
+ pm_runtime_get_sync(cdns->dev);
+
+ /*
+ * FIXME: switch role framework should be extended to meet
+ * requirements. Driver assumes that role can be controlled
+ * by SW or HW. Temporary workaround is to use USB_ROLE_NONE to
+ * switch from SW to HW control.
+ *
+ * For dr_mode == USB_DR_MODE_OTG:
+ * if user sets USB_ROLE_HOST or USB_ROLE_DEVICE then driver
+ * sets role_override flag and forces that role.
+ * if user sets USB_ROLE_NONE, driver clears role_override and lets
+ * HW state machine take over.
+ *
+ * For dr_mode != USB_DR_MODE_OTG:
+ * Assumptions:
+ * 1. Restricted user control between NONE and dr_mode.
+ * 2. Driver doesn't need to rely on role_override flag.
+ * 3. Driver needs to ensure that HW state machine is never called
+ * if dr_mode != USB_DR_MODE_OTG.
+ */
+ if (role == USB_ROLE_NONE)
+ cdns->role_override = 0;
+ else
+ cdns->role_override = 1;
+
+ /*
+ * HW state might have changed, so the driver needs to trigger the
+ * HW state machine if dr_mode == USB_DR_MODE_OTG.
+ */
+ if (!cdns->role_override && cdns->dr_mode == USB_DR_MODE_OTG) {
+ cdns3_hw_role_switch(cdns);
+ goto pm_put;
+ }
+
+ if (cdns->role == role)
+ goto pm_put;
+
+ if (cdns->dr_mode == USB_DR_MODE_HOST) {
+ switch (role) {
+ case USB_ROLE_NONE:
+ case USB_ROLE_HOST:
+ break;
+ default:
+ ret = -EPERM;
+ goto pm_put;
+ }
+ }
+
+ if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ switch (role) {
+ case USB_ROLE_NONE:
+ case USB_ROLE_DEVICE:
+ break;
+ default:
+ ret = -EPERM;
+ goto pm_put;
+ }
+ }
+
+ cdns3_role_stop(cdns);
+ ret = cdns3_role_start(cdns, role);
+ if (ret) {
+ dev_err(cdns->dev, "set role %d has failed\n", role);
+ ret = -EPERM;
+ }
+
+pm_put:
+ pm_runtime_put_sync(cdns->dev);
+ return ret;
+}
+
+static const struct usb_role_switch_desc cdns3_switch_desc = {
+ .set = cdns3_role_set,
+ .get = cdns3_role_get,
+ .allow_userspace_control = true,
+};
+
+/**
+ * cdns3_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cdns3 *cdns;
+ void __iomem *regs;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "error setting dma mask: %d\n", ret);
+ return -ENODEV;
+ }
+
+ cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+ if (!cdns)
+ return -ENOMEM;
+
+ cdns->dev = dev;
+
+ platform_set_drvdata(pdev, cdns);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
+ if (!res) {
+ dev_err(dev, "missing host IRQ\n");
+ return -ENODEV;
+ }
+
+ cdns->xhci_res[0] = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+ if (!res) {
+ dev_err(dev, "couldn't get xhci resource\n");
+ return -ENXIO;
+ }
+
+ cdns->xhci_res[1] = *res;
+
+ cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+ if (cdns->dev_irq == -EPROBE_DEFER)
+ return cdns->dev_irq;
+
+ if (cdns->dev_irq < 0)
+ dev_err(dev, "couldn't get peripheral irq\n");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dev");
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ cdns->dev_regs = regs;
+
+ cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+ if (cdns->otg_irq == -EPROBE_DEFER)
+ return cdns->otg_irq;
+
+ if (cdns->otg_irq < 0) {
+ dev_err(dev, "couldn't get otg irq\n");
+ return cdns->otg_irq;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+ if (!res) {
+ dev_err(dev, "couldn't get otg resource\n");
+ return -ENXIO;
+ }
+
+ cdns->otg_res = *res;
+
+ mutex_init(&cdns->mutex);
+
+ cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+ if (IS_ERR(cdns->usb2_phy))
+ return PTR_ERR(cdns->usb2_phy);
+
+ ret = phy_init(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+ if (IS_ERR(cdns->usb3_phy))
+ return PTR_ERR(cdns->usb3_phy);
+
+ ret = phy_init(cdns->usb3_phy);
+ if (ret)
+ goto err1;
+
+ ret = phy_power_on(cdns->usb2_phy);
+ if (ret)
+ goto err2;
+
+ ret = phy_power_on(cdns->usb3_phy);
+ if (ret)
+ goto err3;
+
+ cdns->role_sw = usb_role_switch_register(dev, &cdns3_switch_desc);
+ if (IS_ERR(cdns->role_sw)) {
+ ret = PTR_ERR(cdns->role_sw);
+ dev_warn(dev, "Unable to register Role Switch\n");
+ goto err4;
+ }
+
+ ret = cdns3_drd_init(cdns);
+ if (ret)
+ goto err5;
+
+ ret = cdns3_core_init_role(cdns);
+ if (ret)
+ goto err5;
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /*
+ * The controller needs less time between bus and controller suspend,
+ * and we also need a small delay to avoid frequently entering low
+ * power mode.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 20);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_use_autosuspend(dev);
+ dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
+
+ return 0;
+err5:
+ cdns3_drd_exit(cdns);
+ usb_role_switch_unregister(cdns->role_sw);
+err4:
+ phy_power_off(cdns->usb3_phy);
+
+err3:
+ phy_power_off(cdns->usb2_phy);
+err2:
+ phy_exit(cdns->usb3_phy);
+err1:
+ phy_exit(cdns->usb2_phy);
+
+ return ret;
+}
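
Probe arms autosuspend with a 20 ms delay; the counterpart, not shown in this file, is that users of the device take a runtime-PM reference around register access and release it with the autosuspend variants. A hedged sketch of that usage side (hypothetical helper, not part of this driver):

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume if runtime-suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop ref; suspend after delay */
	return 0;
}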
+
+/**
+ * cdns3_remove - unbind drd driver and clean up
+ * @pdev: Pointer to Linux platform device
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns3_remove(struct platform_device *pdev)
+{
+ struct cdns3 *cdns = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ cdns3_exit_roles(cdns);
+ usb_role_switch_unregister(cdns->role_sw);
+ phy_power_off(cdns->usb2_phy);
+ phy_power_off(cdns->usb3_phy);
+ phy_exit(cdns->usb2_phy);
+ phy_exit(cdns->usb3_phy);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_suspend(struct device *dev)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (cdns->role == USB_ROLE_HOST)
+ return 0;
+
+ if (pm_runtime_status_suspended(dev))
+ pm_runtime_resume(dev);
+
+ if (cdns->roles[cdns->role]->suspend) {
+ spin_lock_irqsave(&cdns->gadget_dev->lock, flags);
+ cdns->roles[cdns->role]->suspend(cdns, false);
+ spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags);
+ }
+
+ return 0;
+}
+
+static int cdns3_resume(struct device *dev)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (cdns->role == USB_ROLE_HOST)
+ return 0;
+
+ if (cdns->roles[cdns->role]->resume) {
+ spin_lock_irqsave(&cdns->gadget_dev->lock, flags);
+ cdns->roles[cdns->role]->resume(cdns, false);
+ spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags);
+ }
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops cdns3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cdns3_match[] = {
+ { .compatible = "cdns,usb3" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_cdns3_match);
+#endif
+
+static struct platform_driver cdns3_driver = {
+ .probe = cdns3_probe,
+ .remove = cdns3_remove,
+ .driver = {
+ .name = "cdns-usb3",
+ .of_match_table = of_match_ptr(of_cdns3_match),
+ .pm = &cdns3_pm_ops,
+ },
+};
+
+module_platform_driver(cdns3_driver);
+
+MODULE_ALIAS("platform:cdns3");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
new file mode 100644
index 000000000000..969eb94de204
--- /dev/null
+++ b/drivers/usb/cdns3/core.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS DRD Header File.
+ *
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ */
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+
+#ifndef __LINUX_CDNS3_CORE_H
+#define __LINUX_CDNS3_CORE_H
+
+struct cdns3;
+
+/**
+ * struct cdns3_role_driver - host/gadget role driver
+ * @start: start this role
+ * @stop: stop this role
+ * @suspend: suspend callback for this role
+ * @resume: resume callback for this role
+ * @name: role name string (host/gadget)
+ * @state: current state
+ */
+struct cdns3_role_driver {
+ int (*start)(struct cdns3 *cdns);
+ void (*stop)(struct cdns3 *cdns);
+ int (*suspend)(struct cdns3 *cdns, bool do_wakeup);
+ int (*resume)(struct cdns3 *cdns, bool hibernated);
+ const char *name;
+#define CDNS3_ROLE_STATE_INACTIVE 0
+#define CDNS3_ROLE_STATE_ACTIVE 1
+ int state;
+};
+
+#define CDNS3_XHCI_RESOURCES_NUM 2
+/**
+ * struct cdns3 - Representation of Cadence USB3 DRD controller.
+ * @dev: pointer to Cadence device struct
+ * @xhci_regs: pointer to base of xhci registers
+ * @xhci_res: the resource for xhci
+ * @dev_regs: pointer to base of dev registers
+ * @otg_res: the resource for otg
+ * @otg_v0_regs: pointer to base of v0 otg registers
+ * @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_regs: pointer to base of otg registers
+ * @otg_irq: irq number for otg controller
+ * @dev_irq: irq number for device controller
+ * @roles: array of supported roles for this controller
+ * @role: current role
+ * @host_dev: the child host device pointer for cdns3 core
+ * @gadget_dev: the child gadget device pointer for cdns3 core
+ * @usb2_phy: pointer to USB2 PHY
+ * @usb3_phy: pointer to USB3 PHY
+ * @mutex: the mutex for concurrent code in the driver
+ * @dr_mode: supported mode of operation; it can be Host-only, Device-only,
+ * or OTG mode, which allows switching between Device and Host mode.
+ * This field is based on the firmware setting, kernel configuration
+ * and hardware configuration.
+ * @role_sw: pointer to role switch object.
+ * @role_override: set to 1 if the role is controlled by SW (sysfs).
+ */
+struct cdns3 {
+ struct device *dev;
+ void __iomem *xhci_regs;
+ struct resource xhci_res[CDNS3_XHCI_RESOURCES_NUM];
+ struct cdns3_usb_regs __iomem *dev_regs;
+
+ struct resource otg_res;
+ struct cdns3_otg_legacy_regs *otg_v0_regs;
+ struct cdns3_otg_regs *otg_v1_regs;
+ struct cdns3_otg_common_regs *otg_regs;
+#define CDNS3_CONTROLLER_V0 0
+#define CDNS3_CONTROLLER_V1 1
+ u32 version;
+
+ int otg_irq;
+ int dev_irq;
+ struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1];
+ enum usb_role role;
+ struct platform_device *host_dev;
+ struct cdns3_device *gadget_dev;
+ struct phy *usb2_phy;
+ struct phy *usb3_phy;
+ /* mutex used in workqueue */
+ struct mutex mutex;
+ enum usb_dr_mode dr_mode;
+ struct usb_role_switch *role_sw;
+ int role_override;
+};
+
+int cdns3_hw_role_switch(struct cdns3 *cdns);
+
+#endif /* __LINUX_CDNS3_CORE_H */
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/debug.h
new file mode 100644
index 000000000000..2c9afbfe988b
--- /dev/null
+++ b/drivers/usb/cdns3/debug.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS DRD Driver.
+ * Debug header file.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+#ifndef __LINUX_CDNS3_DEBUG
+#define __LINUX_CDNS3_DEBUG
+
+#include "core.h"
+
+static inline char *cdns3_decode_usb_irq(char *str,
+ enum usb_device_speed speed,
+ u32 usb_ists)
+{
+ int ret;
+
+ ret = sprintf(str, "IRQ %08x = ", usb_ists);
+
+ if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
+ ret += sprintf(str + ret, "Connection %s\n",
+ usb_speed_string(speed));
+ }
+ if (usb_ists & USB_ISTS_DIS2I || usb_ists & USB_ISTS_DISI)
+ ret += sprintf(str + ret, "Disconnection ");
+ if (usb_ists & USB_ISTS_L2ENTI)
+ ret += sprintf(str + ret, "suspended ");
+ if (usb_ists & USB_ISTS_L1ENTI)
+ ret += sprintf(str + ret, "L1 enter ");
+ if (usb_ists & USB_ISTS_L1EXTI)
+ ret += sprintf(str + ret, "L1 exit ");
+ if (usb_ists & USB_ISTS_L2ENTI)
+ ret += sprintf(str + ret, "L2 enter ");
+ if (usb_ists & USB_ISTS_L2EXTI)
+ ret += sprintf(str + ret, "L2 exit ");
+ if (usb_ists & USB_ISTS_U3EXTI)
+ ret += sprintf(str + ret, "U3 exit ");
+ if (usb_ists & USB_ISTS_UWRESI)
+ ret += sprintf(str + ret, "Warm Reset ");
+ if (usb_ists & USB_ISTS_UHRESI)
+ ret += sprintf(str + ret, "Hot Reset ");
+ if (usb_ists & USB_ISTS_U2RESI)
+ ret += sprintf(str + ret, "Reset");
+
+ return str;
+}
+
+static inline char *cdns3_decode_ep_irq(char *str,
+ u32 ep_sts,
+ const char *ep_name)
+{
+ int ret;
+
+ ret = sprintf(str, "IRQ for %s: %08x ", ep_name, ep_sts);
+
+ if (ep_sts & EP_STS_SETUP)
+ ret += sprintf(str + ret, "SETUP ");
+ if (ep_sts & EP_STS_IOC)
+ ret += sprintf(str + ret, "IOC ");
+ if (ep_sts & EP_STS_ISP)
+ ret += sprintf(str + ret, "ISP ");
+ if (ep_sts & EP_STS_DESCMIS)
+ ret += sprintf(str + ret, "DESCMIS ");
+ if (ep_sts & EP_STS_STREAMR)
+ ret += sprintf(str + ret, "STREAMR ");
+ if (ep_sts & EP_STS_MD_EXIT)
+ ret += sprintf(str + ret, "MD_EXIT ");
+ if (ep_sts & EP_STS_TRBERR)
+ ret += sprintf(str + ret, "TRBERR ");
+ if (ep_sts & EP_STS_NRDY)
+ ret += sprintf(str + ret, "NRDY ");
+ if (ep_sts & EP_STS_PRIME)
+ ret += sprintf(str + ret, "PRIME ");
+ if (ep_sts & EP_STS_SIDERR)
+ ret += sprintf(str + ret, "SIDERRT ");
+ if (ep_sts & EP_STS_OUTSMM)
+ ret += sprintf(str + ret, "OUTSMM ");
+ if (ep_sts & EP_STS_ISOERR)
+ ret += sprintf(str + ret, "ISOERR ");
+ if (ep_sts & EP_STS_IOT)
+ ret += sprintf(str + ret, "IOT ");
+
+ return str;
+}
+
+static inline char *cdns3_decode_epx_irq(char *str,
+ char *ep_name,
+ u32 ep_sts)
+{
+ return cdns3_decode_ep_irq(str, ep_sts, ep_name);
+}
+
+static inline char *cdns3_decode_ep0_irq(char *str,
+ int dir,
+ u32 ep_sts)
+{
+ return cdns3_decode_ep_irq(str, ep_sts,
+ dir ? "ep0IN" : "ep0OUT");
+}
+
+/**
+ * Debug a transfer ring.
+ *
+ * Prints out all TRBs in the endpoint ring, even those after the Link TRB.
+ */
+static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep,
+ struct cdns3_trb *ring, char *str)
+{
+ dma_addr_t addr = priv_ep->trb_pool_dma;
+ struct cdns3_trb *trb;
+ int trb_per_sector;
+ int ret = 0;
+ int i;
+
+ trb_per_sector = GET_TRBS_PER_SEGMENT(priv_ep->type);
+
+ trb = &priv_ep->trb_pool[priv_ep->dequeue];
+ ret += sprintf(str + ret, "\n\t\tRing contents for %s:", priv_ep->name);
+
+ ret += sprintf(str + ret,
+ "\n\t\tRing deq index: %d, trb: %p (virt), 0x%llx (dma)\n",
+ priv_ep->dequeue, trb,
+ (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb));
+
+ trb = &priv_ep->trb_pool[priv_ep->enqueue];
+ ret += sprintf(str + ret,
+ "\t\tRing enq index: %d, trb: %p (virt), 0x%llx (dma)\n",
+ priv_ep->enqueue, trb,
+ (unsigned long long)cdns3_trb_virt_to_dma(priv_ep, trb));
+
+ ret += sprintf(str + ret,
+ "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
+ priv_ep->free_trbs, priv_ep->ccs, priv_ep->pcs);
+
+	if (trb_per_sector > TRBS_PER_SEGMENT) {
+		sprintf(str + ret, "\t\tToo big transfer ring %d\n",
+			trb_per_sector);
+		return str;
+	}
+
+ for (i = 0; i < trb_per_sector; ++i) {
+ trb = &ring[i];
+ ret += sprintf(str + ret,
+ "\t\t@%pad %08x %08x %08x\n", &addr,
+ le32_to_cpu(trb->buffer),
+ le32_to_cpu(trb->length),
+ le32_to_cpu(trb->control));
+ addr += sizeof(*trb);
+ }
+
+ return str;
+}
+
+#endif /*__LINUX_CDNS3_DEBUG*/
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
new file mode 100644
index 000000000000..16ad485f0b69
--- /dev/null
+++ b/drivers/usb/cdns3/drd.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/usb/otg.h>
+
+#include "gadget.h"
+#include "drd.h"
+#include "core.h"
+
+/**
+ * cdns3_set_mode - change mode of OTG Core
+ * @cdns: pointer to context structure
+ * @mode: selected mode of operation (enum usb_dr_mode)
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+{
+ int ret = 0;
+ u32 reg;
+
+ switch (mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ break;
+ case USB_DR_MODE_HOST:
+ break;
+ case USB_DR_MODE_OTG:
+ dev_dbg(cdns->dev, "Set controller to OTG mode\n");
+ if (cdns->version == CDNS3_CONTROLLER_V1) {
+ reg = readl(&cdns->otg_v1_regs->override);
+ reg |= OVERRIDE_IDPULLUP;
+ writel(reg, &cdns->otg_v1_regs->override);
+ } else {
+ reg = readl(&cdns->otg_v0_regs->ctrl1);
+ reg |= OVERRIDE_IDPULLUP_V0;
+ writel(reg, &cdns->otg_v0_regs->ctrl1);
+ }
+
+ /*
+ * Hardware specification says: "ID_VALUE must be valid within
+		 * 50ms after idpullup is set to '1'", so the driver must wait
+ * 50ms before reading this pin.
+ */
+ usleep_range(50000, 60000);
+ break;
+ default:
+ dev_err(cdns->dev, "Unsupported mode of operation %d\n", mode);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int cdns3_get_id(struct cdns3 *cdns)
+{
+ int id;
+
+ id = readl(&cdns->otg_regs->sts) & OTGSTS_ID_VALUE;
+ dev_dbg(cdns->dev, "OTG ID: %d", id);
+
+ return id;
+}
+
+int cdns3_get_vbus(struct cdns3 *cdns)
+{
+ int vbus;
+
+ vbus = !!(readl(&cdns->otg_regs->sts) & OTGSTS_VBUS_VALID);
+ dev_dbg(cdns->dev, "OTG VBUS: %d", vbus);
+
+ return vbus;
+}
+
+int cdns3_is_host(struct cdns3 *cdns)
+{
+ if (cdns->dr_mode == USB_DR_MODE_HOST)
+ return 1;
+ else if (!cdns3_get_id(cdns))
+ return 1;
+
+ return 0;
+}
+
+int cdns3_is_device(struct cdns3 *cdns)
+{
+ if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL)
+ return 1;
+ else if (cdns->dr_mode == USB_DR_MODE_OTG)
+ if (cdns3_get_id(cdns))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * cdns3_otg_disable_irq - Disable all OTG interrupts
+ * @cdns: Pointer to controller context structure
+ */
+static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+{
+ writel(0, &cdns->otg_regs->ien);
+}
+
+/**
+ * cdns3_otg_enable_irq - enable id and sess_valid interrupts
+ * @cdns: Pointer to controller context structure
+ */
+static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+{
+ writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
+ OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
+}
+
+/**
+ * cdns3_drd_switch_host - start/stop host
+ * @cdns: Pointer to controller context structure
+ * @on: 1 for start, 0 for stop
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+int cdns3_drd_switch_host(struct cdns3 *cdns, int on)
+{
+ int ret, val;
+ u32 reg = OTGCMD_OTG_DIS;
+
+ /* switch OTG core */
+ if (on) {
+ writel(OTGCMD_HOST_BUS_REQ | reg, &cdns->otg_regs->cmd);
+
+ dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
+ ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
+ val & OTGSTS_XHCI_READY,
+ 1, 100000);
+ if (ret) {
+ dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
+ return ret;
+ }
+ } else {
+ writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP |
+ OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF,
+ &cdns->otg_regs->cmd);
+ /* Waiting till H_IDLE state.*/
+ readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
+ !(val & OTGSTATE_HOST_STATE_MASK),
+ 1, 2000000);
+ }
+
+ return 0;
+}
+
+/**
+ * cdns3_drd_switch_gadget - start/stop gadget
+ * @cdns: Pointer to controller context structure
+ * @on: 1 for start, 0 for stop
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+int cdns3_drd_switch_gadget(struct cdns3 *cdns, int on)
+{
+ int ret, val;
+ u32 reg = OTGCMD_OTG_DIS;
+
+ /* switch OTG core */
+ if (on) {
+ writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
+
+ dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
+
+ ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
+ val & OTGSTS_DEV_READY,
+ 1, 100000);
+ if (ret) {
+ dev_err(cdns->dev, "timeout waiting for dev_ready\n");
+ return ret;
+ }
+ } else {
+ /*
+		 * The driver should wait at least 10us after disabling Device
+		 * before turning off the Device bus (DEV_BUS_DROP).
+ */
+ usleep_range(20, 30);
+ writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP |
+ OTGCMD_DEV_POWER_OFF | OTGCMD_HOST_POWER_OFF,
+ &cdns->otg_regs->cmd);
+ /* Waiting till DEV_IDLE state.*/
+ readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
+ !(val & OTGSTATE_DEV_STATE_MASK),
+ 1, 2000000);
+ }
+
+ return 0;
+}
+
+/**
+ * cdns3_init_otg_mode - initialize drd controller
+ * @cdns: Pointer to controller context structure
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+static int cdns3_init_otg_mode(struct cdns3 *cdns)
+{
+ int ret = 0;
+
+ cdns3_otg_disable_irq(cdns);
+ /* clear all interrupts */
+ writel(~0, &cdns->otg_regs->ivect);
+
+ ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
+ if (ret)
+ return ret;
+
+ cdns3_otg_enable_irq(cdns);
+ return ret;
+}
+
+/**
+ * cdns3_drd_update_mode - initialize mode of operation
+ * @cdns: Pointer to controller context structure
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+int cdns3_drd_update_mode(struct cdns3 *cdns)
+{
+ int ret = 0;
+
+ switch (cdns->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
+ break;
+ case USB_DR_MODE_HOST:
+ ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST);
+ break;
+ case USB_DR_MODE_OTG:
+ ret = cdns3_init_otg_mode(cdns);
+ break;
+ default:
+ dev_err(cdns->dev, "Unsupported mode of operation %d\n",
+ cdns->dr_mode);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static irqreturn_t cdns3_drd_thread_irq(int irq, void *data)
+{
+ struct cdns3 *cdns = data;
+
+ cdns3_hw_role_switch(cdns);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * cdns3_drd_irq - interrupt handler for OTG events
+ *
+ * @irq: irq number for cdns3 core device
+ * @data: structure of cdns3
+ *
+ * Returns IRQ_WAKE_THREAD when the threaded handler must run, otherwise IRQ_NONE
+ */
+static irqreturn_t cdns3_drd_irq(int irq, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct cdns3 *cdns = data;
+ u32 reg;
+
+ if (cdns->dr_mode != USB_DR_MODE_OTG)
+ return ret;
+
+ reg = readl(&cdns->otg_regs->ivect);
+
+ if (!reg)
+ return ret;
+
+ if (reg & OTGIEN_ID_CHANGE_INT) {
+ dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n",
+ cdns3_get_id(cdns));
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) {
+ dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n",
+ cdns3_get_vbus(cdns));
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ writel(~0, &cdns->otg_regs->ivect);
+ return ret;
+}
+
+int cdns3_drd_init(struct cdns3 *cdns)
+{
+ void __iomem *regs;
+ int ret = 0;
+ u32 state;
+
+ regs = devm_ioremap_resource(cdns->dev, &cdns->otg_res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+	/*
+	 * Detection of DRD version. The controller has been released
+	 * in two versions. Both are similar, but they have some changes
+	 * in the register maps.
+	 * The first register in the old version is the command register and
+	 * it is read only, so the driver should read 0 from it. On the other
+	 * hand, in v1 the first register contains the device ID number, which
+	 * is never 0. The driver uses this fact to detect the proper version
+	 * of the controller.
+	 */
+ cdns->otg_v0_regs = regs;
+ if (!readl(&cdns->otg_v0_regs->cmd)) {
+ cdns->version = CDNS3_CONTROLLER_V0;
+ cdns->otg_v1_regs = NULL;
+ cdns->otg_regs = regs;
+ writel(1, &cdns->otg_v0_regs->simulate);
+ dev_info(cdns->dev, "DRD version v0 (%08x)\n",
+ readl(&cdns->otg_v0_regs->version));
+ } else {
+ cdns->otg_v0_regs = NULL;
+ cdns->otg_v1_regs = regs;
+ cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
+ cdns->version = CDNS3_CONTROLLER_V1;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ dev_info(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+ readl(&cdns->otg_v1_regs->did),
+ readl(&cdns->otg_v1_regs->rid));
+ }
+
+ state = OTGSTS_STRAP(readl(&cdns->otg_regs->sts));
+
+ /* Update dr_mode according to STRAP configuration. */
+ cdns->dr_mode = USB_DR_MODE_OTG;
+ if (state == OTGSTS_STRAP_HOST) {
+ dev_dbg(cdns->dev, "Controller strapped to HOST\n");
+ cdns->dr_mode = USB_DR_MODE_HOST;
+ } else if (state == OTGSTS_STRAP_GADGET) {
+ dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
+ cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
+
+ ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
+ cdns3_drd_irq,
+ cdns3_drd_thread_irq,
+ IRQF_SHARED,
+ dev_name(cdns->dev), cdns);
+
+ if (ret) {
+ dev_err(cdns->dev, "couldn't get otg_irq\n");
+ return ret;
+ }
+
+ state = readl(&cdns->otg_regs->sts);
+ if (OTGSTS_OTG_NRDY(state) != 0) {
+ dev_err(cdns->dev, "Cadence USB3 OTG device not ready\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
+int cdns3_drd_exit(struct cdns3 *cdns)
+{
+ cdns3_otg_disable_irq(cdns);
+ return 0;
+}
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
new file mode 100644
index 000000000000..04e01c4d2377
--- /dev/null
+++ b/drivers/usb/cdns3/drd.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USB3 DRD header file.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+#ifndef __LINUX_CDNS3_DRD
+#define __LINUX_CDNS3_DRD
+
+#include <linux/usb/otg.h>
+#include <linux/phy/phy.h>
+#include "core.h"
+
+/* DRD register interface for version v1. */
+struct cdns3_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 capabilities;
+ __le32 reserved1;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 reserved2;
+ __le32 ien;
+ __le32 ivect;
+ __le32 refclk;
+ __le32 tmr;
+ __le32 reserved3[4];
+ __le32 simulate;
+ __le32 override;
+ __le32 susp_ctrl;
+ __le32 reserved4;
+ __le32 anasts;
+ __le32 adp_ramp_time;
+ __le32 ctrl1;
+ __le32 ctrl2;
+};
+
+/* DRD register interface for version v0. */
+struct cdns3_otg_legacy_regs {
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 refclk;
+ __le32 ien;
+ __le32 ivect;
+ __le32 reserved1[3];
+ __le32 tmr;
+ __le32 reserved2[2];
+ __le32 version;
+ __le32 capabilities;
+ __le32 reserved3[2];
+ __le32 simulate;
+ __le32 reserved4[5];
+ __le32 ctrl1;
+};
+
+/*
+ * Common register interface for both versions of the DRD controller.
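+ * This view matches the register maps of both versions starting at the
+ * command register; within it, only the fourth word ('different1') differs
+ * between the two maps.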
+ */
+struct cdns3_otg_common_regs {
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 different1;
+ __le32 ien;
+ __le32 ivect;
+};
+
+/* CDNS_RID - bitmasks */
+#define CDNS_RID(p) ((p) & GENMASK(15, 0))
+
+/* CDNS_DID - bitmasks */
+#define CDNS_DID(p) ((p) & GENMASK(31, 0))
+
+/* OTGCMD - bitmasks */
+/* "Request the bus for Device mode. */
+#define OTGCMD_DEV_BUS_REQ BIT(0)
+/* Request the bus for Host mode */
+#define OTGCMD_HOST_BUS_REQ BIT(1)
+/* Enable OTG mode. */
+#define OTGCMD_OTG_EN BIT(2)
+/* Disable OTG mode */
+#define OTGCMD_OTG_DIS BIT(3)
+/*"Configure OTG as A-Device. */
+#define OTGCMD_A_DEV_EN BIT(4)
+/*"Configure OTG as A-Device. */
+#define OTGCMD_A_DEV_DIS BIT(5)
+/* Drop the bus for Device mode. */
+#define OTGCMD_DEV_BUS_DROP BIT(8)
+/* Drop the bus for Host mode. */
+#define OTGCMD_HOST_BUS_DROP BIT(9)
+/* Power Down USBSS-DEV. */
+#define OTGCMD_DEV_POWER_OFF BIT(11)
+/* Power Down CDNSXHCI. */
+#define OTGCMD_HOST_POWER_OFF BIT(12)
+
+/* OTGIEN - bitmasks */
+/* ID change interrupt enable */
+#define OTGIEN_ID_CHANGE_INT BIT(0)
+/* Vbusvalid rise detected interrupt enable. */
+#define OTGIEN_VBUSVALID_RISE_INT BIT(4)
+/* Vbusvalid fall detected interrupt enable */
+#define OTGIEN_VBUSVALID_FALL_INT BIT(5)
+
+/* OTGSTS - bitmasks */
+/*
+ * Current value of the ID pin. It is only valid when idpullup in
+ * OTGCTRL1_TYPE register is set to '1'.
+ */
+#define OTGSTS_ID_VALUE BIT(0)
+/* Current value of the vbus_valid */
+#define OTGSTS_VBUS_VALID BIT(1)
+/* Current value of the b_sess_vld */
+#define OTGSTS_SESSION_VALID BIT(2)
+/* Device mode is active. */
+#define OTGSTS_DEV_ACTIVE BIT(3)
+/* Host mode is active. */
+#define OTGSTS_HOST_ACTIVE BIT(4)
+/* OTG Controller not ready. */
+#define OTGSTS_OTG_NRDY_MASK BIT(11)
+#define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK)
+/*
+ * Value of the strap pins.
+ * 000 - no default configuration
+ * 010 - Controller initially configured as Host
+ * 100 - Controller initially configured as Device
+ */
+#define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12)
+#define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00
+#define OTGSTS_STRAP_HOST_OTG 0x01
+#define OTGSTS_STRAP_HOST 0x02
+#define OTGSTS_STRAP_GADGET 0x04
+/* Host mode is turned on. */
+#define OTGSTS_XHCI_READY BIT(26)
+/* "Device mode is turned on .*/
+#define OTGSTS_DEV_READY BIT(27)
+
+/* OTGSTATE- bitmasks */
+#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0)
+#define OTGSTATE_HOST_STATE_MASK GENMASK(5, 3)
+#define OTGSTATE_HOST_STATE_IDLE 0x0
+#define OTGSTATE_HOST_STATE_VBUS_FALL 0x7
+#define OTGSTATE_HOST_STATE(p) (((p) & OTGSTATE_HOST_STATE_MASK) >> 3)
+
+/* OTGREFCLK - bitmasks */
+#define OTGREFCLK_STB_CLK_SWITCH_EN BIT(31)
+
+/* OVERRIDE - bitmasks */
+#define OVERRIDE_IDPULLUP BIT(0)
+/* Only for CDNS3_CONTROLLER_V0 version */
+#define OVERRIDE_IDPULLUP_V0 BIT(24)
+
+int cdns3_is_host(struct cdns3 *cdns);
+int cdns3_is_device(struct cdns3 *cdns);
+int cdns3_get_id(struct cdns3 *cdns);
+int cdns3_get_vbus(struct cdns3 *cdns);
+int cdns3_drd_init(struct cdns3 *cdns);
+int cdns3_drd_exit(struct cdns3 *cdns);
+int cdns3_drd_update_mode(struct cdns3 *cdns);
+int cdns3_drd_switch_gadget(struct cdns3 *cdns, int on);
+int cdns3_drd_switch_host(struct cdns3 *cdns, int on);
+int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
+
+#endif /* __LINUX_CDNS3_DRD */
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
new file mode 100644
index 000000000000..44f652e8b5a2
--- /dev/null
+++ b/drivers/usb/cdns3/ep0.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver - gadget side.
+ *
+ * Copyright (C) 2018 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Pawel Jez <pjez@cadence.com>,
+ * Pawel Laszczak <pawell@cadence.com>
+ * Peter Chen <peter.chen@nxp.com>
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/iopoll.h>
+
+#include "gadget.h"
+#include "trace.h"
+
+static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+/**
+ * cdns3_ep0_run_transfer - Do transfer on default endpoint hardware
+ * @priv_dev: extended gadget object
+ * @dma_addr: physical address where data is/will be stored
+ * @length: data length
+ * @erdy: set to 1 when an ERDY packet should be sent -
+ *        exit from flow control state
+ * @zlp: set to 1 to append a zero-length packet after the data stage
+ */
+static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev,
+ dma_addr_t dma_addr,
+ unsigned int length, int erdy, int zlp)
+{
+ struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+ struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+
+ priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr);
+ priv_ep->trb_pool[0].length = TRB_LEN(length);
+
+ if (zlp) {
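+		/*
+		 * Zero-length packet: chain a second, zero-length TRB after
+		 * the data TRB; only the second TRB raises IOC.
+		 */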
+ priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL);
+ priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr);
+ priv_ep->trb_pool[1].length = TRB_LEN(0);
+ priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL);
+ } else {
+ priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL);
+ priv_ep->trb_pool[1].control = 0;
+ }
+
+ trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool);
+
+ cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir);
+
+ writel(EP_STS_TRBERR, &regs->ep_sts);
+ writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), &regs->ep_traddr);
+ trace_cdns3_doorbell_ep0(priv_dev->ep0_data_dir ? "ep0in" : "ep0out",
+ readl(&regs->ep_traddr));
+
+ /* TRB should be prepared before starting transfer. */
+ writel(EP_CMD_DRDY, &regs->ep_cmd);
+
+ /* Resume controller before arming transfer. */
+ __cdns3_gadget_wakeup(priv_dev);
+
+ if (erdy)
+ writel(EP_CMD_ERDY, &priv_dev->regs->ep_cmd);
+}
+
+/**
+ * cdns3_ep0_delegate_req - Delegate handling of a setup packet to the
+ * gadget driver
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns zero on success or negative value on failure
+ */
+static int cdns3_ep0_delegate_req(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ int ret;
+
+ spin_unlock(&priv_dev->lock);
+ priv_dev->setup_pending = 1;
+ ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req);
+ priv_dev->setup_pending = 0;
+ spin_lock(&priv_dev->lock);
+ return ret;
+}
+
+static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev)
+{
+ priv_dev->ep0_data_dir = 0;
+ priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
+ cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
+ sizeof(struct usb_ctrlrequest), 0, 0);
+}
+
+static void cdns3_ep0_complete_setup(struct cdns3_device *priv_dev,
+ u8 send_stall, u8 send_erdy)
+{
+ struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+ struct usb_request *request;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ if (request)
+ list_del_init(&request->list);
+
+ if (send_stall) {
+ trace_cdns3_halt(priv_ep, send_stall, 0);
+ /* set_stall on ep0 */
+ cdns3_select_ep(priv_dev, 0x00);
+ writel(EP_CMD_SSTALL, &priv_dev->regs->ep_cmd);
+ } else {
+ cdns3_prepare_setup_packet(priv_dev);
+ }
+
+ priv_dev->ep0_stage = CDNS3_SETUP_STAGE;
+ writel((send_erdy ? EP_CMD_ERDY : 0) | EP_CMD_REQ_CMPL,
+ &priv_dev->regs->ep_cmd);
+
+ cdns3_allow_enable_l1(priv_dev, 1);
+}
+
+/**
+ * cdns3_req_ep0_set_configuration - Handling of SET_CONFIG standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, USB_GADGET_DELAYED_STATUS on deferred status stage,
+ * error code on error
+ */
+static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ enum usb_device_state device_state = priv_dev->gadget.state;
+ struct cdns3_endpoint *priv_ep;
+ u32 config = le16_to_cpu(ctrl_req->wValue);
+ int result = 0;
+ int i;
+
+ switch (device_state) {
+ case USB_STATE_ADDRESS:
+ /* Configure non-control EPs */
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ priv_ep = priv_dev->eps[i];
+ if (!priv_ep)
+ continue;
+
+ if (priv_ep->flags & EP_CLAIMED)
+ cdns3_ep_config(priv_ep);
+ }
+
+ result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+
+ if (result)
+ return result;
+
+ if (config) {
+ cdns3_set_hw_configuration(priv_dev);
+ } else {
+ cdns3_hw_reset_eps_config(priv_dev);
+ usb_gadget_set_state(&priv_dev->gadget,
+ USB_STATE_ADDRESS);
+ }
+ break;
+ case USB_STATE_CONFIGURED:
+ result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+
+ if (!config && !result) {
+ cdns3_hw_reset_eps_config(priv_dev);
+ usb_gadget_set_state(&priv_dev->gadget,
+ USB_STATE_ADDRESS);
+ }
+ break;
+ default:
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
+/**
+ * cdns3_req_ep0_set_address - Handling of SET_ADDRESS standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ enum usb_device_state device_state = priv_dev->gadget.state;
+ u32 reg;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl_req->wValue);
+
+ if (addr > USB_DEVICE_MAX_ADDRESS) {
+ dev_err(priv_dev->dev,
+ "Device address (%d) cannot be greater than %d\n",
+ addr, USB_DEVICE_MAX_ADDRESS);
+ return -EINVAL;
+ }
+
+ if (device_state == USB_STATE_CONFIGURED) {
+ dev_err(priv_dev->dev,
+ "can't set_address from configured state\n");
+ return -EINVAL;
+ }
+
+ reg = readl(&priv_dev->regs->usb_cmd);
+
+ writel(reg | USB_CMD_FADDR(addr) | USB_CMD_SET_ADDR,
+ &priv_dev->regs->usb_cmd);
+
+ usb_gadget_set_state(&priv_dev->gadget,
+ (addr ? USB_STATE_ADDRESS : USB_STATE_DEFAULT));
+
+ return 0;
+}
+
+/**
+ * cdns3_req_ep0_get_status - Handling of GET_STATUS standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl)
+{
+ __le16 *response_pkt;
+ u16 usb_status = 0;
+ u32 recip;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ /* self powered */
+ if (priv_dev->is_selfpowered)
+ usb_status = BIT(USB_DEVICE_SELF_POWERED);
+
+ if (priv_dev->wake_up_flag)
+ usb_status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (priv_dev->gadget.speed != USB_SPEED_SUPER)
+ break;
+
+ if (priv_dev->u1_allowed)
+ usb_status |= BIT(USB_DEV_STAT_U1_ENABLED);
+
+ if (priv_dev->u2_allowed)
+ usb_status |= BIT(USB_DEV_STAT_U2_ENABLED);
+
+ break;
+ case USB_RECIP_INTERFACE:
+ return cdns3_ep0_delegate_req(priv_dev, ctrl);
+ case USB_RECIP_ENDPOINT:
+ /* check if endpoint is stalled */
+ cdns3_select_ep(priv_dev, ctrl->wIndex);
+ if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)))
+ usb_status = BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ response_pkt = (__le16 *)priv_dev->setup_buf;
+ *response_pkt = cpu_to_le16(usb_status);
+
+ cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma,
+ sizeof(*response_pkt), 1, 0);
+ return 0;
+}
+
+static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+ int ret = 0;
+ u32 wValue;
+ u16 tmode;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ state = priv_dev->gadget.state;
+ speed = priv_dev->gadget.speed;
+
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ priv_dev->wake_up_flag = !!set;
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
+ return -EINVAL;
+
+ priv_dev->u1_allowed = !!set;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed != USB_SPEED_SUPER)
+ return -EINVAL;
+
+ priv_dev->u2_allowed = !!set;
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ ret = -EINVAL;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+ return -EINVAL;
+
+ tmode = le16_to_cpu(ctrl->wIndex);
+
+ if (!set || (tmode & 0xff) != 0)
+ return -EINVAL;
+
+ switch (tmode >> 8) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ cdns3_ep0_complete_setup(priv_dev, 0, 1);
+			/*
+			 * Small delay to give the controller some time
+			 * to send the status stage.
+			 * This time should be less than 3ms.
+			 */
+ usleep_range(1000, 2000);
+ cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
+ USB_CMD_STMODE |
+ USB_STS_TMODE_SEL(tmode - 1));
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ u32 wValue;
+ int ret = 0;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ struct cdns3_endpoint *priv_ep;
+ int ret = 0;
+ u8 index;
+
+ if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
+ return -EINVAL;
+
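+	/* Halt requests addressed to ep0 itself are treated as a no-op. */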
+ if (!(ctrl->wIndex & ~USB_DIR_IN))
+ return 0;
+
+ index = cdns3_ep_addr_to_index(ctrl->wIndex);
+ priv_ep = priv_dev->eps[index];
+
+ cdns3_select_ep(priv_dev, ctrl->wIndex);
+
+ if (set)
+ __cdns3_gadget_ep_set_halt(priv_ep);
+ else if (!(priv_ep->flags & EP_WEDGE))
+ ret = __cdns3_gadget_ep_clear_halt(priv_ep);
+
+ cdns3_select_ep(priv_dev, 0x00);
+
+ return ret;
+}
+
+/**
+ * cdns3_req_ep0_handle_feature -
+ * Handling of CLEAR/SET_FEATURE standard USB requests
+ *
+ * @priv_dev: extended gadget object
+ * @ctrl: pointer to received setup packet
+ * @set: must be set to 1 for SET_FEATURE request
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ int ret = 0;
+ u32 recip;
+
+ recip = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set);
+ break;
+ case USB_RECIP_INTERFACE:
+ ret = cdns3_ep0_feature_handle_intf(priv_dev, ctrl, set);
+ break;
+ case USB_RECIP_ENDPOINT:
+ ret = cdns3_ep0_feature_handle_endpoint(priv_dev, ctrl, set);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * cdns3_req_ep0_set_sel - Handling of SET_SEL standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ if (priv_dev->gadget.state < USB_STATE_ADDRESS)
+ return -EINVAL;
+
+ if (ctrl_req->wLength != 6) {
+ dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n",
+ ctrl_req->wLength);
+ return -EINVAL;
+ }
+
+ cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0);
+ return 0;
+}
+
+/**
+ * cdns3_req_ep0_set_isoch_delay -
+ * Handling of SET_ISOCH_DELAY standard USB request
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ if (ctrl_req->wIndex || ctrl_req->wLength)
+ return -EINVAL;
+
+ priv_dev->isoch_delay = ctrl_req->wValue;
+
+ return 0;
+}
+
+/**
+ * cdns3_ep0_standard_request - Handling standard USB requests
+ * @priv_dev: extended gadget object
+ * @ctrl_req: pointer to received setup packet
+ *
+ * Returns 0 if success, error code on error
+ */
+static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev,
+ struct usb_ctrlrequest *ctrl_req)
+{
+ int ret;
+
+ switch (ctrl_req->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ ret = cdns3_req_ep0_set_address(priv_dev, ctrl_req);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ ret = cdns3_req_ep0_set_configuration(priv_dev, ctrl_req);
+ break;
+ case USB_REQ_GET_STATUS:
+ ret = cdns3_req_ep0_get_status(priv_dev, ctrl_req);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ ret = cdns3_req_ep0_handle_feature(priv_dev, ctrl_req, 1);
+ break;
+ case USB_REQ_SET_SEL:
+ ret = cdns3_req_ep0_set_sel(priv_dev, ctrl_req);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ ret = cdns3_req_ep0_set_isoch_delay(priv_dev, ctrl_req);
+ break;
+ default:
+ ret = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+ break;
+ }
+
+ return ret;
+}
+
+static void __pending_setup_status_handler(struct cdns3_device *priv_dev)
+{
+ struct usb_request *request = priv_dev->pending_status_request;
+
+ if (priv_dev->status_completion_no_call && request &&
+ request->complete) {
+ request->complete(&priv_dev->eps[0]->endpoint, request);
+ priv_dev->status_completion_no_call = 0;
+ }
+}
+
+void cdns3_pending_setup_status_handler(struct work_struct *work)
+{
+ struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
+ pending_status_wq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ __pending_setup_status_handler(priv_dev);
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+}
+
+/**
+ * cdns3_ep0_setup_phase - Handling setup USB requests
+ * @priv_dev: extended gadget object
+ */
+static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev)
+{
+ struct usb_ctrlrequest *ctrl = priv_dev->setup_buf;
+ struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+ int result;
+
+ priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN;
+
+ trace_cdns3_ctrl_req(ctrl);
+
+ if (!list_empty(&priv_ep->pending_req_list)) {
+ struct usb_request *request;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ priv_ep->dir = priv_dev->ep0_data_dir;
+ cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+ -ECONNRESET);
+ }
+
+ if (le16_to_cpu(ctrl->wLength))
+ priv_dev->ep0_stage = CDNS3_DATA_STAGE;
+ else
+ priv_dev->ep0_stage = CDNS3_STATUS_STAGE;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ result = cdns3_ep0_standard_request(priv_dev, ctrl);
+ else
+ result = cdns3_ep0_delegate_req(priv_dev, ctrl);
+
+ if (result == USB_GADGET_DELAYED_STATUS)
+ return;
+
+ if (result < 0)
+ cdns3_ep0_complete_setup(priv_dev, 1, 1);
+ else if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE)
+ cdns3_ep0_complete_setup(priv_dev, 0, 1);
+}
+
+static void cdns3_transfer_completed(struct cdns3_device *priv_dev)
+{
+ struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
+
+ if (!list_empty(&priv_ep->pending_req_list)) {
+ struct usb_request *request;
+
+ trace_cdns3_complete_trb(priv_ep, priv_ep->trb_pool);
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+
+ request->actual =
+ TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length));
+
+ priv_ep->dir = priv_dev->ep0_data_dir;
+ cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0);
+ }
+
+ cdns3_ep0_complete_setup(priv_dev, 0, 0);
+}
+
+/**
+ * cdns3_check_new_setup - Check whether the controller received a new SETUP packet.
+ * @priv_dev: extended gadget object
+ *
+ * The SETUP packet can be kept in on-chip memory or in system memory.
+ */
+static bool cdns3_check_new_setup(struct cdns3_device *priv_dev)
+{
+ u32 ep_sts_reg;
+
+ cdns3_select_ep(priv_dev, 0 | USB_DIR_OUT);
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+ return !!(ep_sts_reg & (EP_STS_SETUP | EP_STS_STPWAIT));
+}
+
+/**
+ * cdns3_check_ep0_interrupt_proceed - Processes interrupt related to endpoint 0
+ * @priv_dev: extended gadget object
+ * @dir: USB_DIR_IN for IN direction, USB_DIR_OUT for OUT direction
+ */
+void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir)
+{
+ u32 ep_sts_reg;
+
+ cdns3_select_ep(priv_dev, dir);
+
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+ writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+
+ trace_cdns3_ep0_irq(priv_dev, ep_sts_reg);
+
+ __pending_setup_status_handler(priv_dev);
+
+ if (ep_sts_reg & EP_STS_SETUP)
+ priv_dev->wait_for_setup = 1;
+
+ if (priv_dev->wait_for_setup && ep_sts_reg & EP_STS_IOC) {
+ priv_dev->wait_for_setup = 0;
+ cdns3_allow_enable_l1(priv_dev, 0);
+ cdns3_ep0_setup_phase(priv_dev);
+ } else if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+ priv_dev->ep0_data_dir = dir;
+ cdns3_transfer_completed(priv_dev);
+ }
+
+ if (ep_sts_reg & EP_STS_DESCMIS) {
+ if (dir == 0 && !priv_dev->setup_pending)
+ cdns3_prepare_setup_packet(priv_dev);
+ }
+}
+
+/**
+ * cdns3_gadget_ep0_enable
+ * Function shouldn't be called by the gadget driver,
+ * endpoint 0 is always active
+ */
+static int cdns3_gadget_ep0_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EINVAL;
+}
+
+/**
+ * cdns3_gadget_ep0_disable
+ * Function shouldn't be called by the gadget driver,
+ * endpoint 0 is always active
+ */
+static int cdns3_gadget_ep0_disable(struct usb_ep *ep)
+{
+ return -EINVAL;
+}
+
+/**
+ * cdns3_gadget_ep0_set_halt
+ * @ep: pointer to endpoint zero object
+ * @value: 1 for set stall, 0 for clear stall
+ *
+ * Returns 0
+ */
+static int cdns3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
+{
+ /* TODO */
+ return 0;
+}
+
+/**
+ * cdns3_gadget_ep0_queue - Transfer data on endpoint zero
+ * @ep: pointer to endpoint zero object
+ * @request: pointer to request object
+ * @gfp_flags: gfp flags
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ unsigned long flags;
+ int erdy_sent = 0;
+ int ret = 0;
+ u8 zlp = 0;
+
+ trace_cdns3_ep0_queue(priv_dev, request);
+
+	/* cancel the request if the controller received a new SETUP packet. */
+ if (cdns3_check_new_setup(priv_dev))
+ return -ECONNRESET;
+
+ /* send STATUS stage. Should be called only for SET_CONFIGURATION */
+ if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ cdns3_select_ep(priv_dev, 0x00);
+
+ erdy_sent = !priv_dev->hw_configured_flag;
+ cdns3_set_hw_configuration(priv_dev);
+
+ if (!erdy_sent)
+ cdns3_ep0_complete_setup(priv_dev, 0, 1);
+
+ cdns3_allow_enable_l1(priv_dev, 1);
+
+ request->actual = 0;
+ priv_dev->status_completion_no_call = true;
+ priv_dev->pending_status_request = request;
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ /*
+ * Since there is no completion interrupt for status stage,
+		 * the request's ->complete callback is called in software
+		 * after ep0_queue returns.
+ */
+ queue_work(system_freezable_wq, &priv_dev->pending_status_wq);
+ return 0;
+ }
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ if (!list_empty(&priv_ep->pending_req_list)) {
+ dev_err(priv_dev->dev,
+ "can't handle multiple requests for ep0\n");
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return -EBUSY;
+ }
+
+ ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
+ priv_dev->ep0_data_dir);
+ if (ret) {
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ dev_err(priv_dev->dev, "failed to map request\n");
+ return -EINVAL;
+ }
+
+ request->status = -EINPROGRESS;
+ list_add_tail(&request->list, &priv_ep->pending_req_list);
+
+ if (request->zero && request->length &&
+ (request->length % ep->maxpacket == 0))
+ zlp = 1;
+
+ cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp);
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * cdns3_gadget_ep_set_wedge - Set wedge on selected endpoint
+ * @ep: endpoint object
+ *
+ * Returns 0
+ */
+int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ dev_dbg(priv_dev->dev, "Wedge for %s\n", ep->name);
+ cdns3_gadget_ep_set_halt(ep, 1);
+ priv_ep->flags |= EP_WEDGE;
+
+ return 0;
+}
+
+const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+ .enable = cdns3_gadget_ep0_enable,
+ .disable = cdns3_gadget_ep0_disable,
+ .alloc_request = cdns3_gadget_ep_alloc_request,
+ .free_request = cdns3_gadget_ep_free_request,
+ .queue = cdns3_gadget_ep0_queue,
+ .dequeue = cdns3_gadget_ep_dequeue,
+ .set_halt = cdns3_gadget_ep0_set_halt,
+ .set_wedge = cdns3_gadget_ep_set_wedge,
+};
+
+/**
+ * cdns3_ep0_config - Configures default endpoint
+ * @priv_dev: extended gadget object
+ *
+ * Function sets the maximal packet size and enables interrupts
+ */
+void cdns3_ep0_config(struct cdns3_device *priv_dev)
+{
+ struct cdns3_usb_regs __iomem *regs;
+ struct cdns3_endpoint *priv_ep;
+ u32 max_packet_size = 64;
+
+ regs = priv_dev->regs;
+
+ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+ max_packet_size = 512;
+
+ priv_ep = priv_dev->eps[0];
+
+ if (!list_empty(&priv_ep->pending_req_list)) {
+ struct usb_request *request;
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ list_del_init(&request->list);
+ }
+
+ priv_dev->u1_allowed = 0;
+ priv_dev->u2_allowed = 0;
+
+ priv_dev->gadget.ep0->maxpacket = max_packet_size;
+ cdns3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
+
+ /* init ep out */
+ cdns3_select_ep(priv_dev, USB_DIR_OUT);
+
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ cdns3_set_register_bit(&priv_dev->regs->dtrans,
+ BIT(0) | BIT(16));
+ cdns3_set_register_bit(&priv_dev->regs->tdl_from_trb,
+ BIT(0) | BIT(16));
+ }
+
+ writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
+ &regs->ep_cfg);
+
+ writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
+ &regs->ep_sts_en);
+
+ /* init ep in */
+ cdns3_select_ep(priv_dev, USB_DIR_IN);
+
+ writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
+ &regs->ep_cfg);
+
+ writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, &regs->ep_sts_en);
+
+ cdns3_set_register_bit(&regs->usb_conf, USB_CONF_U1DS | USB_CONF_U2DS);
+}
+
+/**
+ * cdns3_init_ep0 - Initializes software endpoint 0 of the gadget
+ * @priv_dev: extended gadget object
+ * @priv_ep: extended endpoint object
+ *
+ * Returns 0 on success else error code.
+ */
+int cdns3_init_ep0(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ sprintf(priv_ep->name, "ep0");
+
+ /* fill linux fields */
+ priv_ep->endpoint.ops = &cdns3_gadget_ep0_ops;
+ priv_ep->endpoint.maxburst = 1;
+ usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
+ CDNS3_EP0_MAX_PACKET_LIMIT);
+ priv_ep->endpoint.address = 0;
+ priv_ep->endpoint.caps.type_control = 1;
+ priv_ep->endpoint.caps.dir_in = 1;
+ priv_ep->endpoint.caps.dir_out = 1;
+ priv_ep->endpoint.name = priv_ep->name;
+ priv_ep->endpoint.desc = &cdns3_gadget_ep0_desc;
+ priv_dev->gadget.ep0 = &priv_ep->endpoint;
+ priv_ep->type = USB_ENDPOINT_XFER_CONTROL;
+
+ return cdns3_allocate_trb_pool(priv_ep);
+}
diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h
new file mode 100644
index 000000000000..577469eee961
--- /dev/null
+++ b/drivers/usb/cdns3/gadget-export.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS DRD Driver - Gadget Export APIs.
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_GADGET_EXPORT
+#define __LINUX_CDNS3_GADGET_EXPORT
+
+#ifdef CONFIG_USB_CDNS3_GADGET
+
+int cdns3_gadget_init(struct cdns3 *cdns);
+void cdns3_gadget_exit(struct cdns3 *cdns);
+#else
+
+static inline int cdns3_gadget_init(struct cdns3 *cdns)
+{
+ return -ENXIO;
+}
+
+static inline void cdns3_gadget_exit(struct cdns3 *cdns) { }
+
+#endif
+
+#endif /* __LINUX_CDNS3_GADGET_EXPORT */
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
new file mode 100644
index 000000000000..228cdc4ab886
--- /dev/null
+++ b/drivers/usb/cdns3/gadget.c
@@ -0,0 +1,2744 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver - gadget side.
+ *
+ * Copyright (C) 2018-2019 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Pawel Jez <pjez@cadence.com>,
+ * Pawel Laszczak <pawell@cadence.com>
+ * Peter Chen <peter.chen@nxp.com>
+ */
+
+/*
+ * Work around 1:
+ * In some situations the controller may use a stale data address in a TRB
+ * because of the following sequence:
+ * 1. Controller reads the TRB, including the data address
+ * 2. Software updates the TRBs, including data address and Cycle bit
+ * 3. Controller reads the TRB again, this time for the Cycle bit
+ * 4. DMA runs with the stale data address
+ *
+ * To fix this problem, the driver makes the first TRB in a TD invalid.
+ * After preparing all TRBs the driver checks the DMA position; if the DMA
+ * points at the first just-added TRB and the doorbell is set, the driver
+ * must defer making this TRB valid. The TRB is made valid while adding the
+ * next TRB, but only if DMA is stopped or at the TRBERR interrupt.
+ *
+ * The issue has been fixed in the DEV_VER_V3 version of the controller.
+ *
+ * Work around 2:
+ * The controller uses shared on-chip buffers for all incoming packets on
+ * OUT endpoints, including ep0out. It is a FIFO buffer, so packets must be
+ * handled by DMA in the correct order. If the first packet in the buffer
+ * is not handled, the following packets directed to other endpoints and
+ * functions are blocked.
+ * Additionally, the packets directed to one endpoint can fill the entire
+ * on-chip buffer. In this case transfers to other endpoints are also
+ * blocked.
+ *
+ * To resolve this issue, after raising the descriptor missing interrupt
+ * the driver prepares an internal usb_request object and uses it to arm
+ * the DMA transfer.
+ *
+ * The problematic situation was observed when an endpoint had been enabled
+ * but no usb_request was queued. The driver tries to detect such endpoints
+ * and uses this workaround only for them.
+ *
+ * The driver uses a limited number of buffers. This number can be set by
+ * the macro CDNS3_WA2_NUM_BUFFERS.
+ *
+ * Such a blocking situation was observed with the ACM gadget. The host
+ * sends an OUT data packet but the ACM function is not prepared for it.
+ * As a result, the buffer placed in on-chip memory blocks transfers to
+ * other endpoints.
+ *
+ * The issue has been fixed in the DEV_VER_V2 version of the controller.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/usb/gadget.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "gadget.h"
+#include "trace.h"
+#include "drd.h"
+
+static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags);
+
+/**
+ * cdns3_set_register_bit - set bit in given register.
+ * @ptr: address of device controller register to be read and changed
+ * @mask: bits requested to set
+ */
+void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
+{
+ mask = readl(ptr) | mask;
+ writel(mask, ptr);
+}
+
+/**
+ * cdns3_ep_addr_to_index - Converts endpoint address to
+ * index of endpoint object in cdns3_device.eps[] container
+ * @ep_addr: endpoint address for which endpoint object is required
+ *
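+ * For example, ep1out (address 0x01) maps to index 1 and ep1in
+ * (address 0x81) maps to index 17.
+ *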
+ */
+u8 cdns3_ep_addr_to_index(u8 ep_addr)
+{
+ return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
+}
+
+static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ int dma_index;
+
+ dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
+
+ return dma_index / TRB_SIZE;
+}
+
+/**
+ * cdns3_next_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+struct usb_request *cdns3_next_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct usb_request, list);
+}
+
+/**
+ * cdns3_next_align_buf - returns next buffer from list
+ * @list: list containing buffers
+ *
+ * Returns buffer or NULL if no buffers in list
+ */
+struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
+}
+
+/**
+ * cdns3_next_priv_request - returns next request from list
+ * @list: list containing requests
+ *
+ * Returns request or NULL if no requests in list
+ */
+struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdns3_request, list);
+}
+
+/**
+ * cdns3_select_ep - selects endpoint
+ * @priv_dev: extended gadget object
+ * @ep: endpoint address
+ */
+void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
+{
+ if (priv_dev->selected_ep == ep)
+ return;
+
+ priv_dev->selected_ep = ep;
+ writel(ep, &priv_dev->regs->ep_sel);
+}
+
+dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
+ struct cdns3_trb *trb)
+{
+ u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
+
+ return priv_ep->trb_pool_dma + offset;
+}
+
+int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
+{
+ switch (priv_ep->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ return TRB_ISO_RING_SIZE;
+ case USB_ENDPOINT_XFER_CONTROL:
+ return TRB_CTRL_RING_SIZE;
+ default:
+ return TRB_RING_SIZE;
+ }
+}
+
+/**
+ * cdns3_allocate_trb_pool - Allocates the TRB pool for the selected endpoint
+ * @priv_ep: endpoint object
+ *
+ * Function will return 0 on success or -ENOMEM on allocation error
+ */
+int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ int ring_size = cdns3_ring_size(priv_ep);
+ struct cdns3_trb *link_trb;
+
+ if (!priv_ep->trb_pool) {
+ priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
+ ring_size,
+ &priv_ep->trb_pool_dma,
+ GFP_DMA32 | GFP_ATOMIC);
+ if (!priv_ep->trb_pool)
+ return -ENOMEM;
+ } else {
+ memset(priv_ep->trb_pool, 0, ring_size);
+ }
+
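+	/* ep0 (num == 0) does not use a Link TRB, so its ring setup ends here. */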
+ if (!priv_ep->num)
+ return 0;
+
+ priv_ep->num_trbs = ring_size / TRB_SIZE;
+ /* Initialize the last TRB as Link TRB. */
+ link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+ link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
+
+ return 0;
+}
+
+static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_ep->trb_pool) {
+ dma_free_coherent(priv_dev->sysdev,
+ cdns3_ring_size(priv_ep),
+ priv_ep->trb_pool, priv_ep->trb_pool_dma);
+ priv_ep->trb_pool = NULL;
+ }
+}
+
+/**
+ * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
+ * @priv_ep: endpoint object
+ *
+ * Endpoint must be selected before calling this function
+ */
+static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ int val;
+
+ trace_cdns3_halt(priv_ep, 1, 1);
+
+ writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
+ &priv_dev->regs->ep_cmd);
+
+ /* wait for DFLUSH cleared */
+ readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & EP_CMD_DFLUSH), 1, 1000);
+ priv_ep->flags |= EP_STALLED;
+ priv_ep->flags &= ~EP_STALL_PENDING;
+}
+
+/**
+ * cdns3_hw_reset_eps_config - reset the endpoint configuration kept by the controller.
+ * @priv_dev: extended gadget object
+ */
+void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
+{
+ writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
+
+ cdns3_allow_enable_l1(priv_dev, 0);
+ priv_dev->hw_configured_flag = 0;
+ priv_dev->onchip_used_size = 0;
+ priv_dev->out_mem_is_allocated = 0;
+ priv_dev->wait_for_setup = 0;
+}
+
+/**
+ * cdns3_ep_inc_trb - increment a trb index.
+ * @index: Pointer to the TRB index to increment.
+ * @cs: Cycle state
+ * @trb_in_seg: number of TRBs in segment
+ *
+ * The index should never point to the link TRB. After incrementing,
+ * if it would point to the link TRB, wrap around to the beginning and
+ * toggle the cycle state bit. The link TRB is always the last TRB entry.
+ */
+static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
+{
+ (*index)++;
+ if (*index == (trb_in_seg - 1)) {
+ *index = 0;
+ *cs ^= 1;
+ }
+}
+
+/**
+ * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
+ * @priv_ep: The endpoint whose enqueue pointer we're incrementing
+ */
+static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
+{
+ priv_ep->free_trbs--;
+ cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
+}
+
+/**
+ * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
+ * @priv_ep: The endpoint whose dequeue pointer we're incrementing
+ */
+static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
+{
+ priv_ep->free_trbs++;
+ cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
+}
+
+void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
+{
+ struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
+ int current_trb = priv_req->start_trb;
+
+ while (current_trb != priv_req->end_trb) {
+ cdns3_ep_inc_deq(priv_ep);
+ current_trb = priv_ep->dequeue;
+ }
+
+ cdns3_ep_inc_deq(priv_ep);
+}
+
+/**
+ * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
+ * @priv_dev: Extended gadget object
+ * @enable: Enable/disable permit to transition to L1.
+ *
+ * If bit USB_CONF_L1EN is set and the device receives an Extended Token
+ * packet, then the controller answers with an ACK handshake.
+ * If bit USB_CONF_L1DS is set and the device receives an Extended Token
+ * packet, then the controller answers with a NYET handshake.
+ */
+void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
+{
+ if (enable)
+ writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
+ else
+ writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
+}
+
+enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
+{
+ u32 reg;
+
+ reg = readl(&priv_dev->regs->usb_sts);
+
+ if (DEV_SUPERSPEED(reg))
+ return USB_SPEED_SUPER;
+ else if (DEV_HIGHSPEED(reg))
+ return USB_SPEED_HIGH;
+ else if (DEV_FULLSPEED(reg))
+ return USB_SPEED_FULL;
+ else if (DEV_LOWSPEED(reg))
+ return USB_SPEED_LOW;
+ return USB_SPEED_UNKNOWN;
+}
+
+/**
+ * cdns3_start_all_request - add all not yet started requests to the ring
+ * @priv_dev: Extended gadget object
+ * @priv_ep: The endpoint for which the requests will be started.
+ *
+ * Returns -ENOMEM if the transfer ring does not have enough free TRBs to
+ * start all requests.
+ */
+static int cdns3_start_all_request(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ struct usb_request *request;
+ int ret = 0;
+
+ while (!list_empty(&priv_ep->deferred_req_list)) {
+ request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+ ret = cdns3_ep_run_transfer(priv_ep, request);
+ if (ret)
+ return ret;
+
+ list_del(&request->list);
+ list_add_tail(&request->list,
+ &priv_ep->pending_req_list);
+ }
+
+ priv_ep->flags &= ~EP_RING_FULL;
+ return ret;
+}
+
+/*
+ * WA2: Set this flag for all non-ISOC OUT endpoints. If the flag is set,
+ * the driver tries to detect whether the endpoint needs an additional
+ * internal buffer to unblock the on-chip FIFO buffer. The flag is cleared
+ * if DMA is armed before the first DESCMISS interrupt.
+ */
+#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
+ if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
+ priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
+ (reg) |= EP_STS_EN_DESCMISEN; \
+ } } while (0)
+
+/**
+ * cdns3_wa2_descmiss_copy_data - copy data from internal requests to the
+ * request queued by the class driver.
+ * @priv_ep: extended endpoint object
+ * @request: request object
+ */
+static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
+{
+ struct usb_request *descmiss_req;
+ struct cdns3_request *descmiss_priv_req;
+
+ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ int chunk_end;
+ int length;
+
+ descmiss_priv_req =
+ cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ descmiss_req = &descmiss_priv_req->request;
+
+ /* driver can't touch pending request */
+ if (descmiss_priv_req->flags & REQUEST_PENDING)
+ break;
+
+ chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
+ length = request->actual + descmiss_req->actual;
+
+ request->status = descmiss_req->status;
+
+ if (length <= request->length) {
+ memcpy(&((u8 *)request->buf)[request->actual],
+ descmiss_req->buf,
+ descmiss_req->actual);
+ request->actual = length;
+ } else {
+			/* It should never occur */
+ request->status = -ENOMEM;
+ }
+
+ list_del_init(&descmiss_priv_req->list);
+
+ kfree(descmiss_req->buf);
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
+ --priv_ep->wa2_counter;
+
+ if (!chunk_end)
+ break;
+ }
+}
+
+struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
+{
+ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
+ priv_req->flags & REQUEST_INTERNAL) {
+ struct usb_request *req;
+
+ req = cdns3_next_request(&priv_ep->deferred_req_list);
+
+ priv_ep->descmis_req = NULL;
+
+ if (!req)
+ return NULL;
+
+ cdns3_wa2_descmiss_copy_data(priv_ep, req);
+ if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
+ req->length != req->actual) {
+ /* wait for next part of transfer */
+ return NULL;
+ }
+
+ if (req->status == -EINPROGRESS)
+ req->status = 0;
+
+ list_del_init(&req->list);
+ cdns3_start_all_request(priv_dev, priv_ep);
+ return req;
+ }
+
+ return &priv_req->request;
+}
+
+int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
+{
+ int deferred = 0;
+
+ /*
+	 * If the transfer was queued before DESCMISS appeared, then the
+	 * driver can disable handling of the DESCMISS interrupt. The driver
+	 * assumes it can disable special treatment for this endpoint.
+ */
+ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
+ u32 reg;
+
+ cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
+ priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
+ reg = readl(&priv_dev->regs->ep_sts_en);
+ reg &= ~EP_STS_EN_DESCMISEN;
+ trace_cdns3_wa2(priv_ep, "workaround disabled\n");
+ writel(reg, &priv_dev->regs->ep_sts_en);
+ }
+
+ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
+ u8 pending_empty = list_empty(&priv_ep->pending_req_list);
+ u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);
+
+ /*
+		 * The DESCMISS transfer has finished, so the data is copied
+		 * directly from the internally allocated usb_request
+ * objects.
+ */
+ if (pending_empty && !descmiss_empty &&
+ !(priv_req->flags & REQUEST_INTERNAL)) {
+ cdns3_wa2_descmiss_copy_data(priv_ep,
+ &priv_req->request);
+
+ trace_cdns3_wa2(priv_ep, "get internal stored data");
+
+ list_add_tail(&priv_req->request.list,
+ &priv_ep->pending_req_list);
+ cdns3_gadget_giveback(priv_ep, priv_req,
+ priv_req->request.status);
+
+ /*
+			 * The driver intentionally returns a positive value
+			 * here to indicate that the transfer has already
+			 * been finished.
+ */
+ return EINPROGRESS;
+ }
+
+ /*
+		 * The driver waits for the DESCMISS transfer to complete
+		 * before starting a new, non-DESCMISS transfer.
+ */
+ if (!pending_empty && !descmiss_empty) {
+ trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
+ deferred = 1;
+ }
+
+ if (priv_req->flags & REQUEST_INTERNAL)
+ list_add_tail(&priv_req->list,
+ &priv_ep->wa2_descmiss_req_list);
+ }
+
+ return deferred;
+}
+
+static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_request *priv_req;
+
+ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ u8 chain;
+
+ priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
+
+ trace_cdns3_wa2(priv_ep, "removes eldest request");
+
+ kfree(priv_req->request.buf);
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+ &priv_req->request);
+ list_del_init(&priv_req->list);
+ --priv_ep->wa2_counter;
+
+ if (!chain)
+ break;
+ }
+}
+
+/**
+ * cdns3_wa2_descmissing_packet - handles descriptor missing event.
+ * @priv_ep: extended endpoint object
+ *
+ * This function is used only for WA2. For more information see Work around 2
+ * description.
+ */
+static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_request *priv_req;
+ struct usb_request *request;
+
+ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
+ priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
+ priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
+ }
+
+	trace_cdns3_wa2(priv_ep, "Descriptor Missing detected\n");
+
+ if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
+ cdns3_wa2_remove_old_request(priv_ep);
+
+ request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
+ GFP_ATOMIC);
+ if (!request)
+ goto err;
+
+ priv_req = to_cdns3_request(request);
+ priv_req->flags |= REQUEST_INTERNAL;
+
+	/* If this field is still assigned, it indicates that the transfer
+	 * related to this request has not been finished yet. In this case the
+	 * driver simply allocates the next request and sets the
+	 * REQUEST_INTERNAL_CH flag on the previous one, indicating that the
+	 * current request is part of the previous one.
+	 */
+ */
+ if (priv_ep->descmis_req)
+ priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
+
+ priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
+ GFP_ATOMIC);
+ priv_ep->wa2_counter++;
+
+ if (!priv_req->request.buf) {
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
+ goto err;
+ }
+
+ priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
+ priv_ep->descmis_req = priv_req;
+
+ __cdns3_gadget_ep_queue(&priv_ep->endpoint,
+ &priv_ep->descmis_req->request,
+ GFP_ATOMIC);
+
+ return;
+
+err:
+ dev_err(priv_ep->cdns3_dev->dev,
+ "Failed: No sufficient memory for DESCMIS\n");
+}
+
+/**
+ * cdns3_gadget_giveback - call struct usb_request's ->complete callback
+ * @priv_ep: The endpoint to which the request belongs
+ * @priv_req: The request we're giving back
+ * @status: completion code for the request
+ *
+ * Must be called with controller's lock held and interrupts disabled. This
+ * function will unmap @priv_req and call its ->complete() callback to notify upper
+ * layers that it has completed.
+ */
+void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req,
+ int status)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct usb_request *request = &priv_req->request;
+
+ list_del_init(&request->list);
+
+ if (request->status == -EINPROGRESS)
+ request->status = status;
+
+ usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
+ priv_ep->dir);
+
+ if ((priv_req->flags & REQUEST_UNALIGNED) &&
+ priv_ep->dir == USB_DIR_OUT && !request->status)
+ memcpy(request->buf, priv_req->aligned_buf->buf,
+ request->length);
+
+ priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
+ trace_cdns3_gadget_giveback(priv_req);
+
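+	/*
+	 * WA2 post-processing for older controllers: when
+	 * cdns3_wa2_gadget_giveback() returns NULL there is nothing to hand
+	 * back to the gadget driver yet.
+	 */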
+ if (priv_dev->dev_ver < DEV_VER_V2) {
+ request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
+ priv_req);
+ if (!request)
+ return;
+ }
+
+ if (request->complete) {
+ spin_unlock(&priv_dev->lock);
+ usb_gadget_giveback_request(&priv_ep->endpoint,
+ request);
+ spin_lock(&priv_dev->lock);
+ }
+
+ if (request->buf == priv_dev->zlp_buf)
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
+}
+
+void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+{
+	/* Workaround for stale data address in TRB */
+ if (priv_ep->wa1_set) {
+ trace_cdns3_wa1(priv_ep, "restore cycle bit");
+
+ priv_ep->wa1_set = 0;
+ priv_ep->wa1_trb_index = 0xFFFF;
+ if (priv_ep->wa1_cycle_bit) {
+ priv_ep->wa1_trb->control =
+ priv_ep->wa1_trb->control | 0x1;
+ } else {
+ priv_ep->wa1_trb->control =
+ priv_ep->wa1_trb->control & ~0x1;
+ }
+ }
+}
+
+static void cdns3_free_aligned_request_buf(struct work_struct *work)
+{
+ struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
+ aligned_buf_wq);
+ struct cdns3_aligned_buf *buf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
+ if (!buf->in_use) {
+ list_del(&buf->list);
+
+ /*
+			 * Re-enable interrupts to free DMA-capable memory.
+			 * The driver can't free this memory while interrupts
+			 * are disabled.
+ */
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ dma_free_coherent(priv_dev->sysdev, buf->size,
+ buf->buf, buf->dma);
+ kfree(buf);
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+}
+
+static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
+{
+ struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_aligned_buf *buf;
+
+ /* check if buffer is aligned to 8. */
+ if (!((uintptr_t)priv_req->request.buf & 0x7))
+ return 0;
+
+ buf = priv_req->aligned_buf;
+
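+	/*
+	 * Reuse the existing bounce buffer when it is large enough; otherwise
+	 * allocate a new one and defer freeing the old buffer to a workqueue,
+	 * since DMA coherent memory can't be freed in this atomic context.
+	 */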
+ if (!buf || priv_req->request.length > buf->size) {
+ buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->size = priv_req->request.length;
+
+ buf->buf = dma_alloc_coherent(priv_dev->sysdev,
+ buf->size,
+ &buf->dma,
+ GFP_ATOMIC);
+ if (!buf->buf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ if (priv_req->aligned_buf) {
+ trace_cdns3_free_aligned_request(priv_req);
+ priv_req->aligned_buf->in_use = 0;
+ queue_work(system_freezable_wq,
+ &priv_dev->aligned_buf_wq);
+ }
+
+ buf->in_use = 1;
+ priv_req->aligned_buf = buf;
+
+ list_add_tail(&buf->list,
+ &priv_dev->aligned_buf_list);
+ }
+
+ if (priv_ep->dir == USB_DIR_IN) {
+ memcpy(buf->buf, priv_req->request.buf,
+ priv_req->request.length);
+ }
+
+ priv_req->flags |= REQUEST_UNALIGNED;
+ trace_cdns3_prepare_aligned_request(priv_req);
+
+ return 0;
+}
+
+static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
+ struct cdns3_trb *trb)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
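+	/*
+	 * WA1: if the doorbell is already armed, leave the first TRB with an
+	 * intentionally incorrect cycle bit and remember it so that
+	 * cdns3_wa1_restore_cycle_bit() can fix it up later.
+	 */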
+ if (!priv_ep->wa1_set) {
+ u32 doorbell;
+
+ doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+
+ if (doorbell) {
+ priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
+ priv_ep->wa1_set = 1;
+ priv_ep->wa1_trb = trb;
+ priv_ep->wa1_trb_index = priv_ep->enqueue;
+ trace_cdns3_wa1(priv_ep, "set guard");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ int dma_index;
+ u32 doorbell;
+
+ doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+ dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
+
+ if (!doorbell || dma_index != priv_ep->wa1_trb_index)
+ cdns3_wa1_restore_cycle_bit(priv_ep);
+}
+
+/**
+ * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
+ * @priv_ep: endpoint object
+ * @request: request to be transferred
+ *
+ * Returns zero on success or negative value on failure
+ */
+int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb;
+ dma_addr_t trb_dma;
+ u32 togle_pcs = 1;
+ int sg_iter = 0;
+ int num_trb;
+ int address;
+ u32 control;
+ int pcs;
+
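+	/*
+	 * For isochronous endpoints one TRB is prepared per interval; for
+	 * other endpoint types one TRB is prepared per scatter-gather entry
+	 * (or a single TRB when no SG list is used).
+	 */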
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+ num_trb = priv_ep->interval;
+ else
+ num_trb = request->num_sgs ? request->num_sgs : 1;
+
+ if (num_trb > priv_ep->free_trbs) {
+ priv_ep->flags |= EP_RING_FULL;
+ return -ENOBUFS;
+ }
+
+ priv_req = to_cdns3_request(request);
+ address = priv_ep->endpoint.desc->bEndpointAddress;
+
+ priv_ep->flags |= EP_PENDING_REQUEST;
+
+	/* use the 8-byte aligned bounce buffer if the request buffer is unaligned */
+ if (priv_req->flags & REQUEST_UNALIGNED)
+ trb_dma = priv_req->aligned_buf->dma;
+ else
+ trb_dma = request->dma;
+
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ priv_req->start_trb = priv_ep->enqueue;
+ priv_req->trb = trb;
+
+ cdns3_select_ep(priv_ep->cdns3_dev, address);
+
+ /* prepare ring */
+ if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
+ struct cdns3_trb *link_trb;
+ int doorbell, dma_index;
+ u32 ch_bit = 0;
+
+ doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+ dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
+
+		/* The driver can't update the LINK TRB while the DMA is currently processing it. */
+ if (doorbell && dma_index == priv_ep->num_trbs - 1) {
+ priv_ep->flags |= EP_DEFERRED_DRDY;
+ return -ENOBUFS;
+ }
+
+		/* update the cycle bit in the LINK TRB before starting DMA */
+ link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
+ /*
+		 * For transfer rings of size 2, enabling TRB_CHAIN for epXin
+		 * causes the DMA to get stuck at the LINK TRB.
+		 * On the other hand, removing TRB_CHAIN for longer transfer
+		 * rings on epXout causes the DMA to get stuck after handling
+		 * the LINK TRB.
+		 * To eliminate this strange behavior, the driver sets the
+		 * TRB_CHAIN bit only for transfer rings larger than 2.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
+ TRBS_PER_SEGMENT > 2)
+ ch_bit = TRB_CHAIN;
+
+ link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
+ }
+
+ if (priv_dev->dev_ver <= DEV_VER_V2)
+ togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+
+	/* deliberately set an incorrect cycle bit for the first TRB */
+ control = priv_ep->pcs ? 0 : TRB_CYCLE;
+
+ do {
+ u32 length;
+ u16 td_size = 0;
+
+ /* fill TRB */
+ control |= TRB_TYPE(TRB_NORMAL);
+ trb->buffer = TRB_BUFFER(request->num_sgs == 0
+ ? trb_dma : request->sg[sg_iter].dma_address);
+
+ if (likely(!request->num_sgs))
+ length = request->length;
+ else
+ length = request->sg[sg_iter].length;
+
+ if (likely(priv_dev->dev_ver >= DEV_VER_V2))
+ td_size = DIV_ROUND_UP(length,
+ priv_ep->endpoint.maxpacket);
+
+ trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
+ TRB_LEN(length);
+ if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+ trb->length |= TRB_TDL_SS_SIZE(td_size);
+ else
+ control |= TRB_TDL_HS_SIZE(td_size);
+
+ pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+
+ /*
+		 * The first TRB should be prepared last so that the transfer
+		 * is not processed too early.
+ */
+ if (sg_iter != 0)
+ control |= pcs;
+
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
+ control |= TRB_IOC | TRB_ISP;
+ } else {
+ /* for last element in TD or in SG list */
+ if (sg_iter == (num_trb - 1) && sg_iter != 0)
+ control |= pcs | TRB_IOC | TRB_ISP;
+ }
+
+ if (sg_iter)
+ trb->control = control;
+ else
+ priv_req->trb->control = control;
+
+ control = 0;
+ ++sg_iter;
+ priv_req->end_trb = priv_ep->enqueue;
+ cdns3_ep_inc_enq(priv_ep);
+ trb = priv_ep->trb_pool + priv_ep->enqueue;
+ } while (sg_iter < num_trb);
+
+ trb = priv_req->trb;
+
+ priv_req->flags |= REQUEST_PENDING;
+
+ if (sg_iter == 1)
+ trb->control |= TRB_IOC | TRB_ISP;
+
+ /*
+	 * Memory barrier - all other TRB fields must be written before the
+	 * cycle bit is set.
+ */
+ wmb();
+
+	/* give the TD to the consumer */
+ if (togle_pcs)
+ trb->control = trb->control ^ 1;
+
+ if (priv_dev->dev_ver <= DEV_VER_V2)
+ cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
+
+ trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+
+ /*
+	 * Memory barrier - make sure all TRB updates are visible to the
+	 * controller before the transfer ring address and doorbell are
+	 * written below.
+ */
+ wmb();
+
+ /*
+ * For DMULT mode we can set address to transfer ring only once after
+ * enabling endpoint.
+ */
+ if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
+ /*
+		 * Until software is ready to handle the OUT transfer, the ISO
+		 * OUT endpoint should stay disabled (EP_CFG.ENABLE = 0).
+ * EP_CFG_ENABLE must be set before updating ep_traddr.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
+ !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
+ priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
+ EP_CFG_ENABLE);
+ }
+
+ writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
+ priv_req->start_trb * TRB_SIZE),
+ &priv_dev->regs->ep_traddr);
+
+ priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
+ }
+
+ if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
+ trace_cdns3_ring(priv_ep);
+		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
+ writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+
+ /* WORKAROUND for transition to L0 */
+ __cdns3_gadget_wakeup(priv_dev);
+
+ return 0;
+}
+
+void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
+{
+ struct cdns3_endpoint *priv_ep;
+ struct usb_ep *ep;
+ int val;
+
+ if (priv_dev->hw_configured_flag)
+ return;
+
+ writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
+ writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd);
+
+ cdns3_set_register_bit(&priv_dev->regs->usb_conf,
+ USB_CONF_U1EN | USB_CONF_U2EN);
+
+ /* wait until configuration set */
+ readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
+ val & USB_STS_CFGSTS_MASK, 1, 100);
+
+ priv_dev->hw_configured_flag = 1;
+
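+	/* Start transfers that were deferred until the configuration was set. */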
+ list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+ if (ep->enabled) {
+ priv_ep = ep_to_cdns3_ep(ep);
+ cdns3_start_all_request(priv_dev, priv_ep);
+ }
+ }
+}
+
+/**
+ * cdns3_request_handled - check whether request has been handled by DMA
+ *
+ * @priv_ep: extended endpoint object.
+ * @priv_req: request object for checking
+ *
+ * Endpoint must be selected before invoking this function.
+ *
+ * Returns false if request has not been handled by DMA, else returns true.
+ *
+ * SR - start ring
+ * ER - end ring
+ * DQ = priv_ep->dequeue - dequeue position
+ * EQ = priv_ep->enqueue - enqueue position
+ * ST = priv_req->start_trb - index of first TRB in transfer ring
+ * ET = priv_req->end_trb - index of last TRB in transfer ring
+ * CI = current_index - index of processed TRB by DMA.
+ *
+ * As a first step, the function checks whether the cycle bit for
+ * priv_req->start_trb is correct.
+ *
+ * Some rules:
+ * 1. priv_ep->dequeue never exceeds current_index.
+ * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
+ * 3. Exception: priv_ep->enqueue == priv_ep->dequeue
+ *    and priv_ep->free_trbs is zero.
+ *    This case indicates that the TR is full.
+ *
+ * Then we can split recognition into two parts:
+ * Case 1 - priv_ep->dequeue < current_index
+ *      SR ... EQ ... DQ ... CI ... ER
+ *      SR ... DQ ... CI ... EQ ... ER
+ *
+ * The request has been handled by DMA if ST and ET are between DQ and CI.
+ *
+ * Case 2 - priv_ep->dequeue > current_index
+ * This situation takes place when CI goes through the LINK TRB at the end of
+ * the transfer ring.
+ *      SR ... CI ... EQ ... DQ ... ER
+ *
+ * The request has been handled by DMA if ET is less than CI or
+ * ET is greater than or equal to DQ.
+ */
+static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_trb *trb = priv_req->trb;
+ int current_index = 0;
+ int handled = 0;
+ int doorbell;
+
+ current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
+ doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
+
+ trb = &priv_ep->trb_pool[priv_req->start_trb];
+
+ if ((trb->control & TRB_CYCLE) != priv_ep->ccs)
+ goto finish;
+
+ if (doorbell == 1 && current_index == priv_ep->dequeue)
+ goto finish;
+
+	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
+ if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ handled = 1;
+ goto finish;
+ }
+
+ if (priv_ep->enqueue == priv_ep->dequeue &&
+ priv_ep->free_trbs == 0) {
+ handled = 1;
+ } else if (priv_ep->dequeue < current_index) {
+ if ((current_index == (priv_ep->num_trbs - 1)) &&
+ !priv_ep->dequeue)
+ goto finish;
+
+ if (priv_req->end_trb >= priv_ep->dequeue &&
+ priv_req->end_trb < current_index)
+ handled = 1;
+ } else if (priv_ep->dequeue > current_index) {
+ if (priv_req->end_trb < current_index ||
+ priv_req->end_trb >= priv_ep->dequeue)
+ handled = 1;
+ }
+
+finish:
+ trace_cdns3_request_handled(priv_req, current_index, handled);
+
+ return handled;
+}
+
+static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_request *priv_req;
+ struct usb_request *request;
+ struct cdns3_trb *trb;
+
+ while (!list_empty(&priv_ep->pending_req_list)) {
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ priv_req = to_cdns3_request(request);
+
+		/* Re-select the endpoint. It could have been changed by
+		 * another CPU while usb_gadget_giveback_request() was being
+		 * handled.
+ */
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+
+ if (!cdns3_request_handled(priv_ep, priv_req))
+ goto prepare_next_td;
+
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+ trace_cdns3_complete_trb(priv_ep, trb);
+
+ if (trb != priv_req->trb)
+ dev_warn(priv_dev->dev,
+ "request_trb=0x%p, queue_trb=0x%p\n",
+ priv_req->trb, trb);
+
+ request->actual = TRB_LEN(le32_to_cpu(trb->length));
+ cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
+ TRBS_PER_SEGMENT == 2)
+ break;
+ }
+ priv_ep->flags &= ~EP_PENDING_REQUEST;
+
+prepare_next_td:
+ if (!(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING))
+ cdns3_start_all_request(priv_dev, priv_ep);
+}
+
+void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ cdns3_wa1_restore_cycle_bit(priv_ep);
+
+ if (rearm) {
+ trace_cdns3_ring(priv_ep);
+
+ /* Cycle Bit must be updated before arming DMA. */
+ wmb();
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+
+ __cdns3_gadget_wakeup(priv_dev);
+
+ trace_cdns3_doorbell_epx(priv_ep->name,
+ readl(&priv_dev->regs->ep_traddr));
+ }
+}
+
+/**
+ * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
+ * @priv_ep: endpoint object
+ *
+ * Returns 0
+ */
+static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ u32 ep_sts_reg;
+
+ cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
+
+ trace_cdns3_epx_irq(priv_dev, priv_ep);
+
+ ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+ writel(ep_sts_reg, &priv_dev->regs->ep_sts);
+
+ if (ep_sts_reg & EP_STS_TRBERR) {
+ if (priv_ep->flags & EP_STALL_PENDING &&
+ !(ep_sts_reg & EP_STS_DESCMIS &&
+ priv_dev->dev_ver < DEV_VER_V2)) {
+ cdns3_ep_stall_flush(priv_ep);
+ }
+
+ /*
+		 * For isochronous transfers the driver completes requests on
+		 * IOC or on TRBERR. IOC appears only when the device receives
+		 * an OUT data packet. If the host disables the stream or some
+		 * packets are lost, the only way to finish all queued
+		 * transfers is to do it on the TRBERR event.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
+ !priv_ep->wa1_set) {
+ if (!priv_ep->dir) {
+ u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
+
+ ep_cfg &= ~EP_CFG_ENABLE;
+ writel(ep_cfg, &priv_dev->regs->ep_cfg);
+ priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ }
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ } else if (!(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING)) {
+ if (priv_ep->flags & EP_DEFERRED_DRDY) {
+ priv_ep->flags &= ~EP_DEFERRED_DRDY;
+ cdns3_start_all_request(priv_dev, priv_ep);
+ } else {
+ cdns3_rearm_transfer(priv_ep,
+ priv_ep->wa1_set);
+ }
+ }
+ }
+
+ if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
+ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
+ if (ep_sts_reg & EP_STS_ISP)
+ priv_ep->flags |= EP_QUIRK_END_TRANSFER;
+ else
+ priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
+ }
+
+ cdns3_transfer_completed(priv_dev, priv_ep);
+ }
+
+ /*
+	 * WA2: this condition should only be met when
+	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
+	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN is set.
+	 * In other cases this interrupt will be disabled.
+ */
+ if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
+ !(priv_ep->flags & EP_STALLED))
+ cdns3_wa2_descmissing_packet(priv_ep);
+
+ return 0;
+}
+
+static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
+{
+ if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
+ spin_unlock(&priv_dev->lock);
+ priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
+ spin_lock(&priv_dev->lock);
+ }
+}
+
+/**
+ * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
+ * @priv_dev: extended gadget object
+ * @usb_ists: bitmap representation of device's reported interrupts
+ * (usb_ists register value)
+ */
+static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
+ u32 usb_ists)
+{
+ int speed = 0;
+
+ trace_cdns3_usb_irq(priv_dev, usb_ists);
+ if (usb_ists & USB_ISTS_L1ENTI) {
+ /*
+		 * WORKAROUND: the CDNS3 controller has an issue with hardware
+		 * resume from L1. To fix it, if any DMA transfer is pending,
+		 * the driver must start driving the resume signal immediately.
+ */
+ if (readl(&priv_dev->regs->drbl))
+ __cdns3_gadget_wakeup(priv_dev);
+ }
+
+ /* Connection detected */
+ if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
+ speed = cdns3_get_speed(priv_dev);
+ priv_dev->gadget.speed = speed;
+ usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
+ cdns3_ep0_config(priv_dev);
+ }
+
+ /* Disconnection detected */
+ if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
+ cdns3_disconnect_gadget(priv_dev);
+ priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
+ cdns3_hw_reset_eps_config(priv_dev);
+ }
+
+ if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
+ if (priv_dev->gadget_driver &&
+ priv_dev->gadget_driver->suspend) {
+ spin_unlock(&priv_dev->lock);
+ priv_dev->gadget_driver->suspend(&priv_dev->gadget);
+ spin_lock(&priv_dev->lock);
+ }
+ }
+
+ if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
+ if (priv_dev->gadget_driver &&
+ priv_dev->gadget_driver->resume) {
+ spin_unlock(&priv_dev->lock);
+ priv_dev->gadget_driver->resume(&priv_dev->gadget);
+ spin_lock(&priv_dev->lock);
+ }
+ }
+
+	/* Reset detected */
+ if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
+ if (priv_dev->gadget_driver) {
+ spin_unlock(&priv_dev->lock);
+ usb_gadget_udc_reset(&priv_dev->gadget,
+ priv_dev->gadget_driver);
+ spin_lock(&priv_dev->lock);
+
+			/* read again to check the actual speed */
+ speed = cdns3_get_speed(priv_dev);
+ priv_dev->gadget.speed = speed;
+ cdns3_hw_reset_eps_config(priv_dev);
+ cdns3_ep0_config(priv_dev);
+ }
+ }
+}
+
+/**
+ * cdns3_device_irq_handler - interrupt handler for device part of controller
+ *
+ * @irq: irq number for cdns3 core device
+ * @data: structure of cdns3
+ *
+ * Returns IRQ_WAKE_THREAD when further threaded handling is required, or IRQ_NONE
+ */
+static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
+{
+ struct cdns3_device *priv_dev;
+ struct cdns3 *cdns = data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 reg;
+
+ priv_dev = cdns->gadget_dev;
+
+ /* check USB device interrupt */
+ reg = readl(&priv_dev->regs->usb_ists);
+ if (reg) {
+		/* After masking interrupts, new interrupts won't be reported
+		 * in usb_ists/ep_ists. In order not to lose any of them, the
+		 * driver disables only the interrupts that were detected.
+		 * They will be re-enabled as soon as the interrupt source is
+		 * cleared. This unusual behavior applies only to the
+		 * usb_ists register.
+ */
+ reg = ~reg & readl(&priv_dev->regs->usb_ien);
+ /* mask deferred interrupt. */
+ writel(reg, &priv_dev->regs->usb_ien);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ /* check endpoint interrupt */
+ reg = readl(&priv_dev->regs->ep_ists);
+ if (reg) {
+ writel(0, &priv_dev->regs->ep_ien);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+/**
+ * cdns3_device_thread_irq_handler - interrupt handler for device part
+ * of controller
+ *
+ * @irq: irq number for cdns3 core device
+ * @data: structure of cdns3
+ *
+ * Returns IRQ_HANDLED or IRQ_NONE
+ */
+static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
+{
+ struct cdns3_device *priv_dev;
+ struct cdns3 *cdns = data;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+ int bit;
+ u32 reg;
+
+ priv_dev = cdns->gadget_dev;
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ reg = readl(&priv_dev->regs->usb_ists);
+ if (reg) {
+ writel(reg, &priv_dev->regs->usb_ists);
+ writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
+ cdns3_check_usb_interrupt_proceed(priv_dev, reg);
+ ret = IRQ_HANDLED;
+ }
+
+ reg = readl(&priv_dev->regs->ep_ists);
+
+ /* handle default endpoint OUT */
+ if (reg & EP_ISTS_EP_OUT0) {
+ cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
+ ret = IRQ_HANDLED;
+ }
+
+ /* handle default endpoint IN */
+ if (reg & EP_ISTS_EP_IN0) {
+ cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
+ ret = IRQ_HANDLED;
+ }
+
+	/* check for interrupts from non-default endpoints; if there are none, exit */
+ reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
+ if (!reg)
+ goto irqend;
+
+ for_each_set_bit(bit, (unsigned long *)&reg,
+ sizeof(u32) * BITS_PER_BYTE) {
+ cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
+ ret = IRQ_HANDLED;
+ }
+
+irqend:
+ writel(~0, &priv_dev->regs->ep_ien);
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
+ *
+ * The real reservation will occur during write to EP_CFG register,
+ * this function is used to check if the 'size' reservation is allowed.
+ *
+ * @priv_dev: extended gadget object
+ * @size: the size (KB) for EP would like to allocate
+ * @is_in: endpoint direction
+ *
+ * Return 0 if the required size can be met, or a negative value on failure
+ */
+static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
+ int size, int is_in)
+{
+ int remained;
+
+	/* 2 KB are reserved for EP0 */
+ remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
+
+ if (is_in) {
+ if (remained < size)
+ return -EPERM;
+
+ priv_dev->onchip_used_size += size;
+ } else {
+ int required;
+
+		/*
+		 * All OUT endpoints share the same chunk of on-chip memory,
+		 * so the driver checks whether enough buffers have already
+		 * been assigned.
+		 */
+ if (priv_dev->out_mem_is_allocated >= size)
+ return 0;
+
+ required = size - priv_dev->out_mem_is_allocated;
+
+ if (required > remained)
+ return -EPERM;
+
+ priv_dev->out_mem_is_allocated += required;
+ priv_dev->onchip_used_size += required;
+ }
+
+ return 0;
+}
+
+void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+
+ /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
+ if (priv_dev->dev_ver <= DEV_VER_V2)
+ writel(USB_CONF_DMULT, &regs->usb_conf);
+
+ if (priv_dev->dev_ver == DEV_VER_V2)
+ writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
+
+ if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
+ u32 mask;
+
+ if (priv_ep->dir)
+ mask = BIT(priv_ep->num + 16);
+ else
+ mask = BIT(priv_ep->num);
+
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+ cdns3_set_register_bit(&regs->tdl_beh, mask);
+ cdns3_set_register_bit(&regs->tdl_beh2, mask);
+ cdns3_set_register_bit(&regs->dma_adv_td, mask);
+ }
+
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
+ cdns3_set_register_bit(&regs->tdl_from_trb, mask);
+
+ cdns3_set_register_bit(&regs->dtrans, mask);
+ }
+}
+
+/**
+ * cdns3_ep_config - Configure hardware endpoint
+ * @priv_ep: extended endpoint object
+ */
+void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+{
+ bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
+ u32 max_packet_size = 0;
+ u8 maxburst = 0;
+ u32 ep_cfg = 0;
+ u8 buffering;
+ u8 mult = 0;
+ int ret;
+
+ buffering = CDNS3_EP_BUF_SIZE - 1;
+
+ cdns3_configure_dmult(priv_dev, priv_ep);
+
+ switch (priv_ep->type) {
+ case USB_ENDPOINT_XFER_INT:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
+
+ if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
+ priv_dev->dev_ver > DEV_VER_V2)
+ ep_cfg |= EP_CFG_TDL_CHK;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
+
+ if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
+ priv_dev->dev_ver > DEV_VER_V2)
+ ep_cfg |= EP_CFG_TDL_CHK;
+ break;
+ default:
+ ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
+ mult = CDNS3_EP_ISO_HS_MULT - 1;
+ buffering = mult + 1;
+ }
+
+ switch (priv_dev->gadget.speed) {
+ case USB_SPEED_FULL:
+ max_packet_size = is_iso_ep ? 1023 : 64;
+ break;
+ case USB_SPEED_HIGH:
+ max_packet_size = is_iso_ep ? 1024 : 512;
+ break;
+ case USB_SPEED_SUPER:
+		/* This is a limitation assumed by the driver. */
+ mult = 0;
+ max_packet_size = 1024;
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+ maxburst = CDNS3_EP_ISO_SS_BURST - 1;
+ buffering = (mult + 1) *
+ (maxburst + 1);
+
+ if (priv_ep->interval > 1)
+ buffering++;
+ } else {
+ maxburst = CDNS3_EP_BUF_SIZE - 1;
+ }
+ break;
+ default:
+		/* all other speeds are not supported */
+ return;
+ }
+
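+	/* Scale the TRB burst size with the endpoint's max packet size. */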
+ if (max_packet_size == 1024)
+ priv_ep->trb_burst_size = 128;
+ else if (max_packet_size >= 512)
+ priv_ep->trb_burst_size = 64;
+ else
+ priv_ep->trb_burst_size = 16;
+
+ ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
+ !!priv_ep->dir);
+ if (ret) {
+ dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
+ return;
+ }
+
+ ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+ EP_CFG_MULT(mult) |
+ EP_CFG_BUFFERING(buffering) |
+ EP_CFG_MAXBURST(maxburst);
+
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+ writel(ep_cfg, &priv_dev->regs->ep_cfg);
+
+ dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
+ priv_ep->name, ep_cfg);
+}
+
+/* Find correct direction for HW endpoint according to the descriptor */
+static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
+ struct cdns3_endpoint *priv_ep)
+{
+ return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
+ (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
+}
+
+static struct
+cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
+ struct usb_endpoint_descriptor *desc)
+{
+ struct usb_ep *ep;
+ struct cdns3_endpoint *priv_ep;
+
+ list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+ unsigned long num;
+ int ret;
+		/* ep names follow the pattern epXin or epXout */
+ char c[2] = {ep->name[2], '\0'};
+
+ ret = kstrtoul(c, 10, &num);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_ep = ep_to_cdns3_ep(ep);
+ if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
+ if (!(priv_ep->flags & EP_CLAIMED)) {
+ priv_ep->num = num;
+ return priv_ep;
+ }
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/*
+ * The Cadence IP has one limitation: all endpoints must be configured
+ * (Type & MaxPacketSize) before setting the configuration through the
+ * hardware register, which means we can't change an endpoint's
+ * configuration after set_configuration.
+ *
+ * This function sets the EP_CLAIMED flag, which is added when the gadget
+ * driver uses usb_ep_autoconfig to configure a specific endpoint;
+ * when the UDC driver receives a set_configuration request,
+ * it goes through all claimed endpoints and configures them
+ * accordingly.
+ *
+ * In usb_ep_ops.enable/disable, we only enable and disable the endpoint
+ * through the ep_cfg register, which can be changed after set_configuration,
+ * and perform the corresponding software operations.
+ */
+static struct
+usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *comp_desc)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct cdns3_endpoint *priv_ep;
+ unsigned long flags;
+
+ priv_ep = cdns3_find_available_ep(priv_dev, desc);
+ if (IS_ERR(priv_ep)) {
+ dev_err(priv_dev->dev, "no available ep\n");
+ return NULL;
+ }
+
+ dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ priv_ep->endpoint.desc = desc;
+ priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
+ priv_ep->type = usb_endpoint_type(desc);
+ priv_ep->flags |= EP_CLAIMED;
+ priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return &priv_ep->endpoint;
+}
+
+/**
+ * cdns3_gadget_ep_alloc_request - Allocates request
+ * @ep: endpoint object associated with request
+ * @gfp_flags: gfp flags
+ *
+ * Returns allocated request address, NULL on allocation error
+ */
+struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_request *priv_req;
+
+ priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
+ if (!priv_req)
+ return NULL;
+
+ priv_req->priv_ep = priv_ep;
+
+ trace_cdns3_alloc_request(priv_req);
+ return &priv_req->request;
+}
+
+/**
+ * cdns3_gadget_ep_free_request - Free memory occupied by request
+ * @ep: endpoint object associated with request
+ * @request: request to free memory
+ */
+void cdns3_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdns3_request *priv_req = to_cdns3_request(request);
+
+ if (priv_req->aligned_buf)
+ priv_req->aligned_buf->in_use = 0;
+
+ trace_cdns3_free_request(priv_req);
+ kfree(priv_req);
+}
+
+/**
+ * cdns3_gadget_ep_enable - Enable endpoint
+ * @ep: endpoint object
+ * @desc: endpoint descriptor
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_device *priv_dev;
+ u32 reg = EP_STS_EN_TRBERREN;
+ u32 bEndpointAddress;
+ unsigned long flags;
+ int enable = 1;
+ int ret;
+ int val;
+
+	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+		pr_debug("usbss: invalid parameters\n");
+		return -EINVAL;
+	}
+
+	priv_ep = ep_to_cdns3_ep(ep);
+	priv_dev = priv_ep->cdns3_dev;
+
+ if (!desc->wMaxPacketSize) {
+ dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
+ return -EINVAL;
+ }
+
+ if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
+ "%s is already enabled\n", priv_ep->name))
+ return 0;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ priv_ep->endpoint.desc = desc;
+ priv_ep->type = usb_endpoint_type(desc);
+ priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
+ if (priv_ep->interval > ISO_MAX_INTERVAL &&
+ priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+ dev_err(priv_dev->dev, "Driver is limited to %d period\n",
+ ISO_MAX_INTERVAL);
+
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cdns3_allocate_trb_pool(priv_ep);
+
+ if (ret)
+ goto exit;
+
+ bEndpointAddress = priv_ep->num | priv_ep->dir;
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+
+ trace_cdns3_gadget_ep_enable(priv_ep);
+
+ writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+
+ ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
+ 1, 1000);
+
+ if (unlikely(ret)) {
+ cdns3_free_trb_pool(priv_ep);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* enable interrupt for selected endpoint */
+ cdns3_set_register_bit(&priv_dev->regs->ep_ien,
+ BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
+
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
+
+ writel(reg, &priv_dev->regs->ep_sts_en);
+
+ /*
+	 * For some versions of the controller, at some point during ISO OUT
+	 * traffic the DMA reads the transfer ring for an EP which has never
+	 * got a doorbell. This issue was only seen in simulation, but the
+	 * driver adds protection against it anyway: the ISO OUT endpoint is
+	 * enabled only just before setting DRBL. This special treatment of
+	 * ISO OUT endpoints is recommended by the controller specification.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
+ enable = 0;
+
+ if (enable)
+ cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
+
+ ep->desc = desc;
+ priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
+ EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
+ priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
+ priv_ep->wa1_set = 0;
+ priv_ep->enqueue = 0;
+ priv_ep->dequeue = 0;
+ reg = readl(&priv_dev->regs->ep_sts);
+ priv_ep->pcs = !!EP_STS_CCS(reg);
+ priv_ep->ccs = !!EP_STS_CCS(reg);
+	/* one TRB is reserved for the link TRB used in DMULT mode */
+ priv_ep->free_trbs = priv_ep->num_trbs - 1;
+exit:
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * cdns3_gadget_ep_disable - Disable endpoint
+ * @ep: endpoint object
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_request *priv_req;
+ struct cdns3_device *priv_dev;
+ struct usb_request *request;
+ unsigned long flags;
+ int ret = 0;
+ u32 ep_cfg;
+ int val;
+
+ if (!ep) {
+ pr_err("usbss: invalid parameters\n");
+ return -EINVAL;
+ }
+
+ priv_ep = ep_to_cdns3_ep(ep);
+ priv_dev = priv_ep->cdns3_dev;
+
+ if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
+ "%s is already disabled\n", priv_ep->name))
+ return 0;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ trace_cdns3_gadget_ep_disable(priv_ep);
+
+ cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
+
+ ep_cfg = readl(&priv_dev->regs->ep_cfg);
+ ep_cfg &= ~EP_CFG_ENABLE;
+ writel(ep_cfg, &priv_dev->regs->ep_cfg);
+
+	/*
+	 * The driver needs some time before resetting the endpoint.
+	 * It has to wait for the DBUSY bit to clear or for the timeout to
+	 * expire; 10us is enough time for the controller to stop the transfer.
+ */
+ readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
+ !(val & EP_STS_DBUSY), 1, 10);
+ writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+
+	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
+ 1, 1000);
+ if (unlikely(ret))
+ dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
+ priv_ep->name);
+
+ while (!list_empty(&priv_ep->pending_req_list)) {
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+
+ cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+ -ESHUTDOWN);
+ }
+
+ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+
+ kfree(priv_req->request.buf);
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+ &priv_req->request);
+ list_del_init(&priv_req->list);
+ --priv_ep->wa2_counter;
+ }
+
+ while (!list_empty(&priv_ep->deferred_req_list)) {
+ request = cdns3_next_request(&priv_ep->deferred_req_list);
+
+ cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
+ -ESHUTDOWN);
+ }
+
+ priv_ep->descmis_req = NULL;
+
+ ep->desc = NULL;
+ priv_ep->flags &= ~EP_ENABLED;
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * __cdns3_gadget_ep_queue - Transfer data on endpoint
+ * @ep: endpoint object
+ * @request: request object
+ * @gfp_flags: gfp flags
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct cdns3_request *priv_req;
+ int ret = 0;
+
+ request->actual = 0;
+ request->status = -EINPROGRESS;
+ priv_req = to_cdns3_request(request);
+ trace_cdns3_ep_queue(priv_req);
+
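+	/*
+	 * WA2: a positive EINPROGRESS return means the request has already
+	 * been completed from internally stored DESCMISS data, so there is
+	 * nothing left to queue.
+	 */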
+ if (priv_dev->dev_ver < DEV_VER_V2) {
+ ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
+ priv_req);
+
+ if (ret == EINPROGRESS)
+ return 0;
+ }
+
+ ret = cdns3_prepare_aligned_request_buf(priv_req);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
+ usb_endpoint_dir_in(ep->desc));
+ if (ret)
+ return ret;
+
+ list_add_tail(&request->list, &priv_ep->deferred_req_list);
+
+ /*
+	 * If the hardware endpoint configuration has not been set yet, just
+	 * leave the request on the deferred list. The transfer will be started in
+ * cdns3_set_hw_configuration.
+ */
+ if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
+ !(priv_ep->flags & EP_STALL_PENDING))
+ cdns3_start_all_request(priv_dev, priv_ep);
+
+ return 0;
+}
+
+static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct usb_request *zlp_request;
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_device *priv_dev;
+ unsigned long flags;
+ int ret;
+
+ if (!request || !ep)
+ return -EINVAL;
+
+ priv_ep = ep_to_cdns3_ep(ep);
+ priv_dev = priv_ep->cdns3_dev;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
+
+ if (ret == 0 && request->zero && request->length &&
+ (request->length % ep->maxpacket == 0)) {
+		struct cdns3_request *priv_req;
+
+		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
+		if (!zlp_request) {
+			spin_unlock_irqrestore(&priv_dev->lock, flags);
+			return -ENOMEM;
+		}
+		zlp_request->buf = priv_dev->zlp_buf;
+		zlp_request->length = 0;
+
+ priv_req = to_cdns3_request(zlp_request);
+ priv_req->flags |= REQUEST_ZLP;
+
+ dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
+ priv_ep->name);
+ ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
+ }
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return ret;
+}
+
+/**
+ * cdns3_gadget_ep_dequeue - Remove request from transfer queue
+ * @ep: endpoint object associated with request
+ * @request: request object
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct usb_request *req, *req_temp;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *link_trb;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ep || !request || !ep->desc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ priv_req = to_cdns3_request(request);
+
+ trace_cdns3_ep_dequeue(priv_req);
+
+ cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
+
+ list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
+ list) {
+ if (request == req)
+ goto found;
+ }
+
+ list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
+ list) {
+ if (request == req)
+ goto found;
+ }
+
+ goto not_found;
+
+found:
+
+ if (priv_ep->wa1_trb == priv_req->trb)
+ cdns3_wa1_restore_cycle_bit(priv_ep);
+
+ link_trb = priv_req->trb;
+ cdns3_move_deq_to_next_trb(priv_req);
+ cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
+	/*
+	 * Update ring: point a link TRB at the next deferred request, or mark
+	 * the endpoint so that ep_traddr is re-programmed on the next transfer.
+	 */
+ request = cdns3_next_request(&priv_ep->deferred_req_list);
+ if (request) {
+ priv_req = to_cdns3_request(request);
+
+ link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
+ (priv_req->start_trb * TRB_SIZE));
+ link_trb->control = (link_trb->control & TRB_CYCLE) |
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
+ } else {
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return ret;
+}
+
+/**
+ * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
+ * Should be called after acquiring spin_lock and selecting ep
+ * @priv_ep: endpoint object to set stall on.
+ */
+void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ trace_cdns3_halt(priv_ep, 1, 0);
+
+ if (!(priv_ep->flags & EP_STALLED)) {
+ u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
+
+ if (!(ep_sts_reg & EP_STS_DBUSY))
+ cdns3_ep_stall_flush(priv_ep);
+ else
+ priv_ep->flags |= EP_STALL_PENDING;
+ }
+}
+
+/**
+ * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
+ * Should be called after acquiring spin_lock and selecting ep
+ * @priv_ep: endpoint object to clear stall on
+ */
+int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ struct usb_request *request;
+ int ret;
+ int val;
+
+ trace_cdns3_halt(priv_ep, 0, 0);
+
+ writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+
+ /* wait for EPRST cleared */
+ ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & EP_CMD_EPRST), 1, 100);
+ if (ret)
+ return -EINVAL;
+
+ priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
+
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+
+ if (request)
+ cdns3_rearm_transfer(priv_ep, 1);
+
+ cdns3_start_all_request(priv_dev, priv_ep);
+ return ret;
+}
+
+/**
+ * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
+ * @ep: endpoint object to set/clear stall on
+ * @value: 1 for set stall, 0 for clear stall
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!(priv_ep->flags & EP_ENABLED))
+ return -EPERM;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+
+ cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
+
+ if (!value) {
+ priv_ep->flags &= ~EP_WEDGE;
+ ret = __cdns3_gadget_ep_clear_halt(priv_ep);
+ } else {
+ __cdns3_gadget_ep_set_halt(priv_ep);
+ }
+
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+
+ return ret;
+}
+
+extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
+
+static const struct usb_ep_ops cdns3_gadget_ep_ops = {
+ .enable = cdns3_gadget_ep_enable,
+ .disable = cdns3_gadget_ep_disable,
+ .alloc_request = cdns3_gadget_ep_alloc_request,
+ .free_request = cdns3_gadget_ep_free_request,
+ .queue = cdns3_gadget_ep_queue,
+ .dequeue = cdns3_gadget_ep_dequeue,
+ .set_halt = cdns3_gadget_ep_set_halt,
+ .set_wedge = cdns3_gadget_ep_set_wedge,
+};
+
+/**
+ * cdns3_gadget_get_frame - Returns the current ITP frame number
+ * @gadget: gadget object
+ *
+ * Returns the current ITP frame number
+ */
+static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+
+ return readl(&priv_dev->regs->usb_itpn);
+}
+
+int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
+{
+ enum usb_device_speed speed;
+
+ speed = cdns3_get_speed(priv_dev);
+
+ if (speed >= USB_SPEED_SUPER)
+ return 0;
+
+ /* Start driving resume signaling to indicate remote wakeup. */
+ writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
+
+ return 0;
+}
+
+static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ ret = __cdns3_gadget_wakeup(priv_dev);
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return ret;
+}
+
+static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ priv_dev->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return 0;
+}
+
+static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+
+ if (is_on)
+ writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
+ else
+ writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
+
+ return 0;
+}
+
+static void cdns3_gadget_config(struct cdns3_device *priv_dev)
+{
+ struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
+ u32 reg;
+
+ cdns3_ep0_config(priv_dev);
+
+ /* enable interrupts for endpoint 0 (in and out) */
+ writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
+
+ /*
+ * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
+ * revision of controller.
+ */
+ if (priv_dev->dev_ver == DEV_VER_TI_V1) {
+ reg = readl(&regs->dbg_link1);
+
+ reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
+ reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
+ DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
+ writel(reg, &regs->dbg_link1);
+ }
+
+ /*
+	 * By default some platforms have protected access to memory enabled.
+	 * This causes problems with caching, so the driver restores
+	 * non-secure access to memory.
+ */
+ reg = readl(&regs->dma_axi_ctrl);
+ reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
+ DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
+ writel(reg, &regs->dma_axi_ctrl);
+
+	/* enable generic interrupts */
+ writel(USB_IEN_INIT, &regs->usb_ien);
+ writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
+
+ cdns3_configure_dmult(priv_dev, NULL);
+
+ cdns3_gadget_pullup(&priv_dev->gadget, 1);
+}
+
+/**
+ * cdns3_gadget_udc_start - Gadget start
+ * @gadget: gadget object
+ * @driver: driver which operates on this gadget
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv_dev->lock, flags);
+ priv_dev->gadget_driver = driver;
+ cdns3_gadget_config(priv_dev);
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
+ return 0;
+}
+
+/**
+ * cdns3_gadget_udc_stop - Stops gadget
+ * @gadget: gadget object
+ *
+ * Returns 0
+ */
+static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
+{
+ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ struct cdns3_endpoint *priv_ep;
+ u32 bEndpointAddress;
+ struct usb_ep *ep;
+ int ret = 0;
+ int val;
+
+ priv_dev->gadget_driver = NULL;
+
+ priv_dev->onchip_used_size = 0;
+ priv_dev->out_mem_is_allocated = 0;
+ priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
+ priv_ep = ep_to_cdns3_ep(ep);
+ bEndpointAddress = priv_ep->num | priv_ep->dir;
+ cdns3_select_ep(priv_dev, bEndpointAddress);
+ writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+ readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+ !(val & EP_CMD_EPRST), 1, 100);
+ }
+
+ /* disable interrupt for device */
+ writel(0, &priv_dev->regs->usb_ien);
+ writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
+
+ return ret;
+}
+
+static const struct usb_gadget_ops cdns3_gadget_ops = {
+ .get_frame = cdns3_gadget_get_frame,
+ .wakeup = cdns3_gadget_wakeup,
+ .set_selfpowered = cdns3_gadget_set_selfpowered,
+ .pullup = cdns3_gadget_pullup,
+ .udc_start = cdns3_gadget_udc_start,
+ .udc_stop = cdns3_gadget_udc_stop,
+ .match_ep = cdns3_gadget_match_ep,
+};
+
+static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
+{
+ int i;
+
+	/* ep0 IN is an alias of ep0 OUT; clear it so the shared object is freed only once. */
+ priv_dev->eps[16] = NULL;
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+ if (priv_dev->eps[i]) {
+ cdns3_free_trb_pool(priv_dev->eps[i]);
+ devm_kfree(priv_dev->dev, priv_dev->eps[i]);
+ }
+}
+
+/**
+ * cdns3_init_eps - Initializes software endpoints of gadget
+ * @priv_dev: extended gadget object
+ *
+ * Returns 0 on success, error code otherwise
+ */
+static int cdns3_init_eps(struct cdns3_device *priv_dev)
+{
+ u32 ep_enabled_reg, iso_ep_reg;
+ struct cdns3_endpoint *priv_ep;
+ int ep_dir, ep_number;
+ u32 ep_mask;
+ int ret = 0;
+ int i;
+
+	/* Read the enabled and ISO endpoint bitmaps from USB_CAP3 and USB_CAP4 */
+ ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
+ iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
+
+ dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ ep_dir = i >> 4; /* i div 16 */
+ ep_number = i & 0xF; /* i % 16 */
+ ep_mask = BIT(i);
+
+ if (!(ep_enabled_reg & ep_mask))
+ continue;
+
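+		/* ep0 IN reuses the endpoint object created for ep0 OUT. */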
+ if (ep_dir && !ep_number) {
+ priv_dev->eps[i] = priv_dev->eps[0];
+ continue;
+ }
+
+ priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
+ GFP_KERNEL);
+ if (!priv_ep)
+ goto err;
+
+ /* set parent of endpoint object */
+ priv_ep->cdns3_dev = priv_dev;
+ priv_dev->eps[i] = priv_ep;
+ priv_ep->num = ep_number;
+ priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
+
+ if (!ep_number) {
+ ret = cdns3_init_ep0(priv_dev, priv_ep);
+ if (ret) {
+ dev_err(priv_dev->dev, "Failed to init ep0\n");
+ goto err;
+ }
+ } else {
+ snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
+ ep_number, !!ep_dir ? "in" : "out");
+ priv_ep->endpoint.name = priv_ep->name;
+
+ usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
+ CDNS3_EP_MAX_PACKET_LIMIT);
+ priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
+ priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
+ if (ep_dir)
+ priv_ep->endpoint.caps.dir_in = 1;
+ else
+ priv_ep->endpoint.caps.dir_out = 1;
+
+ if (iso_ep_reg & ep_mask)
+ priv_ep->endpoint.caps.type_iso = 1;
+
+ priv_ep->endpoint.caps.type_bulk = 1;
+ priv_ep->endpoint.caps.type_int = 1;
+
+ list_add_tail(&priv_ep->endpoint.ep_list,
+ &priv_dev->gadget.ep_list);
+ }
+
+ priv_ep->flags = 0;
+
+ dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
+ priv_ep->name,
+ priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
+ priv_ep->endpoint.caps.type_iso ? "ISO" : "");
+
+ INIT_LIST_HEAD(&priv_ep->pending_req_list);
+ INIT_LIST_HEAD(&priv_ep->deferred_req_list);
+ INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
+ }
+
+ return 0;
+err:
+ cdns3_free_all_eps(priv_dev);
+ return -ENOMEM;
+}
+
+void cdns3_gadget_exit(struct cdns3 *cdns)
+{
+ struct cdns3_device *priv_dev;
+
+ priv_dev = cdns->gadget_dev;
+
+ devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
+
+ pm_runtime_mark_last_busy(cdns->dev);
+ pm_runtime_put_autosuspend(cdns->dev);
+
+ usb_del_gadget_udc(&priv_dev->gadget);
+
+ cdns3_free_all_eps(priv_dev);
+
+ while (!list_empty(&priv_dev->aligned_buf_list)) {
+ struct cdns3_aligned_buf *buf;
+
+ buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
+ dma_free_coherent(priv_dev->sysdev, buf->size,
+ buf->buf,
+ buf->dma);
+
+ list_del(&buf->list);
+ kfree(buf);
+ }
+
+ dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
+ priv_dev->setup_dma);
+
+ kfree(priv_dev->zlp_buf);
+ kfree(priv_dev);
+ cdns->gadget_dev = NULL;
+ cdns3_drd_switch_gadget(cdns, 0);
+}
+
+static int cdns3_gadget_start(struct cdns3 *cdns)
+{
+ struct cdns3_device *priv_dev;
+ u32 max_speed;
+ int ret;
+
+ priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
+ if (!priv_dev)
+ return -ENOMEM;
+
+ cdns->gadget_dev = priv_dev;
+ priv_dev->sysdev = cdns->dev;
+ priv_dev->dev = cdns->dev;
+ priv_dev->regs = cdns->dev_regs;
+
+ device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
+ &priv_dev->onchip_buffers);
+
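+	/*
+	 * If the DT/ACPI property is missing, fall back to the size reported
+	 * in USB_CAP2, and finally to a default of 256 if that is also zero.
+	 */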
+ if (priv_dev->onchip_buffers <= 0) {
+ u32 reg = readl(&priv_dev->regs->usb_cap2);
+
+ priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
+ }
+
+ if (!priv_dev->onchip_buffers)
+ priv_dev->onchip_buffers = 256;
+
+ max_speed = usb_get_maximum_speed(cdns->dev);
+
+ /* Check the maximum_speed parameter */
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+ break;
+ case USB_SPEED_HIGH:
+ writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+ break;
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
+ max_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ max_speed = USB_SPEED_SUPER;
+ break;
+ }
+
+ /* fill gadget fields */
+ priv_dev->gadget.max_speed = max_speed;
+ priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+ priv_dev->gadget.ops = &cdns3_gadget_ops;
+ priv_dev->gadget.name = "usb-ss-gadget";
+ priv_dev->gadget.sg_supported = 1;
+ priv_dev->gadget.quirk_avoids_skb_reserve = 1;
+
+ spin_lock_init(&priv_dev->lock);
+ INIT_WORK(&priv_dev->pending_status_wq,
+ cdns3_pending_setup_status_handler);
+
+ INIT_WORK(&priv_dev->aligned_buf_wq,
+ cdns3_free_aligned_request_buf);
+
+ /* initialize endpoint container */
+ INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
+ INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
+
+ ret = cdns3_init_eps(priv_dev);
+ if (ret) {
+ dev_err(priv_dev->dev, "Failed to create endpoints\n");
+ goto err1;
+ }
+
+ /* allocate memory for setup packet buffer */
+ priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
+ &priv_dev->setup_dma, GFP_DMA);
+ if (!priv_dev->setup_buf) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+
+ priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
+
+ dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
+ readl(&priv_dev->regs->usb_cap6));
+ dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n",
+ readl(&priv_dev->regs->usb_cap1));
+ dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
+ readl(&priv_dev->regs->usb_cap2));
+
+ priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
+
+ priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
+ if (!priv_dev->zlp_buf) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ /* add USB gadget device */
+ ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget);
+ if (ret < 0) {
+ dev_err(priv_dev->dev,
+ "Failed to register USB device controller\n");
+ goto err4;
+ }
+
+ return 0;
+err4:
+ kfree(priv_dev->zlp_buf);
+err3:
+ dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
+ priv_dev->setup_dma);
+err2:
+ cdns3_free_all_eps(priv_dev);
+err1:
+ cdns->gadget_dev = NULL;
+ return ret;
+}
+
+static int __cdns3_gadget_init(struct cdns3 *cdns)
+{
+ int ret = 0;
+
+ cdns3_drd_switch_gadget(cdns, 1);
+ pm_runtime_get_sync(cdns->dev);
+
+ ret = cdns3_gadget_start(cdns);
+ if (ret)
+ return ret;
+
+ /*
+ * Because interrupt line can be shared with other components in
+ * driver it can't use IRQF_ONESHOT flag here.
+ */
+ ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
+ cdns3_device_irq_handler,
+ cdns3_device_thread_irq_handler,
+ IRQF_SHARED, dev_name(cdns->dev), cdns);
+
+ if (ret)
+ goto err0;
+
+ return 0;
+err0:
+ cdns3_gadget_exit(cdns);
+ return ret;
+}
+
+static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+{
+ struct cdns3_device *priv_dev = cdns->gadget_dev;
+
+ cdns3_disconnect_gadget(priv_dev);
+
+ priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
+ cdns3_hw_reset_eps_config(priv_dev);
+
+ /* disable interrupt for device */
+ writel(0, &priv_dev->regs->usb_ien);
+
+ cdns3_gadget_pullup(&priv_dev->gadget, 0);
+
+ return 0;
+}
+
+static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
+{
+ struct cdns3_device *priv_dev = cdns->gadget_dev;
+
+ if (!priv_dev->gadget_driver)
+ return 0;
+
+ cdns3_gadget_config(priv_dev);
+
+ return 0;
+}
+
+/**
+ * cdns3_gadget_init - initialize device structure
+ *
+ * cdns: cdns3 instance
+ *
+ * This function initializes the gadget.
+ */
+int cdns3_gadget_init(struct cdns3 *cdns)
+{
+ struct cdns3_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = __cdns3_gadget_init;
+ rdrv->stop = cdns3_gadget_exit;
+ rdrv->suspend = cdns3_gadget_suspend;
+ rdrv->resume = cdns3_gadget_resume;
+ rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->name = "gadget";
+ cdns->roles[USB_ROLE_DEVICE] = rdrv;
+
+ return 0;
+}
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
new file mode 100644
index 000000000000..bc4024041ef2
--- /dev/null
+++ b/drivers/usb/cdns3/gadget.h
@@ -0,0 +1,1338 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USBSS device controller driver header file
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ * Pawel Jez <pjez@cadence.com>
+ * Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_GADGET
+#define __LINUX_CDNS3_GADGET
+#include <linux/usb/gadget.h>
+
+/*
+ * USBSS-DEV register interface.
+ * This corresponds to the USBSS Device Controller Interface
+ */
+
+/**
+ * struct cdns3_usb_regs - device controller registers.
+ * @usb_conf: Global Configuration.
+ * @usb_sts: Global Status.
+ * @usb_cmd: Global Command.
+ * @usb_itpn: ITP/SOF number.
+ * @usb_lpm: Link Power Management control.
+ * @usb_ien: USB Interrupt Enable.
+ * @usb_ists: USB Interrupt Status.
+ * @ep_sel: Endpoint Select.
+ * @ep_traddr: Endpoint Transfer Ring Address.
+ * @ep_cfg: Endpoint Configuration.
+ * @ep_cmd: Endpoint Command.
+ * @ep_sts: Endpoint Status.
+ * @ep_sts_sid: Endpoint Status Stream ID.
+ * @ep_sts_en: Endpoint Status Enable.
+ * @drbl: Doorbell.
+ * @ep_ien: EP Interrupt Enable.
+ * @ep_ists: EP Interrupt Status.
+ * @usb_pwr: Global Power Configuration.
+ * @usb_conf2: Global Configuration 2.
+ * @usb_cap1: Capability 1.
+ * @usb_cap2: Capability 2.
+ * @usb_cap3: Capability 3.
+ * @usb_cap4: Capability 4.
+ * @usb_cap5: Capability 5.
+ * @usb_cap6: Capability 6.
+ * @usb_cpkt1: Custom Packet 1.
+ * @usb_cpkt2: Custom Packet 2.
+ * @usb_cpkt3: Custom Packet 3.
+ * @ep_dma_ext_addr: Upper address for DMA operations.
+ * @buf_addr: Address for On-chip Buffer operations.
+ * @buf_data: Data for On-chip Buffer operations.
+ * @buf_ctrl: On-chip Buffer Access Control.
+ * @dtrans: DMA Transfer Mode.
+ * @tdl_from_trb: Source of TD Configuration.
+ * @tdl_beh: TDL Behavior Configuration.
+ * @ep_tdl: Endpoint TDL.
+ * @tdl_beh2: TDL Behavior 2 Configuration.
+ * @dma_adv_td: DMA Advance TD Configuration.
+ * @reserved1: Reserved.
+ * @cfg_reg1: Configuration 1.
+ * @dbg_link1: Debug Link 1.
+ * @dbg_link2: Debug Link 2.
+ * @cfg_regs: Configuration.
+ * @reserved2: Reserved.
+ * @dma_axi_ctrl: AXI Control.
+ * @dma_axi_id: AXI ID register.
+ * @dma_axi_cap: AXI Capability.
+ * @dma_axi_ctrl0: AXI Control 0.
+ * @dma_axi_ctrl1: AXI Control 1.
+ */
+struct cdns3_usb_regs {
+ __le32 usb_conf;
+ __le32 usb_sts;
+ __le32 usb_cmd;
+ __le32 usb_itpn;
+ __le32 usb_lpm;
+ __le32 usb_ien;
+ __le32 usb_ists;
+ __le32 ep_sel;
+ __le32 ep_traddr;
+ __le32 ep_cfg;
+ __le32 ep_cmd;
+ __le32 ep_sts;
+ __le32 ep_sts_sid;
+ __le32 ep_sts_en;
+ __le32 drbl;
+ __le32 ep_ien;
+ __le32 ep_ists;
+ __le32 usb_pwr;
+ __le32 usb_conf2;
+ __le32 usb_cap1;
+ __le32 usb_cap2;
+ __le32 usb_cap3;
+ __le32 usb_cap4;
+ __le32 usb_cap5;
+ __le32 usb_cap6;
+ __le32 usb_cpkt1;
+ __le32 usb_cpkt2;
+ __le32 usb_cpkt3;
+ __le32 ep_dma_ext_addr;
+ __le32 buf_addr;
+ __le32 buf_data;
+ __le32 buf_ctrl;
+ __le32 dtrans;
+ __le32 tdl_from_trb;
+ __le32 tdl_beh;
+ __le32 ep_tdl;
+ __le32 tdl_beh2;
+ __le32 dma_adv_td;
+ __le32 reserved1[26];
+ __le32 cfg_reg1;
+ __le32 dbg_link1;
+ __le32 dbg_link2;
+ __le32 cfg_regs[74];
+ __le32 reserved2[51];
+ __le32 dma_axi_ctrl;
+ __le32 dma_axi_id;
+ __le32 dma_axi_cap;
+ __le32 dma_axi_ctrl0;
+ __le32 dma_axi_ctrl1;
+};
+
+/* USB_CONF - bitmasks */
+/* Reset USB device configuration. */
+#define USB_CONF_CFGRST BIT(0)
+/* Set Configuration. */
+#define USB_CONF_CFGSET BIT(1)
+/* Disconnect USB device in SuperSpeed. */
+#define USB_CONF_USB3DIS BIT(3)
+/* Disconnect USB device in HS/FS */
+#define USB_CONF_USB2DIS BIT(4)
+/* Little Endian access - default */
+#define USB_CONF_LENDIAN BIT(5)
+/*
+ * Big Endian access. The driver assumes that the byte order for
+ * SFR accesses is always Little Endian, so this bit
+ * is not used.
+ */
+#define USB_CONF_BENDIAN BIT(6)
+/* Device software reset. */
+#define USB_CONF_SWRST BIT(7)
+/* Singular DMA transfer mode. Only for VER < DEV_VER_V3 */
+#define USB_CONF_DSING BIT(8)
+/* Multiple DMA transfers mode. Only for VER < DEV_VER_V3 */
+#define USB_CONF_DMULT BIT(9)
+/* DMA clock turn-off enable. */
+#define USB_CONF_DMAOFFEN BIT(10)
+/* DMA clock turn-off disable. */
+#define USB_CONF_DMAOFFDS BIT(11)
+/* Clear Force Full Speed. */
+#define USB_CONF_CFORCE_FS BIT(12)
+/* Set Force Full Speed. */
+#define USB_CONF_SFORCE_FS BIT(13)
+/* Device enable. */
+#define USB_CONF_DEVEN BIT(14)
+/* Device disable. */
+#define USB_CONF_DEVDS BIT(15)
+/* L1 LPM state entry enable (used in HS/FS mode). */
+#define USB_CONF_L1EN BIT(16)
+/* L1 LPM state entry disable (used in HS/FS mode). */
+#define USB_CONF_L1DS BIT(17)
+/* USB 2.0 clock gate disable. */
+#define USB_CONF_CLK2OFFEN BIT(18)
+/* USB 2.0 clock gate enable. */
+#define USB_CONF_CLK2OFFDS BIT(19)
+/* L0 LPM state entry request (used in HS/FS mode). */
+#define USB_CONF_LGO_L0 BIT(20)
+/* USB 3.0 clock gate disable. */
+#define USB_CONF_CLK3OFFEN BIT(21)
+/* USB 3.0 clock gate enable. */
+#define USB_CONF_CLK3OFFDS BIT(22)
+/* Bit 23 is reserved */
+/* U1 state entry enable (used in SS mode). */
+#define USB_CONF_U1EN BIT(24)
+/* U1 state entry disable (used in SS mode). */
+#define USB_CONF_U1DS BIT(25)
+/* U2 state entry enable (used in SS mode). */
+#define USB_CONF_U2EN BIT(26)
+/* U2 state entry disable (used in SS mode). */
+#define USB_CONF_U2DS BIT(27)
+/* U0 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U0 BIT(28)
+/* U1 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U1 BIT(29)
+/* U2 state entry request (used in SS mode). */
+#define USB_CONF_LGO_U2 BIT(30)
+/* SS.Inactive state entry request (used in SS mode) */
+#define USB_CONF_LGO_SSINACT BIT(31)
+
+/* USB_STS - bitmasks */
+/*
+ * Configuration status.
+ * 1 - device is in the configured state.
+ * 0 - device is not configured.
+ */
+#define USB_STS_CFGSTS_MASK BIT(0)
+#define USB_STS_CFGSTS(p) ((p) & USB_STS_CFGSTS_MASK)
+/*
+ * On-chip memory overflow.
+ * 0 - On-chip memory status OK.
+ * 1 - On-chip memory overflow.
+ */
+#define USB_STS_OV_MASK BIT(1)
+#define USB_STS_OV(p) ((p) & USB_STS_OV_MASK)
+/*
+ * SuperSpeed connection status.
+ * 0 - USB in SuperSpeed mode disconnected.
+ * 1 - USB in SuperSpeed mode connected.
+ */
+#define USB_STS_USB3CONS_MASK BIT(2)
+#define USB_STS_USB3CONS(p) ((p) & USB_STS_USB3CONS_MASK)
+/*
+ * DMA transfer configuration status.
+ * 0 - single request.
+ * 1 - multiple TRB chain
+ * Supported only for controller version < DEV_VER_V3
+ */
+#define USB_STS_DTRANS_MASK BIT(3)
+#define USB_STS_DTRANS(p) ((p) & USB_STS_DTRANS_MASK)
+/*
+ * Device speed.
+ * 0 - Undefined (value after reset).
+ * 1 - Low speed
+ * 2 - Full speed
+ * 3 - High speed
+ * 4 - Super speed
+ */
+#define USB_STS_USBSPEED_MASK GENMASK(6, 4)
+#define USB_STS_USBSPEED(p) (((p) & USB_STS_USBSPEED_MASK) >> 4)
+#define USB_STS_LS (0x1 << 4)
+#define USB_STS_FS (0x2 << 4)
+#define USB_STS_HS (0x3 << 4)
+#define USB_STS_SS (0x4 << 4)
+#define DEV_UNDEFSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == (0x0 << 4))
+#define DEV_LOWSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_LS)
+#define DEV_FULLSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_FS)
+#define DEV_HIGHSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_HS)
+#define DEV_SUPERSPEED(p) (((p) & USB_STS_USBSPEED_MASK) == USB_STS_SS)
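+/*
+ * Usage sketch (illustrative only, not part of the register definition):
+ * the speed macros above can translate USB_STS into a gadget speed, e.g.
+ *
+ *	u32 reg = readl(&priv_dev->regs->usb_sts);
+ *
+ *	if (DEV_SUPERSPEED(reg))
+ *		priv_dev->gadget.speed = USB_SPEED_SUPER;
+ *	else if (DEV_HIGHSPEED(reg))
+ *		priv_dev->gadget.speed = USB_SPEED_HIGH;
+ *	else if (DEV_FULLSPEED(reg))
+ *		priv_dev->gadget.speed = USB_SPEED_FULL;
+ *	else if (DEV_LOWSPEED(reg))
+ *		priv_dev->gadget.speed = USB_SPEED_LOW;
+ */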
+/*
+ * Endianness for SFR access.
+ * 0 - Little Endian order (default after hardware reset).
+ * 1 - Big Endian order
+ */
+#define USB_STS_ENDIAN_MASK BIT(7)
+#define USB_STS_ENDIAN(p) ((p) & USB_STS_ENDIAN_MASK)
+/*
+ * HS/FS clock turn-off status.
+ * 0 - hsfs clock is always on.
+ * 1 - hsfs clock turn-off in L2 (HS/FS mode) is enabled
+ * (default after hardware reset).
+ */
+#define USB_STS_CLK2OFF_MASK BIT(8)
+#define USB_STS_CLK2OFF(p) ((p) & USB_STS_CLK2OFF_MASK)
+/*
+ * PCLK clock turn-off status.
+ * 0 - pclk clock is always on.
+ * 1 - pclk clock turn-off in U3 (SS mode) is enabled
+ * (default after hardware reset).
+ */
+#define USB_STS_CLK3OFF_MASK BIT(9)
+#define USB_STS_CLK3OFF(p) ((p) & USB_STS_CLK3OFF_MASK)
+/*
+ * Controller in reset state.
+ * 0 - Internal reset is active.
+ * 1 - Internal reset is not active and controller is fully operational.
+ */
+#define USB_STS_IN_RST_MASK BIT(10)
+#define USB_STS_IN_RST(p) ((p) & USB_STS_IN_RST_MASK)
+/*
+ * Status of the "TDL calculation based on TRB" feature.
+ * 0 - disabled
+ * 1 - enabled
+ * Supported only for DEV_VER_V2 controller version.
+ */
+#define USB_STS_TDL_TRB_ENABLED BIT(11)
+/*
+ * Device enable Status.
+ * 0 - USB device is disabled (VBUS input is disconnected from internal logic).
+ * 1 - USB device is enabled (VBUS input is connected to the internal logic).
+ */
+#define USB_STS_DEVS_MASK BIT(14)
+#define USB_STS_DEVS(p) ((p) & USB_STS_DEVS_MASK)
+/*
+ * Address status.
+ * 0 - USB device is in the default state.
+ * 1 - USB device is at least in address state.
+ */
+#define USB_STS_ADDRESSED_MASK BIT(15)
+#define USB_STS_ADDRESSED(p) ((p) & USB_STS_ADDRESSED_MASK)
+/*
+ * L1 LPM state enable status (used in HS/FS mode).
+ * 0 - Entering to L1 LPM state disabled.
+ * 1 - Entering to L1 LPM state enabled.
+ */
+#define USB_STS_L1ENS_MASK BIT(16)
+#define USB_STS_L1ENS(p) ((p) & USB_STS_L1ENS_MASK)
+/*
+ * Internal VBUS connection status (used both in HS/FS and SS mode).
+ * 0 - internal VBUS is not detected.
+ * 1 - internal VBUS is detected.
+ */
+#define USB_STS_VBUSS_MASK BIT(17)
+#define USB_STS_VBUSS(p) ((p) & USB_STS_VBUSS_MASK)
+/*
+ * HS/FS LPM state (used in FS/HS mode).
+ * 0 - L0 State
+ * 1 - L1 State
+ * 2 - L2 State
+ * 3 - L3 State
+ */
+#define USB_STS_LPMST_MASK GENMASK(19, 18)
+#define DEV_L0_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x0 << 18))
+#define DEV_L1_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x1 << 18))
+#define DEV_L2_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x2 << 18))
+#define DEV_L3_STATE(p) (((p) & USB_STS_LPMST_MASK) == (0x3 << 18))
+/*
+ * HS/FS mode connection status (used in FS/HS mode).
+ * 0 - the disconnect bit for HS/FS mode is set.
+ * 1 - the disconnect bit for HS/FS mode is not set.
+ */
+#define USB_STS_USB2CONS_MASK	BIT(20)
+#define USB_STS_USB2CONS(p)	((p) & USB_STS_USB2CONS_MASK)
+/*
+ * Disable HS status (used in FS/HS mode).
+ * 0 - High Speed operations in USB2.0 (FS/HS) mode are not disabled.
+ * 1 - High Speed operations in USB2.0 (FS/HS) mode are disabled.
+ */
+#define USB_STS_DISABLE_HS_MASK BIT(21)
+#define USB_STS_DISABLE_HS(p) ((p) & USB_STS_DISABLE_HS_MASK)
+/*
+ * U1 state enable status (used in SS mode).
+ * 0 - Entering to U1 state disabled.
+ * 1 - Entering to U1 state enabled.
+ */
+#define USB_STS_U1ENS_MASK BIT(24)
+#define USB_STS_U1ENS(p) ((p) & USB_STS_U1ENS_MASK)
+/*
+ * U2 state enable status (used in SS mode).
+ * 0 - Entering to U2 state disabled.
+ * 1 - Entering to U2 state enabled.
+ */
+#define USB_STS_U2ENS_MASK BIT(25)
+#define USB_STS_U2ENS(p) ((p) & USB_STS_U2ENS_MASK)
+/*
+ * SuperSpeed Link LTSSM state. This field reflects USBSS-DEV current
+ * SuperSpeed link state
+ */
+#define USB_STS_LST_MASK GENMASK(29, 26)
+#define DEV_LST_U0 (((p) & USB_STS_LST_MASK) == (0x0 << 26))
+#define DEV_LST_U1 (((p) & USB_STS_LST_MASK) == (0x1 << 26))
+#define DEV_LST_U2 (((p) & USB_STS_LST_MASK) == (0x2 << 26))
+#define DEV_LST_U3 (((p) & USB_STS_LST_MASK) == (0x3 << 26))
+#define DEV_LST_DISABLED (((p) & USB_STS_LST_MASK) == (0x4 << 26))
+#define DEV_LST_RXDETECT (((p) & USB_STS_LST_MASK) == (0x5 << 26))
+#define DEV_LST_INACTIVE (((p) & USB_STS_LST_MASK) == (0x6 << 26))
+#define DEV_LST_POLLING (((p) & USB_STS_LST_MASK) == (0x7 << 26))
+#define DEV_LST_RECOVERY (((p) & USB_STS_LST_MASK) == (0x8 << 26))
+#define DEV_LST_HOT_RESET (((p) & USB_STS_LST_MASK) == (0x9 << 26))
+#define DEV_LST_COMP_MODE (((p) & USB_STS_LST_MASK) == (0xa << 26))
+#define DEV_LST_LB_STATE (((p) & USB_STS_LST_MASK) == (0xb << 26))
+/*
+ * DMA clock turn-off status.
+ * 0 - DMA clock is always on (default after hardware reset).
+ * 1 - DMA clock turn-off in U1, U2 and U3 (SS mode) is enabled.
+ */
+#define USB_STS_DMAOFF_MASK BIT(30)
+#define USB_STS_DMAOFF(p) ((p) & USB_STS_DMAOFF_MASK)
+/*
+ * SFR Endian status.
+ * 0 - Little Endian order (default after hardware reset).
+ * 1 - Big Endian order.
+ */
+#define USB_STS_ENDIAN2_MASK BIT(31)
+#define USB_STS_ENDIAN2(p) ((p) & USB_STS_ENDIAN2_MASK)
+
+/* USB_CMD - bitmasks */
+/* Set Function Address */
+#define USB_CMD_SET_ADDR BIT(0)
+/*
+ * Function Address. This field is saved to the device only when the field
+ * SET_ADDR is set to '1' during a write to the USB_CMD register.
+ * Software is responsible for entering the address of the device during
+ * SET_ADDRESS request service. This field should be set immediately after
+ * the SETUP packet is decoded, and prior to confirmation of the status phase.
+ */
+#define USB_CMD_FADDR_MASK GENMASK(7, 1)
+#define USB_CMD_FADDR(p) (((p) << 1) & USB_CMD_FADDR_MASK)
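+/*
+ * Usage sketch (illustrative only; 'ctrl' stands for the received
+ * struct usb_ctrlrequest): a SET_ADDRESS request combines the address
+ * field with the SET_ADDR strobe in a single write, e.g.
+ *
+ *	writel(USB_CMD_SET_ADDR | USB_CMD_FADDR(le16_to_cpu(ctrl->wValue)),
+ *	       &priv_dev->regs->usb_cmd);
+ */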
+/* Send Function Wake Device Notification TP (used only in SS mode). */
+#define USB_CMD_SDNFW BIT(8)
+/* Set Test Mode (used only in HS/FS mode). */
+#define USB_CMD_STMODE BIT(9)
+/* Test mode selector (used only in HS/FS mode) */
+#define USB_STS_TMODE_SEL_MASK GENMASK(11, 10)
+#define USB_STS_TMODE_SEL(p) (((p) << 10) & USB_STS_TMODE_SEL_MASK)
+/*
+ * Send Latency Tolerance Message Device Notification TP (used only
+ * in SS mode).
+ */
+#define USB_CMD_SDNLTM BIT(12)
+/* Send Custom Transaction Packet (used only in SS mode) */
+#define USB_CMD_SPKT BIT(13)
+/* Device Notification 'Function Wake' - Interface value (used only in SS mode). */
+#define USB_CMD_DNFW_INT_MASK GENMASK(23, 16)
+#define USB_STS_DNFW_INT(p) (((p) << 16) & USB_CMD_DNFW_INT_MASK)
+/*
+ * Device Notification 'Latency Tolerance Message' - BELT value
+ * (used only in SS mode).
+ */
+#define USB_CMD_DNLTM_BELT_MASK GENMASK(27, 16)
+#define USB_STS_DNLTM_BELT(p) (((p) << 16) & USB_CMD_DNLTM_BELT_MASK)
+
+/* USB_ITPN - bitmasks */
+/*
+ * ITP(SS) / SOF (HS/FS) number
+ * In SS mode this field represents the number of the last ITP received from host.
+ * In HS/FS mode this field represents the number of the last SOF received from host.
+ */
+#define USB_ITPN_MASK GENMASK(13, 0)
+#define USB_ITPN(p) ((p) & USB_ITPN_MASK)
+
+/* USB_LPM - bitmasks */
+/* Host Initiated Resume Duration. */
+#define USB_LPM_HIRD_MASK GENMASK(3, 0)
+#define USB_LPM_HIRD(p) ((p) & USB_LPM_HIRD_MASK)
+/* Remote Wakeup Enable (bRemoteWake). */
+#define USB_LPM_BRW BIT(4)
+
+/* USB_IEN - bitmasks */
+/* SS connection interrupt enable */
+#define USB_IEN_CONIEN BIT(0)
+/* SS disconnection interrupt enable. */
+#define USB_IEN_DISIEN BIT(1)
+/* USB SS warm reset interrupt enable. */
+#define USB_IEN_UWRESIEN BIT(2)
+/* USB SS hot reset interrupt enable */
+#define USB_IEN_UHRESIEN BIT(3)
+/* SS link U3 state enter interrupt enable (suspend).*/
+#define USB_IEN_U3ENTIEN BIT(4)
+/* SS link U3 state exit interrupt enable (wakeup). */
+#define USB_IEN_U3EXTIEN BIT(5)
+/* SS link U2 state enter interrupt enable.*/
+#define USB_IEN_U2ENTIEN BIT(6)
+/* SS link U2 state exit interrupt enable.*/
+#define USB_IEN_U2EXTIEN BIT(7)
+/* SS link U1 state enter interrupt enable.*/
+#define USB_IEN_U1ENTIEN BIT(8)
+/* SS link U1 state exit interrupt enable.*/
+#define USB_IEN_U1EXTIEN BIT(9)
+/* ITP/SOF packet detected interrupt enable.*/
+#define USB_IEN_ITPIEN BIT(10)
+/* Wakeup interrupt enable.*/
+#define USB_IEN_WAKEIEN BIT(11)
+/* Send Custom Packet interrupt enable.*/
+#define USB_IEN_SPKTIEN BIT(12)
+/* HS/FS mode connection interrupt enable.*/
+#define USB_IEN_CON2IEN BIT(16)
+/* HS/FS mode disconnection interrupt enable.*/
+#define USB_IEN_DIS2IEN BIT(17)
+/* USB reset (HS/FS mode) interrupt enable.*/
+#define USB_IEN_U2RESIEN BIT(18)
+/* LPM L2 state enter interrupt enable.*/
+#define USB_IEN_L2ENTIEN BIT(20)
+/* LPM L2 state exit interrupt enable.*/
+#define USB_IEN_L2EXTIEN BIT(21)
+/* LPM L1 state enter interrupt enable.*/
+#define USB_IEN_L1ENTIEN BIT(24)
+/* LPM L1 state exit interrupt enable.*/
+#define USB_IEN_L1EXTIEN BIT(25)
+/* Configuration reset interrupt enable.*/
+#define USB_IEN_CFGRESIEN BIT(26)
+/* Start of the USB SS warm reset interrupt enable.*/
+#define USB_IEN_UWRESSIEN BIT(28)
+/* End of the USB SS warm reset interrupt enable.*/
+#define USB_IEN_UWRESEIEN BIT(29)
+
+#define USB_IEN_INIT (USB_IEN_U2RESIEN | USB_ISTS_DIS2I | USB_IEN_CON2IEN \
+ | USB_IEN_UHRESIEN | USB_IEN_UWRESIEN | USB_IEN_DISIEN \
+ | USB_IEN_CONIEN | USB_IEN_U3EXTIEN | USB_IEN_L2ENTIEN \
+ | USB_IEN_L2EXTIEN | USB_IEN_L1ENTIEN | USB_IEN_U3ENTIEN)
+
+/* USB_ISTS - bitmasks */
+/* SS Connection detected. */
+#define USB_ISTS_CONI BIT(0)
+/* SS Disconnection detected. */
+#define USB_ISTS_DISI BIT(1)
+/* USB SS warm reset detected. */
+#define USB_ISTS_UWRESI BIT(2)
+/* USB hot reset detected. */
+#define USB_ISTS_UHRESI BIT(3)
+/* U3 link state enter detected (suspend).*/
+#define USB_ISTS_U3ENTI BIT(4)
+/* U3 link state exit detected (wakeup). */
+#define USB_ISTS_U3EXTI BIT(5)
+/* U2 link state enter detected.*/
+#define USB_ISTS_U2ENTI BIT(6)
+/* U2 link state exit detected.*/
+#define USB_ISTS_U2EXTI BIT(7)
+/* U1 link state enter detected.*/
+#define USB_ISTS_U1ENTI BIT(8)
+/* U1 link state exit detected.*/
+#define USB_ISTS_U1EXTI BIT(9)
+/* ITP/SOF packet detected.*/
+#define USB_ISTS_ITPI BIT(10)
+/* Wakeup detected.*/
+#define USB_ISTS_WAKEI BIT(11)
+/* Send Custom Packet detected.*/
+#define USB_ISTS_SPKTI BIT(12)
+/* HS/FS mode connection detected.*/
+#define USB_ISTS_CON2I BIT(16)
+/* HS/FS mode disconnection detected.*/
+#define USB_ISTS_DIS2I BIT(17)
+/* USB reset (HS/FS mode) detected.*/
+#define USB_ISTS_U2RESI BIT(18)
+/* LPM L2 state enter detected.*/
+#define USB_ISTS_L2ENTI BIT(20)
+/* LPM L2 state exit detected.*/
+#define USB_ISTS_L2EXTI BIT(21)
+/* LPM L1 state enter detected.*/
+#define USB_ISTS_L1ENTI BIT(24)
+/* LPM L1 state exit detected.*/
+#define USB_ISTS_L1EXTI BIT(25)
+/* USB configuration reset detected.*/
+#define USB_ISTS_CFGRESI BIT(26)
+/* Start of the USB warm reset detected.*/
+#define USB_ISTS_UWRESSI BIT(28)
+/* End of the USB warm reset detected.*/
+#define USB_ISTS_UWRESEI BIT(29)
+
+/* EP_SEL - bitmasks */
+#define EP_SEL_EPNO_MASK GENMASK(3, 0)
+/* Endpoint number. */
+#define EP_SEL_EPNO(p) ((p) & EP_SEL_EPNO_MASK)
+/* Endpoint direction bit - 0 - OUT, 1 - IN. */
+#define EP_SEL_DIR BIT(7)
+
+#define select_ep_in(nr)	(EP_SEL_EPNO(nr) | EP_SEL_DIR)
+#define select_ep_out(nr)	(EP_SEL_EPNO(nr))
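+/*
+ * Usage sketch (illustrative only; 'num' and 'dir' stand for the endpoint
+ * number and direction): an endpoint is selected before its EP_* registers
+ * are accessed, e.g.
+ *
+ *	writel(EP_SEL_EPNO(num) | (dir ? EP_SEL_DIR : 0),
+ *	       &priv_dev->regs->ep_sel);
+ */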
+
+/* EP_TRADDR - bitmasks */
+/* Transfer Ring address. */
+#define EP_TRADDR_TRADDR(p) ((p))
+
+/* EP_CFG - bitmasks */
+/* Endpoint enable */
+#define EP_CFG_ENABLE BIT(0)
+/*
+ * Endpoint type.
+ * 1 - isochronous
+ * 2 - bulk
+ * 3 - interrupt
+ */
+#define EP_CFG_EPTYPE_MASK GENMASK(2, 1)
+#define EP_CFG_EPTYPE(p) (((p) << 1) & EP_CFG_EPTYPE_MASK)
+/* Stream support enable (only in SS mode). */
+#define EP_CFG_STREAM_EN BIT(3)
+/* TDL check (only in SS mode for BULK EP). */
+#define EP_CFG_TDL_CHK BIT(4)
+/* SID check (only in SS mode for BULK OUT EP). */
+#define EP_CFG_SID_CHK BIT(5)
+/* DMA transfer endianness. */
+#define EP_CFG_EPENDIAN BIT(7)
+/* Max burst size (used only in SS mode). */
+#define EP_CFG_MAXBURST_MASK GENMASK(11, 8)
+#define EP_CFG_MAXBURST(p) (((p) << 8) & EP_CFG_MAXBURST_MASK)
+/* ISO max burst. */
+#define EP_CFG_MULT_MASK GENMASK(15, 14)
+#define EP_CFG_MULT(p) (((p) << 14) & EP_CFG_MULT_MASK)
+/* Max packet size. */
+#define EP_CFG_MAXPKTSIZE_MASK GENMASK(26, 16)
+#define EP_CFG_MAXPKTSIZE(p) (((p) << 16) & EP_CFG_MAXPKTSIZE_MASK)
+/* Max number of buffered packets. */
+#define EP_CFG_BUFFERING_MASK GENMASK(31, 27)
+#define EP_CFG_BUFFERING(p) (((p) << 27) & EP_CFG_BUFFERING_MASK)
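+/*
+ * Usage sketch (illustrative only, example values): a bulk endpoint with a
+ * 1024-byte max packet size and a burst of 3 could be configured roughly as
+ *
+ *	writel(EP_CFG_ENABLE | EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK) |
+ *	       EP_CFG_MAXPKTSIZE(1024) | EP_CFG_MAXBURST(3),
+ *	       &priv_dev->regs->ep_cfg);
+ */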
+
+/* EP_CMD - bitmasks */
+/* Endpoint reset. */
+#define EP_CMD_EPRST BIT(0)
+/* Endpoint STALL set. */
+#define EP_CMD_SSTALL BIT(1)
+/* Endpoint STALL clear. */
+#define EP_CMD_CSTALL BIT(2)
+/* Send ERDY TP. */
+#define EP_CMD_ERDY BIT(3)
+/* Request complete. */
+#define EP_CMD_REQ_CMPL BIT(5)
+/* Transfer descriptor ready. */
+#define EP_CMD_DRDY BIT(6)
+/* Data flush. */
+#define EP_CMD_DFLUSH BIT(7)
+/*
+ * Transfer Descriptor Length write (used only for Bulk Stream capable
+ * endpoints in SS mode).
+ * Bit Removed from DEV_VER_V3 controller version.
+ */
+#define EP_CMD_STDL BIT(8)
+/*
+ * Transfer Descriptor Length (used only in SS mode for bulk endpoints).
+ * Bits Removed from DEV_VER_V3 controller version.
+ */
+#define EP_CMD_TDL_MASK GENMASK(15, 9)
+#define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK)
+#define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9)
+
+/* ERDY Stream ID value (used in SS mode). */
+#define EP_CMD_ERDY_SID_MASK GENMASK(31, 16)
+#define EP_CMD_ERDY_SID(p) (((p) << 16) & EP_CMD_ERDY_SID_MASK)
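+/*
+ * Usage sketch (illustrative only): once TRBs are queued and EP_TRADDR
+ * points at the transfer ring of the selected endpoint, writing DRDY hands
+ * the descriptors over to the controller; SSTALL/CSTALL set and clear the
+ * STALL condition in the same way, e.g.
+ *
+ *	writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ */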
+
+/* EP_STS - bitmasks */
+/* Setup transfer complete. */
+#define EP_STS_SETUP BIT(0)
+/* Endpoint STALL status. */
+#define EP_STS_STALL(p) ((p) & BIT(1))
+/* Interrupt On Complete. */
+#define EP_STS_IOC BIT(2)
+/* Interrupt on Short Packet. */
+#define EP_STS_ISP BIT(3)
+/* Transfer descriptor missing. */
+#define EP_STS_DESCMIS BIT(4)
+/* Stream Rejected (used only in SS mode) */
+#define EP_STS_STREAMR BIT(5)
+/* EXIT from MOVE DATA State (used only for stream transfers in SS mode). */
+#define EP_STS_MD_EXIT BIT(6)
+/* TRB error. */
+#define EP_STS_TRBERR BIT(7)
+/* Not ready (used only in SS mode). */
+#define EP_STS_NRDY BIT(8)
+/* DMA busy bit. */
+#define EP_STS_DBUSY BIT(9)
+/* Endpoint Buffer Empty */
+#define EP_STS_BUFFEMPTY(p) ((p) & BIT(10))
+/* Current Cycle Status */
+#define EP_STS_CCS(p) ((p) & BIT(11))
+/* Prime (used only in SS mode). */
+#define EP_STS_PRIME BIT(12)
+/* Stream error (used only in SS mode). */
+#define EP_STS_SIDERR BIT(13)
+/* OUT size mismatch. */
+#define EP_STS_OUTSMM BIT(14)
+/* ISO transmission error. */
+#define EP_STS_ISOERR BIT(15)
+/* Host Packet Pending (only for SS mode). */
+#define EP_STS_HOSTPP(p) ((p) & BIT(16))
+/* Stream Protocol State Machine State (only for Bulk stream endpoints). */
+#define EP_STS_SPSMST_MASK GENMASK(18, 17)
+#define EP_STS_SPSMST_DISABLED(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_IDLE(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_START_STREAM(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+#define EP_STS_SPSMST_MOVE_DATA(p) (((p) & EP_STS_SPSMST_MASK) >> 17)
+/* Interrupt On Transfer complete. */
+#define EP_STS_IOT BIT(19)
+/* OUT queue endpoint number. */
+#define EP_STS_OUTQ_NO_MASK GENMASK(27, 24)
+#define EP_STS_OUTQ_NO(p) (((p) & EP_STS_OUTQ_NO_MASK) >> 24)
+/* OUT queue valid flag. */
+#define EP_STS_OUTQ_VAL_MASK BIT(28)
+#define EP_STS_OUTQ_VAL(p) ((p) & EP_STS_OUTQ_VAL_MASK)
+/* SETUP WAIT. */
+#define EP_STS_STPWAIT BIT(31)
+
+/* EP_STS_SID - bitmasks */
+/* Stream ID (used only in SS mode). */
+#define EP_STS_SID_MASK GENMASK(15, 0)
+#define EP_STS_SID(p) ((p) & EP_STS_SID_MASK)
+
+/* EP_STS_EN - bitmasks */
+/* SETUP interrupt enable. */
+#define EP_STS_EN_SETUPEN BIT(0)
+/* OUT transfer missing descriptor enable. */
+#define EP_STS_EN_DESCMISEN BIT(4)
+/* Stream Rejected enable. */
+#define EP_STS_EN_STREAMREN BIT(5)
+/* Move Data Exit enable.*/
+#define EP_STS_EN_MD_EXITEN BIT(6)
+/* TRB error interrupt enable. */
+#define EP_STS_EN_TRBERREN BIT(7)
+/* NRDY enable. */
+#define EP_STS_EN_NRDYEN BIT(8)
+/* Prime enable. */
+#define EP_STS_EN_PRIMEEEN BIT(12)
+/* Stream error enable. */
+#define EP_STS_EN_SIDERREN BIT(13)
+/* OUT size mismatch enable. */
+#define EP_STS_EN_OUTSMMEN BIT(14)
+/* ISO transmission error enable. */
+#define EP_STS_EN_ISOERREN BIT(15)
+/* Interrupt on Transmission complete enable. */
+#define EP_STS_EN_IOTEN BIT(19)
+/* Setup Wait interrupt enable. */
+#define EP_STS_EN_STPWAITEN BIT(31)
+
+/* DRBL- bitmasks */
+#define DB_VALUE_BY_INDEX(index) (1 << (index))
+#define DB_VALUE_EP0_OUT BIT(0)
+#define DB_VALUE_EP0_IN BIT(16)
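+/*
+ * Note (illustrative only): DB_VALUE_BY_INDEX() maps the driver's internal
+ * endpoint index to a single doorbell bit; for EP0 that is bit 0 (OUT) and
+ * bit 16 (IN), matching the EP_IEN()/EP_ISTS() layout below, e.g.
+ *
+ *	u32 db = DB_VALUE_BY_INDEX(cdns3_ep_addr_to_index(ep_addr));
+ */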
+
+/* EP_IEN - bitmasks */
+#define EP_IEN(index) (1 << (index))
+#define EP_IEN_EP_OUT0 BIT(0)
+#define EP_IEN_EP_IN0 BIT(16)
+
+/* EP_ISTS - bitmasks */
+#define EP_ISTS(index) (1 << (index))
+#define EP_ISTS_EP_OUT0 BIT(0)
+#define EP_ISTS_EP_IN0 BIT(16)
+
+/* USB_PWR- bitmasks */
+/* Power Shut Off capability enable */
+#define PUSB_PWR_PSO_EN	BIT(0)
+/* Power Shut Off capability disable */
+#define PUSB_PWR_PSO_DS BIT(1)
+/*
+ * Enables turning-off Reference Clock.
+ * This bit is optional and implemented only when support for OTG is
+ * implemented (indicated by OTG_READY bit set to '1').
+ */
+#define PUSB_PWR_STB_CLK_SWITCH_EN BIT(8)
+/*
+ * Status bit indicating that operation required by STB_CLK_SWITCH_EN write
+ * is completed
+ */
+#define PUSB_PWR_STB_CLK_SWITCH_DONE BIT(9)
+/* This bit informs if Fast Registers Access is enabled. */
+#define PUSB_PWR_FST_REG_ACCESS_STAT BIT(30)
+/* Fast Registers Access Enable. */
+#define PUSB_PWR_FST_REG_ACCESS BIT(31)
+
+/* USB_CONF2- bitmasks */
+/*
+ * Writing 1 disables the TDL calculation based on TRB feature in the controller
+ * for DMULT mode.
+ * Bit supported only for DEV_VER_V2 version.
+ */
+#define USB_CONF2_DIS_TDL_TRB BIT(1)
+/*
+ * Writing 1 enables the TDL calculation based on TRB feature in the controller
+ * for DMULT mode.
+ * Bit supported only for DEV_VER_V2 version.
+ */
+#define USB_CONF2_EN_TDL_TRB BIT(2)
+
+/* USB_CAP1- bitmasks */
+/*
+ * SFR Interface type
+ * This field reflects the type of SFR interface implemented:
+ * 0x0 - OCP
+ * 0x1 - AHB,
+ * 0x2 - PLB
+ * 0x3 - AXI
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_SFR_TYPE_MASK GENMASK(3, 0)
+#define DEV_SFR_TYPE_OCP(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x0)
+#define DEV_SFR_TYPE_AHB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x1)
+#define DEV_SFR_TYPE_PLB(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x2)
+#define DEV_SFR_TYPE_AXI(p) (((p) & USB_CAP1_SFR_TYPE_MASK) == 0x3)
+/*
+ * SFR Interface width
+ * This field reflects the width of the SFR interface implemented:
+ * 0x0 - 8 bit interface,
+ * 0x1 - 16 bit interface,
+ * 0x2 - 32 bit interface
+ * 0x3 - 64 bit interface
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_SFR_WIDTH_MASK GENMASK(7, 4)
+#define DEV_SFR_WIDTH_8(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x0 << 4))
+#define DEV_SFR_WIDTH_16(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x1 << 4))
+#define DEV_SFR_WIDTH_32(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x2 << 4))
+#define DEV_SFR_WIDTH_64(p) (((p) & USB_CAP1_SFR_WIDTH_MASK) == (0x3 << 4))
+/*
+ * DMA Interface type
+ * This field reflects the type of DMA interface implemented:
+ * 0x0 - OCP
+ * 0x1 - AHB,
+ * 0x2 - PLB
+ * 0x3 - AXI
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_DMA_TYPE_MASK GENMASK(11, 8)
+#define DEV_DMA_TYPE_OCP(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x0 << 8))
+#define DEV_DMA_TYPE_AHB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x1 << 8))
+#define DEV_DMA_TYPE_PLB(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x2 << 8))
+#define DEV_DMA_TYPE_AXI(p) (((p) & USB_CAP1_DMA_TYPE_MASK) == (0x3 << 8))
+/*
+ * DMA Interface width
+ * This field reflects the width of the DMA interface implemented:
+ * 0x0 - reserved,
+ * 0x1 - reserved,
+ * 0x2 - 32 bit interface
+ * 0x3 - 64 bit interface
+ * 0x4-0xF - reserved
+ */
+#define USB_CAP1_DMA_WIDTH_MASK GENMASK(15, 12)
+#define DEV_DMA_WIDTH_32(p) (((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x2 << 12))
+#define DEV_DMA_WIDTH_64(p) (((p) & USB_CAP1_DMA_WIDTH_MASK) == (0x3 << 12))
+/*
+ * USB3 PHY Interface type
+ * This field reflects the type of USB3 PHY interface implemented:
+ * 0x0 - USB PIPE,
+ * 0x1 - RMMI,
+ * 0x2-0xF - reserved
+ */
+#define USB_CAP1_U3PHY_TYPE_MASK GENMASK(19, 16)
+#define DEV_U3PHY_PIPE(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x0 << 16))
+#define DEV_U3PHY_RMMI(p) (((p) & USB_CAP1_U3PHY_TYPE_MASK) == (0x1 << 16))
+/*
+ * USB3 PHY Interface width
+ * This field reflects the width of the USB3 PHY interface implemented:
+ * 0x0 - 8 bit PIPE interface,
+ * 0x1 - 16 bit PIPE interface,
+ * 0x2 - 32 bit PIPE interface,
+ * 0x3 - 64 bit PIPE interface
+ * 0x4-0xF - reserved
+ * Note: When SSIC interface is implemented this field shows the width of
+ * internal PIPE interface. The RMMI interface is always 20 bits wide.
+ */
+#define USB_CAP1_U3PHY_WIDTH_MASK GENMASK(23, 20)
+#define DEV_U3PHY_WIDTH_8(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x0 << 20))
+#define DEV_U3PHY_WIDTH_16(p) \
+	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x1 << 20))
+#define DEV_U3PHY_WIDTH_32(p) \
+ (((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x2 << 20))
+#define DEV_U3PHY_WIDTH_64(p) \
+	(((p) & USB_CAP1_U3PHY_WIDTH_MASK) == (0x3 << 20))
+
+/*
+ * USB2 PHY Interface enable
+ * This field informs whether the USB2 PHY interface is implemented:
+ * 0x0 - interface NOT implemented,
+ * 0x1 - interface implemented
+ */
+#define USB_CAP1_U2PHY_EN(p) ((p) & BIT(24))
+/*
+ * USB2 PHY Interface type
+ * This field reflects the type of USB2 PHY interface implemented:
+ * 0x0 - UTMI,
+ * 0x1 - ULPI
+ */
+#define DEV_U2PHY_ULPI(p) ((p) & BIT(25))
+/*
+ * USB2 PHY Interface width
+ * This field reflects the width of the USB2 PHY interface implemented:
+ * 0x0 - 8 bit interface,
+ * 0x1 - 16 bit interface,
+ * Note: The ULPI interface is always 8 bits wide.
+ */
+#define DEV_U2PHY_WIDTH_16(p) ((p) & BIT(26))
+/*
+ * OTG Ready
+ * 0x0 - pure device mode
+ * 0x1 - some features and ports for CDNS USB OTG controller are implemented.
+ */
+#define USB_CAP1_OTG_READY(p) ((p) & BIT(27))
+
+/*
+ * When set, indicates that controller supports automatic internal TDL
+ * calculation based on the size provided in TRB (TRB[22:17]) for DMULT mode
+ * Supported only for DEV_VER_V2 controller version.
+ */
+#define USB_CAP1_TDL_FROM_TRB(p) ((p) & BIT(28))
+
+/* USB_CAP2- bitmasks */
+/*
+ * The actual size of the connected On-chip RAM memory in kB:
+ * - 0 means 256 kB (max supported mem size)
+ * - value other than 0 reflects the mem size in kB
+ */
+#define USB_CAP2_ACTUAL_MEM_SIZE(p) ((p) & GENMASK(7, 0))
+/*
+ * Max supported mem size
+ * This field reflects the on-chip RAM address bus width,
+ * which determines max supported mem size:
+ * 0x0-0x7 - reserved,
+ * 0x8 - support for 4kB mem,
+ * 0x9 - support for 8kB mem,
+ * 0xA - support for 16kB mem,
+ * 0xB - support for 32kB mem,
+ * 0xC - support for 64kB mem,
+ * 0xD - support for 128kB mem,
+ * 0xE - support for 256kB mem,
+ * 0xF - reserved
+ */
+#define USB_CAP2_MAX_MEM_SIZE(p) ((p) & GENMASK(11, 8))
+
+/* USB_CAP3- bitmasks */
+#define EP_IS_IMPLEMENTED(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP4- bitmasks */
+#define EP_SUPPORT_ISO(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP5- bitmasks */
+#define EP_SUPPORT_STREAM(reg, index) ((reg) & (1 << (index)))
+
+/* USB_CAP6- bitmasks */
+/* The USBSS-DEV Controller Internal build number. */
+#define GET_DEV_BASE_VERSION(p) ((p) & GENMASK(23, 0))
+/* The USBSS-DEV Controller version number. */
+#define GET_DEV_CUSTOM_VERSION(p) ((p) & GENMASK(31, 24))
+
+#define DEV_VER_NXP_V1 0x00024502
+#define DEV_VER_TI_V1 0x00024509
+#define DEV_VER_V2 0x0002450C
+#define DEV_VER_V3 0x0002450d
+
+/* DBG_LINK1- bitmasks */
+/*
+ * LFPS_MIN_DET_U1_EXIT value. This parameter configures the minimum
+ * time required for decoding the received LFPS as an LFPS.U1_Exit.
+ */
+#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT(p) ((p) & GENMASK(7, 0))
+/*
+ * LFPS_MIN_GEN_U1_EXIT value. This parameter configures the minimum time for
+ * phytxelecidle deassertion when generating LFPS.U1_Exit
+ */
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK GENMASK(15, 8)
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(p) (((p) << 8) & GENMASK(15, 8))
+/*
+ * RXDET_BREAK_DIS value. This parameter configures terminating the Far-end
+ * Receiver termination detection sequence:
+ * 0: it is possible that USBSS_DEV will terminate the Far-end receiver
+ * termination detection sequence
+ * 1: USBSS_DEV will not terminate Far-end receiver termination
+ * detection sequence
+ */
+#define DBG_LINK1_RXDET_BREAK_DIS BIT(16)
+/* LFPS_GEN_PING value. This parameter configures the LFPS.Ping generation */
+#define DBG_LINK1_LFPS_GEN_PING(p) (((p) << 17) & GENMASK(21, 17))
+/*
+ * Set the LFPS_MIN_DET_U1_EXIT value. Writing '1' to this bit writes the
+ * LFPS_MIN_DET_U1_EXIT field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect
+ */
+#define DBG_LINK1_LFPS_MIN_DET_U1_EXIT_SET BIT(24)
+/*
+ * Set the LFPS_MIN_GEN_U1_EXIT value. Writing '1' to this bit writes the
+ * LFPS_MIN_GEN_U1_EXIT field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect
+ */
+#define DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET BIT(25)
+/*
+ * Set the RXDET_BREAK_DIS value. Writing '1' to this bit writes
+ * the RXDET_BREAK_DIS field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect
+ */
+#define DBG_LINK1_RXDET_BREAK_DIS_SET BIT(26)
+/*
+ * Set the LFPS_GEN_PING value. Writing '1' to this bit writes
+ * the LFPS_GEN_PING field value to the device. This bit is automatically
+ * cleared. Writing '0' has no effect.
+ */
+#define DBG_LINK1_LFPS_GEN_PING_SET BIT(27)
+
+/* DMA_AXI_CTRL- bitmasks */
+/* The marprot pin configuration. */
+#define DMA_AXI_CTRL_MARPROT(p) ((p) & GENMASK(2, 0))
+/* The mawprot pin configuration. */
+#define DMA_AXI_CTRL_MAWPROT(p) (((p) & GENMASK(2, 0)) << 16)
+#define DMA_AXI_CTRL_NON_SECURE 0x02
+
+#define gadget_to_cdns3_device(g) (container_of(g, struct cdns3_device, gadget))
+
+#define ep_to_cdns3_ep(ep) (container_of(ep, struct cdns3_endpoint, endpoint))
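+/*
+ * Usage sketch (illustrative only): gadget core callbacks receive the
+ * generic objects, which are converted back to the driver's private
+ * structures with the helpers above, e.g.
+ *
+ *	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
+ *	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
+ */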
+
+/*-------------------------------------------------------------------------*/
+/*
+ * USBSS-DEV DMA interface.
+ */
+#define TRBS_PER_SEGMENT 40
+
+#define ISO_MAX_INTERVAL 10
+
+#if TRBS_PER_SEGMENT < 2
+#error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2."
+#endif
+
+/*
+ * Only for ISOC endpoints - the maximum number of TRBs is calculated as
+ * pow(2, bInterval-1) * number of USB requests. It is a limitation made by
+ * the driver to save memory. The controller must prepare a TRB for each ITP
+ * even if bInterval > 1. This is the reason why the driver needs so many
+ * TRBs for isochronous endpoints.
+ */
+#define TRBS_PER_ISOC_SEGMENT (ISO_MAX_INTERVAL * 8)
+
+#define GET_TRBS_PER_SEGMENT(ep_type) ((ep_type) == USB_ENDPOINT_XFER_ISOC ? \
+ TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT)
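+/*
+ * For illustration: with ISO_MAX_INTERVAL = 10 an isochronous ring gets
+ * TRBS_PER_ISOC_SEGMENT = 10 * 8 = 80 TRBs, while every other endpoint type
+ * keeps TRBS_PER_SEGMENT = 40:
+ *
+ *	GET_TRBS_PER_SEGMENT(USB_ENDPOINT_XFER_ISOC);	//  80
+ *	GET_TRBS_PER_SEGMENT(USB_ENDPOINT_XFER_BULK);	//  40
+ */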
+/**
+ * struct cdns3_trb - represent Transfer Descriptor block.
+ * @buffer: pointer to buffer data
+ * @length: length of data
+ * @control: control flags.
+ *
+ * This structure describes transfer block serviced by DMA module.
+ */
+struct cdns3_trb {
+ __le32 buffer;
+ __le32 length;
+ __le32 control;
+};
+
+#define TRB_SIZE (sizeof(struct cdns3_trb))
+#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
+#define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT)
+#define TRB_CTRL_RING_SIZE (TRB_SIZE * 2)
+
+/* TRB bit mask */
+#define TRB_TYPE_BITMASK GENMASK(15, 10)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
+
+/* TRB type IDs */
+/* bulk, interrupt, isoc, and control data stage */
+#define TRB_NORMAL 1
+/* TRB for linking ring segments */
+#define TRB_LINK 6
+
+/* Cycle bit - indicates TRB ownership by driver or hw */
+#define TRB_CYCLE BIT(0)
+/*
+ * When set to '1', the device will toggle its interpretation of the Cycle bit
+ */
+#define TRB_TOGGLE BIT(1)
+
+/*
+ * Short Packet (SP). OUT EPs at DMULT=1 only. Indicates if the TRB was
+ * processed while USB short packet was received. No more buffers defined by
+ * the TD will be used. DMA will automatically advance to next TD.
+ * - Shall be set to 0 by Software when putting TRB on the Transfer Ring
+ * - Shall be set to 1 by Controller when Short Packet condition for this TRB
+ * is detected independent if ISP is set or not.
+ */
+#define TRB_SP BIT(1)
+
+/* Interrupt on short packet*/
+#define TRB_ISP BIT(2)
+/* Setting this bit enables FIFO DMA operation mode */
+#define TRB_FIFO_MODE BIT(3)
+/* Chain bit - chains this TRB with the next TRB */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion */
+#define TRB_IOC BIT(5)
+
+/* stream ID bitmasks. */
+#define TRB_STREAM_ID_BITMASK GENMASK(31, 16)
+#define TRB_STREAM_ID(p) ((p) << 16)
+#define TRB_FIELD_TO_STREAMID(p) (((p) & TRB_STREAM_ID_BITMASK) >> 16)
+
+/* Size of TD expressed in USB packets for HS/FS mode. */
+#define TRB_TDL_HS_SIZE(p) (((p) << 16) & GENMASK(31, 16))
+#define TRB_TDL_HS_SIZE_GET(p) (((p) & GENMASK(31, 16)) >> 16)
+
+/* transfer_len bitmasks. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+
+/* Size of TD expressed in USB packets for SS mode. */
+#define TRB_TDL_SS_SIZE(p) (((p) << 17) & GENMASK(23, 17))
+#define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17)
+
+/* transfer_len bitmasks - bits 31:24 */
+#define TRB_BURST_LEN(p) (((p) << 24) & GENMASK(31, 24))
+#define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* Data buffer pointer bitmasks*/
+#define TRB_BUFFER(p) ((p) & GENMASK(31, 0))
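+/*
+ * Usage sketch (illustrative only; 'dma' and 'len' stand for a mapped
+ * buffer and its length): a single normal TRB with interrupt-on-completion
+ * could be built from the masks above as
+ *
+ *	trb->buffer  = cpu_to_le32(TRB_BUFFER(lower_32_bits(dma)));
+ *	trb->length  = cpu_to_le32(TRB_LEN(len) | TRB_BURST_LEN(16));
+ *	trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL) | TRB_IOC);
+ */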
+
+/*-------------------------------------------------------------------------*/
+/* Driver numeric constants */
+
+/* Such declaration should be added to ch9.h */
+#define USB_DEVICE_MAX_ADDRESS 127
+
+/* Endpoint init values */
+#define CDNS3_EP_MAX_PACKET_LIMIT 1024
+#define CDNS3_EP_MAX_STREAMS 15
+#define CDNS3_EP0_MAX_PACKET_LIMIT 512
+
+/* All endpoints including EP0 */
+#define CDNS3_ENDPOINTS_MAX_COUNT 32
+#define CDNS3_EP_ZLP_BUF_SIZE 1024
+
+#define CDNS3_EP_BUF_SIZE 2 /* KB */
+#define CDNS3_EP_ISO_HS_MULT 3
+#define CDNS3_EP_ISO_SS_BURST 3
+#define CDNS3_MAX_NUM_DESCMISS_BUF 32
+#define CDNS3_DESCMIS_BUF_SIZE 2048 /* Bytes */
+#define CDNS3_WA2_NUM_BUFFERS 128
+/*-------------------------------------------------------------------------*/
+/* Used structs */
+
+struct cdns3_device;
+
+/**
+ * struct cdns3_endpoint - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_req_list: list of requests queuing on transfer ring.
+ * @deferred_req_list: list of requests waiting for queuing on transfer ring.
+ * @wa2_descmiss_req_list: list of requests internally allocated by driver.
+ * @trb_pool: transfer ring - array of transaction buffers
+ * @trb_pool_dma: dma address of transfer ring
+ * @cdns3_dev: device associated with this endpoint
+ * @name: a human readable name e.g. ep1out
+ * @flags: specify the current state of endpoint
+ * @descmis_req: internal transfer object used for getting data from on-chip
+ * buffer. It is used only if the function driver doesn't provide a
+ * usb_request object in time.
+ * @dir: endpoint direction
+ * @num: endpoint number (1 - 15)
+ * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+ * @interval: interval between packets used for ISOC endpoint.
+ * @free_trbs: number of free TRBs in transfer ring
+ * @num_trbs: number of all TRBs in transfer ring
+ * @pcs: producer cycle state
+ * @ccs: consumer cycle state
+ * @enqueue: enqueue index in transfer ring
+ * @dequeue: dequeue index in transfer ring
+ * @trb_burst_size: number of burst used in trb.
+ */
+struct cdns3_endpoint {
+ struct usb_ep endpoint;
+ struct list_head pending_req_list;
+ struct list_head deferred_req_list;
+ struct list_head wa2_descmiss_req_list;
+ int wa2_counter;
+
+ struct cdns3_trb *trb_pool;
+ dma_addr_t trb_pool_dma;
+
+ struct cdns3_device *cdns3_dev;
+ char name[20];
+
+#define EP_ENABLED BIT(0)
+#define EP_STALLED BIT(1)
+#define EP_STALL_PENDING BIT(2)
+#define EP_WEDGE BIT(3)
+#define EP_TRANSFER_STARTED BIT(4)
+#define EP_UPDATE_EP_TRBADDR BIT(5)
+#define EP_PENDING_REQUEST BIT(6)
+#define EP_RING_FULL BIT(7)
+#define EP_CLAIMED BIT(8)
+#define EP_DEFERRED_DRDY BIT(9)
+#define EP_QUIRK_ISO_OUT_EN BIT(10)
+#define EP_QUIRK_END_TRANSFER BIT(11)
+#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
+#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
+ u32 flags;
+
+ struct cdns3_request *descmis_req;
+
+ u8 dir;
+ u8 num;
+ u8 type;
+ int interval;
+
+ int free_trbs;
+ int num_trbs;
+ u8 pcs;
+ u8 ccs;
+ int enqueue;
+ int dequeue;
+ u8 trb_burst_size;
+
+ unsigned int wa1_set:1;
+ struct cdns3_trb *wa1_trb;
+ unsigned int wa1_trb_index;
+ unsigned int wa1_cycle_bit:1;
+};
+
+/**
+ * struct cdns3_aligned_buf - represent aligned buffer used for DMA transfer
+ * @buf: data buffer aligned to 8 bytes. The buffer address used in a
+ * TRB shall be aligned to 8 bytes.
+ * @dma: dma address
+ * @size: size of buffer
+ * @in_use: informs whether this buffer is associated with a usb_request
+ * @list: used to add an instance of this object to a list
+ */
+struct cdns3_aligned_buf {
+ void *buf;
+ dma_addr_t dma;
+ u32 size;
+ int in_use:1;
+ struct list_head list;
+};
+
+/**
+ * struct cdns3_request - extended device side representation of usb_request
+ * object.
+ * @request: generic usb_request object describing single I/O request.
+ * @priv_ep: extended representation of usb_ep object
+ * @trb: the first TRB association with this request
+ * @start_trb: number of the first TRB in transfer ring
+ * @end_trb: number of the last TRB in transfer ring
+ * @aligned_buf: object holding information about the aligned buffer associated with
+ * this endpoint
+ * @flags: flag specifying special usage of request
+ * @list: used by internally allocated request to add to wa2_descmiss_req_list.
+ */
+struct cdns3_request {
+ struct usb_request request;
+ struct cdns3_endpoint *priv_ep;
+ struct cdns3_trb *trb;
+ int start_trb;
+ int end_trb;
+ struct cdns3_aligned_buf *aligned_buf;
+#define REQUEST_PENDING BIT(0)
+#define REQUEST_INTERNAL BIT(1)
+#define REQUEST_INTERNAL_CH BIT(2)
+#define REQUEST_ZLP BIT(3)
+#define REQUEST_UNALIGNED BIT(4)
+ u32 flags;
+ struct list_head list;
+};
+
+#define to_cdns3_request(r) (container_of(r, struct cdns3_request, request))
+
+/*Stages used during enumeration process.*/
+#define CDNS3_SETUP_STAGE 0x0
+#define CDNS3_DATA_STAGE 0x1
+#define CDNS3_STATUS_STAGE 0x2
+
+/**
+ * struct cdns3_device - represent USB device.
+ * @dev: pointer to device structure associated with this controller
+ * @sysdev: pointer to the DMA capable device
+ * @gadget: device side representation of the peripheral controller
+ * @gadget_driver: pointer to the gadget driver
+ * @dev_ver: device controller version.
+ * @lock: for synchronizing
+ * @regs: base address for device side registers
+ * @setup_buf: used while processing usb control requests
+ * @setup_dma: dma address for setup_buf
+ * @zlp_buf: ZLP (zero-length packet) buffer
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @ep0_data_dir: direction for control transfer
+ * @eps: array of pointers to all endpoints with exclusion ep0
+ * @aligned_buf_list: list of aligned buffers internally allocated by driver
+ * @aligned_buf_wq: workqueue freeing no longer used aligned buf.
+ * @selected_ep: currently selected endpoint. It's used only to improve
+ * performance.
+ * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP.
+ * @u1_allowed: allow device transition to u1 state
+ * @u2_allowed: allow device transition to u2 state
+ * @is_selfpowered: device is self powered
+ * @setup_pending: setup packet is being processed by the gadget driver
+ * @hw_configured_flag: hardware endpoint configuration was set.
+ * @wake_up_flag: allow device to remotely wake up the host
+ * @status_completion_no_call: indicates that the driver is waiting for status
+ * stage completion. It's used in deferred SET_CONFIGURATION request.
+ * @onchip_buffers: number of available on-chip buffers.
+ * @onchip_used_size: actual size of on-chip memory assigned to endpoints.
+ * @pending_status_wq: workqueue handling status stage for deferred requests.
+ * @pending_status_request: request for which status stage was deferred
+ */
+struct cdns3_device {
+ struct device *dev;
+ struct device *sysdev;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+
+#define CDNS_REVISION_V0 0x00024501
+#define CDNS_REVISION_V1 0x00024509
+ u32 dev_ver;
+
+ /* generic spin-lock for drivers */
+ spinlock_t lock;
+
+ struct cdns3_usb_regs __iomem *regs;
+
+ struct usb_ctrlrequest *setup_buf;
+ dma_addr_t setup_dma;
+ void *zlp_buf;
+
+ u8 ep0_stage;
+ int ep0_data_dir;
+
+ struct cdns3_endpoint *eps[CDNS3_ENDPOINTS_MAX_COUNT];
+
+ struct list_head aligned_buf_list;
+ struct work_struct aligned_buf_wq;
+
+ u32 selected_ep;
+ u16 isoch_delay;
+
+ unsigned wait_for_setup:1;
+ unsigned u1_allowed:1;
+ unsigned u2_allowed:1;
+ unsigned is_selfpowered:1;
+ unsigned setup_pending:1;
+ int hw_configured_flag:1;
+ int wake_up_flag:1;
+ unsigned status_completion_no_call:1;
+ int out_mem_is_allocated;
+
+ struct work_struct pending_status_wq;
+ struct usb_request *pending_status_request;
+
+	/* in KB */
+ u16 onchip_buffers;
+ u16 onchip_used_size;
+};
+
+void cdns3_set_register_bit(void __iomem *ptr, u32 mask);
+dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
+ struct cdns3_trb *trb);
+enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev);
+void cdns3_pending_setup_status_handler(struct work_struct *work);
+void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev);
+void cdns3_set_hw_configuration(struct cdns3_device *priv_dev);
+void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep);
+void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable);
+struct usb_request *cdns3_next_request(struct list_head *list);
+int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ struct usb_request *request);
+void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm);
+int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep);
+u8 cdns3_ep_addr_to_index(u8 ep_addr);
+int cdns3_gadget_ep_set_wedge(struct usb_ep *ep);
+int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value);
+void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep);
+int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep);
+struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags);
+void cdns3_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request);
+int cdns3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request);
+void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req,
+ int status);
+
+int cdns3_init_ep0(struct cdns3_device *priv_dev,
+ struct cdns3_endpoint *priv_ep);
+void cdns3_ep0_config(struct cdns3_device *priv_dev);
+void cdns3_ep_config(struct cdns3_endpoint *priv_ep);
+void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir);
+int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev);
+
+#endif /* __LINUX_CDNS3_GADGET */
diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
new file mode 100644
index 000000000000..b498a170b7e8
--- /dev/null
+++ b/drivers/usb/cdns3/host-export.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence USBSS DRD Driver - Host Export APIs
+ *
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ */
+#ifndef __LINUX_CDNS3_HOST_EXPORT
+#define __LINUX_CDNS3_HOST_EXPORT
+
+#ifdef CONFIG_USB_CDNS3_HOST
+
+int cdns3_host_init(struct cdns3 *cdns);
+void cdns3_host_exit(struct cdns3 *cdns);
+
+#else
+
+static inline int cdns3_host_init(struct cdns3 *cdns)
+{
+ return -ENXIO;
+}
+
+static inline void cdns3_host_exit(struct cdns3 *cdns) { }
+
+#endif /* CONFIG_USB_CDNS3_HOST */
+
+#endif /* __LINUX_CDNS3_HOST_EXPORT */
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
new file mode 100644
index 000000000000..2733a8f71fcd
--- /dev/null
+++ b/drivers/usb/cdns3/host.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver - host side
+ *
+ * Copyright (C) 2018-2019 Cadence Design Systems.
+ * Copyright (C) 2017-2018 NXP
+ *
+ * Authors: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ */
+
+#include <linux/platform_device.h>
+#include "core.h"
+#include "drd.h"
+
+static int __cdns3_host_init(struct cdns3 *cdns)
+{
+ struct platform_device *xhci;
+ int ret;
+
+ cdns3_drd_switch_host(cdns, 1);
+
+ xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
+ if (!xhci) {
+ dev_err(cdns->dev, "couldn't allocate xHCI device\n");
+ return -ENOMEM;
+ }
+
+ xhci->dev.parent = cdns->dev;
+ cdns->host_dev = xhci;
+
+ ret = platform_device_add_resources(xhci, cdns->xhci_res,
+ CDNS3_XHCI_RESOURCES_NUM);
+ if (ret) {
+ dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
+ goto err1;
+ }
+
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(cdns->dev, "failed to register xHCI device\n");
+ goto err1;
+ }
+
+ return 0;
+err1:
+ platform_device_put(xhci);
+ return ret;
+}
+
+static void cdns3_host_exit(struct cdns3 *cdns)
+{
+ platform_device_unregister(cdns->host_dev);
+ cdns->host_dev = NULL;
+ cdns3_drd_switch_host(cdns, 0);
+}
+
+int cdns3_host_init(struct cdns3 *cdns)
+{
+ struct cdns3_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = __cdns3_host_init;
+ rdrv->stop = cdns3_host_exit;
+ rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->name = "host";
+
+ cdns->roles[USB_ROLE_HOST] = rdrv;
+
+ return 0;
+}
diff --git a/drivers/usb/cdns3/trace.c b/drivers/usb/cdns3/trace.c
new file mode 100644
index 000000000000..459fa72d9c74
--- /dev/null
+++ b/drivers/usb/cdns3/trace.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USBSS device controller driver Trace Support
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
new file mode 100644
index 000000000000..e92348c9b4d7
--- /dev/null
+++ b/drivers/usb/cdns3/trace.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USBSS device controller driver.
+ * Trace support header file.
+ *
+ * Copyright (C) 2018-2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdns3
+
+#if !defined(__LINUX_CDNS3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __LINUX_CDNS3_TRACE
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/ch9.h>
+#include "core.h"
+#include "gadget.h"
+#include "debug.h"
+
+#define CDNS3_MSG_MAX 500
+
+TRACE_EVENT(cdns3_halt,
+ TP_PROTO(struct cdns3_endpoint *ep_priv, u8 halt, u8 flush),
+ TP_ARGS(ep_priv, halt, flush),
+ TP_STRUCT__entry(
+ __string(name, ep_priv->name)
+ __field(u8, halt)
+ __field(u8, flush)
+ ),
+ TP_fast_assign(
+ __assign_str(name, ep_priv->name);
+ __entry->halt = halt;
+ __entry->flush = flush;
+ ),
+ TP_printk("Halt %s for %s: %s", __entry->flush ? " and flush" : "",
+ __get_str(name), __entry->halt ? "set" : "cleared")
+);
+
+TRACE_EVENT(cdns3_wa1,
+ TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg),
+ TP_ARGS(ep_priv, msg),
+ TP_STRUCT__entry(
+ __string(ep_name, ep_priv->name)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, ep_priv->name);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("WA1: %s %s", __get_str(ep_name), __get_str(msg))
+);
+
+TRACE_EVENT(cdns3_wa2,
+ TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg),
+ TP_ARGS(ep_priv, msg),
+ TP_STRUCT__entry(
+ __string(ep_name, ep_priv->name)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, ep_priv->name);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("WA2: %s %s", __get_str(ep_name), __get_str(msg))
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_doorbell,
+ TP_PROTO(const char *ep_name, u32 ep_trbaddr),
+ TP_ARGS(ep_name, ep_trbaddr),
+ TP_STRUCT__entry(
+ __string(name, ep_name)
+ __field(u32, ep_trbaddr)
+ ),
+ TP_fast_assign(
+ __assign_str(name, ep_name);
+ __entry->ep_trbaddr = ep_trbaddr;
+ ),
+ TP_printk("%s, ep_trbaddr %08x", __get_str(name),
+ __entry->ep_trbaddr)
+);
+
+DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_ep0,
+ TP_PROTO(const char *ep_name, u32 ep_trbaddr),
+ TP_ARGS(ep_name, ep_trbaddr)
+);
+
+DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_epx,
+ TP_PROTO(const char *ep_name, u32 ep_trbaddr),
+ TP_ARGS(ep_name, ep_trbaddr)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_usb_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists),
+ TP_ARGS(priv_dev, usb_ists),
+ TP_STRUCT__entry(
+ __field(enum usb_device_speed, speed)
+ __field(u32, usb_ists)
+ __dynamic_array(char, str, CDNS3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->speed = cdns3_get_speed(priv_dev);
+ __entry->usb_ists = usb_ists;
+ ),
+ TP_printk("%s", cdns3_decode_usb_irq(__get_str(str), __entry->speed,
+ __entry->usb_ists))
+);
+
+DEFINE_EVENT(cdns3_log_usb_irq, cdns3_usb_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists),
+ TP_ARGS(priv_dev, usb_ists)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_epx_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_dev, priv_ep),
+ TP_STRUCT__entry(
+ __string(ep_name, priv_ep->name)
+ __field(u32, ep_sts)
+ __field(u32, ep_traddr)
+ __dynamic_array(char, str, CDNS3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(ep_name, priv_ep->name);
+ __entry->ep_sts = readl(&priv_dev->regs->ep_sts);
+ __entry->ep_traddr = readl(&priv_dev->regs->ep_traddr);
+ ),
+ TP_printk("%s, ep_traddr: %08x",
+ cdns3_decode_epx_irq(__get_str(str),
+ __get_str(ep_name),
+ __entry->ep_sts),
+ __entry->ep_traddr)
+);
+
+DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_dev, priv_ep)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts),
+ TP_ARGS(priv_dev, ep_sts),
+ TP_STRUCT__entry(
+ __field(int, ep_dir)
+ __field(u32, ep_sts)
+ __dynamic_array(char, str, CDNS3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->ep_dir = priv_dev->ep0_data_dir;
+ __entry->ep_sts = ep_sts;
+ ),
+ TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
+ __entry->ep_dir,
+ __entry->ep_sts))
+);
+
+DEFINE_EVENT(cdns3_log_ep0_irq, cdns3_ep0_irq,
+ TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts),
+ TP_ARGS(priv_dev, ep_sts)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_ctrl,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl),
+ TP_STRUCT__entry(
+ __field(u8, bRequestType)
+ __field(u8, bRequest)
+ __field(u16, wValue)
+ __field(u16, wIndex)
+ __field(u16, wLength)
+ __dynamic_array(char, str, CDNS3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = ctrl->bRequestType;
+ __entry->bRequest = ctrl->bRequest;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
+ ),
+ TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNS3_MSG_MAX,
+ __entry->bRequestType,
+ __entry->bRequest, __entry->wValue,
+ __entry->wIndex, __entry->wLength)
+ )
+);
+
+DEFINE_EVENT(cdns3_log_ctrl, cdns3_ctrl_req,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->priv_ep->name)
+ __field(struct cdns3_request *, req)
+ __field(void *, buf)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(int, zero)
+ __field(int, short_not_ok)
+ __field(int, no_interrupt)
+ __field(int, start_trb)
+ __field(int, end_trb)
+ __field(struct cdns3_trb *, start_trb_addr)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->priv_ep->name);
+ __entry->req = req;
+ __entry->buf = req->request.buf;
+ __entry->actual = req->request.actual;
+ __entry->length = req->request.length;
+ __entry->status = req->request.status;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ __entry->start_trb = req->start_trb;
+ __entry->end_trb = req->end_trb;
+ __entry->start_trb_addr = req->trb;
+ __entry->flags = req->flags;
+ ),
+ TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d,"
+ " trb: [start:%d, end:%d: virt addr %pa], flags:%x ",
+ __get_str(name), __entry->req, __entry->buf, __entry->actual,
+ __entry->length,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->status,
+ __entry->start_trb,
+ __entry->end_trb,
+ __entry->start_trb_addr,
+ __entry->flags
+ )
+);
+
+DEFINE_EVENT(cdns3_log_request, cdns3_alloc_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_log_request, cdns3_free_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_log_request, cdns3_ep_queue,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_log_request, cdns3_ep_dequeue,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_log_request, cdns3_gadget_giveback,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+TRACE_EVENT(cdns3_ep0_queue,
+ TP_PROTO(struct cdns3_device *dev_priv, struct usb_request *request),
+ TP_ARGS(dev_priv, request),
+ TP_STRUCT__entry(
+ __field(int, dir)
+ __field(int, length)
+ ),
+ TP_fast_assign(
+ __entry->dir = dev_priv->ep0_data_dir;
+ __entry->length = request->length;
+ ),
+ TP_printk("Queue to ep0%s length: %u", __entry->dir ? "in" : "out",
+ __entry->length)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_aligned_request,
+ TP_PROTO(struct cdns3_request *priv_req),
+ TP_ARGS(priv_req),
+ TP_STRUCT__entry(
+ __string(name, priv_req->priv_ep->name)
+ __field(struct usb_request *, req)
+ __field(void *, buf)
+ __field(dma_addr_t, dma)
+ __field(void *, aligned_buf)
+ __field(dma_addr_t, aligned_dma)
+ __field(u32, aligned_buf_size)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_req->priv_ep->name);
+ __entry->req = &priv_req->request;
+ __entry->buf = priv_req->request.buf;
+ __entry->dma = priv_req->request.dma;
+ __entry->aligned_buf = priv_req->aligned_buf->buf;
+ __entry->aligned_dma = priv_req->aligned_buf->dma;
+ __entry->aligned_buf_size = priv_req->aligned_buf->size;
+ ),
+ TP_printk("%s: req: %p, req buf %p, dma %pad a_buf %p a_dma %pad, size %d",
+ __get_str(name), __entry->req, __entry->buf, &__entry->dma,
+ __entry->aligned_buf, &__entry->aligned_dma,
+ __entry->aligned_buf_size
+ )
+);
+
+DEFINE_EVENT(cdns3_log_aligned_request, cdns3_free_aligned_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request,
+ TP_PROTO(struct cdns3_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_trb,
+ TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
+ TP_ARGS(priv_ep, trb),
+ TP_STRUCT__entry(
+ __string(name, priv_ep->name)
+ __field(struct cdns3_trb *, trb)
+ __field(u32, buffer)
+ __field(u32, length)
+ __field(u32, control)
+ __field(u32, type)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_ep->name);
+ __entry->trb = trb;
+ __entry->buffer = trb->buffer;
+ __entry->length = trb->length;
+ __entry->control = trb->control;
+ __entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
+ ),
+ TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)",
+ __get_str(name), __entry->trb, __entry->buffer,
+ TRB_LEN(__entry->length),
+ (u8)TRB_BURST_LEN_GET(__entry->length),
+ __entry->control,
+ __entry->control & TRB_CYCLE ? "C=1, " : "C=0, ",
+ __entry->control & TRB_TOGGLE ? "T=1, " : "T=0, ",
+ __entry->control & TRB_ISP ? "ISP, " : "",
+ __entry->control & TRB_FIFO_MODE ? "FIFO, " : "",
+ __entry->control & TRB_CHAIN ? "CHAIN, " : "",
+ __entry->control & TRB_IOC ? "IOC, " : "",
+ TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK"
+ )
+);
+
+DEFINE_EVENT(cdns3_log_trb, cdns3_prepare_trb,
+ TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
+ TP_ARGS(priv_ep, trb)
+);
+
+DEFINE_EVENT(cdns3_log_trb, cdns3_complete_trb,
+ TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb),
+ TP_ARGS(priv_ep, trb)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_ring,
+ TP_PROTO(struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_ep),
+ TP_STRUCT__entry(
+ __dynamic_array(u8, ring, TRB_RING_SIZE)
+ __dynamic_array(u8, priv_ep, sizeof(struct cdns3_endpoint))
+ __dynamic_array(char, buffer,
+ (TRBS_PER_SEGMENT * 65) + CDNS3_MSG_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(priv_ep), priv_ep,
+ sizeof(struct cdns3_endpoint));
+ memcpy(__get_dynamic_array(ring), priv_ep->trb_pool,
+ TRB_RING_SIZE);
+ ),
+
+ TP_printk("%s",
+ cdns3_dbg_ring((struct cdns3_endpoint *)__get_str(priv_ep),
+ (struct cdns3_trb *)__get_str(ring),
+ __get_str(buffer)))
+);
+
+DEFINE_EVENT(cdns3_log_ring, cdns3_ring,
+ TP_PROTO(struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_ep)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_ep,
+ TP_PROTO(struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_ep),
+ TP_STRUCT__entry(
+ __string(name, priv_ep->name)
+ __field(unsigned int, maxpacket)
+ __field(unsigned int, maxpacket_limit)
+ __field(unsigned int, max_streams)
+ __field(unsigned int, maxburst)
+ __field(unsigned int, flags)
+ __field(unsigned int, dir)
+ __field(u8, enqueue)
+ __field(u8, dequeue)
+ ),
+ TP_fast_assign(
+ __assign_str(name, priv_ep->name);
+ __entry->maxpacket = priv_ep->endpoint.maxpacket;
+ __entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit;
+ __entry->max_streams = priv_ep->endpoint.max_streams;
+ __entry->maxburst = priv_ep->endpoint.maxburst;
+ __entry->flags = priv_ep->flags;
+ __entry->dir = priv_ep->dir;
+ __entry->enqueue = priv_ep->enqueue;
+ __entry->dequeue = priv_ep->dequeue;
+ ),
+ TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, "
+ "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s",
+ __get_str(name), __entry->maxpacket,
+ __entry->maxpacket_limit, __entry->max_streams,
+ __entry->maxburst, __entry->enqueue,
+ __entry->dequeue,
+ __entry->flags & EP_ENABLED ? "EN | " : "",
+ __entry->flags & EP_STALLED ? "STALLED | " : "",
+ __entry->flags & EP_WEDGE ? "WEDGE | " : "",
+ __entry->flags & EP_TRANSFER_STARTED ? "STARTED | " : "",
+ __entry->flags & EP_UPDATE_EP_TRBADDR ? "UPD TRB | " : "",
+ __entry->flags & EP_PENDING_REQUEST ? "REQ PEN | " : "",
+ __entry->flags & EP_RING_FULL ? "RING FULL |" : "",
+ __entry->flags & EP_CLAIMED ? "CLAIMED " : "",
+ __entry->dir ? "IN" : "OUT"
+ )
+);
+
+DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_enable,
+ TP_PROTO(struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_ep)
+);
+
+DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_disable,
+ TP_PROTO(struct cdns3_endpoint *priv_ep),
+ TP_ARGS(priv_ep)
+);
+
+DECLARE_EVENT_CLASS(cdns3_log_request_handled,
+ TP_PROTO(struct cdns3_request *priv_req, int current_index,
+ int handled),
+ TP_ARGS(priv_req, current_index, handled),
+ TP_STRUCT__entry(
+ __field(struct cdns3_request *, priv_req)
+ __field(unsigned int, dma_position)
+ __field(unsigned int, handled)
+ __field(unsigned int, dequeue_idx)
+ __field(unsigned int, enqueue_idx)
+ __field(unsigned int, start_trb)
+ __field(unsigned int, end_trb)
+ ),
+ TP_fast_assign(
+ __entry->priv_req = priv_req;
+ __entry->dma_position = current_index;
+ __entry->handled = handled;
+ __entry->dequeue_idx = priv_req->priv_ep->dequeue;
+ __entry->enqueue_idx = priv_req->priv_ep->enqueue;
+ __entry->start_trb = priv_req->start_trb;
+ __entry->end_trb = priv_req->end_trb;
+ ),
+ TP_printk("Req: %p %s, DMA pos: %d, ep deq: %d, ep enq: %d,"
+ " start trb: %d, end trb: %d",
+ __entry->priv_req,
+ __entry->handled ? "handled" : "not handled",
+ __entry->dma_position, __entry->dequeue_idx,
+ __entry->enqueue_idx, __entry->start_trb,
+ __entry->end_trb
+ )
+);
+
+DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled,
+ TP_PROTO(struct cdns3_request *priv_req, int current_index,
+ int handled),
+ TP_ARGS(priv_req, current_index, handled)
+);
+#endif /* __LINUX_CDNS3_TRACE */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index eb37ebfcb123..ae850b3fddf2 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -6,6 +6,7 @@ config USB_CHIPIDEA
select EXTCON
select RESET_CONTROLLER
select USB_ULPI_BUS
+ select USB_ROLE_SWITCH
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6a2cc5cd0281..6911aef500e9 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -16,6 +16,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
#include <linux/ulpi/interface.h>
/******************************************************************************
@@ -217,6 +218,7 @@ struct ci_hdrc {
ktime_t hr_timeouts[NUM_OTG_FSM_TIMERS];
unsigned enabled_otg_timer_bits;
enum otg_fsm_timer next_otg_timer;
+ struct usb_role_switch *role_switch;
struct work_struct work;
struct workqueue_struct *wq;
@@ -290,6 +292,16 @@ static inline void ci_role_stop(struct ci_hdrc *ci)
ci->roles[role]->stop(ci);
}
+static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
+{
+ if (ci->role == CI_ROLE_HOST)
+ return USB_ROLE_HOST;
+ else if (ci->role == CI_ROLE_GADGET && ci->vbus_active)
+ return USB_ROLE_DEVICE;
+ else
+ return USB_ROLE_NONE;
+}
+
/**
* hw_read_id_reg: reads from a identification register
* @ci: the controller
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index b5abfe89190c..df8812c30640 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -454,9 +454,11 @@ err_clk:
imx_disable_unprepare_clks(dev);
disable_hsic_regulator:
if (data->hsic_pad_regulator)
- ret = regulator_disable(data->hsic_pad_regulator);
+ /* don't overwrite original ret (cf. EPROBE_DEFER) */
+ regulator_disable(data->hsic_pad_regulator);
if (pdata.flags & CI_HDRC_PMQOS)
pm_qos_remove_request(&data->pm_qos_req);
+ data->ci_pdev = NULL;
return ret;
}
@@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
- ci_hdrc_remove_device(data->ci_pdev);
+ if (data->ci_pdev)
+ ci_hdrc_remove_device(data->ci_pdev);
if (data->override_phy_control)
usb_phy_shutdown(data->phy);
- imx_disable_unprepare_clks(&pdev->dev);
- if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
- if (data->hsic_pad_regulator)
- regulator_disable(data->hsic_pad_regulator);
+ if (data->ci_pdev) {
+ imx_disable_unprepare_clks(&pdev->dev);
+ if (data->plat_data->flags & CI_HDRC_PMQOS)
+ pm_qos_remove_request(&data->pm_qos_req);
+ if (data->hsic_pad_regulator)
+ regulator_disable(data->hsic_pad_regulator);
+ }
return 0;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index bb4645a8ca46..af648ba6544d 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -216,13 +216,13 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
ci->rcdev.ops = &ci_hdrc_msm_reset_ops;
ci->rcdev.of_node = pdev->dev.of_node;
ci->rcdev.nr_resets = 2;
- ret = reset_controller_register(&ci->rcdev);
+ ret = devm_reset_controller_register(&pdev->dev, &ci->rcdev);
if (ret)
return ret;
ret = clk_prepare_enable(ci->fs_clk);
if (ret)
- goto err_fs;
+ return ret;
reset_control_assert(reset);
usleep_range(10000, 12000);
@@ -232,7 +232,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
ret = clk_prepare_enable(ci->core_clk);
if (ret)
- goto err_fs;
+ return ret;
ret = clk_prepare_enable(ci->iface_clk);
if (ret)
@@ -271,8 +271,6 @@ err_mux:
clk_disable_unprepare(ci->iface_clk);
err_iface:
clk_disable_unprepare(ci->core_clk);
-err_fs:
- reset_controller_unregister(&ci->rcdev);
return ret;
}
@@ -284,7 +282,6 @@ static int ci_hdrc_msm_remove(struct platform_device *pdev)
ci_hdrc_remove_device(ci->ci);
clk_disable_unprepare(ci->iface_clk);
clk_disable_unprepare(ci->core_clk);
- reset_controller_unregister(&ci->rcdev);
return 0;
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 26062d610c20..98ee575ee500 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -600,6 +600,71 @@ static int ci_cable_notifier(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
}
+static enum usb_role ci_usb_role_switch_get(struct device *dev)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+ enum usb_role role;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ci->lock, flags);
+ role = ci_role_to_usb_role(ci);
+ spin_unlock_irqrestore(&ci->lock, flags);
+
+ return role;
+}
+
+static int ci_usb_role_switch_set(struct device *dev, enum usb_role role)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+ struct ci_hdrc_cable *cable = NULL;
+ enum usb_role current_role = ci_role_to_usb_role(ci);
+ unsigned long flags;
+
+ if (current_role == role)
+ return 0;
+
+ pm_runtime_get_sync(ci->dev);
+ /* Stop current role */
+ spin_lock_irqsave(&ci->lock, flags);
+ if (current_role == USB_ROLE_DEVICE)
+ cable = &ci->platdata->vbus_extcon;
+ else if (current_role == USB_ROLE_HOST)
+ cable = &ci->platdata->id_extcon;
+
+ if (cable) {
+ cable->changed = true;
+ cable->connected = false;
+ ci_irq(ci->irq, ci);
+ spin_unlock_irqrestore(&ci->lock, flags);
+ if (ci->wq && role != USB_ROLE_NONE)
+ flush_workqueue(ci->wq);
+ spin_lock_irqsave(&ci->lock, flags);
+ }
+
+ cable = NULL;
+
+ /* Start target role */
+ if (role == USB_ROLE_DEVICE)
+ cable = &ci->platdata->vbus_extcon;
+ else if (role == USB_ROLE_HOST)
+ cable = &ci->platdata->id_extcon;
+
+ if (cable) {
+ cable->changed = true;
+ cable->connected = true;
+ ci_irq(ci->irq, ci);
+ }
+ spin_unlock_irqrestore(&ci->lock, flags);
+ pm_runtime_put_sync(ci->dev);
+
+ return 0;
+}
+
+static struct usb_role_switch_desc ci_role_switch = {
+ .set = ci_usb_role_switch_set,
+ .get = ci_usb_role_switch_get,
+};
+
static int ci_get_platdata(struct device *dev,
struct ci_hdrc_platform_data *platdata)
{
@@ -726,6 +791,9 @@ static int ci_get_platdata(struct device *dev,
cable->connected = false;
}
+ if (device_property_read_bool(dev, "usb-role-switch"))
+ ci_role_switch.fwnode = dev->fwnode;
+
platdata->pctl = devm_pinctrl_get(dev);
if (!IS_ERR(platdata->pctl)) {
struct pinctrl_state *p;
@@ -903,10 +971,7 @@ static struct attribute *ci_attrs[] = {
&dev_attr_role.attr,
NULL,
};
-
-static const struct attribute_group ci_attr_group = {
- .attrs = ci_attrs,
-};
+ATTRIBUTE_GROUPS(ci);
static int ci_hdrc_probe(struct platform_device *pdev)
{
@@ -1008,7 +1073,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci->irq = platform_get_irq(pdev, 0);
if (ci->irq < 0) {
- dev_err(dev, "missing IRQ\n");
ret = ci->irq;
goto deinit_phy;
}
@@ -1051,6 +1115,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
+ if (ci_role_switch.fwnode) {
+ ci->role_switch = usb_role_switch_register(dev,
+ &ci_role_switch);
+ if (IS_ERR(ci->role_switch)) {
+ ret = PTR_ERR(ci->role_switch);
+ goto deinit_otg;
+ }
+ }
+
if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
if (ci->is_otg) {
ci->role = ci_otg_role(ci);
@@ -1106,15 +1179,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
dbg_create_files(ci);
- ret = sysfs_create_group(&dev->kobj, &ci_attr_group);
- if (ret)
- goto remove_debug;
-
return 0;
-remove_debug:
- dbg_remove_files(ci);
stop:
+ if (ci->role_switch)
+ usb_role_switch_unregister(ci->role_switch);
+deinit_otg:
if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
ci_hdrc_otg_destroy(ci);
deinit_gadget:
@@ -1133,6 +1203,9 @@ static int ci_hdrc_remove(struct platform_device *pdev)
{
struct ci_hdrc *ci = platform_get_drvdata(pdev);
+ if (ci->role_switch)
+ usb_role_switch_unregister(ci->role_switch);
+
if (ci->supports_runtime_pm) {
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1140,7 +1213,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
}
dbg_remove_files(ci);
- sysfs_remove_group(&ci->dev->kobj, &ci_attr_group);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_exit(ci);
@@ -1319,6 +1391,7 @@ static struct platform_driver ci_hdrc_driver = {
.driver = {
.name = "ci_hdrc",
.pm = &ci_pm_ops,
+ .dev_groups = ci_groups,
},
};
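For reference: this series converts several drivers from manual sysfs_create_group()/sysfs_remove_group() calls to ATTRIBUTE_GROUPS() plus the driver core's dev_groups hook. A simplified sketch of roughly what ATTRIBUTE_GROUPS(ci) expands to (see <linux/sysfs.h> for the exact macro definition):

/* Simplified expansion sketch, not literal kernel source */
static const struct attribute_group ci_group = {
	.attrs = ci_attrs,		/* attribute array defined earlier in core.c */
};

static const struct attribute_group *ci_groups[] = {
	&ci_group,
	NULL,
};

With .dev_groups = ci_groups set on the driver, the driver core creates and removes the sysfs files around probe and remove, which is why the explicit sysfs_create_group()/sysfs_remove_group() calls and their error paths can be dropped.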
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index f25d4827fd49..fbfb02e05c97 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -35,7 +35,7 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
* detection overwrite OTGSC register value
*/
cable = &ci->platdata->vbus_extcon;
- if (!IS_ERR(cable->edev)) {
+ if (!IS_ERR(cable->edev) || ci->role_switch) {
if (cable->changed)
val |= OTGSC_BSVIS;
else
@@ -53,7 +53,7 @@ u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
}
cable = &ci->platdata->id_extcon;
- if (!IS_ERR(cable->edev)) {
+ if (!IS_ERR(cable->edev) || ci->role_switch) {
if (cable->changed)
val |= OTGSC_IDIS;
else
@@ -83,7 +83,7 @@ void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
struct ci_hdrc_cable *cable;
cable = &ci->platdata->vbus_extcon;
- if (!IS_ERR(cable->edev)) {
+ if (!IS_ERR(cable->edev) || ci->role_switch) {
if (data & mask & OTGSC_BSVIS)
cable->changed = false;
@@ -97,7 +97,7 @@ void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
}
cable = &ci->platdata->id_extcon;
- if (!IS_ERR(cable->edev)) {
+ if (!IS_ERR(cable->edev) || ci->role_switch) {
if (data & mask & OTGSC_IDIS)
cable->changed = false;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6a5ee8e6da10..8f18e7b6cadf 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
- spin_lock_irqsave(&ci->lock, flags);
- ci->gadget.speed = USB_SPEED_UNKNOWN;
- ci->remote_wakeup = 0;
- ci->suspended = 0;
- spin_unlock_irqrestore(&ci->lock, flags);
-
/* flush all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
ci->status = NULL;
}
+ spin_lock_irqsave(&ci->lock, flags);
+ ci->gadget.speed = USB_SPEED_UNKNOWN;
+ ci->remote_wakeup = 0;
+ ci->suspended = 0;
+ spin_unlock_irqrestore(&ci->lock, flags);
+
return 0;
}
@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
return -EBUSY;
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return 0;
+ }
/* only internal SW should disable ctrl endpts */
@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return 0;
+ }
retval = _ep_queue(ep, req, gfp_flags);
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
-
- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
}
spin_lock_irqsave(hwep->lock, flags);
+ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return;
+ }
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
int ret = 0;
spin_lock_irqsave(&ci->lock, flags);
+ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+ spin_unlock_irqrestore(&ci->lock, flags);
+ return 0;
+ }
if (!ci->remote_wakeup) {
ret = -EOPNOTSUPP;
goto out;
@@ -1746,12 +1762,11 @@ static int ci_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
- int retval = -ENOMEM;
+ int retval;
if (driver->disconnect == NULL)
return -EINVAL;
-
ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
retval = usb_ep_enable(&ci->ep0out->ep);
if (retval)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 183b41753c98..62f4fb9b362f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1301,10 +1301,6 @@ made_compressed_probe:
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- minor = acm_alloc_minor(acm);
- if (minor < 0)
- goto alloc_fail1;
-
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1312,6 +1308,13 @@ made_compressed_probe:
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
+
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+ if (minor < 0)
+ goto alloc_fail1;
+
acm->minor = minor;
acm->dev = usb_dev;
if (h.usb_cdc_acm_descriptor)
@@ -1458,7 +1461,6 @@ skip_countries:
usb_driver_claim_interface(&acm_driver, data_interface, acm);
usb_set_intfdata(data_interface, acm);
- usb_get_intf(control_interface);
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a7824a51f86d..70afb2ca1eab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
{
struct wdm_device *desc = file->private_data;
- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+ wait_event(desc->wait,
+ /*
+ * We need both flags. We cannot make do with
+ * one, because resetting it would cause a race
+ * with write(), yet we still need to signal
+ * a disconnect.
+ */
+ !test_bit(WDM_IN_USE, &desc->flags) ||
+ test_bit(WDM_DISCONNECTING, &desc->flags));
/* cannot dereference desc->intf if WDM_DISCONNECTING */
- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ return -ENODEV;
+ if (desc->werr < 0)
dev_err(&desc->intf->dev, "Error in flush path: %d\n",
desc->werr);
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
spin_lock_irqsave(&desc->iuspin, flags);
set_bit(WDM_DISCONNECTING, &desc->flags);
set_bit(WDM_READ, &desc->flags);
- /* to terminate pending flushes */
- clear_bit(WDM_IN_USE, &desc->flags);
spin_unlock_irqrestore(&desc->iuspin, flags);
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 407a7a6198a2..7fea4999d352 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1082,6 +1082,12 @@ static ssize_t ieee1284_id_show(struct device *dev, struct device_attribute *att
static DEVICE_ATTR_RO(ieee1284_id);
+static struct attribute *usblp_attrs[] = {
+ &dev_attr_ieee1284_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(usblp);
+
static int usblp_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1156,9 +1162,6 @@ static int usblp_probe(struct usb_interface *intf,
/* Retrieve and store the device ID string. */
usblp_cache_device_id_string(usblp);
- retval = device_create_file(&intf->dev, &dev_attr_ieee1284_id);
- if (retval)
- goto abort_intfdata;
#ifdef DEBUG
usblp_check_status(usblp, 0);
@@ -1189,7 +1192,6 @@ static int usblp_probe(struct usb_interface *intf,
abort_intfdata:
usb_set_intfdata(intf, NULL);
- device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
abort:
kfree(usblp->readbuf);
kfree(usblp->statusbuf);
@@ -1360,8 +1362,6 @@ static void usblp_disconnect(struct usb_interface *intf)
BUG();
}
- device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
-
mutex_lock(&usblp_mutex);
mutex_lock(&usblp->mut);
usblp->present = 0;
@@ -1421,6 +1421,7 @@ static struct usb_driver usblp_driver = {
.suspend = usblp_suspend,
.resume = usblp_resume,
.id_table = usblp_ids,
+ .dev_groups = usblp_groups,
.supports_autosuspend = 1,
};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 4942122b2346..dcd7066ffba2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1836,17 +1836,14 @@ capability_attribute(device_capabilities);
capability_attribute(usb488_interface_capabilities);
capability_attribute(usb488_device_capabilities);
-static struct attribute *capability_attrs[] = {
+static struct attribute *usbtmc_attrs[] = {
&dev_attr_interface_capabilities.attr,
&dev_attr_device_capabilities.attr,
&dev_attr_usb488_interface_capabilities.attr,
&dev_attr_usb488_device_capabilities.attr,
NULL,
};
-
-static const struct attribute_group capability_attr_grp = {
- .attrs = capability_attrs,
-};
+ATTRIBUTE_GROUPS(usbtmc);
static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
{
@@ -2362,8 +2359,11 @@ static int usbtmc_probe(struct usb_interface *intf,
goto err_put;
}
+ retcode = -EINVAL;
data->bulk_in = bulk_in->bEndpointAddress;
data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
+ if (!data->wMaxPacketSize)
+ goto err_put;
dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
data->bulk_out = bulk_out->bEndpointAddress;
@@ -2383,9 +2383,6 @@ static int usbtmc_probe(struct usb_interface *intf,
retcode = get_capabilities(data);
if (retcode)
dev_err(&intf->dev, "can't read capabilities\n");
- else
- retcode = sysfs_create_group(&intf->dev.kobj,
- &capability_attr_grp);
if (data->iin_ep_present) {
/* allocate int urb */
@@ -2432,7 +2429,6 @@ static int usbtmc_probe(struct usb_interface *intf,
return 0;
error_register:
- sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
usbtmc_free_int(data);
err_put:
kref_put(&data->kref, usbtmc_delete);
@@ -2445,7 +2441,6 @@ static void usbtmc_disconnect(struct usb_interface *intf)
struct list_head *elem;
usb_deregister_dev(intf, &usbtmc_class);
- sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
wake_up_interruptible_all(&data->waitq);
@@ -2551,6 +2546,7 @@ static struct usb_driver usbtmc_driver = {
.resume = usbtmc_resume,
.pre_reset = usbtmc_pre_reset,
.post_reset = usbtmc_post_reset,
+ .dev_groups = usbtmc_groups,
};
module_usb_driver(usbtmc_driver);
diff --git a/drivers/usb/common/Kconfig b/drivers/usb/common/Kconfig
new file mode 100644
index 000000000000..d611477aae41
--- /dev/null
+++ b/drivers/usb/common/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_COMMON
+ tristate
+
+
+config USB_LED_TRIG
+ bool "USB LED Triggers"
+ depends on LEDS_CLASS && LEDS_TRIGGERS
+ select USB_COMMON
+ help
+ This option adds LED triggers for USB host and/or gadget activity.
+
+ Say Y here if you are working on a system with led-class supported
+ LEDs and you want to use them as activity indicators for USB host or
+ gadget.
+
+config USB_ULPI_BUS
+ tristate "USB ULPI PHY interface support"
+ select USB_COMMON
+ help
+ UTMI+ Low Pin Interface (ULPI) is a specification for a commonly used
+ USB 2.0 PHY interface. The ULPI specification defines a standard set
+ of registers that can be used to detect the vendor and product which
+ allows ULPI to be handled as a bus. This module is the driver for that
+ bus.
+
+ The ULPI interfaces (the buses) are registered by the drivers for USB
+ controllers which support ULPI register access and have a ULPI PHY
+ attached to them. The ULPI PHY drivers themselves are normal PHY
+ drivers.
+
+ ULPI PHYs often provide functions such as ADP sensing/probing (OTG
+ protocol) and USB charger detection.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ulpi.
+
+config USB_CONN_GPIO
+ tristate "USB GPIO Based Connection Detection Driver"
+ depends on GPIOLIB
+ select USB_ROLE_SWITCH
+ help
+ The driver supports USB role switching between host and device via
+ GPIO-based USB cable detection. It is typically used when an input
+ GPIO detects the USB ID pin, and another input GPIO may also be used
+ to detect the VBUS pin; it can likewise enable or disable the device
+ when only a VBUS-detect GPIO is available.
+
+ To compile the driver as a module, choose M here: the module will
+ be called usb-conn-gpio.ko
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index 0a7c45e85481..8ac4d21ef5c8 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -5,7 +5,9 @@
obj-$(CONFIG_USB_COMMON) += usb-common.o
usb-common-y += common.o
+usb-common-$(CONFIG_TRACING) += debug.o
usb-common-$(CONFIG_USB_LED_TRIG) += led.o
+obj-$(CONFIG_USB_CONN_GPIO) += usb-conn-gpio.o
obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
new file mode 100644
index 000000000000..92a986aeaa5d
--- /dev/null
+++ b/drivers/usb/common/debug.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Common USB debugging functions
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#include <linux/usb/ch9.h>
+
+static void usb_decode_get_status(__u8 bRequestType, __u16 wIndex,
+ __u16 wLength, char *str, size_t size)
+{
+ switch (bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ snprintf(str, size, "Get Device Status(Length = %d)", wLength);
+ break;
+ case USB_RECIP_INTERFACE:
+ snprintf(str, size,
+ "Get Interface Status(Intf = %d, Length = %d)",
+ wIndex, wLength);
+ break;
+ case USB_RECIP_ENDPOINT:
+ snprintf(str, size, "Get Endpoint Status(ep%d%s)",
+ wIndex & ~USB_DIR_IN,
+ wIndex & USB_DIR_IN ? "in" : "out");
+ break;
+ }
+}
+
+static const char *usb_decode_device_feature(u16 wValue)
+{
+ switch (wValue) {
+ case USB_DEVICE_SELF_POWERED:
+ return "Self Powered";
+ case USB_DEVICE_REMOTE_WAKEUP:
+ return "Remote Wakeup";
+ case USB_DEVICE_TEST_MODE:
+ return "Test Mode";
+ case USB_DEVICE_U1_ENABLE:
+ return "U1 Enable";
+ case USB_DEVICE_U2_ENABLE:
+ return "U2 Enable";
+ case USB_DEVICE_LTM_ENABLE:
+ return "LTM Enable";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static const char *usb_decode_test_mode(u16 wIndex)
+{
+ switch (wIndex) {
+ case TEST_J:
+ return ": TEST_J";
+ case TEST_K:
+ return ": TEST_K";
+ case TEST_SE0_NAK:
+ return ": TEST_SE0_NAK";
+ case TEST_PACKET:
+ return ": TEST_PACKET";
+ case TEST_FORCE_EN:
+ return ": TEST_FORCE_EN";
+ default:
+ return ": UNKNOWN";
+ }
+}
+
+static void usb_decode_set_clear_feature(__u8 bRequestType,
+ __u8 bRequest, __u16 wValue,
+ __u16 wIndex, char *str, size_t size)
+{
+ switch (bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ snprintf(str, size, "%s Device Feature(%s%s)",
+ bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+ usb_decode_device_feature(wValue),
+ wValue == USB_DEVICE_TEST_MODE ?
+ usb_decode_test_mode(wIndex) : "");
+ break;
+ case USB_RECIP_INTERFACE:
+ snprintf(str, size, "%s Interface Feature(%s)",
+ bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+ wValue == USB_INTRF_FUNC_SUSPEND ?
+ "Function Suspend" : "UNKNOWN");
+ break;
+ case USB_RECIP_ENDPOINT:
+ snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)",
+ bRequest == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+ wValue == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN",
+ wIndex & ~USB_DIR_IN,
+ wIndex & USB_DIR_IN ? "in" : "out");
+ break;
+ }
+}
+
+static void usb_decode_set_address(__u16 wValue, char *str, size_t size)
+{
+ snprintf(str, size, "Set Address(Addr = %02x)", wValue);
+}
+
+static void usb_decode_get_set_descriptor(__u8 bRequestType, __u8 bRequest,
+ __u16 wValue, __u16 wIndex,
+ __u16 wLength, char *str, size_t size)
+{
+ char *s;
+
+ switch (wValue >> 8) {
+ case USB_DT_DEVICE:
+ s = "Device";
+ break;
+ case USB_DT_CONFIG:
+ s = "Configuration";
+ break;
+ case USB_DT_STRING:
+ s = "String";
+ break;
+ case USB_DT_INTERFACE:
+ s = "Interface";
+ break;
+ case USB_DT_ENDPOINT:
+ s = "Endpoint";
+ break;
+ case USB_DT_DEVICE_QUALIFIER:
+ s = "Device Qualifier";
+ break;
+ case USB_DT_OTHER_SPEED_CONFIG:
+ s = "Other Speed Config";
+ break;
+ case USB_DT_INTERFACE_POWER:
+ s = "Interface Power";
+ break;
+ case USB_DT_OTG:
+ s = "OTG";
+ break;
+ case USB_DT_DEBUG:
+ s = "Debug";
+ break;
+ case USB_DT_INTERFACE_ASSOCIATION:
+ s = "Interface Association";
+ break;
+ case USB_DT_BOS:
+ s = "BOS";
+ break;
+ case USB_DT_DEVICE_CAPABILITY:
+ s = "Device Capability";
+ break;
+ case USB_DT_PIPE_USAGE:
+ s = "Pipe Usage";
+ break;
+ case USB_DT_SS_ENDPOINT_COMP:
+ s = "SS Endpoint Companion";
+ break;
+ case USB_DT_SSP_ISOC_ENDPOINT_COMP:
+ s = "SSP Isochronous Endpoint Companion";
+ break;
+ default:
+ s = "UNKNOWN";
+ break;
+ }
+
+ snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)",
+ bRequest == USB_REQ_GET_DESCRIPTOR ? "Get" : "Set",
+ s, wValue & 0xff, wLength);
+}
+
+static void usb_decode_get_configuration(__u16 wLength, char *str, size_t size)
+{
+ snprintf(str, size, "Get Configuration(Length = %d)", wLength);
+}
+
+static void usb_decode_set_configuration(__u8 wValue, char *str, size_t size)
+{
+ snprintf(str, size, "Set Configuration(Config = %d)", wValue);
+}
+
+static void usb_decode_get_intf(__u16 wIndex, __u16 wLength, char *str,
+ size_t size)
+{
+ snprintf(str, size, "Get Interface(Intf = %d, Length = %d)",
+ wIndex, wLength);
+}
+
+static void usb_decode_set_intf(__u8 wValue, __u16 wIndex, char *str,
+ size_t size)
+{
+ snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)",
+ wIndex, wValue);
+}
+
+static void usb_decode_synch_frame(__u16 wIndex, __u16 wLength,
+ char *str, size_t size)
+{
+ snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)",
+ wIndex, wLength);
+}
+
+static void usb_decode_set_sel(__u16 wLength, char *str, size_t size)
+{
+ snprintf(str, size, "Set SEL(Length = %d)", wLength);
+}
+
+static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size)
+{
+ snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
+}
+
+/**
+ * usb_decode_ctrl - return a human-readable description of a control request
+ * @str: buffer for the decoded text
+ * @size: size of @str in bytes
+ * @bRequestType: USB bmRequestType field
+ * @bRequest: USB bRequest field
+ * @wValue: USB wValue field (CPU byte order)
+ * @wIndex: USB wIndex field (CPU byte order)
+ * @wLength: USB wLength field (CPU byte order)
+ *
+ * Returns @str.
+ */
+const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ switch (bRequest) {
+ case USB_REQ_GET_STATUS:
+ usb_decode_get_status(bRequestType, wIndex, wLength, str, size);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ usb_decode_set_clear_feature(bRequestType, bRequest, wValue,
+ wIndex, str, size);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ usb_decode_set_address(wValue, str, size);
+ break;
+ case USB_REQ_GET_DESCRIPTOR:
+ case USB_REQ_SET_DESCRIPTOR:
+ usb_decode_get_set_descriptor(bRequestType, bRequest, wValue,
+ wIndex, wLength, str, size);
+ break;
+ case USB_REQ_GET_CONFIGURATION:
+ usb_decode_get_configuration(wLength, str, size);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ usb_decode_set_configuration(wValue, str, size);
+ break;
+ case USB_REQ_GET_INTERFACE:
+ usb_decode_get_intf(wIndex, wLength, str, size);
+ break;
+ case USB_REQ_SET_INTERFACE:
+ usb_decode_set_intf(wValue, wIndex, str, size);
+ break;
+ case USB_REQ_SYNCH_FRAME:
+ usb_decode_synch_frame(wIndex, wLength, str, size);
+ break;
+ case USB_REQ_SET_SEL:
+ usb_decode_set_sel(wLength, str, size);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ usb_decode_set_isoch_delay(wValue, str, size);
+ break;
+ default:
+ snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
+ bRequestType, bRequest,
+ (u8)(cpu_to_le16(wValue) & 0xff),
+ (u8)(cpu_to_le16(wValue) >> 8),
+ (u8)(cpu_to_le16(wIndex) & 0xff),
+ (u8)(cpu_to_le16(wIndex) >> 8),
+ (u8)(cpu_to_le16(wLength) & 0xff),
+ (u8)(cpu_to_le16(wLength) >> 8));
+ }
+
+ return str;
+}
+EXPORT_SYMBOL_GPL(usb_decode_ctrl);
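A minimal, hypothetical caller (not part of this patch) showing how usb_decode_ctrl() can be used to log a SETUP packet; the 128-byte buffer is an assumed size, mirroring CDNS3_MSG_MAX in the cdns3 tracepoints above:

/* Hypothetical example; assumes usb_decode_ctrl() is declared for the caller */
#include <linux/device.h>
#include <linux/usb/ch9.h>

static void example_log_setup(struct device *dev,
			      const struct usb_ctrlrequest *ctrl)
{
	char buf[128];	/* assumed to be large enough for the decoded text */

	dev_dbg(dev, "SETUP: %s\n",
		usb_decode_ctrl(buf, sizeof(buf), ctrl->bRequestType,
				ctrl->bRequest, le16_to_cpu(ctrl->wValue),
				le16_to_cpu(ctrl->wIndex),
				le16_to_cpu(ctrl->wLength)));
}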
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
new file mode 100644
index 000000000000..87338f9eb5be
--- /dev/null
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB GPIO Based Connection Detection Driver
+ *
+ * Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Some code borrowed from drivers/extcon/extcon-usb-gpio.c
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/role.h>
+
+#define USB_GPIO_DEB_MS 20 /* ms */
+#define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */
+
+#define USB_CONN_IRQF \
+ (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT)
+
+struct usb_conn_info {
+ struct device *dev;
+ struct usb_role_switch *role_sw;
+ enum usb_role last_role;
+ struct regulator *vbus;
+ struct delayed_work dw_det;
+ unsigned long debounce_jiffies;
+
+ struct gpio_desc *id_gpiod;
+ struct gpio_desc *vbus_gpiod;
+ int id_irq;
+ int vbus_irq;
+};
+
+/**
+ * "DEVICE" = VBUS and "HOST" = !ID, so we have:
+ * Both "DEVICE" and "HOST" can't be set as active at the same time
+ * so if "HOST" is active (i.e. ID is 0) we keep "DEVICE" inactive
+ * even if VBUS is on.
+ *
+ * Role | ID | VBUS
+ * ------------------------------------
+ * [1] DEVICE | H | H
+ * [2] NONE | H | L
+ * [3] HOST | L | H
+ * [4] HOST | L | L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID
+ */
+static void usb_conn_detect_cable(struct work_struct *work)
+{
+ struct usb_conn_info *info;
+ enum usb_role role;
+ int id, vbus, ret;
+
+ info = container_of(to_delayed_work(work),
+ struct usb_conn_info, dw_det);
+
+ /* check ID and VBUS */
+ id = info->id_gpiod ?
+ gpiod_get_value_cansleep(info->id_gpiod) : 1;
+ vbus = info->vbus_gpiod ?
+ gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+ if (!id)
+ role = USB_ROLE_HOST;
+ else if (vbus)
+ role = USB_ROLE_DEVICE;
+ else
+ role = USB_ROLE_NONE;
+
+ dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n",
+ info->last_role, role, id, vbus);
+
+ if (info->last_role == role) {
+ dev_warn(info->dev, "repeated role: %d\n", role);
+ return;
+ }
+
+ if (info->last_role == USB_ROLE_HOST)
+ regulator_disable(info->vbus);
+
+ ret = usb_role_switch_set_role(info->role_sw, role);
+ if (ret)
+ dev_err(info->dev, "failed to set role: %d\n", ret);
+
+ if (role == USB_ROLE_HOST) {
+ ret = regulator_enable(info->vbus);
+ if (ret)
+ dev_err(info->dev, "enable vbus regulator failed\n");
+ }
+
+ info->last_role = role;
+
+ dev_dbg(info->dev, "vbus regulator is %s\n",
+ regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
+}
+
+static void usb_conn_queue_dwork(struct usb_conn_info *info,
+ unsigned long delay)
+{
+ queue_delayed_work(system_power_efficient_wq, &info->dw_det, delay);
+}
+
+static irqreturn_t usb_conn_isr(int irq, void *dev_id)
+{
+ struct usb_conn_info *info = dev_id;
+
+ usb_conn_queue_dwork(info, info->debounce_jiffies);
+
+ return IRQ_HANDLED;
+}
+
+static int usb_conn_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_conn_info *info;
+ int ret = 0;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->id_gpiod = devm_gpiod_get_optional(dev, "id", GPIOD_IN);
+ if (IS_ERR(info->id_gpiod))
+ return PTR_ERR(info->id_gpiod);
+
+ info->vbus_gpiod = devm_gpiod_get_optional(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(info->vbus_gpiod))
+ return PTR_ERR(info->vbus_gpiod);
+
+ if (!info->id_gpiod && !info->vbus_gpiod) {
+ dev_err(dev, "failed to get gpios\n");
+ return -ENODEV;
+ }
+
+ if (info->id_gpiod)
+ ret = gpiod_set_debounce(info->id_gpiod, USB_GPIO_DEB_US);
+ if (!ret && info->vbus_gpiod)
+ ret = gpiod_set_debounce(info->vbus_gpiod, USB_GPIO_DEB_US);
+ if (ret < 0)
+ info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEB_MS);
+
+ INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable);
+
+ info->vbus = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(info->vbus)) {
+ dev_err(dev, "failed to get vbus\n");
+ return PTR_ERR(info->vbus);
+ }
+
+ info->role_sw = usb_role_switch_get(dev);
+ if (IS_ERR(info->role_sw)) {
+ if (PTR_ERR(info->role_sw) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get role switch\n");
+
+ return PTR_ERR(info->role_sw);
+ }
+
+ if (info->id_gpiod) {
+ info->id_irq = gpiod_to_irq(info->id_gpiod);
+ if (info->id_irq < 0) {
+ dev_err(dev, "failed to get ID IRQ\n");
+ ret = info->id_irq;
+ goto put_role_sw;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+ usb_conn_isr, USB_CONN_IRQF,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request ID IRQ\n");
+ goto put_role_sw;
+ }
+ }
+
+ if (info->vbus_gpiod) {
+ info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+ if (info->vbus_irq < 0) {
+ dev_err(dev, "failed to get VBUS IRQ\n");
+ ret = info->vbus_irq;
+ goto put_role_sw;
+ }
+
+ ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+ usb_conn_isr, USB_CONN_IRQF,
+ pdev->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request VBUS IRQ\n");
+ goto put_role_sw;
+ }
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ /* Perform initial detection */
+ usb_conn_queue_dwork(info, 0);
+
+ return 0;
+
+put_role_sw:
+ usb_role_switch_put(info->role_sw);
+ return ret;
+}
+
+static int usb_conn_remove(struct platform_device *pdev)
+{
+ struct usb_conn_info *info = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&info->dw_det);
+
+ if (info->last_role == USB_ROLE_HOST)
+ regulator_disable(info->vbus);
+
+ usb_role_switch_put(info->role_sw);
+
+ return 0;
+}
+
+static int __maybe_unused usb_conn_suspend(struct device *dev)
+{
+ struct usb_conn_info *info = dev_get_drvdata(dev);
+
+ if (info->id_gpiod)
+ disable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ disable_irq(info->vbus_irq);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused usb_conn_resume(struct device *dev)
+{
+ struct usb_conn_info *info = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+ if (info->id_gpiod)
+ enable_irq(info->id_irq);
+ if (info->vbus_gpiod)
+ enable_irq(info->vbus_irq);
+
+ usb_conn_queue_dwork(info, 0);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(usb_conn_pm_ops,
+ usb_conn_suspend, usb_conn_resume);
+
+static const struct of_device_id usb_conn_dt_match[] = {
+ { .compatible = "gpio-usb-b-connector", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, usb_conn_dt_match);
+
+static struct platform_driver usb_conn_driver = {
+ .probe = usb_conn_probe,
+ .remove = usb_conn_remove,
+ .driver = {
+ .name = "usb-conn-gpio",
+ .pm = &usb_conn_pm_ops,
+ .of_match_table = usb_conn_dt_match,
+ },
+};
+
+module_platform_driver(usb_conn_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_DESCRIPTION("USB GPIO based connection detection driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 1359b78a624e..6cf22c27f2d2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
char name[16];
int i, size;
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!is_device_dma_capable(hcd->self.sysdev) &&
- !hcd->localmem_pool))
+ if (hcd->localmem_pool || !hcd_uses_dma(hcd))
return 0;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -129,8 +127,7 @@ void *hcd_buffer_alloc(
return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
/* some USB hosts just use PIO */
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
}
@@ -160,8 +157,7 @@ void hcd_buffer_free(
return;
}
- if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- !is_device_dma_capable(bus->sysdev)) {
+ if (!hcd_uses_dma(hcd)) {
kfree(addr);
return;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 9d6cb709ca7b..151a74a54386 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
- unsigned char *buffer;
+ unsigned char *buffer, *buffer0;
int length, total_len, num, i, ssac;
__u8 cap_type;
int ret;
@@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
ret = -ENOMSG;
goto err;
}
+
+ buffer0 = buffer;
total_len -= length;
+ buffer += length;
for (i = 0; i < num; i++) {
- buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
break;
}
- total_len -= length;
-
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
@@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
default:
break;
}
+
+ total_len -= length;
+ buffer += length;
}
+ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b265ab5405f9..3f899552f6e3 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -44,10 +44,19 @@
#include "usb.h"
+#ifdef CONFIG_PM
+#define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND
+#else
+#define MAYBE_CAP_SUSPEND 0
+#endif
+
#define USB_MAXBUS 64
#define USB_DEVICE_MAX (USB_MAXBUS * 128)
#define USB_SG_SIZE 16384 /* split-size for large txs */
+/* Mutual exclusion for ps->list in resume vs. release and remove */
+static DEFINE_MUTEX(usbfs_mutex);
+
struct usb_dev_state {
struct list_head list; /* state list */
struct usb_device *dev;
@@ -57,14 +66,17 @@ struct usb_dev_state {
struct list_head async_completed;
struct list_head memory_list;
wait_queue_head_t wait; /* wake up if a request completed */
+ wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */
unsigned int discsignr;
struct pid *disc_pid;
const struct cred *cred;
sigval_t disccontext;
unsigned long ifclaimed;
u32 disabled_bulk_eps;
- bool privileges_dropped;
unsigned long interface_allowed_mask;
+ int not_yet_resumed;
+ bool suspend_allowed;
+ bool privileges_dropped;
};
struct usb_memory {
@@ -694,9 +706,7 @@ static void driver_disconnect(struct usb_interface *intf)
destroy_async_on_interface(ps, ifnum);
}
-/* The following routines are merely placeholders. There is no way
- * to inform a user task about suspend or resumes.
- */
+/* We don't care about suspend/resume of claimed interfaces */
static int driver_suspend(struct usb_interface *intf, pm_message_t msg)
{
return 0;
@@ -707,12 +717,32 @@ static int driver_resume(struct usb_interface *intf)
return 0;
}
+/* The following routines apply to the entire device, not interfaces */
+void usbfs_notify_suspend(struct usb_device *udev)
+{
+ /* We don't need to handle this */
+}
+
+void usbfs_notify_resume(struct usb_device *udev)
+{
+ struct usb_dev_state *ps;
+
+ /* Protect against simultaneous remove or release */
+ mutex_lock(&usbfs_mutex);
+ list_for_each_entry(ps, &udev->filelist, list) {
+ WRITE_ONCE(ps->not_yet_resumed, 0);
+ wake_up_all(&ps->wait_for_resume);
+ }
+ mutex_unlock(&usbfs_mutex);
+}
+
struct usb_driver usbfs_driver = {
.name = "usbfs",
.probe = driver_probe,
.disconnect = driver_disconnect,
.suspend = driver_suspend,
.resume = driver_resume,
+ .supports_autosuspend = 1,
};
static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
@@ -942,17 +972,11 @@ error:
return ret;
}
-static int match_devt(struct device *dev, const void *data)
-{
- return dev->devt == (dev_t)(unsigned long)(void *)data;
-}
-
static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
{
struct device *dev;
- dev = bus_find_device(&usb_bus_type, NULL,
- (void *) (unsigned long) devt, match_devt);
+ dev = bus_find_device_by_devt(&usb_bus_type, devt);
if (!dev)
return NULL;
return to_usb_device(dev);
@@ -997,9 +1021,12 @@ static int usbdev_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&ps->async_completed);
INIT_LIST_HEAD(&ps->memory_list);
init_waitqueue_head(&ps->wait);
+ init_waitqueue_head(&ps->wait_for_resume);
ps->disc_pid = get_pid(task_pid(current));
ps->cred = get_current_cred();
smp_wmb();
+
+ /* Can't race with resume; the device is already active */
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
usb_unlock_device(dev);
@@ -1025,7 +1052,10 @@ static int usbdev_release(struct inode *inode, struct file *file)
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
+ /* Protect against simultaneous resume */
+ mutex_lock(&usbfs_mutex);
list_del_init(&ps->list);
+ mutex_unlock(&usbfs_mutex);
for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
ifnum++) {
@@ -1033,7 +1063,8 @@ static int usbdev_release(struct inode *inode, struct file *file)
releaseintf(ps, ifnum);
}
destroy_all_async(ps);
- usb_autosuspend_device(dev);
+ if (!ps->suspend_allowed)
+ usb_autosuspend_device(dev);
usb_unlock_device(dev);
usb_put_dev(dev);
put_pid(ps->disc_pid);
@@ -1627,7 +1658,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (as->usbm)
num_sgs = 0;
- u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length +
+ u += sizeof(struct async) + sizeof(struct urb) +
+ (as->usbm ? 0 : uurb->buffer_length) +
num_sgs * sizeof(struct scatterlist);
ret = usbfs_increase_memory_usage(u);
if (ret)
@@ -1812,8 +1844,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return 0;
error:
- if (as && as->usbm)
- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
kfree(isopkt);
kfree(dr);
if (as)
@@ -2281,7 +2311,8 @@ static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP |
- USBDEVFS_CAP_DROP_PRIVILEGES | USBDEVFS_CAP_CONNINFO_EX;
+ USBDEVFS_CAP_DROP_PRIVILEGES |
+ USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND;
if (!ps->dev->bus->no_stop_on_short)
caps |= USBDEVFS_CAP_BULK_CONTINUATION;
if (ps->dev->bus->sg_tablesize)
@@ -2384,6 +2415,47 @@ static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg)
return 0;
}
+static int proc_forbid_suspend(struct usb_dev_state *ps)
+{
+ int ret = 0;
+
+ if (ps->suspend_allowed) {
+ ret = usb_autoresume_device(ps->dev);
+ if (ret == 0)
+ ps->suspend_allowed = false;
+ else if (ret != -ENODEV)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int proc_allow_suspend(struct usb_dev_state *ps)
+{
+ if (!connected(ps))
+ return -ENODEV;
+
+ WRITE_ONCE(ps->not_yet_resumed, 1);
+ if (!ps->suspend_allowed) {
+ usb_autosuspend_device(ps->dev);
+ ps->suspend_allowed = true;
+ }
+ return 0;
+}
+
+static int proc_wait_for_resume(struct usb_dev_state *ps)
+{
+ int ret;
+
+ usb_unlock_device(ps->dev);
+ ret = wait_event_interruptible(ps->wait_for_resume,
+ READ_ONCE(ps->not_yet_resumed) == 0);
+ usb_lock_device(ps->dev);
+
+ if (ret != 0)
+ return -EINTR;
+ return proc_forbid_suspend(ps);
+}
+
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@@ -2578,6 +2650,15 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_GET_SPEED:
ret = ps->dev->speed;
break;
+ case USBDEVFS_FORBID_SUSPEND:
+ ret = proc_forbid_suspend(ps);
+ break;
+ case USBDEVFS_ALLOW_SUSPEND:
+ ret = proc_allow_suspend(ps);
+ break;
+ case USBDEVFS_WAIT_FOR_RESUME:
+ ret = proc_wait_for_resume(ps);
+ break;
}
/* Handle variable-length commands */
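For illustration, a hedged userspace sketch (not part of the patch) of how the new suspend ioctls might be used on an open usbfs file descriptor, assuming headers that provide the USBDEVFS_*_SUSPEND definitions:

#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

/* Allow the device to runtime-suspend, then block until it resumes.
 * On success the device is active again and suspend is forbidden once
 * more (see proc_wait_for_resume() above). */
static int wait_for_device_resume(int fd)
{
	if (ioctl(fd, USBDEVFS_ALLOW_SUSPEND, 0) < 0)
		return -1;
	return ioctl(fd, USBDEVFS_WAIT_FOR_RESUME, 0);
}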
@@ -2651,15 +2732,20 @@ static void usbdev_remove(struct usb_device *udev)
{
struct usb_dev_state *ps;
+ /* Protect against simultaneous resume */
+ mutex_lock(&usbfs_mutex);
while (!list_empty(&udev->filelist)) {
ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
+ WRITE_ONCE(ps->not_yet_resumed, 0);
+ wake_up_all(&ps->wait_for_resume);
list_del_init(&ps->list);
if (ps->discsignr)
kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext,
ps->disc_pid, ps->cred);
}
+ mutex_unlock(&usbfs_mutex);
}
static int usbdev_notify(struct notifier_block *self,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ebcadaad89d1..2b27d232d7a7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -892,6 +892,7 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
new_udriver->drvwrap.driver.probe = usb_probe_device;
new_udriver->drvwrap.driver.remove = usb_unbind_device;
new_udriver->drvwrap.driver.owner = owner;
+ new_udriver->drvwrap.driver.dev_groups = new_udriver->dev_groups;
retval = driver_register(&new_udriver->drvwrap.driver);
@@ -954,6 +955,7 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
new_driver->drvwrap.driver.remove = usb_unbind_interface;
new_driver->drvwrap.driver.owner = owner;
new_driver->drvwrap.driver.mod_name = mod_name;
+ new_driver->drvwrap.driver.dev_groups = new_driver->dev_groups;
spin_lock_init(&new_driver->dynids.lock);
INIT_LIST_HEAD(&new_driver->dynids.list);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f73b672..558890ada0e5 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
intf->minor = minor;
break;
}
- up_write(&minor_rwsem);
- if (intf->minor < 0)
+ if (intf->minor < 0) {
+ up_write(&minor_rwsem);
return -EXFULL;
+ }
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
- down_write(&minor_rwsem);
usb_minors[minor] = NULL;
intf->minor = -1;
- up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
+ up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
intf->usb_dev = NULL;
intf->minor = -1;
destroy_usb_class();
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 1ac9c1e5f773..38f8b3e31762 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -257,6 +257,8 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
else
rc = usb_port_suspend(udev, msg);
+ if (rc == 0)
+ usbfs_notify_suspend(udev);
return rc;
}
@@ -273,6 +275,9 @@ static int generic_resume(struct usb_device *udev, pm_message_t msg)
rc = hcd_bus_resume(udev, msg);
else
rc = usb_port_resume(udev, msg);
+
+ if (rc == 0)
+ usbfs_notify_resume(udev);
return rc;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 03432467b05f..9e26b0143a59 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* EHCI, OHCI */
hcd->rsrc_start = pci_resource_start(dev, 0);
hcd->rsrc_len = pci_resource_len(dev, 0);
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description)) {
+ if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len, driver->description)) {
dev_dbg(&dev->dev, "controller already in use\n");
retval = -EBUSY;
goto put_hcd;
}
- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
retval = -EFAULT;
- goto release_mem_region;
+ goto put_hcd;
}
} else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
hcd->rsrc_start = pci_resource_start(dev, region);
hcd->rsrc_len = pci_resource_len(dev, region);
- if (request_region(hcd->rsrc_start, hcd->rsrc_len,
- driver->description))
+ if (devm_request_region(&dev->dev, hcd->rsrc_start,
+ hcd->rsrc_len, driver->description))
break;
}
if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
if (retval != 0)
- goto unmap_registers;
+ goto put_hcd;
device_wakeup_enable(hcd->self.controller);
if (pci_dev_run_wake(dev))
pm_runtime_put_noidle(&dev->dev);
return retval;
-unmap_registers:
- if (driver->flags & HCD_MEMORY) {
- iounmap(hcd->regs);
-release_mem_region:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- } else
- release_region(hcd->rsrc_start, hcd->rsrc_len);
put_hcd:
usb_put_hcd(hcd);
disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
dev_set_drvdata(&dev->dev, NULL);
up_read(&companions_rwsem);
}
-
- if (hcd->driver->flags & HCD_MEMORY) {
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- } else {
- release_region(hcd->rsrc_start, hcd->rsrc_len);
- }
-
usb_put_hcd(hcd);
pci_disable_device(dev);
}
@@ -407,8 +393,7 @@ static inline void powermac_set_asic(struct pci_dev *pci_dev, int enable)
static int check_root_hub_suspended(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
if (HCD_RH_RUNNING(hcd)) {
dev_warn(dev, "Root hub is not suspended\n");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 88533938ce19..f225eaa98ff8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -103,11 +103,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
/* wait queue for synchronous unlinks */
DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
-static inline int is_root_hub(struct usb_device *udev)
-{
- return (udev->parent == NULL);
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -880,101 +875,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
-
-/*
- * Show & store the current value of authorized_default
- */
-static ssize_t authorized_default_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_device *rh_usb_dev = to_usb_device(dev);
- struct usb_bus *usb_bus = rh_usb_dev->bus;
- struct usb_hcd *hcd;
-
- hcd = bus_to_hcd(usb_bus);
- return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
-}
-
-static ssize_t authorized_default_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- ssize_t result;
- unsigned val;
- struct usb_device *rh_usb_dev = to_usb_device(dev);
- struct usb_bus *usb_bus = rh_usb_dev->bus;
- struct usb_hcd *hcd;
-
- hcd = bus_to_hcd(usb_bus);
- result = sscanf(buf, "%u\n", &val);
- if (result == 1) {
- hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
- val : USB_DEVICE_AUTHORIZE_ALL;
- result = size;
- } else {
- result = -EINVAL;
- }
- return result;
-}
-static DEVICE_ATTR_RW(authorized_default);
-
-/*
- * interface_authorized_default_show - show default authorization status
- * for USB interfaces
- *
- * note: interface_authorized_default is the default value
- * for initializing the authorized attribute of interfaces
- */
-static ssize_t interface_authorized_default_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_device *usb_dev = to_usb_device(dev);
- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
-
- return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
-}
-
-/*
- * interface_authorized_default_store - store default authorization status
- * for USB interfaces
- *
- * note: interface_authorized_default is the default value
- * for initializing the authorized attribute of interfaces
- */
-static ssize_t interface_authorized_default_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct usb_device *usb_dev = to_usb_device(dev);
- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
- int rc = count;
- bool val;
-
- if (strtobool(buf, &val) != 0)
- return -EINVAL;
-
- if (val)
- set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
- else
- clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
-
- return rc;
-}
-static DEVICE_ATTR_RW(interface_authorized_default);
-
-/* Group all the USB bus attributes */
-static struct attribute *usb_bus_attrs[] = {
- &dev_attr_authorized_default.attr,
- &dev_attr_interface_authorized_default.attr,
- NULL,
-};
-
-static const struct attribute_group usb_bus_attr_group = {
- .name = NULL, /* we want them in the same directory */
- .attrs = usb_bus_attrs,
-};
-
-
-
/*-------------------------------------------------------------------------*/
/**
@@ -1349,9 +1249,6 @@ EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
* To support host controllers with limited dma capabilities we provide dma
* bounce buffers. This feature can be enabled by initializing
* hcd->localmem_pool using usb_hcd_setup_local_mem().
- * For this to work properly the host controller code must first use the
- * function dma_declare_coherent_memory() to point out which memory area
- * that should be used for dma allocations.
*
* The initialized hcd->localmem_pool then tells the usb code to allocate all
* data for dma using the genalloc API.
@@ -1512,7 +1409,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (is_vmalloc_addr(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is not dma capable\n");
return -EAGAIN;
@@ -1546,7 +1443,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (urb->num_sgs) {
int n;
@@ -2291,6 +2188,9 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
hcd->state = HC_STATE_RESUMING;
status = hcd->driver->bus_resume(hcd);
clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
+ if (status == 0)
+ status = usb_phy_roothub_calibrate(hcd->phy_roothub);
+
if (status == 0) {
struct usb_device *udev;
int port1;
@@ -2554,7 +2454,6 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->self.controller = dev;
hcd->self.sysdev = sysdev;
hcd->self.bus_name = bus_name;
- hcd->self.uses_dma = (sysdev->dma_mask != NULL);
timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
@@ -2864,6 +2763,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
hcd->rh_pollable = 1;
+ retval = usb_phy_roothub_calibrate(hcd->phy_roothub);
+ if (retval)
+ goto err_hcd_driver_setup;
+
/* NOTE: root hub and controller capabilities may not be the same */
if (device_can_wakeup(hcd->self.controller)
&& device_can_wakeup(&hcd->self.root_hub->dev))
@@ -2894,32 +2797,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
if (retval != 0)
goto err_register_root_hub;
- retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
- if (retval < 0) {
- printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
- retval);
- goto error_create_attr_group;
- }
if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
return retval;
-error_create_attr_group:
- clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
- if (HC_IS_RUNNING(hcd->state))
- hcd->state = HC_STATE_QUIESCING;
- spin_lock_irq(&hcd_root_hub_lock);
- hcd->rh_registered = 0;
- spin_unlock_irq(&hcd_root_hub_lock);
-
-#ifdef CONFIG_PM
- cancel_work_sync(&hcd->wakeup_work);
-#endif
- cancel_work_sync(&hcd->died_work);
- mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
- mutex_unlock(&usb_bus_idr_lock);
err_register_root_hub:
hcd->rh_pollable = 0;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2963,8 +2845,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
usb_get_dev(rhdev);
- sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group);
-
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
if (HC_IS_RUNNING (hcd->state))
hcd->state = HC_STATE_QUIESCING;
@@ -3052,8 +2932,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
size, MEMREMAP_WC);
- if (!local_mem)
- return -ENOMEM;
+ if (IS_ERR(local_mem))
+ return PTR_ERR(local_mem);
/*
* Here we pass a dma_addr_t but the arg type is a phys_addr_t.
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e844bb7b5676..5adf489428aa 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 7580493b867a..fb1588e7c282 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -151,6 +151,27 @@ err_out:
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);
+int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_calibrate(roothub_entry->phy);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);
+
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index dad564e2d2d4..20a267cd986b 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -18,6 +18,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
enum phy_mode mode);
+int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 1a06a4b5fbb1..bbbb35fa639f 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -285,6 +285,14 @@ static int usb_port_runtime_suspend(struct device *dev)
}
#endif
+static void usb_port_shutdown(struct device *dev)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ if (port_dev->child)
+ usb_disable_usb2_hardware_lpm(port_dev->child);
+}
+
static const struct dev_pm_ops usb_port_pm_ops = {
#ifdef CONFIG_PM
.runtime_suspend = usb_port_runtime_suspend,
@@ -301,6 +309,7 @@ struct device_type usb_port_device_type = {
static struct device_driver usb_port_driver = {
.name = "usb",
.owner = THIS_MODULE,
+ .shutdown = usb_port_shutdown,
};
static int link_peers(struct usb_port *left, struct usb_port *right)
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7e88fdfe3cf5..f19694e69f5c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/usb/quirks.h>
#include <linux/of.h>
#include "usb.h"
@@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = {
.size = 18 + 65535, /* dev descr + max-size raw descriptor */
};
+/*
+ * Show & store the current value of authorized_default
+ */
+static ssize_t authorized_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *rh_usb_dev = to_usb_device(dev);
+ struct usb_bus *usb_bus = rh_usb_dev->bus;
+ struct usb_hcd *hcd;
+
+ hcd = bus_to_hcd(usb_bus);
+ return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
+}
+
+static ssize_t authorized_default_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ unsigned int val;
+ struct usb_device *rh_usb_dev = to_usb_device(dev);
+ struct usb_bus *usb_bus = rh_usb_dev->bus;
+ struct usb_hcd *hcd;
+
+ hcd = bus_to_hcd(usb_bus);
+ result = sscanf(buf, "%u\n", &val);
+ if (result == 1) {
+ hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
+ val : USB_DEVICE_AUTHORIZE_ALL;
+ result = size;
+ } else {
+ result = -EINVAL;
+ }
+ return result;
+}
+static DEVICE_ATTR_RW(authorized_default);
+
+/*
+ * interface_authorized_default_show - show default authorization status
+ * for USB interfaces
+ *
+ * note: interface_authorized_default is the default value
+ * for initializing the authorized attribute of interfaces
+ */
+static ssize_t interface_authorized_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *usb_dev = to_usb_device(dev);
+ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+
+ return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
+}
+
+/*
+ * interface_authorized_default_store - store default authorization status
+ * for USB interfaces
+ *
+ * note: interface_authorized_default is the default value
+ * for initializing the authorized attribute of interfaces
+ */
+static ssize_t interface_authorized_default_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_device *usb_dev = to_usb_device(dev);
+ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+ int rc = count;
+ bool val;
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ if (val)
+ set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(interface_authorized_default);
+
+/* Group all the USB bus attributes */
+static struct attribute *usb_bus_attrs[] = {
+ &dev_attr_authorized_default.attr,
+ &dev_attr_interface_authorized_default.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_bus_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = usb_bus_attrs,
+};
+
+
+static int add_default_authorized_attributes(struct device *dev)
+{
+ int rc = 0;
+
+ if (is_usb_device(dev))
+ rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group);
+
+ return rc;
+}
+
+static void remove_default_authorized_attributes(struct device *dev)
+{
+ if (is_usb_device(dev)) {
+ sysfs_remove_group(&dev->kobj, &usb_bus_attr_group);
+ }
+}
+
int usb_create_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
@@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
retval = add_power_attributes(dev);
if (retval)
goto error;
+
+ if (is_root_hub(udev)) {
+ retval = add_default_authorized_attributes(dev);
+ if (retval)
+ goto error;
+ }
return retval;
+
error:
usb_remove_sysfs_dev_files(udev);
return retval;
@@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
+ if (is_root_hub(udev))
+ remove_default_authorized_attributes(dev);
+
remove_power_attributes(dev);
remove_persist_attributes(dev);
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0ab8738047da..f16c26dc079d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -933,228 +933,6 @@ void usb_free_coherent(struct usb_device *dev, size_t size, void *addr,
}
EXPORT_SYMBOL_GPL(usb_free_coherent);
-/**
- * usb_buffer_map - create DMA mapping(s) for an urb
- * @urb: urb whose transfer_buffer/setup_packet will be mapped
- *
- * URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation
- * succeeds. If the device is connected to this system through a non-DMA
- * controller, this operation always succeeds.
- *
- * This call would normally be used for an urb which is reused, perhaps
- * as the target of a large periodic transfer, with usb_buffer_dmasync()
- * calls to synchronize memory and dma state.
- *
- * Reverse the effect of this call with usb_buffer_unmap().
- *
- * Return: Either %NULL (indicating no buffer could be mapped), or @urb.
- *
- */
-#if 0
-struct urb *usb_buffer_map(struct urb *urb)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!urb
- || !urb->dev
- || !(bus = urb->dev->bus)
- || !(controller = bus->sysdev))
- return NULL;
-
- if (controller->dma_mask) {
- urb->transfer_dma = dma_map_single(controller,
- urb->transfer_buffer, urb->transfer_buffer_length,
- usb_pipein(urb->pipe)
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- /* FIXME generic api broken like pci, can't report errors */
- /* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
- } else
- urb->transfer_dma = ~0;
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- return urb;
-}
-EXPORT_SYMBOL_GPL(usb_buffer_map);
-#endif /* 0 */
-
-/* XXX DISABLED, no users currently. If you wish to re-enable this
- * XXX please determine whether the sync is to transfer ownership of
- * XXX the buffer from device to cpu or vice verse, and thusly use the
- * XXX appropriate _for_{cpu,device}() method. -DaveM
- */
-#if 0
-
-/**
- * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
- * @urb: urb whose transfer_buffer/setup_packet will be synchronized
- */
-void usb_buffer_dmasync(struct urb *urb)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!urb
- || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
- || !urb->dev
- || !(bus = urb->dev->bus)
- || !(controller = bus->sysdev))
- return;
-
- if (controller->dma_mask) {
- dma_sync_single_for_cpu(controller,
- urb->transfer_dma, urb->transfer_buffer_length,
- usb_pipein(urb->pipe)
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (usb_pipecontrol(urb->pipe))
- dma_sync_single_for_cpu(controller,
- urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- }
-}
-EXPORT_SYMBOL_GPL(usb_buffer_dmasync);
-#endif
-
-/**
- * usb_buffer_unmap - free DMA mapping(s) for an urb
- * @urb: urb whose transfer_buffer will be unmapped
- *
- * Reverses the effect of usb_buffer_map().
- */
-#if 0
-void usb_buffer_unmap(struct urb *urb)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!urb
- || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
- || !urb->dev
- || !(bus = urb->dev->bus)
- || !(controller = bus->sysdev))
- return;
-
- if (controller->dma_mask) {
- dma_unmap_single(controller,
- urb->transfer_dma, urb->transfer_buffer_length,
- usb_pipein(urb->pipe)
- ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
- urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
-}
-EXPORT_SYMBOL_GPL(usb_buffer_unmap);
-#endif /* 0 */
-
-#if 0
-/**
- * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
- * @dev: device to which the scatterlist will be mapped
- * @is_in: mapping transfer direction
- * @sg: the scatterlist to map
- * @nents: the number of entries in the scatterlist
- *
- * Return: Either < 0 (indicating no buffers could be mapped), or the
- * number of DMA mapping array entries in the scatterlist.
- *
- * Note:
- * The caller is responsible for placing the resulting DMA addresses from
- * the scatterlist into URB transfer buffer pointers, and for setting the
- * URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs.
- *
- * Top I/O rates come from queuing URBs, instead of waiting for each one
- * to complete before starting the next I/O. This is particularly easy
- * to do with scatterlists. Just allocate and submit one URB for each DMA
- * mapping entry returned, stopping on the first error or when all succeed.
- * Better yet, use the usb_sg_*() calls, which do that (and more) for you.
- *
- * This call would normally be used when translating scatterlist requests,
- * rather than usb_buffer_map(), since on some hardware (with IOMMUs) it
- * may be able to coalesce mappings for improved I/O efficiency.
- *
- * Reverse the effect of this call with usb_buffer_unmap_sg().
- */
-int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int nents)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!dev
- || !(bus = dev->bus)
- || !(controller = bus->sysdev)
- || !controller->dma_mask)
- return -EINVAL;
-
- /* FIXME generic api broken like pci, can't report errors */
- return dma_map_sg(controller, sg, nents,
- is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
-#endif
-
-/* XXX DISABLED, no users currently. If you wish to re-enable this
- * XXX please determine whether the sync is to transfer ownership of
- * XXX the buffer from device to cpu or vice verse, and thusly use the
- * XXX appropriate _for_{cpu,device}() method. -DaveM
- */
-#if 0
-
-/**
- * usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
- * @dev: device to which the scatterlist will be mapped
- * @is_in: mapping transfer direction
- * @sg: the scatterlist to synchronize
- * @n_hw_ents: the positive return value from usb_buffer_map_sg
- *
- * Use this when you are re-using a scatterlist's data buffers for
- * another USB request.
- */
-void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!dev
- || !(bus = dev->bus)
- || !(controller = bus->sysdev)
- || !controller->dma_mask)
- return;
-
- dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
- is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
-#endif
-
-#if 0
-/**
- * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
- * @dev: device to which the scatterlist will be mapped
- * @is_in: mapping transfer direction
- * @sg: the scatterlist to unmap
- * @n_hw_ents: the positive return value from usb_buffer_map_sg
- *
- * Reverses the effect of usb_buffer_map_sg().
- */
-void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents)
-{
- struct usb_bus *bus;
- struct device *controller;
-
- if (!dev
- || !(bus = dev->bus)
- || !(controller = bus->sysdev)
- || !controller->dma_mask)
- return;
-
- dma_unmap_sg(controller, sg, n_hw_ents,
- is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
-#endif
-
/*
* Notifications of device and interface registration
*/
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bd8d01f85a13..cf4783cf661a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -95,6 +95,9 @@ extern int usb_runtime_idle(struct device *dev);
extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
+extern void usbfs_notify_suspend(struct usb_device *udev);
+extern void usbfs_notify_resume(struct usb_device *udev);
+
#else
static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
@@ -153,6 +156,11 @@ static inline int is_usb_port(const struct device *dev)
return dev->type == &usb_port_device_type;
}
+static inline int is_root_hub(struct usb_device *udev)
+{
+ return (udev->parent == NULL);
+}
+
/* Do the same for device drivers and interface drivers. */
static inline int is_usb_device_driver(struct device_driver *drv)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bff48a8a1984..6be10e496e10 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3224,14 +3224,15 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *ep,
int result)
{
- struct dwc2_hsotg_req *req, *treq;
unsigned int size;
ep->req = NULL;
- list_for_each_entry_safe(req, treq, &ep->queue, queue)
- dwc2_hsotg_complete_request(hsotg, ep, req,
- result);
+ while (!list_empty(&ep->queue)) {
+ struct dwc2_hsotg_req *req = get_ep_head(ep);
+
+ dwc2_hsotg_complete_request(hsotg, ep, req, result);
+ }
if (!hsotg->dedicated_fifos)
return;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index ee144ff8af5b..81afe553aa66 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
buf = urb->transfer_buffer;
- if (hcd->self.uses_dma) {
+ if (hcd_uses_dma(hcd)) {
if (!buf && (urb->transfer_dma & 3)) {
dev_err(hsotg->dev,
"%s: unaligned transfer with no transfer_buffer",
@@ -5062,13 +5062,13 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
dwc2_hc_driver.reset_device = dwc2_reset_device;
}
+ if (hsotg->params.host_dma)
+ dwc2_hc_driver.flags |= HCD_DMA;
+
hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
if (!hcd)
goto error1;
- if (!hsotg->params.host_dma)
- hcd->self.uses_dma = 0;
-
hcd->has_tt = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 55f841a54015..31e090ac9f1e 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -404,10 +404,7 @@ static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
device_property_read_u32(hsotg->dev, "g-np-tx-fifo-size",
&p->g_np_tx_fifo_size);
- num = device_property_read_u32_array(hsotg->dev,
- "g-tx-fifo-size",
- NULL, 0);
-
+ num = device_property_count_u32(hsotg->dev, "g-tx-fifo-size");
if (num > 0) {
num = min(num, 15);
memset(p->g_tx_fifo_size, 0,
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 80fd3c6dcd1c..3c6ce09a6db5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -407,10 +407,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
spin_lock_init(&hsotg->lock);
hsotg->irq = platform_get_irq(dev, 0);
- if (hsotg->irq < 0) {
- dev_err(&dev->dev, "missing IRQ resource\n");
+ if (hsotg->irq < 0)
return hsotg->irq;
- }
dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
hsotg->irq);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c9bb93a2c81e..999ce5e84d3c 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -168,7 +168,6 @@ static void __dwc3_set_mode(struct work_struct *work)
otg_set_vbus(dwc->usb2_phy->otg, true);
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
- phy_calibrate(dwc->usb2_generic_phy);
}
break;
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -252,12 +251,25 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
reg |= DWC3_DCTL_CSFTRST;
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ /*
+ * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
+ * is cleared only after all the clocks are synchronized. This can
+ * take a little more than 50ms. Set the polling rate at 20ms
+ * for 10 times instead.
+ */
+ if (dwc3_is_usb31(dwc) && dwc->revision >= DWC3_USB31_REVISION_190A)
+ retries = 10;
+
do {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (!(reg & DWC3_DCTL_CSFTRST))
goto done;
- udelay(1);
+ if (dwc3_is_usb31(dwc) &&
+ dwc->revision >= DWC3_USB31_REVISION_190A)
+ msleep(20);
+ else
+ udelay(1);
} while (--retries);
phy_exit(dwc->usb3_generic_phy);
@@ -267,11 +279,11 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
done:
/*
- * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared,
- * we must wait at least 50ms before accessing the PHY domain
- * (synchronization delay). DWC_usb31 programming guide section 1.3.2.
+ * For DWC_usb31 controller 1.80a and prior, once DCTL.CSFRST bit
+ * is cleared, we must wait at least 50ms before accessing the PHY
+ * domain (synchronization delay).
*/
- if (dwc3_is_usb31(dwc))
+ if (dwc3_is_usb31(dwc) && dwc->revision <= DWC3_USB31_REVISION_180A)
msleep(50);
return 0;
@@ -686,8 +698,7 @@ static void dwc3_core_exit(struct dwc3 *dwc)
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
- clk_bulk_disable(dwc->num_clks, dwc->clks);
- clk_bulk_unprepare(dwc->num_clks, dwc->clks);
+ clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
reset_control_assert(dwc->reset);
}
@@ -813,8 +824,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
* result = 1, means INCRx burst mode supported.
* result > 1, means undefined length burst mode supported.
*/
- ntype = device_property_read_u32_array(dev,
- "snps,incr-burst-type-adjustment", NULL, 0);
+ ntype = device_property_count_u32(dev, "snps,incr-burst-type-adjustment");
if (ntype <= 0)
return;
@@ -1166,7 +1176,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dev_err(dev, "failed to initialize host\n");
return ret;
}
- phy_calibrate(dwc->usb2_generic_phy);
break;
case USB_DR_MODE_OTG:
INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
@@ -1310,8 +1319,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
- dwc->hird_threshold = hird_threshold
- | (dwc->is_utmi_l1_suspend << 4);
+ dwc->hird_threshold = hird_threshold;
dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
dwc->rx_max_burst_prd = rx_max_burst_prd;
@@ -1436,7 +1444,7 @@ static int dwc3_probe(struct platform_device *pdev)
if (dev->of_node) {
dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
- ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+ ret = devm_clk_bulk_get(dev, dwc->num_clks, dwc->clks);
if (ret == -EPROBE_DEFER)
return ret;
/*
@@ -1449,16 +1457,12 @@ static int dwc3_probe(struct platform_device *pdev)
ret = reset_control_deassert(dwc->reset);
if (ret)
- goto put_clks;
+ return ret;
- ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
+ ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
- ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
- if (ret)
- goto unprepare_clks;
-
if (!dwc3_core_is_valid(dwc)) {
dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
ret = -ENODEV;
@@ -1531,13 +1535,9 @@ err1:
pm_runtime_disable(&pdev->dev);
disable_clks:
- clk_bulk_disable(dwc->num_clks, dwc->clks);
-unprepare_clks:
- clk_bulk_unprepare(dwc->num_clks, dwc->clks);
+ clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
-put_clks:
- clk_bulk_put(dwc->num_clks, dwc->clks);
return ret;
}
@@ -1560,7 +1560,6 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_free_event_buffers(dwc);
dwc3_free_scratch_buffers(dwc);
- clk_bulk_put(dwc->num_clks, dwc->clks);
return 0;
}
@@ -1574,14 +1573,10 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
if (ret)
return ret;
- ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
+ ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
- ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
- if (ret)
- goto unprepare_clks;
-
ret = dwc3_core_init(dwc);
if (ret)
goto disable_clks;
@@ -1589,9 +1584,7 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
return 0;
disable_clks:
- clk_bulk_disable(dwc->num_clks, dwc->clks);
-unprepare_clks:
- clk_bulk_unprepare(dwc->num_clks, dwc->clks);
+ clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 3dd783b889cb..1c8b349379af 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1137,6 +1137,8 @@ struct dwc3 {
#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
#define DWC3_USB31_REVISION_160A (0x3136302a | DWC3_REVISION_IS_DWC31)
#define DWC3_USB31_REVISION_170A (0x3137302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_180A (0x3138302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_190A (0x3139302a | DWC3_REVISION_IS_DWC31)
u32 version_type;
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 068259fdfb0c..9baabed87d61 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -246,258 +246,6 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size,
return str;
}
-static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str,
- size_t size)
-{
- switch (t & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- snprintf(str, size, "Get Device Status(Length = %d)", l);
- break;
- case USB_RECIP_INTERFACE:
- snprintf(str, size, "Get Interface Status(Intf = %d, Length = %d)",
- i, l);
- break;
- case USB_RECIP_ENDPOINT:
- snprintf(str, size, "Get Endpoint Status(ep%d%s)",
- i & ~USB_DIR_IN,
- i & USB_DIR_IN ? "in" : "out");
- break;
- }
-}
-
-static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
- __u16 i, char *str, size_t size)
-{
- switch (t & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- snprintf(str, size, "%s Device Feature(%s%s)",
- b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
- ({char *s;
- switch (v) {
- case USB_DEVICE_SELF_POWERED:
- s = "Self Powered";
- break;
- case USB_DEVICE_REMOTE_WAKEUP:
- s = "Remote Wakeup";
- break;
- case USB_DEVICE_TEST_MODE:
- s = "Test Mode";
- break;
- case USB_DEVICE_U1_ENABLE:
- s = "U1 Enable";
- break;
- case USB_DEVICE_U2_ENABLE:
- s = "U2 Enable";
- break;
- case USB_DEVICE_LTM_ENABLE:
- s = "LTM Enable";
- break;
- default:
- s = "UNKNOWN";
- } s; }),
- v == USB_DEVICE_TEST_MODE ?
- ({ char *s;
- switch (i) {
- case TEST_J:
- s = ": TEST_J";
- break;
- case TEST_K:
- s = ": TEST_K";
- break;
- case TEST_SE0_NAK:
- s = ": TEST_SE0_NAK";
- break;
- case TEST_PACKET:
- s = ": TEST_PACKET";
- break;
- case TEST_FORCE_EN:
- s = ": TEST_FORCE_EN";
- break;
- default:
- s = ": UNKNOWN";
- } s; }) : "");
- break;
- case USB_RECIP_INTERFACE:
- snprintf(str, size, "%s Interface Feature(%s)",
- b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
- v == USB_INTRF_FUNC_SUSPEND ?
- "Function Suspend" : "UNKNOWN");
- break;
- case USB_RECIP_ENDPOINT:
- snprintf(str, size, "%s Endpoint Feature(%s ep%d%s)",
- b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
- v == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN",
- i & ~USB_DIR_IN,
- i & USB_DIR_IN ? "in" : "out");
- break;
- }
-}
-
-static inline void dwc3_decode_set_address(__u16 v, char *str, size_t size)
-{
- snprintf(str, size, "Set Address(Addr = %02x)", v);
-}
-
-static inline void dwc3_decode_get_set_descriptor(__u8 t, __u8 b, __u16 v,
- __u16 i, __u16 l, char *str, size_t size)
-{
- snprintf(str, size, "%s %s Descriptor(Index = %d, Length = %d)",
- b == USB_REQ_GET_DESCRIPTOR ? "Get" : "Set",
- ({ char *s;
- switch (v >> 8) {
- case USB_DT_DEVICE:
- s = "Device";
- break;
- case USB_DT_CONFIG:
- s = "Configuration";
- break;
- case USB_DT_STRING:
- s = "String";
- break;
- case USB_DT_INTERFACE:
- s = "Interface";
- break;
- case USB_DT_ENDPOINT:
- s = "Endpoint";
- break;
- case USB_DT_DEVICE_QUALIFIER:
- s = "Device Qualifier";
- break;
- case USB_DT_OTHER_SPEED_CONFIG:
- s = "Other Speed Config";
- break;
- case USB_DT_INTERFACE_POWER:
- s = "Interface Power";
- break;
- case USB_DT_OTG:
- s = "OTG";
- break;
- case USB_DT_DEBUG:
- s = "Debug";
- break;
- case USB_DT_INTERFACE_ASSOCIATION:
- s = "Interface Association";
- break;
- case USB_DT_BOS:
- s = "BOS";
- break;
- case USB_DT_DEVICE_CAPABILITY:
- s = "Device Capability";
- break;
- case USB_DT_PIPE_USAGE:
- s = "Pipe Usage";
- break;
- case USB_DT_SS_ENDPOINT_COMP:
- s = "SS Endpoint Companion";
- break;
- case USB_DT_SSP_ISOC_ENDPOINT_COMP:
- s = "SSP Isochronous Endpoint Companion";
- break;
- default:
- s = "UNKNOWN";
- break;
- } s; }), v & 0xff, l);
-}
-
-
-static inline void dwc3_decode_get_configuration(__u16 l, char *str,
- size_t size)
-{
- snprintf(str, size, "Get Configuration(Length = %d)", l);
-}
-
-static inline void dwc3_decode_set_configuration(__u8 v, char *str, size_t size)
-{
- snprintf(str, size, "Set Configuration(Config = %d)", v);
-}
-
-static inline void dwc3_decode_get_intf(__u16 i, __u16 l, char *str,
- size_t size)
-{
- snprintf(str, size, "Get Interface(Intf = %d, Length = %d)", i, l);
-}
-
-static inline void dwc3_decode_set_intf(__u8 v, __u16 i, char *str, size_t size)
-{
- snprintf(str, size, "Set Interface(Intf = %d, Alt.Setting = %d)", i, v);
-}
-
-static inline void dwc3_decode_synch_frame(__u16 i, __u16 l, char *str,
- size_t size)
-{
- snprintf(str, size, "Synch Frame(Endpoint = %d, Length = %d)", i, l);
-}
-
-static inline void dwc3_decode_set_sel(__u16 l, char *str, size_t size)
-{
- snprintf(str, size, "Set SEL(Length = %d)", l);
-}
-
-static inline void dwc3_decode_set_isoch_delay(__u8 v, char *str, size_t size)
-{
- snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", v);
-}
-
-/**
- * dwc3_decode_ctrl - returns a string represetion of ctrl request
- */
-static inline const char *dwc3_decode_ctrl(char *str, size_t size,
- __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex,
- __u16 wLength)
-{
- switch (bRequest) {
- case USB_REQ_GET_STATUS:
- dwc3_decode_get_status(bRequestType, wIndex, wLength, str,
- size);
- break;
- case USB_REQ_CLEAR_FEATURE:
- case USB_REQ_SET_FEATURE:
- dwc3_decode_set_clear_feature(bRequestType, bRequest, wValue,
- wIndex, str, size);
- break;
- case USB_REQ_SET_ADDRESS:
- dwc3_decode_set_address(wValue, str, size);
- break;
- case USB_REQ_GET_DESCRIPTOR:
- case USB_REQ_SET_DESCRIPTOR:
- dwc3_decode_get_set_descriptor(bRequestType, bRequest, wValue,
- wIndex, wLength, str, size);
- break;
- case USB_REQ_GET_CONFIGURATION:
- dwc3_decode_get_configuration(wLength, str, size);
- break;
- case USB_REQ_SET_CONFIGURATION:
- dwc3_decode_set_configuration(wValue, str, size);
- break;
- case USB_REQ_GET_INTERFACE:
- dwc3_decode_get_intf(wIndex, wLength, str, size);
- break;
- case USB_REQ_SET_INTERFACE:
- dwc3_decode_set_intf(wValue, wIndex, str, size);
- break;
- case USB_REQ_SYNCH_FRAME:
- dwc3_decode_synch_frame(wIndex, wLength, str, size);
- break;
- case USB_REQ_SET_SEL:
- dwc3_decode_set_sel(wLength, str, size);
- break;
- case USB_REQ_SET_ISOCH_DELAY:
- dwc3_decode_set_isoch_delay(wValue, str, size);
- break;
- default:
- snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
- bRequestType, bRequest,
- cpu_to_le16(wValue) & 0xff,
- cpu_to_le16(wValue) >> 8,
- cpu_to_le16(wIndex) & 0xff,
- cpu_to_le16(wIndex) >> 8,
- cpu_to_le16(wLength) & 0xff,
- cpu_to_le16(wLength) >> 8);
- }
-
- return str;
-}
-
/**
* dwc3_ep_event_string - returns event name
* @event: then event code
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index cbee5fb9b9fb..1e14a6f4884b 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -81,7 +81,6 @@ static int kdwc3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_keystone *kdwc;
- struct resource *res;
int error, irq;
kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
@@ -92,8 +91,7 @@ static int kdwc3_probe(struct platform_device *pdev)
kdwc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kdwc->usbss = devm_ioremap_resource(dev, res);
+ kdwc->usbss = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
@@ -112,7 +110,6 @@ static int kdwc3_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "missing irq\n");
error = irq;
goto err_irq;
}
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index bca7e92a10e9..8a3ec1a951fe 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -386,7 +386,6 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *base;
- struct resource *res;
enum phy_mode otg_id;
int ret, i, irq;
@@ -394,8 +393,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -564,7 +562,13 @@ static int __maybe_unused dwc3_meson_g12a_runtime_resume(struct device *dev)
static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
{
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
- int i;
+ int i, ret;
+
+ if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
+ ret = regulator_disable(priv->vbus);
+ if (ret)
+ return ret;
+ }
for (i = 0 ; i < PHY_COUNT ; ++i) {
phy_power_off(priv->phys[i]);
@@ -599,6 +603,12 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
return ret;
}
+ if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index ed8b86517675..8c3de2d258bf 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -14,7 +14,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/dwc3-omap.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
@@ -106,6 +105,12 @@
#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2)
#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1)
+enum dwc3_omap_utmi_mode {
+ DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
+ DWC3_OMAP_UTMI_MODE_HW,
+ DWC3_OMAP_UTMI_MODE_SW,
+};
+
struct dwc3_omap {
struct device *dev;
@@ -446,7 +451,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct dwc3_omap *omap;
- struct resource *res;
struct device *dev = &pdev->dev;
struct regulator *vbus_reg = NULL;
@@ -469,13 +473,10 @@ static int dwc3_omap_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, omap);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 16081383c401..c682420f25ca 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -255,24 +255,26 @@ static int st_dwc3_probe(struct platform_device *pdev)
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
- goto undo_softreset;
+ goto err_node_put;
}
/* Allocate and initialize the core */
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto undo_softreset;
+ goto err_node_put;
}
child_pdev = of_find_device_by_node(child);
if (!child_pdev) {
dev_err(dev, "failed to find dwc3 core device\n");
ret = -ENODEV;
- goto undo_softreset;
+ goto err_node_put;
}
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+ of_node_put(child);
+ of_dev_put(child_pdev);
/*
* Configure the USB port as device or host according to the static
@@ -292,6 +294,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc3_data);
return 0;
+err_node_put:
+ of_node_put(child);
undo_softreset:
reset_control_assert(dwc3_data->rstc_rst);
undo_powerdown:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 173f5329d3d9..8adb59f8e4f1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2078,6 +2078,26 @@ static void dwc3_gadget_config_params(struct usb_gadget *g,
{
struct dwc3 *dwc = gadget_to_dwc(g);
+ params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
+ params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
+
+ /* Recommended BESL */
+ if (!dwc->dis_enblslpm_quirk) {
+ /*
+ * If the recommended BESL baseline is 0 or if the BESL deep is
+ * less than 2, Microsoft's Windows 10 host usb stack will issue
+ * a usb reset immediately after it receives the extended BOS
+ * descriptor and the enumeration will fail. To maintain
+ * compatibility with the Windows' usb stack, let's set the
+ * recommended BESL baseline to 1 and clamp the BESL deep to be
+ * within 2 to 15.
+ */
+ params->besl_baseline = 1;
+ if (dwc->is_utmi_l1_suspend)
+ params->besl_deep =
+ clamp_t(u8, dwc->hird_threshold, 2, 15);
+ }
+
/* U1 Device exit Latency */
if (dwc->dis_u1_entry_quirk)
params->bU1devExitLat = 0;
@@ -2868,7 +2888,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
- reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
+ reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
+ (dwc->is_utmi_l1_suspend << 4));
/*
* When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
@@ -3318,7 +3339,6 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
- dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
dwc->gadget.lpm_capable = true;
/*
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f55947294f7c..8deea8c91e03 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -85,7 +85,7 @@ int dwc3_host_init(struct dwc3 *dwc)
DWC3_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
- goto err1;
+ goto err;
}
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
@@ -112,37 +112,23 @@ int dwc3_host_init(struct dwc3 *dwc)
ret = platform_device_add_properties(xhci, props);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
- goto err1;
+ goto err;
}
}
- phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(dwc->dev));
- phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(dwc->dev));
-
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
- goto err2;
+ goto err;
}
return 0;
-err2:
- phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(dwc->dev));
- phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(dwc->dev));
-err1:
+err:
platform_device_put(xhci);
return ret;
}
void dwc3_host_exit(struct dwc3 *dwc)
{
- phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
- dev_name(dwc->dev));
- phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
- dev_name(dwc->dev));
platform_device_unregister(dwc->xhci);
}
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 818a63da1a44..9edff17111f7 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -86,7 +86,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
__entry->wIndex = le16_to_cpu(ctrl->wIndex);
__entry->wLength = le16_to_cpu(ctrl->wLength);
),
- TP_printk("%s", dwc3_decode_ctrl(__get_str(str), DWC3_MSG_MAX,
+ TP_printk("%s", usb_decode_ctrl(__get_str(str), DWC3_MSG_MAX,
__entry->bRequestType,
__entry->bRequest, __entry->wValue,
__entry->wIndex, __entry->wLength)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9118b42c70b6..d516e8d6cd7f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -612,6 +612,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
struct usb_ext_cap_descriptor *usb_ext;
struct usb_dcd_config_params dcd_config_params;
struct usb_bos_descriptor *bos = cdev->req->buf;
+ unsigned int besl = 0;
bos->bLength = USB_DT_BOS_SIZE;
bos->bDescriptorType = USB_DT_BOS;
@@ -619,6 +620,29 @@ static int bos_desc(struct usb_composite_dev *cdev)
bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
bos->bNumDeviceCaps = 0;
+ /* Get Controller configuration */
+ if (cdev->gadget->ops->get_config_params) {
+ cdev->gadget->ops->get_config_params(cdev->gadget,
+ &dcd_config_params);
+ } else {
+ dcd_config_params.besl_baseline =
+ USB_DEFAULT_BESL_UNSPECIFIED;
+ dcd_config_params.besl_deep =
+ USB_DEFAULT_BESL_UNSPECIFIED;
+ dcd_config_params.bU1devExitLat =
+ USB_DEFAULT_U1_DEV_EXIT_LAT;
+ dcd_config_params.bU2DevExitLat =
+ cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+ }
+
+ if (dcd_config_params.besl_baseline != USB_DEFAULT_BESL_UNSPECIFIED)
+ besl = USB_BESL_BASELINE_VALID |
+ USB_SET_BESL_BASELINE(dcd_config_params.besl_baseline);
+
+ if (dcd_config_params.besl_deep != USB_DEFAULT_BESL_UNSPECIFIED)
+ besl |= USB_BESL_DEEP_VALID |
+ USB_SET_BESL_DEEP(dcd_config_params.besl_deep);
+
/*
* A SuperSpeed device shall include the USB2.0 extension descriptor
* and shall support LPM when operating in USB2.0 HS mode.
@@ -629,7 +653,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
+ USB_BESL_SUPPORT | besl);
/*
* The Superspeed USB Capability descriptor shall be implemented by all
@@ -650,17 +675,6 @@ static int bos_desc(struct usb_composite_dev *cdev)
USB_HIGH_SPEED_OPERATION |
USB_5GBPS_OPERATION);
ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
-
- /* Get Controller configuration */
- if (cdev->gadget->ops->get_config_params) {
- cdev->gadget->ops->get_config_params(cdev->gadget,
- &dcd_config_params);
- } else {
- dcd_config_params.bU1devExitLat =
- USB_DEFAULT_U1_DEV_EXIT_LAT;
- dcd_config_params.bU2DevExitLat =
- cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
- }
ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
}
@@ -1976,6 +1990,7 @@ void composite_disconnect(struct usb_gadget *gadget)
* disconnect callbacks?
*/
spin_lock_irqsave(&cdev->lock, flags);
+ cdev->suspended = 0;
if (cdev->config)
reset_config(cdev);
if (cdev->driver->disconnect)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 29cc5693e05c..7c96c4665178 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
struct fsg_common {
struct usb_gadget *gadget;
struct usb_composite_dev *cdev;
- struct fsg_dev *fsg, *new_fsg;
+ struct fsg_dev *fsg;
wait_queue_head_t io_wait;
wait_queue_head_t fsg_wait;
@@ -290,6 +290,7 @@ struct fsg_common {
unsigned int bulk_out_maxpacket;
enum fsg_state state; /* For exception handling */
unsigned int exception_req_tag;
+ void *exception_arg;
enum data_direction data_dir;
u32 data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* These routines may be called in process context or in_irq */
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+ void *arg)
{
unsigned long flags;
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
if (common->state <= new_state) {
common->exception_req_tag = common->ep0_req_tag;
common->state = new_state;
+ common->exception_arg = arg;
if (common->thread_task)
send_sig_info(SIGUSR1, SEND_SIG_PRIV,
common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
spin_unlock_irqrestore(&common->lock, flags);
}
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+ __raise_exception(common, new_state, NULL);
+}
/*-------------------------------------------------------------------------*/
@@ -2285,16 +2292,16 @@ reset:
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = fsg;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
return USB_GADGET_DELAYED_STATUS;
}
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
enum fsg_state old_state;
struct fsg_lun *curlun;
unsigned int exception_req_tag;
+ struct fsg_dev *new_fsg;
/*
* Clear the existing signals. Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
common->next_buffhd_to_fill = &common->buffhds[0];
common->next_buffhd_to_drain = &common->buffhds[0];
exception_req_tag = common->exception_req_tag;
+ new_fsg = common->exception_arg;
old_state = common->state;
common->state = FSG_STATE_NORMAL;
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
break;
case FSG_STATE_CONFIG_CHANGE:
- do_set_interface(common, common->new_fsg);
- if (common->new_fsg)
+ do_set_interface(common, new_fsg);
+ if (new_fsg)
usb_composite_setup_continue(common->cdev);
break;
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(fsg, "unbind\n");
if (fsg->common->fsg == fsg) {
- fsg->common->new_fsg = NULL;
- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
/* FIXME: make interruptible or killable somehow? */
wait_event(common->fsg_wait, common->fsg != fsg);
}
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index ef0259a950ba..d7e611645533 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -45,7 +45,8 @@ config USB_AT91
config USB_LPC32XX
tristate "LPC32XX USB Peripheral Controller"
- depends on ARCH_LPC32XX && I2C
+ depends on ARCH_LPC32XX
+ depends on I2C
select USB_ISP1301
help
This option selects the USB device controller in the LPC32xx SoC.
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index db3628be38c0..90b134d5dca9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -65,14 +65,16 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
void ast_vhub_nuke(struct ast_vhub_ep *ep, int status)
{
struct ast_vhub_req *req;
-
- EPDBG(ep, "Nuking\n");
+ int count = 0;
/* Beware, lock will be dropped & req-acquired by done() */
while (!list_empty(&ep->queue)) {
req = list_first_entry(&ep->queue, struct ast_vhub_req, queue);
ast_vhub_done(ep, req, status);
+ count++;
}
+ if (count)
+ EPDBG(ep, "Nuked %d request(s)\n", count);
}
struct usb_request *ast_vhub_alloc_request(struct usb_ep *u_ep,
@@ -348,7 +350,6 @@ static int ast_vhub_probe(struct platform_device *pdev)
/* Find interrupt and install handler */
vhub->irq = platform_get_irq(pdev, 0);
if (vhub->irq < 0) {
- dev_err(&pdev->dev, "Failed to get interrupt\n");
rc = vhub->irq;
goto err;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 6b1b16b17d7d..4008e7a51188 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -50,11 +50,14 @@ void ast_vhub_dev_irq(struct ast_vhub_dev *d)
static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
{
- u32 reg, hmsk;
+ u32 reg, hmsk, i;
if (d->enabled)
return;
+ /* Cleanup EP0 state */
+ ast_vhub_reset_ep0(d);
+
/* Enable device and its EP0 interrupts */
reg = VHUB_DEV_EN_ENABLE_PORT |
VHUB_DEV_EN_EP0_IN_ACK_IRQEN |
@@ -73,6 +76,19 @@ static void ast_vhub_dev_enable(struct ast_vhub_dev *d)
/* Set EP0 DMA buffer address */
writel(d->ep0.buf_dma, d->regs + AST_VHUB_DEV_EP0_DATA);
+ /* Clear stall on all EPs */
+ for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
+ struct ast_vhub_ep *ep = d->epns[i];
+
+ if (ep && (ep->epn.stalled || ep->epn.wedged)) {
+ ep->epn.stalled = false;
+ ep->epn.wedged = false;
+ ast_vhub_update_epn_stall(ep);
+ }
+ }
+
+ /* Additional cleanups */
+ d->wakeup_en = false;
d->enabled = true;
}
@@ -93,7 +109,6 @@ static void ast_vhub_dev_disable(struct ast_vhub_dev *d)
writel(0, d->regs + AST_VHUB_DEV_EN_CTRL);
d->gadget.speed = USB_SPEED_UNKNOWN;
d->enabled = false;
- d->suspended = false;
}
static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
@@ -201,14 +216,19 @@ int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
u16 wValue, wIndex;
/* No driver, we shouldn't be enabled ... */
- if (!d->driver || !d->enabled || d->suspended) {
+ if (!d->driver || !d->enabled) {
EPDBG(ep,
- "Device is wrong state driver=%p enabled=%d"
- " suspended=%d\n",
- d->driver, d->enabled, d->suspended);
+ "Device is wrong state driver=%p enabled=%d\n",
+ d->driver, d->enabled);
return std_req_stall;
}
+ /*
+ * Note: we used to reject/stall requests while suspended,
+ * we don't do that anymore as we seem to have cases of
+ * mass storage getting very upset.
+ */
+
/* First packet, grab speed */
if (d->gadget.speed == USB_SPEED_UNKNOWN) {
d->gadget.speed = ep->vhub->speed;
@@ -449,8 +469,7 @@ static const struct usb_gadget_ops ast_vhub_udc_ops = {
void ast_vhub_dev_suspend(struct ast_vhub_dev *d)
{
- d->suspended = true;
- if (d->driver) {
+ if (d->driver && d->driver->suspend) {
spin_unlock(&d->vhub->lock);
d->driver->suspend(&d->gadget);
spin_lock(&d->vhub->lock);
@@ -459,8 +478,7 @@ void ast_vhub_dev_suspend(struct ast_vhub_dev *d)
void ast_vhub_dev_resume(struct ast_vhub_dev *d)
{
- d->suspended = false;
- if (d->driver) {
+ if (d->driver && d->driver->resume) {
spin_unlock(&d->vhub->lock);
d->driver->resume(&d->gadget);
spin_lock(&d->vhub->lock);
@@ -469,46 +487,28 @@ void ast_vhub_dev_resume(struct ast_vhub_dev *d)
void ast_vhub_dev_reset(struct ast_vhub_dev *d)
{
- /*
- * If speed is not set, we enable the port. If it is,
- * send reset to the gadget and reset "speed".
- *
- * Speed is an indication that we have got the first
- * setup packet to the device.
- */
- if (d->gadget.speed == USB_SPEED_UNKNOWN && !d->enabled) {
- DDBG(d, "Reset at unknown speed of disabled device, enabling...\n");
- ast_vhub_dev_enable(d);
- d->suspended = false;
+ /* No driver, just disable the device and return */
+ if (!d->driver) {
+ ast_vhub_dev_disable(d);
+ return;
}
- if (d->gadget.speed != USB_SPEED_UNKNOWN && d->driver) {
- unsigned int i;
- DDBG(d, "Reset at known speed of bound device, resetting...\n");
+ /* If the port isn't enabled, just enable it */
+ if (!d->enabled) {
+ DDBG(d, "Reset of disabled device, enabling...\n");
+ ast_vhub_dev_enable(d);
+ } else {
+ DDBG(d, "Reset of enabled device, resetting...\n");
spin_unlock(&d->vhub->lock);
- d->driver->reset(&d->gadget);
+ usb_gadget_udc_reset(&d->gadget, d->driver);
spin_lock(&d->vhub->lock);
/*
- * Disable/re-enable HW, this will clear the address
+ * Disable and maybe re-enable HW, this will clear the address
* and speed setting.
*/
ast_vhub_dev_disable(d);
ast_vhub_dev_enable(d);
-
- /* Clear stall on all EPs */
- for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++) {
- struct ast_vhub_ep *ep = d->epns[i];
-
- if (ep && ep->epn.stalled) {
- ep->epn.stalled = false;
- ast_vhub_update_epn_stall(ep);
- }
- }
-
- /* Additional cleanups */
- d->wakeup_en = false;
- d->suspended = false;
}
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index e2927fb083cf..022b777b85f8 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -105,18 +105,20 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
(crq.bRequestType & USB_DIR_IN) ? "in" : "out",
ep->ep0.state);
- /* Check our state, cancel pending requests if needed */
- if (ep->ep0.state != ep0_state_token) {
+ /*
+ * Check our state, cancel pending requests if needed
+ *
+ * Note: Under some circumstances, we can get a new setup
+ * packet while waiting for the stall ack; just accept it.
+ *
+ * In any case, a SETUP packet in the wrong state should have
+ * reset the HW state machine, so just log it, nuke any
+ * pending requests, and move on.
+ */
+ if (ep->ep0.state != ep0_state_token &&
+ ep->ep0.state != ep0_state_stall) {
EPDBG(ep, "wrong state\n");
ast_vhub_nuke(ep, -EIO);
-
- /*
- * Accept the packet regardless, this seems to happen
- * when stalling a SETUP packet that has an OUT data
- * phase.
- */
- ast_vhub_nuke(ep, 0);
- goto stall;
}
/* Calculate next state for EP0 */
@@ -165,7 +167,7 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
stall:
EPDBG(ep, "stalling\n");
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
- ep->ep0.state = ep0_state_status;
+ ep->ep0.state = ep0_state_stall;
ep->ep0.dir_in = false;
return;
@@ -299,8 +301,8 @@ void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
(!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
(ep->ep0.dir_in != in_ack)) {
+ /* In that case, ignore interrupt */
dev_warn(dev, "irq state mismatch");
- stall = true;
break;
}
/*
@@ -335,12 +337,22 @@ void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
dev_warn(dev, "status direction mismatch\n");
stall = true;
}
+ break;
+ case ep0_state_stall:
+ /*
+ * There shouldn't be any request left, but nuke just in case;
+ * otherwise a stale request would block subsequent ones.
+ */
+ ast_vhub_nuke(ep, -EIO);
+ break;
}
- /* Reset to token state */
- ep->ep0.state = ep0_state_token;
- if (stall)
+ /* Reset to token state or stall */
+ if (stall) {
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
+ ep->ep0.state = ep0_state_stall;
+ } else
+ ep->ep0.state = ep0_state_token;
}
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
@@ -367,7 +379,7 @@ static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
return -EINVAL;
/* Disabled device */
- if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
+ if (ep->dev && !ep->dev->enabled)
return -ESHUTDOWN;
/* Data, no buffer and not internal ? */
@@ -390,8 +402,12 @@ static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
spin_lock_irqsave(&vhub->lock, flags);
/* EP0 can only support a single request at a time */
- if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
+ if (!list_empty(&ep->queue) ||
+ ep->ep0.state == ep0_state_token ||
+ ep->ep0.state == ep0_state_stall) {
dev_warn(dev, "EP0: Request in wrong state\n");
+ EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
+ list_empty(&ep->queue), ep->ep0.state);
spin_unlock_irqrestore(&vhub->lock, flags);
return -EBUSY;
}
@@ -459,6 +475,15 @@ static const struct usb_ep_ops ast_vhub_ep0_ops = {
.free_request = ast_vhub_free_request,
};
+void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
+{
+ struct ast_vhub_ep *ep = &dev->ep0;
+
+ ast_vhub_nuke(ep, -EIO);
+ ep->ep0.state = ep0_state_token;
+}
+
+
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
struct ast_vhub_dev *dev)
{
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 35941dc125f9..7475c74aa5c5 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -352,7 +352,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
/* Endpoint enabled ? */
if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
- !ep->dev->enabled || ep->dev->suspended) {
+ !ep->dev->enabled) {
EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 7c040f56100e..19b3517e04c0 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -449,8 +449,15 @@ static void ast_vhub_change_port_stat(struct ast_vhub *vhub,
USB_PORT_STAT_C_OVERCURRENT |
USB_PORT_STAT_C_RESET |
USB_PORT_STAT_C_L1;
- p->change |= chg;
+ /*
+ * We only set USB_PORT_STAT_C_ENABLE if we are disabling
+ * the port, as per the USB spec; otherwise MacOS gets upset.
+ */
+ if (p->status & USB_PORT_STAT_ENABLE)
+ chg &= ~USB_PORT_STAT_C_ENABLE;
+
+ p->change = chg;
ast_vhub_update_hub_ep1(vhub, port);
}
}
@@ -723,6 +730,12 @@ enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
case ClearPortFeature:
EPDBG(ep, "ClearPortFeature(%d,%d)\n", wIndex & 0xf, wValue);
return ast_vhub_clr_port_feature(ep, wIndex & 0xf, wValue);
+ case ClearTTBuffer:
+ case ResetTT:
+ case StopTT:
+ return std_req_complete;
+ case GetTTState:
+ return ast_vhub_simple_reply(ep, 0, 0, 0, 0);
default:
EPDBG(ep, "Unknown class request\n");
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 4ed03d33a5a9..761919e220d3 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -257,6 +257,7 @@ enum ep0_state {
ep0_state_token,
ep0_state_data,
ep0_state_status,
+ ep0_state_stall,
};
/*
@@ -353,7 +354,6 @@ struct ast_vhub_dev {
struct usb_gadget_driver *driver;
bool registered : 1;
bool wakeup_en : 1;
- bool suspended : 1;
bool enabled : 1;
/* Endpoint structures */
@@ -507,6 +507,7 @@ void ast_vhub_init_hw(struct ast_vhub *vhub);
/* ep0.c */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack);
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep);
+void ast_vhub_reset_ep0(struct ast_vhub_dev *dev);
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
struct ast_vhub_dev *dev);
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 503d275bc4c4..86ffc8307864 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -327,6 +327,7 @@ static int usba_config_fifo_table(struct usba_udc *udc)
switch (fifo_mode) {
default:
fifo_mode = 0;
+ /* fall through */
case 0:
udc->fifo_cfg = NULL;
n = 0;
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index c1fcc77403ea..97b16463f3ef 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2328,10 +2328,8 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource #0\n");
+ if (irq < 0)
goto out_uninit;
- }
if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
dev_name(dev), udc) < 0)
goto report_request_failure;
@@ -2339,10 +2337,8 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
irq = platform_get_irq(pdev, i + 1);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource #%d\n", i + 1);
+ if (irq < 0)
goto out_uninit;
- }
if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
dev_name(dev), &udc->iudma[i]) < 0)
goto report_request_failure;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index ccbd1d34eb2a..cc4a16e253ac 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -515,10 +515,8 @@ static int bdc_probe(struct platform_device *pdev)
return -ENOMEM;
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "platform_get_irq failed:%d\n", irq);
+ if (irq < 0)
return irq;
- }
spin_lock_init(&bdc->lock);
platform_set_drvdata(pdev, bdc);
bdc->irq = irq;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 7cf34beb50df..92af8dc98c3d 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1143,7 +1143,7 @@ static int check_pending_gadget_drivers(struct usb_udc *udc)
dev_name(&udc->dev)) == 0) {
ret = udc_bind_to_driver(udc, driver);
if (ret != -EPROBE_DEFER)
- list_del(&driver->pending);
+ list_del_init(&driver->pending);
break;
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 729e60e49564..7a0e9a58c2d8 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2134,19 +2134,15 @@ static int gr_probe(struct platform_device *pdev)
return PTR_ERR(regs);
dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq <= 0) {
- dev_err(dev->dev, "No irq found\n");
+ if (dev->irq <= 0)
return -ENODEV;
- }
/* Some core configurations have separate irqs for IN and OUT events */
dev->irqi = platform_get_irq(pdev, 1);
if (dev->irqi > 0) {
dev->irqo = platform_get_irq(pdev, 2);
- if (dev->irqo <= 0) {
- dev_err(dev->dev, "Found irqi but not irqo\n");
+ if (dev->irqo <= 0)
return -ENODEV;
- }
} else {
dev->irqi = 0;
}
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 5f1b14f3e5a0..b3e073fb88c6 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/prefetch.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
@@ -35,8 +36,6 @@
#include <linux/seq_file.h>
#endif
-#include <mach/hardware.h>
-
/*
* USB device configuration structure
*/
@@ -742,7 +741,6 @@ static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
* response data */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
- u32 tmp;
int to = 1000;
/* Write a command and read data from the protocol engine */
@@ -752,7 +750,6 @@ static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
/* Write command code */
udc_protocol_cmd_w(udc, cmd);
- tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
&& (to > 0))
to--;
@@ -1992,7 +1989,7 @@ void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
/* DMA end of transfer completion */
static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
- u32 status, epstatus;
+ u32 status;
struct lpc32xx_request *req;
struct lpc32xx_usbd_dd_gad *dd;
@@ -2086,7 +2083,7 @@ static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
udc_clearep_getsts(udc, ep->hwep_num);
uda_enable_hwepint(udc, ep->hwep_num);
- epstatus = udc_clearep_getsts(udc, ep->hwep_num);
+ udc_clearep_getsts(udc, ep->hwep_num);
/* Let the EP interrupt handle the ZLP */
return;
@@ -2198,7 +2195,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
struct usb_ctrlrequest ctrlpkt;
int i, bytes;
- u16 wIndex, wValue, wLength, reqtype, req, tmp;
+ u16 wIndex, wValue, reqtype, req, tmp;
/* Nuke previous transfers */
nuke(ep0, -EPROTO);
@@ -2214,7 +2211,6 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
/* Native endianness */
wIndex = le16_to_cpu(ctrlpkt.wIndex);
wValue = le16_to_cpu(ctrlpkt.wValue);
- wLength = le16_to_cpu(ctrlpkt.wLength);
reqtype = le16_to_cpu(ctrlpkt.bRequestType);
/* Set direction of EP0 */
@@ -2265,7 +2261,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
default:
break;
}
-
+ break;
case USB_REQ_SET_ADDRESS:
if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
@@ -3061,11 +3057,8 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
/* Get IRQs */
for (i = 0; i < 4; i++) {
udc->udp_irq[i] = platform_get_irq(pdev, i);
- if (udc->udp_irq[i] < 0) {
- dev_err(udc->dev,
- "irq resource %d not available!\n", i);
+ if (udc->udp_irq[i] < 0)
return udc->udp_irq[i];
- }
}
udc->udp_baseaddr = devm_ioremap_resource(dev, res);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index b6bbe2e448ba..51efee21915f 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2244,30 +2244,40 @@ static void usb_reinit_338x(struct net2280 *dev)
}
/* Hardware Defect and Workaround */
- val = readl(&dev->ll_lfps_regs->ll_lfps_5);
+ val = readl(&dev->llregs->ll_lfps_5);
val &= ~(0xf << TIMER_LFPS_6US);
val |= 0x5 << TIMER_LFPS_6US;
- writel(val, &dev->ll_lfps_regs->ll_lfps_5);
+ writel(val, &dev->llregs->ll_lfps_5);
- val = readl(&dev->ll_lfps_regs->ll_lfps_6);
+ val = readl(&dev->llregs->ll_lfps_6);
val &= ~(0xffff << TIMER_LFPS_80US);
val |= 0x0100 << TIMER_LFPS_80US;
- writel(val, &dev->ll_lfps_regs->ll_lfps_6);
+ writel(val, &dev->llregs->ll_lfps_6);
/*
* AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
* Hot Reset Exit Handshake may Fail in Specific Case using
* Default Register Settings. Workaround for Enumeration test.
*/
- val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
+ val = readl(&dev->llregs->ll_tsn_counters_2);
val &= ~(0x1f << HOT_TX_NORESET_TS2);
val |= 0x10 << HOT_TX_NORESET_TS2;
- writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
+ writel(val, &dev->llregs->ll_tsn_counters_2);
- val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
+ val = readl(&dev->llregs->ll_tsn_counters_3);
val &= ~(0x1f << HOT_RX_RESET_TS2);
val |= 0x3 << HOT_RX_RESET_TS2;
- writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
+ writel(val, &dev->llregs->ll_tsn_counters_3);
+
+ /*
+ * AB errata. Errata 11. Workaround for Default Duration of LFPS
+ * Handshake Signaling for Device-Initiated U1 Exit is too short.
+ * Without this, various enumeration failures are observed with
+ * modern SuperSpeed hosts.
+ */
+ val = readl(&dev->llregs->ll_lfps_timers_2);
+ writel((val & 0xffff0000) | LFPS_TIMERS_2_WORKAROUND_VALUE,
+ &dev->llregs->ll_lfps_timers_2);
/*
* Set Recovery Idle to Recover bit:
@@ -2276,10 +2286,10 @@ static void usb_reinit_338x(struct net2280 *dev)
* - It is safe to set for all connection speeds; all chip revisions.
* - R-M-W to leave other bits undisturbed.
* - Reference PLX TT-7372
- */
- val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
+ */
+ val = readl(&dev->llregs->ll_tsn_chicken_bit);
val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
- writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
+ writel(val, &dev->llregs->ll_tsn_chicken_bit);
INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
@@ -3669,12 +3679,6 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(base + 0x00b4);
dev->llregs = (struct usb338x_ll_regs __iomem *)
(base + 0x0700);
- dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
- (base + 0x0748);
- dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
- (base + 0x077c);
- dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
- (base + 0x079c);
dev->plregs = (struct usb338x_pl_regs __iomem *)
(base + 0x0800);
usbstat = readl(&dev->usb->usbstat);
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index b65a797544d7..85d3ca1698ba 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -178,9 +178,6 @@ struct net2280 {
struct net2280_dep_regs __iomem *dep;
struct net2280_ep_regs __iomem *epregs;
struct usb338x_ll_regs __iomem *llregs;
- struct usb338x_ll_lfps_regs __iomem *ll_lfps_regs;
- struct usb338x_ll_tsn_regs __iomem *ll_tsn_regs;
- struct usb338x_ll_chi_regs __iomem *ll_chicken_reg;
struct usb338x_pl_regs __iomem *plregs;
struct dma_pool *requests;
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index cded51f36fc1..265dab2bbfac 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -3046,8 +3046,7 @@ static void pch_udc_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int pch_udc_suspend(struct device *d)
{
- struct pci_dev *pdev = to_pci_dev(d);
- struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+ struct pch_udc_dev *dev = dev_get_drvdata(d);
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 87062d22134d..e098f16c01cb 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/sys_soc.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
@@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (usb3->forced_b_device)
return -EBUSY;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode_is_host = true;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode_is_host = false;
else
return -EINVAL;
@@ -2743,10 +2744,8 @@ static int renesas_usb3_probe(struct platform_device *pdev)
priv = of_device_get_match_data(&pdev->dev);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
if (!usb3)
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 31c7c5587cf9..858993c73442 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1311,10 +1311,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
s3c_hsudc_setup_ep(hsudc);
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to obtain IRQ number\n");
+ if (ret < 0)
goto err_res;
- }
hsudc->irq = ret;
ret = devm_request_irq(&pdev->dev, hsudc->irq, s3c_hsudc_irq, 0,
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index af3e63316ace..f82208fbc249 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -312,6 +312,7 @@ static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
switch (idx) {
default:
idx = 0;
+ /* fall through */
case 0:
fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
break;
@@ -416,6 +417,7 @@ static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
switch (idx) {
default:
idx = 0;
+ /* fall through */
case 0:
fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
break;
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index b1f4104d1283..29d8e5f8bb58 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2074,10 +2074,8 @@ static int xudc_probe(struct platform_device *pdev)
return PTR_ERR(udc->addr);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, xudc_irq, 0,
dev_name(&pdev->dev), udc);
if (ret < 0) {
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 40b5de597112..79b2e79dddd0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -287,12 +287,6 @@ config USB_EHCI_MV
Dova, Armada 370 and Armada XP. See "Support for Marvell EBU
on-chip EHCI USB controller" for those.
-config USB_W90X900_EHCI
- tristate "W90X900(W90P910) EHCI support"
- depends on ARCH_W90X900
- ---help---
- Enables support for the W90X900 USB controller
-
config USB_CNS3XXX_EHCI
bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
depends on ARCH_CNS3XXX
@@ -441,7 +435,8 @@ config USB_OHCI_HCD_S3C2410
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && ARCH_LPC32XX
+ depends on USB_OHCI_HCD
+ depends on ARCH_LPC32XX || COMPILE_TEST
depends on USB_ISP1301
default y
---help---
@@ -717,32 +712,6 @@ config USB_RENESAS_USBHS_HCD
To compile this driver as a module, choose M here: the
module will be called renesas-usbhs.
-config USB_WHCI_HCD
- tristate "Wireless USB Host Controller Interface (WHCI) driver"
- depends on USB_PCI && USB && UWB
- select USB_WUSB
- select UWB_WHCI
- help
- A driver for PCI-based Wireless USB Host Controllers that are
- compliant with the WHCI specification.
-
- To compile this driver a module, choose M here: the module
- will be called "whci-hcd".
-
-config USB_HWA_HCD
- tristate "Host Wire Adapter (HWA) driver"
- depends on USB && UWB
- select USB_WUSB
- select UWB_HWA
- help
- This driver enables you to connect Wireless USB devices to
- your system using a Host Wire Adaptor USB dongle. This is an
- UWB Radio Controller and WUSB Host Controller connected to
- your machine via USB (specified in WUSB1.0).
-
- To compile this driver a module, choose M here: the module
- will be called "hwa-hc".
-
config USB_IMX21_HCD
tristate "i.MX21 HCD support"
depends on ARM && ARCH_MXC
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 84514f71ae44..b191361257cc 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -35,8 +35,6 @@ ifneq ($(CONFIG_DEBUG_FS),)
xhci-hcd-y += xhci-debugfs.o
endif
-obj-$(CONFIG_USB_WHCI_HCD) += whci/
-
obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -51,7 +49,6 @@ obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
-obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
@@ -82,7 +79,6 @@ obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
-obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 3ba140ceaf52..e893467d659c 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -100,9 +100,6 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
retval = -ENODEV;
goto fail_create_hcd;
}
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 3a29a1a8519c..01debfd03d4a 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -41,6 +41,7 @@ struct exynos_ehci_hcd {
struct clk *clk;
struct device_node *of_node;
struct phy *phy[PHY_NUMBER];
+ bool legacy_phy;
};
#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
@@ -50,10 +51,22 @@ static int exynos_ehci_get_phy(struct device *dev,
{
struct device_node *child;
struct phy *phy;
- int phy_number;
+ int phy_number, num_phys;
int ret;
/* Get PHYs for the controller */
+ num_phys = of_count_phandle_with_args(dev->of_node, "phys",
+ "#phy-cells");
+ for (phy_number = 0; phy_number < num_phys; phy_number++) {
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, phy_number);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+ exynos_ehci->phy[phy_number] = phy;
+ }
+ if (num_phys > 0)
+ return 0;
+
+ /* Get PHYs using legacy bindings */
for_each_available_child_of_node(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
@@ -84,6 +97,7 @@ static int exynos_ehci_get_phy(struct device *dev,
}
}
+ exynos_ehci->legacy_phy = true;
return 0;
}
@@ -205,11 +219,12 @@ static int exynos_ehci_probe(struct platform_device *pdev)
ehci->caps = hcd->regs;
/*
- * Workaround: reset of_node pointer to avoid conflict between Exynos
- * EHCI port subnodes and generic USB device bindings
+ * Workaround: reset of_node pointer to avoid conflict between legacy
+ * Exynos EHCI port subnodes and generic USB device bindings
*/
exynos_ehci->of_node = pdev->dev.of_node;
- pdev->dev.of_node = NULL;
+ if (exynos_ehci->legacy_phy)
+ pdev->dev.of_node = NULL;
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 656b8c08efc8..a2c3b4ec8a8b 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -30,7 +30,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9da7e22848c9..cf2b7ae93b7e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1193,7 +1193,7 @@ static const struct hc_driver ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 7d20296cbe9f..fc125b3d06e7 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -115,10 +115,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "EHCI irq failed: %d\n", irq);
+ if (irq < 0)
return irq;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 790acf3633e8..a319b1df3011 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -223,9 +223,6 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
err = -ENODEV;
goto err;
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe9422d3bcdc..b0882c13a1d1 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_AMD:
/* AMD PLL quirk */
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
@@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_ATI:
/* AMD PLL quirk */
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/*
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 4c306fb6b069..769749ca5961 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -145,10 +145,8 @@ static int ehci_platform_probe(struct platform_device *dev)
}
irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "no irq provided");
+ if (irq < 0)
return irq;
- }
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 46e160370d6e..a2b610dbedfc 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -250,7 +250,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 576f7d79ad4e..6bbaee74f7e7 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -31,7 +31,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 454d8c624a3f..fb52133c3557 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -59,7 +59,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index a9ee767952c1..2afde14dc425 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -33,7 +33,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+ .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
@@ -85,9 +85,6 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
ret = -ENODEV;
goto fail_create_hcd;
}
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index ccb4e611001d..f74433aac948 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -158,10 +158,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
return -ENODEV;
irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "no irq provided");
+ if (irq < 0)
return irq;
- }
res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res_mem) {
dev_err(&dev->dev, "no memory resource provided");
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
deleted file mode 100644
index 6d77ace1697b..000000000000
--- a/drivers/usb/host/ehci-w90x900.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/driver/usb/host/ehci-w90x900.c
- *
- * Copyright (c) 2008 Nuvoton technology corporation.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-
-#include "ehci.h"
-
-/* enable phy0 and phy1 for w90p910 */
-#define ENPHY (0x01<<8)
-#define PHY0_CTR (0xA4)
-#define PHY1_CTR (0xA8)
-
-#define DRIVER_DESC "EHCI w90x900 driver"
-
-static const char hcd_name[] = "ehci-w90x900 ";
-
-static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
-
-static int ehci_w90x900_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- struct resource *res;
- int retval = 0, irq;
- unsigned long val;
-
- hcd = usb_create_hcd(&ehci_w90x900_hc_driver,
- &pdev->dev, "w90x900 EHCI");
- if (!hcd) {
- retval = -ENOMEM;
- goto err1;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- retval = PTR_ERR(hcd->regs);
- goto err2;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- ehci = hcd_to_ehci(hcd);
- ehci->caps = hcd->regs;
- ehci->regs = hcd->regs +
- HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
-
- /* enable PHY 0,1,the regs only apply to w90p910
- * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
- * w90p910 IC relative to ehci->regs.
- */
- val = __raw_readl(ehci->regs+PHY0_CTR);
- val |= ENPHY;
- __raw_writel(val, ehci->regs+PHY0_CTR);
-
- val = __raw_readl(ehci->regs+PHY1_CTR);
- val |= ENPHY;
- __raw_writel(val, ehci->regs+PHY1_CTR);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- retval = irq;
- goto err2;
- }
-
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (retval != 0)
- goto err2;
-
- device_wakeup_enable(hcd->self.controller);
- return retval;
-err2:
- usb_put_hcd(hcd);
-err1:
- return retval;
-}
-
-static int ehci_w90x900_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver ehci_hcd_w90x900_driver = {
- .probe = ehci_w90x900_probe,
- .remove = ehci_w90x900_remove,
- .driver = {
- .name = "w90x900-ehci",
- },
-};
-
-static int __init ehci_w90X900_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
- ehci_init_driver(&ehci_w90x900_hc_driver, NULL);
- return platform_driver_register(&ehci_hcd_w90x900_driver);
-}
-module_init(ehci_w90X900_init);
-
-static void __exit ehci_w90X900_cleanup(void)
-{
- platform_driver_unregister(&ehci_hcd_w90x900_driver);
-}
-module_exit(ehci_w90X900_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_ALIAS("platform:w90p910-ehci");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index d2a27578e440..67a6ee8cb5d8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -66,7 +66,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 48fe9e6c2465..04733876c9c6 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -538,7 +538,7 @@ static const struct hc_driver fhci_driver = {
/* generic hardware linkage */
.irq = fhci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
+ .flags = HCD_DMA | HCD_USB11 | HCD_MEMORY,
/* basic lifecycle operation */
.start = fhci_start,
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 77cc36efae95..9e0c98d6bdb0 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* see what we found out */
temp = check_reset_complete(fotg210, wIndex, status_reg,
fotg210_readl(fotg210, status_reg));
+
+ /* restart schedule */
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
}
if (!(temp & (PORT_RESUME|PORT_RESET))) {
@@ -5504,7 +5508,7 @@ static const struct hc_driver fotg210_fotg210_hc_driver = {
* generic hardware linkage
*/
.irq = fotg210_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 6e3dad19d369..5835f9966204 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1771,7 +1771,7 @@ static const struct hc_driver imx21_hc_driver = {
.product_desc = "IMX21 USB Host Controller",
.hcd_priv_size = sizeof(struct imx21),
- .flags = HCD_USB11,
+ .flags = HCD_DMA | HCD_USB11,
.irq = imx21_irq,
.reset = imx21_hc_reset,
@@ -1836,10 +1836,8 @@ static int imx21_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
hcd = usb_create_hcd(&imx21_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 74da136d322a..a87c0b26279e 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1581,12 +1581,6 @@ static int isp116x_probe(struct platform_device *pdev)
irq = ires->start;
irqflags = ires->flags & IRQF_TRIGGER_MASK;
- if (pdev->dev.dma_mask) {
- DBG("DMA not supported\n");
- ret = -EINVAL;
- goto err1;
- }
-
if (!request_mem_region(addr->start, 2, hcd_name)) {
ret = -EBUSY;
goto err1;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 28bf8bfb091e..96f8daa11f25 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2645,11 +2645,6 @@ static int isp1362_probe(struct platform_device *pdev)
if (pdev->num_resources < 3)
return -ENODEV;
- if (pdev->dev.dma_mask) {
- DBG(1, "won't do DMA");
- return -ENODEV;
- }
-
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res)
return -ENODEV;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index afa321ab55fc..8819f502b6a6 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1800,21 +1800,6 @@ max3421_bus_resume(struct usb_hcd *hcd)
return -1;
}
-/*
- * The SPI driver already takes care of DMA-mapping/unmapping, so no
- * reason to do it twice.
- */
-static int
-max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
-{
- return 0;
-}
-
-static void
-max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
-}
-
static const struct hc_driver max3421_hcd_desc = {
.description = "max3421",
.product_desc = DRIVER_DESC,
@@ -1826,8 +1811,6 @@ static const struct hc_driver max3421_hcd_desc = {
.get_frame_number = max3421_get_frame_number,
.urb_enqueue = max3421_urb_enqueue,
.urb_dequeue = max3421_urb_dequeue,
- .map_urb_for_dma = max3421_map_urb_for_dma,
- .unmap_urb_for_dma = max3421_unmap_urb_for_dma,
.endpoint_disable = max3421_endpoint_disable,
.hub_status_data = max3421_hub_status_data,
.hub_control = max3421_hub_control,
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 905c6317e0c3..d5ce98e205c7 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -32,6 +32,7 @@ struct exynos_ohci_hcd {
struct clk *clk;
struct device_node *of_node;
struct phy *phy[PHY_NUMBER];
+ bool legacy_phy;
};
static int exynos_ohci_get_phy(struct device *dev,
@@ -39,10 +40,22 @@ static int exynos_ohci_get_phy(struct device *dev,
{
struct device_node *child;
struct phy *phy;
- int phy_number;
+ int phy_number, num_phys;
int ret;
/* Get PHYs for the controller */
+ num_phys = of_count_phandle_with_args(dev->of_node, "phys",
+ "#phy-cells");
+ for (phy_number = 0; phy_number < num_phys; phy_number++) {
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, phy_number);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+ exynos_ohci->phy[phy_number] = phy;
+ }
+ if (num_phys > 0)
+ return 0;
+
+ /* Get PHYs using legacy bindings */
for_each_available_child_of_node(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
@@ -73,6 +86,7 @@ static int exynos_ohci_get_phy(struct device *dev,
}
}
+ exynos_ohci->legacy_phy = true;
return 0;
}
@@ -172,11 +186,12 @@ static int exynos_ohci_probe(struct platform_device *pdev)
}
/*
- * Workaround: reset of_node pointer to avoid conflict between Exynos
- * OHCI port subnodes and generic USB device bindings
+ * Workaround: reset of_node pointer to avoid conflict between legacy
+ * Exynos OHCI port subnodes and generic USB device bindings
*/
exynos_ohci->of_node = pdev->dev.of_node;
- pdev->dev.of_node = NULL;
+ if (exynos_ohci->legacy_phy)
+ pdev->dev.of_node = NULL;
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b457fdaff297..4de91653a2c7 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci;
@@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd)
ohci->rh_state = OHCI_RH_HALTED;
}
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ohci->lock, flags);
+ _ohci_shutdown(hcd);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
@@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t)
died:
usb_hc_died(ohci_to_hcd(ohci));
ohci_dump(ohci);
- ohci_shutdown(ohci_to_hcd(ohci));
+ _ohci_shutdown(ohci_to_hcd(ohci));
goto done;
} else {
/* No write back because the done queue was empty */
@@ -1178,7 +1187,7 @@ static const struct hc_driver ohci_hc_driver = {
* generic hardware linkage
*/
.irq = ohci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index f5f532601092..c561881d0e79 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -29,10 +29,7 @@
#include "ohci.h"
-#include <mach/hardware.h>
-
#define USB_CONFIG_BASE 0x31020000
-#define USB_OTG_STAT_CONTROL IO_ADDRESS(USB_CONFIG_BASE + 0x110)
/* USB_OTG_STAT_CONTROL bit defines */
#define TRANSPARENT_I2C_EN (1 << 7)
@@ -122,19 +119,33 @@ static inline void isp1301_vbus_off(void)
static void ohci_nxp_start_hc(void)
{
- unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+ void __iomem *usb_otg_stat_control = ioremap(USB_CONFIG_BASE + 0x110, 4);
+ unsigned long tmp;
+
+ if (WARN_ON(!usb_otg_stat_control))
+ return;
+
+ tmp = __raw_readl(usb_otg_stat_control) | HOST_EN;
- __raw_writel(tmp, USB_OTG_STAT_CONTROL);
+ __raw_writel(tmp, usb_otg_stat_control);
isp1301_vbus_on();
+
+ iounmap(usb_otg_stat_control);
}
static void ohci_nxp_stop_hc(void)
{
+ void __iomem *usb_otg_stat_control = ioremap(USB_CONFIG_BASE + 0x110, 4);
unsigned long tmp;
+ if (WARN_ON(!usb_otg_stat_control))
+ return;
+
isp1301_vbus_off();
- tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
- __raw_writel(tmp, USB_OTG_STAT_CONTROL);
+ tmp = __raw_readl(usb_otg_stat_control) & ~HOST_EN;
+ __raw_writel(tmp, usb_otg_stat_control);
+
+ iounmap(usb_otg_stat_control);
}
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a033f7d855e0..f4e13a3fddee 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ohci->flags |= OHCI_QUIRK_AMD_PLL;
/* SB800 needs pre-fetch fix */
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 65a1c3fdc88c..7addfc2cbadc 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -111,10 +111,8 @@ static int ohci_platform_probe(struct platform_device *dev)
return err;
irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "no irq provided");
+ if (irq < 0)
return irq;
- }
hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 76a9b40b08f1..45f7cceb6df3 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -50,7 +50,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
+ .flags = HCD_USB11 | HCD_DMA | HCD_MEMORY,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 395f9d3bc849..f77cd6af0ccf 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -46,7 +46,7 @@ static const struct hc_driver ps3_ohci_hc_driver = {
.product_desc = "PS3 OHCI Host Controller",
.hcd_priv_size = sizeof(struct ohci_hcd),
.irq = ohci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
.reset = ps3_ohci_hc_reset,
.start = ps3_ohci_hc_start,
.stop = ohci_stop,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 3e2474959735..7679fb583e41 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -148,7 +148,7 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
uhcrhda |= RH_A_NPS;
break;
case PMM_GLOBAL_MODE:
- uhcrhda &= ~(RH_A_NPS & RH_A_PSM);
+ uhcrhda &= ~(RH_A_NPS | RH_A_PSM);
break;
case PMM_PERPORT_MODE:
uhcrhda &= ~(RH_A_NPS);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index ebec9a7699e3..8e19a5eb5b62 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -84,7 +84,7 @@ static const struct hc_driver ohci_sa1111_hc_driver = {
* generic hardware linkage
*/
.irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
+ .flags = HCD_USB11 | HCD_DMA | HCD_MEMORY,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 638a92bd2cdc..ac796ccd93ef 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -138,10 +138,8 @@ static int st_ohci_platform_probe(struct platform_device *dev)
return -ENODEV;
irq = platform_get_irq(dev, 0);
- if (irq < 0) {
- dev_err(&dev->dev, "no irq provided");
+ if (irq < 0)
return irq;
- }
res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res_mem) {
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index d5a293a707b6..fb6f5e9ae5c6 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -97,10 +97,13 @@ static void tmio_stop_hc(struct platform_device *dev)
switch (ohci->num_ports) {
default:
dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports);
+ /* fall through */
case 3:
pm |= CCR_PM_USBPW3;
+ /* fall through */
case 2:
pm |= CCR_PM_USBPW2;
+ /* fall through */
case 1:
pm |= CCR_PM_USBPW1;
}
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 47c5515a9ce4..e67242e437ed 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -31,10 +31,449 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
-#include "oxu210hp.h"
-
#define DRIVER_VERSION "0.0.50"
+#define OXU_DEVICEID 0x00
+ #define OXU_REV_MASK 0xffff0000
+ #define OXU_REV_SHIFT 16
+ #define OXU_REV_2100 0x2100
+ #define OXU_BO_SHIFT 8
+ #define OXU_BO_MASK (0x3 << OXU_BO_SHIFT)
+ #define OXU_MAJ_REV_SHIFT 4
+ #define OXU_MAJ_REV_MASK (0xf << OXU_MAJ_REV_SHIFT)
+ #define OXU_MIN_REV_SHIFT 0
+ #define OXU_MIN_REV_MASK (0xf << OXU_MIN_REV_SHIFT)
+#define OXU_HOSTIFCONFIG 0x04
+#define OXU_SOFTRESET 0x08
+ #define OXU_SRESET (1 << 0)
+
+#define OXU_PIOBURSTREADCTRL 0x0C
+
+#define OXU_CHIPIRQSTATUS 0x10
+#define OXU_CHIPIRQEN_SET 0x14
+#define OXU_CHIPIRQEN_CLR 0x18
+ #define OXU_USBSPHLPWUI 0x00000080
+ #define OXU_USBOTGLPWUI 0x00000040
+ #define OXU_USBSPHI 0x00000002
+ #define OXU_USBOTGI 0x00000001
+
+#define OXU_CLKCTRL_SET 0x1C
+ #define OXU_SYSCLKEN 0x00000008
+ #define OXU_USBSPHCLKEN 0x00000002
+ #define OXU_USBOTGCLKEN 0x00000001
+
+#define OXU_ASO 0x68
+ #define OXU_SPHPOEN 0x00000100
+ #define OXU_OVRCCURPUPDEN 0x00000800
+ #define OXU_ASO_OP (1 << 10)
+ #define OXU_COMPARATOR 0x000004000
+
+#define OXU_USBMODE 0x1A8
+ #define OXU_VBPS 0x00000020
+ #define OXU_ES_LITTLE 0x00000000
+ #define OXU_CM_HOST_ONLY 0x00000003
+
+/*
+ * Proper EHCI structs & defines
+ */
+
+/* Magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT 0
+#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT 1
+#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
+
+struct oxu_hcd;
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct ehci_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
+#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */
+#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
+#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */
+#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */
+#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
+#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+} __packed;
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct ehci_regs {
+ /* USBCMD: offset 0x00 */
+ u32 command;
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved[9];
+
+ /* CONFIGFLAG: offset 0x40 */
+ u32 configured_flag;
+#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status[0]; /* up to N_PORTS */
+/* 31:23 reserved */
+#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
+/* 19:16 for port testing */
+#define PORT_LED_OFF (0<<14)
+#define PORT_LED_AMBER (1<<14)
+#define PORT_LED_GREEN (2<<14)
+#define PORT_LED_MASK (3<<14)
+#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
+#define PORT_POWER (1<<12) /* true: has power (see PPC) */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+/* 9 reserved */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_OCC (1<<5) /* over current change */
+#define PORT_OC (1<<4) /* over current active */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+} __packed;
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+} __packed;
+
+#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct ehci_qtd {
+ /* first part defined by EHCI spec */
+ __le32 hw_next; /* see EHCI 3.5.1 */
+ __le32 hw_alt_next; /* see EHCI 3.5.2 */
+ __le32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+ __le32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __le32 hw_buf_hi[5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+
+ u32 qtd_buffer_len;
+ void *buffer;
+ dma_addr_t buffer_dma;
+ void *transfer_buffer;
+ void *transfer_dma;
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK cpu_to_le32 (~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/* Type tag from {qh, itd, sitd, fstn}->hw_next */
+#define Q_NEXT_TYPE(dma) ((dma) & cpu_to_le32 (3 << 1))
+
+/* values for that type tag */
+#define Q_TYPE_QH cpu_to_le32 (1 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define EHCI_LIST_END cpu_to_le32(1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union ehci_shadow {
+ struct ehci_qh *qh; /* Q_TYPE_QH */
+ __le32 *hw_next; /* (all types) */
+ void *ptr;
+};
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+struct ehci_qh {
+ /* first part defined by EHCI spec */
+ __le32 hw_next; /* see EHCI 3.6.1 */
+ __le32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_HEAD 0x00008000
+ __le32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct ehci_qtd) */
+ __le32 hw_qtd_next;
+ __le32 hw_alt_next;
+ __le32 hw_token;
+ __le32 hw_buf[5];
+ __le32 hw_buf_hi[5];
+
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union ehci_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct ehci_qtd *dummy;
+ struct ehci_qh *reclaim; /* next to reclaim */
+
+ struct oxu_hcd *oxu;
+ struct kref kref;
+ unsigned int stamp;
+
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+ struct usb_device *dev; /* access to TT */
+} __aligned(32);
+
+/*
+ * Proper OXU210HP structs
+ */
+
+#define OXU_OTG_CORE_OFFSET 0x00400
+#define OXU_OTG_CAP_OFFSET (OXU_OTG_CORE_OFFSET + 0x100)
+#define OXU_SPH_CORE_OFFSET 0x00800
+#define OXU_SPH_CAP_OFFSET (OXU_SPH_CORE_OFFSET + 0x100)
+
+#define OXU_OTG_MEM 0xE000
+#define OXU_SPH_MEM 0x16000
+
+/* Only the number of elements and the element structure are specified here. */
+/* 2 host controllers are enabled - total size <= 28 kbytes */
+#define DEFAULT_I_TDPS 1024
+#define QHEAD_NUM 16
+#define QTD_NUM 32
+#define SITD_NUM 8
+#define MURB_NUM 8
+
+#define BUFFER_NUM 8
+#define BUFFER_SIZE 512
+
+struct oxu_info {
+ struct usb_hcd *hcd[2];
+};
+
+struct oxu_buf {
+ u8 buffer[BUFFER_SIZE];
+} __aligned(BUFFER_SIZE);
+
+struct oxu_onchip_mem {
+ struct oxu_buf db_pool[BUFFER_NUM];
+
+ u32 frame_list[DEFAULT_I_TDPS];
+ struct ehci_qh qh_pool[QHEAD_NUM];
+ struct ehci_qtd qtd_pool[QTD_NUM];
+} __aligned(4 << 10);
+
+#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+
+struct oxu_murb {
+ struct urb urb;
+ struct urb *main;
+ u8 last;
+};
+
+struct oxu_hcd { /* one per controller */
+ unsigned int is_otg:1;
+
+ u8 qh_used[QHEAD_NUM];
+ u8 qtd_used[QTD_NUM];
+ u8 db_used[BUFFER_NUM];
+ u8 murb_used[MURB_NUM];
+
+ struct oxu_onchip_mem __iomem *mem;
+ spinlock_t mem_lock;
+
+ struct timer_list urb_timer;
+
+ struct ehci_caps __iomem *caps;
+ struct ehci_regs __iomem *regs;
+
+ u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+
+ /* async schedule support */
+ struct ehci_qh *async;
+ struct ehci_qh *reclaim;
+ unsigned int reclaim_ready:1;
+ unsigned int scanning:1;
+
+ /* periodic schedule support */
+ unsigned int periodic_size;
+ __le32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ unsigned int i_thresh; /* uframes HC might cache */
+
+ union ehci_shadow *pshadow; /* mirror hw periodic table */
+ int next_uframe; /* scan periodic, start here */
+ unsigned int periodic_sched; /* periodic activity count */
+
+ /* per root hub port */
+ unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ * already suspended at the
+ * start of a bus suspend
+ */
+ unsigned long companion_ports;/* which ports are dedicated
+ * to the companion controller
+ */
+
+ struct timer_list watchdog;
+ unsigned long actions;
+ unsigned int stamp;
+ unsigned long next_statechange;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ struct list_head urb_list; /* head of the queue of urbs
+ * that didn't get enough
+ * resources
+ */
+ struct oxu_murb *murb_pool; /* murb per split big urb */
+ unsigned int urb_len;
+
+ u8 sbrn; /* packed release number */
+};
+
+#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
+#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
+#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
+#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
+
+enum ehci_timer_action {
+ TIMER_IO_WATCHDOG,
+ TIMER_IAA_WATCHDOG,
+ TIMER_ASYNC_SHRINK,
+ TIMER_ASYNC_OFF,
+};
+
/*
* Main defines
*/
@@ -2649,9 +3088,6 @@ static int oxu_reset(struct usb_hcd *hcd)
INIT_LIST_HEAD(&oxu->urb_list);
oxu->urb_len = 0;
- /* FIMXE */
- hcd->self.controller->dma_mask = NULL;
-
if (oxu->is_otg) {
oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
diff --git a/drivers/usb/host/oxu210hp.h b/drivers/usb/host/oxu210hp.h
deleted file mode 100644
index 437044147862..000000000000
--- a/drivers/usb/host/oxu210hp.h
+++ /dev/null
@@ -1,448 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Host interface registers
- */
-
-#define OXU_DEVICEID 0x00
- #define OXU_REV_MASK 0xffff0000
- #define OXU_REV_SHIFT 16
- #define OXU_REV_2100 0x2100
- #define OXU_BO_SHIFT 8
- #define OXU_BO_MASK (0x3 << OXU_BO_SHIFT)
- #define OXU_MAJ_REV_SHIFT 4
- #define OXU_MAJ_REV_MASK (0xf << OXU_MAJ_REV_SHIFT)
- #define OXU_MIN_REV_SHIFT 0
- #define OXU_MIN_REV_MASK (0xf << OXU_MIN_REV_SHIFT)
-#define OXU_HOSTIFCONFIG 0x04
-#define OXU_SOFTRESET 0x08
- #define OXU_SRESET (1 << 0)
-
-#define OXU_PIOBURSTREADCTRL 0x0C
-
-#define OXU_CHIPIRQSTATUS 0x10
-#define OXU_CHIPIRQEN_SET 0x14
-#define OXU_CHIPIRQEN_CLR 0x18
- #define OXU_USBSPHLPWUI 0x00000080
- #define OXU_USBOTGLPWUI 0x00000040
- #define OXU_USBSPHI 0x00000002
- #define OXU_USBOTGI 0x00000001
-
-#define OXU_CLKCTRL_SET 0x1C
- #define OXU_SYSCLKEN 0x00000008
- #define OXU_USBSPHCLKEN 0x00000002
- #define OXU_USBOTGCLKEN 0x00000001
-
-#define OXU_ASO 0x68
- #define OXU_SPHPOEN 0x00000100
- #define OXU_OVRCCURPUPDEN 0x00000800
- #define OXU_ASO_OP (1 << 10)
- #define OXU_COMPARATOR 0x000004000
-
-#define OXU_USBMODE 0x1A8
- #define OXU_VBPS 0x00000020
- #define OXU_ES_LITTLE 0x00000000
- #define OXU_CM_HOST_ONLY 0x00000003
-
-/*
- * Proper EHCI structs & defines
- */
-
-/* Magic numbers that can affect system performance */
-#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
-#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
-#define EHCI_TUNE_RL_TT 0
-#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
-#define EHCI_TUNE_MULT_TT 1
-#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
-
-struct oxu_hcd;
-
-/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
-
-/* Section 2.2 Host Controller Capability Registers */
-struct ehci_caps {
- /* these fields are specified as 8 and 16 bit registers,
- * but some hosts can't perform 8 or 16 bit PCI accesses.
- */
- u32 hc_capbase;
-#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
-#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
- u32 hcs_params; /* HCSPARAMS - offset 0x4 */
-#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */
-#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
-#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */
-#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */
-#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
-#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
-#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
-
- u32 hcc_params; /* HCCPARAMS - offset 0x8 */
-#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
-#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
-#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
-#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
-#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
-#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */
- u8 portroute[8]; /* nibbles for routing - offset 0xC */
-} __attribute__ ((packed));
-
-
-/* Section 2.3 Host Controller Operational Registers */
-struct ehci_regs {
- /* USBCMD: offset 0x00 */
- u32 command;
-/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
-#define CMD_PARK (1<<11) /* enable "park" on async qh */
-#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
-#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
-#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
-#define CMD_ASE (1<<5) /* async schedule enable */
-#define CMD_PSE (1<<4) /* periodic schedule enable */
-/* 3:2 is periodic frame list size */
-#define CMD_RESET (1<<1) /* reset HC not bus */
-#define CMD_RUN (1<<0) /* start/stop HC */
-
- /* USBSTS: offset 0x04 */
- u32 status;
-#define STS_ASS (1<<15) /* Async Schedule Status */
-#define STS_PSS (1<<14) /* Periodic Schedule Status */
-#define STS_RECL (1<<13) /* Reclamation */
-#define STS_HALT (1<<12) /* Not running (any reason) */
-/* some bits reserved */
- /* these STS_* flags are also intr_enable bits (USBINTR) */
-#define STS_IAA (1<<5) /* Interrupted on async advance */
-#define STS_FATAL (1<<4) /* such as some PCI access errors */
-#define STS_FLR (1<<3) /* frame list rolled over */
-#define STS_PCD (1<<2) /* port change detect */
-#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
-#define STS_INT (1<<0) /* "normal" completion (short, ...) */
-
-#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
-
- /* USBINTR: offset 0x08 */
- u32 intr_enable;
-
- /* FRINDEX: offset 0x0C */
- u32 frame_index; /* current microframe number */
- /* CTRLDSSEGMENT: offset 0x10 */
- u32 segment; /* address bits 63:32 if needed */
- /* PERIODICLISTBASE: offset 0x14 */
- u32 frame_list; /* points to periodic list */
- /* ASYNCLISTADDR: offset 0x18 */
- u32 async_next; /* address of next async queue head */
-
- u32 reserved[9];
-
- /* CONFIGFLAG: offset 0x40 */
- u32 configured_flag;
-#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
-
- /* PORTSC: offset 0x44 */
- u32 port_status[0]; /* up to N_PORTS */
-/* 31:23 reserved */
-#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
-#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
-#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
-/* 19:16 for port testing */
-#define PORT_LED_OFF (0<<14)
-#define PORT_LED_AMBER (1<<14)
-#define PORT_LED_GREEN (2<<14)
-#define PORT_LED_MASK (3<<14)
-#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
-#define PORT_POWER (1<<12) /* true: has power (see PPC) */
-#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
-/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
-/* 9 reserved */
-#define PORT_RESET (1<<8) /* reset port */
-#define PORT_SUSPEND (1<<7) /* suspend port */
-#define PORT_RESUME (1<<6) /* resume it */
-#define PORT_OCC (1<<5) /* over current change */
-#define PORT_OC (1<<4) /* over current active */
-#define PORT_PEC (1<<3) /* port enable change */
-#define PORT_PE (1<<2) /* port enable */
-#define PORT_CSC (1<<1) /* connect status change */
-#define PORT_CONNECT (1<<0) /* device connected */
-#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
-} __attribute__ ((packed));
-
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct ehci_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-} __attribute__ ((packed));
-
-
-#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
-
-/*
- * EHCI Specification 0.95 Section 3.5
- * QTD: describe data transfer components (buffer, direction, ...)
- * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
- *
- * These are associated only with "QH" (Queue Head) structures,
- * used with control, bulk, and interrupt transfers.
- */
-struct ehci_qtd {
- /* first part defined by EHCI spec */
- __le32 hw_next; /* see EHCI 3.5.1 */
- __le32 hw_alt_next; /* see EHCI 3.5.2 */
- __le32 hw_token; /* see EHCI 3.5.3 */
-#define QTD_TOGGLE (1 << 31) /* data toggle */
-#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
-#define QTD_IOC (1 << 15) /* interrupt on complete */
-#define QTD_CERR(tok) (((tok)>>10) & 0x3)
-#define QTD_PID(tok) (((tok)>>8) & 0x3)
-#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
-#define QTD_STS_HALT (1 << 6) /* halted on error */
-#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
-#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
-#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
-#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
-#define QTD_STS_STS (1 << 1) /* split transaction state */
-#define QTD_STS_PING (1 << 0) /* issue PING? */
- __le32 hw_buf[5]; /* see EHCI 3.5.4 */
- __le32 hw_buf_hi[5]; /* Appendix B */
-
- /* the rest is HCD-private */
- dma_addr_t qtd_dma; /* qtd address */
- struct list_head qtd_list; /* sw qtd list */
- struct urb *urb; /* qtd's urb */
- size_t length; /* length of buffer */
-
- u32 qtd_buffer_len;
- void *buffer;
- dma_addr_t buffer_dma;
- void *transfer_buffer;
- void *transfer_dma;
-} __attribute__ ((aligned(32)));
-
-/* mask NakCnt+T in qh->hw_alt_next */
-#define QTD_MASK cpu_to_le32 (~0x1f)
-
-#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
-
-/* Type tag from {qh, itd, sitd, fstn}->hw_next */
-#define Q_NEXT_TYPE(dma) ((dma) & cpu_to_le32 (3 << 1))
-
-/* values for that type tag */
-#define Q_TYPE_QH cpu_to_le32 (1 << 1)
-
-/* next async queue entry, or pointer to interrupt/periodic QH */
-#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
-
-/* for periodic/async schedules and qtd lists, mark end of list */
-#define EHCI_LIST_END cpu_to_le32(1) /* "null pointer" to hw */
-
-/*
- * Entries in periodic shadow table are pointers to one of four kinds
- * of data structure. That's dictated by the hardware; a type tag is
- * encoded in the low bits of the hardware's periodic schedule. Use
- * Q_NEXT_TYPE to get the tag.
- *
- * For entries in the async schedule, the type tag always says "qh".
- */
-union ehci_shadow {
- struct ehci_qh *qh; /* Q_TYPE_QH */
- __le32 *hw_next; /* (all types) */
- void *ptr;
-};
-
-/*
- * EHCI Specification 0.95 Section 3.6
- * QH: describes control/bulk/interrupt endpoints
- * See Fig 3-7 "Queue Head Structure Layout".
- *
- * These appear in both the async and (for interrupt) periodic schedules.
- */
-
-struct ehci_qh {
- /* first part defined by EHCI spec */
- __le32 hw_next; /* see EHCI 3.6.1 */
- __le32 hw_info1; /* see EHCI 3.6.2 */
-#define QH_HEAD 0x00008000
- __le32 hw_info2; /* see EHCI 3.6.2 */
-#define QH_SMASK 0x000000ff
-#define QH_CMASK 0x0000ff00
-#define QH_HUBADDR 0x007f0000
-#define QH_HUBPORT 0x3f800000
-#define QH_MULT 0xc0000000
- __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
-
- /* qtd overlay (hardware parts of a struct ehci_qtd) */
- __le32 hw_qtd_next;
- __le32 hw_alt_next;
- __le32 hw_token;
- __le32 hw_buf[5];
- __le32 hw_buf_hi[5];
-
- /* the rest is HCD-private */
- dma_addr_t qh_dma; /* address of qh */
- union ehci_shadow qh_next; /* ptr to qh; or periodic */
- struct list_head qtd_list; /* sw qtd list */
- struct ehci_qtd *dummy;
- struct ehci_qh *reclaim; /* next to reclaim */
-
- struct oxu_hcd *oxu;
- struct kref kref;
- unsigned stamp;
-
- u8 qh_state;
-#define QH_STATE_LINKED 1 /* HC sees this */
-#define QH_STATE_UNLINK 2 /* HC may still see this */
-#define QH_STATE_IDLE 3 /* HC doesn't see this */
-#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
-#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
-
- /* periodic schedule info */
- u8 usecs; /* intr bandwidth */
- u8 gap_uf; /* uframes split/csplit gap */
- u8 c_usecs; /* ... split completion bw */
- u16 tt_usecs; /* tt downstream bandwidth */
- unsigned short period; /* polling interval */
- unsigned short start; /* where polling starts */
-#define NO_FRAME ((unsigned short)~0) /* pick new start */
- struct usb_device *dev; /* access to TT */
-} __attribute__ ((aligned(32)));
-
-/*
- * Proper OXU210HP structs
- */
-
-#define OXU_OTG_CORE_OFFSET 0x00400
-#define OXU_OTG_CAP_OFFSET (OXU_OTG_CORE_OFFSET + 0x100)
-#define OXU_SPH_CORE_OFFSET 0x00800
-#define OXU_SPH_CAP_OFFSET (OXU_SPH_CORE_OFFSET + 0x100)
-
-#define OXU_OTG_MEM 0xE000
-#define OXU_SPH_MEM 0x16000
-
-/* Only how many elements & element structure are specifies here. */
-/* 2 host controllers are enabled - total size <= 28 kbytes */
-#define DEFAULT_I_TDPS 1024
-#define QHEAD_NUM 16
-#define QTD_NUM 32
-#define SITD_NUM 8
-#define MURB_NUM 8
-
-#define BUFFER_NUM 8
-#define BUFFER_SIZE 512
-
-struct oxu_info {
- struct usb_hcd *hcd[2];
-};
-
-struct oxu_buf {
- u8 buffer[BUFFER_SIZE];
-} __attribute__ ((aligned(BUFFER_SIZE)));
-
-struct oxu_onchip_mem {
- struct oxu_buf db_pool[BUFFER_NUM];
-
- u32 frame_list[DEFAULT_I_TDPS];
- struct ehci_qh qh_pool[QHEAD_NUM];
- struct ehci_qtd qtd_pool[QTD_NUM];
-} __attribute__ ((aligned(4 << 10)));
-
-#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
-
-struct oxu_murb {
- struct urb urb;
- struct urb *main;
- u8 last;
-};
-
-struct oxu_hcd { /* one per controller */
- unsigned int is_otg:1;
-
- u8 qh_used[QHEAD_NUM];
- u8 qtd_used[QTD_NUM];
- u8 db_used[BUFFER_NUM];
- u8 murb_used[MURB_NUM];
-
- struct oxu_onchip_mem __iomem *mem;
- spinlock_t mem_lock;
-
- struct timer_list urb_timer;
-
- struct ehci_caps __iomem *caps;
- struct ehci_regs __iomem *regs;
-
- __u32 hcs_params; /* cached register copy */
- spinlock_t lock;
-
- /* async schedule support */
- struct ehci_qh *async;
- struct ehci_qh *reclaim;
- unsigned reclaim_ready:1;
- unsigned scanning:1;
-
- /* periodic schedule support */
- unsigned periodic_size;
- __le32 *periodic; /* hw periodic table */
- dma_addr_t periodic_dma;
- unsigned i_thresh; /* uframes HC might cache */
-
- union ehci_shadow *pshadow; /* mirror hw periodic table */
- int next_uframe; /* scan periodic, start here */
- unsigned periodic_sched; /* periodic activity count */
-
- /* per root hub port */
- unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
- /* bit vectors (one bit per port) */
- unsigned long bus_suspended; /* which ports were
- * already suspended at the
- * start of a bus suspend
- */
- unsigned long companion_ports;/* which ports are dedicated
- * to the companion controller
- */
-
- struct timer_list watchdog;
- unsigned long actions;
- unsigned stamp;
- unsigned long next_statechange;
- u32 command;
-
- /* SILICON QUIRKS */
- struct list_head urb_list; /* this is the head to urb
- * queue that didn't get enough
- * resources
- */
- struct oxu_murb *murb_pool; /* murb per split big urb */
- unsigned urb_len;
-
- u8 sbrn; /* packed release number */
-};
-
-#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
-#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
-#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
-#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
-
-enum ehci_timer_action {
- TIMER_IO_WATCHDOG,
- TIMER_IAA_WATCHDOG,
- TIMER_ASYNC_SHRINK,
- TIMER_ASYNC_OFF,
-};
-
-#include <linux/oxu210hp.h>
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3ce71cbfbb58..f6d04491df60 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -132,7 +132,7 @@ static struct amd_chipset_info {
struct amd_chipset_type sb_type;
int isoc_reqs;
int probe_count;
- int probe_result;
+ bool need_pll_quirk;
} amd_chipset;
static DEFINE_SPINLOCK(amd_lock);
@@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on)
}
EXPORT_SYMBOL_GPL(sb800_prefetch);
-int usb_amd_find_chipset_info(void)
+static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- int ret;
+ info.need_pll_quirk = 0;
spin_lock_irqsave(&amd_lock, flags);
@@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void)
if (amd_chipset.probe_count > 0) {
amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
- return amd_chipset.probe_result;
+ return;
}
memset(&info, 0, sizeof(info));
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
- ret = 0;
goto commit;
}
- /* Below chipset generations needn't enable AMD PLL quirk */
- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
- info.sb_type.gen == AMD_CHIPSET_SB600 ||
- info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
- (info.sb_type.gen == AMD_CHIPSET_SB700 &&
- info.sb_type.rev > 0x3b)) {
+ switch (info.sb_type.gen) {
+ case AMD_CHIPSET_SB700:
+ info.need_pll_quirk = info.sb_type.rev <= 0x3B;
+ break;
+ case AMD_CHIPSET_SB800:
+ case AMD_CHIPSET_HUDSON2:
+ case AMD_CHIPSET_BOLTON:
+ info.need_pll_quirk = 1;
+ break;
+ default:
+ info.need_pll_quirk = 0;
+ break;
+ }
+
+ if (!info.need_pll_quirk) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
- ret = 0;
goto commit;
}
@@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void)
}
}
- ret = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
commit:
@@ -263,7 +269,6 @@ commit:
/* Mark that we where here */
amd_chipset.probe_count++;
- ret = amd_chipset.probe_result;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -276,10 +281,7 @@ commit:
amd_chipset = info;
spin_unlock_irqrestore(&amd_lock, flags);
}
-
- return ret;
}
-EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
@@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void)
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
+bool usb_amd_quirk_pll_check(void)
+{
+ usb_amd_find_chipset_info();
+ return amd_chipset.need_pll_quirk;
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
+
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
@@ -520,7 +529,7 @@ void usb_amd_dev_put(void)
amd_chipset.nb_type = 0;
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
- amd_chipset.probe_result = 0;
+ amd_chipset.need_pll_quirk = 0;
spin_unlock_irqrestore(&amd_lock, flags);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 63c633077d9e..e729de21fad7 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,11 +5,11 @@
#ifdef CONFIG_USB_PCI
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
-int usb_amd_find_chipset_info(void);
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
bool usb_amd_hang_symptom_quirk(void);
bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
+bool usb_amd_quirk_pll_check(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 42668aeca57c..0c03ac6b0213 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2411,12 +2411,6 @@ static int r8a66597_probe(struct platform_device *pdev)
if (usb_disabled())
return -ENODEV;
- if (pdev->dev.dma_mask) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "dma not supported\n");
- goto clean_up;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b061e599948..72a34a1eb618 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1632,12 +1632,6 @@ sl811h_probe(struct platform_device *dev)
irq = ires->start;
irqflags = ires->flags & IRQF_TRIGGER_MASK;
- /* refuse to confuse usbcore */
- if (dev->dev.dma_mask) {
- dev_dbg(&dev->dev, "no we won't dma\n");
- return -EINVAL;
- }
-
/* the chip may be wired for either kind of addressing */
addr = platform_get_resource(dev, IORESOURCE_MEM, 0);
data = platform_get_resource(dev, IORESOURCE_MEM, 1);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 400c40bc43a6..4efee34f154f 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3077,8 +3077,6 @@ static int u132_probe(struct platform_device *pdev)
retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
if (retval)
return retval;
- if (pdev->dev.dma_mask)
- return -EINVAL;
hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 2103b1ed0f8f..0a201a73b196 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -63,7 +63,7 @@ static const struct hc_driver uhci_grlib_hc_driver = {
/* Generic hardware linkage */
.irq = uhci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_grlib_init,
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 0dd944277c99..0fa3d72bae26 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -261,7 +261,7 @@ static const struct hc_driver uhci_driver = {
/* Generic hardware linkage */
.irq = uhci_irq,
- .flags = HCD_USB11,
+ .flags = HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_pci_init,
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 89700e26fb29..70dbd95c3f06 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -41,7 +41,7 @@ static const struct hc_driver uhci_platform_hc_driver = {
/* Generic hardware linkage */
.irq = uhci_irq,
- .flags = HCD_MEMORY | HCD_USB11,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB11,
/* Basic lifecycle operations */
.reset = uhci_platform_init,
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 52e32644a4b2..93e2cca5262d 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -22,7 +22,6 @@ dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
size, dma_handle, flags);
- memset(vaddr, 0, size);
return vaddr;
}
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index aff79ff5aba4..be726c791323 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -139,14 +139,14 @@ xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
struct dbc_request *req;
for (i = 0; i < DBC_QUEUE_SIZE; i++) {
- req = dbc_alloc_request(dep, GFP_ATOMIC);
+ req = dbc_alloc_request(dep, GFP_KERNEL);
if (!req)
break;
req->length = DBC_MAX_PACKET;
req->buf = kmalloc(req->length, GFP_KERNEL);
if (!req->buf) {
- xhci_dbc_free_req(dep, req);
+ dbc_free_request(dep, req);
break;
}
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
index 399113f9fc5c..f498160df969 100644
--- a/drivers/usb/host/xhci-ext-caps.c
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -6,11 +6,20 @@
*/
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pci.h>
#include "xhci.h"
#define USB_SW_DRV_NAME "intel_xhci_usb_sw"
#define USB_SW_RESOURCE_SIZE 0x400
+#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
+
+static const struct property_entry role_switch_props[] = {
+ PROPERTY_ENTRY_BOOL("sw_switch_disable"),
+ {},
+};
+
static void xhci_intel_unregister_pdev(void *arg)
{
platform_device_unregister(arg);
@@ -21,6 +30,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct device *dev = hcd->self.controller;
struct platform_device *pdev;
+ struct pci_dev *pci = to_pci_dev(dev);
struct resource res = { 0, };
int ret;
@@ -43,6 +53,14 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
return ret;
}
+ if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ ret = platform_device_add_properties(pdev, role_switch_props);
+ if (ret) {
+ dev_err(dev, "failed to register device properties\n");
+ return ret;
+ }
+ }
+
pdev->dev.parent = dev;
ret = platform_device_add(pdev);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 3abe70ff1b1e..b7d23c438756 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1149,7 +1149,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
port_li = readl(ports[wIndex]->addr + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
- put_unaligned_le32(cpu_to_le32(status), &buf[4]);
+ put_unaligned_le32(status, &buf[4]);
}
break;
case SetPortFeature:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index cf5e17962179..e16eda6e2b8b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2399,7 +2399,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
flags);
if (!xhci->dcbaa)
goto fail;
- memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 026fe18972d3..b18a6baef204 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -216,6 +216,10 @@ static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
return PTR_ERR(mtk->sys_clk);
}
+ mtk->xhci_clk = devm_clk_get_optional(dev, "xhci_ck");
+ if (IS_ERR(mtk->xhci_clk))
+ return PTR_ERR(mtk->xhci_clk);
+
mtk->ref_clk = devm_clk_get_optional(dev, "ref_ck");
if (IS_ERR(mtk->ref_clk))
return PTR_ERR(mtk->ref_clk);
@@ -244,6 +248,12 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
goto sys_clk_err;
}
+ ret = clk_prepare_enable(mtk->xhci_clk);
+ if (ret) {
+ dev_err(mtk->dev, "failed to enable xhci_clk\n");
+ goto xhci_clk_err;
+ }
+
ret = clk_prepare_enable(mtk->mcu_clk);
if (ret) {
dev_err(mtk->dev, "failed to enable mcu_clk\n");
@@ -261,6 +271,8 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
dma_clk_err:
clk_disable_unprepare(mtk->mcu_clk);
mcu_clk_err:
+ clk_disable_unprepare(mtk->xhci_clk);
+xhci_clk_err:
clk_disable_unprepare(mtk->sys_clk);
sys_clk_err:
clk_disable_unprepare(mtk->ref_clk);
@@ -272,6 +284,7 @@ static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk)
{
clk_disable_unprepare(mtk->dma_clk);
clk_disable_unprepare(mtk->mcu_clk);
+ clk_disable_unprepare(mtk->xhci_clk);
clk_disable_unprepare(mtk->sys_clk);
clk_disable_unprepare(mtk->ref_clk);
}
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 8be8c5f7ff62..5ac458b7d2e0 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -139,6 +139,7 @@ struct xhci_hcd_mtk {
struct regulator *vusb33;
struct regulator *vbus;
struct clk *sys_clk; /* sys and mac clock */
+ struct clk *xhci_clk;
struct clk *ref_clk;
struct clk *mcu_clk;
struct clk *dma_clk;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2fe218e051f..1e0236e90687 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_AMD_0x96_HOST;
/* AMD PLL quirk */
- if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check())
xhci->quirks |= XHCI_AMD_PLL_FIX;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 998241f5fce3..d90cd5ec09cf 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -66,12 +66,14 @@ static int xhci_priv_resume_quirk(struct usb_hcd *hcd)
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
- xhci->quirks |= XHCI_PLAT;
+ xhci->quirks |= XHCI_PLAT | priv->quirks;
}
/* called during probe() after chip reset completes */
@@ -103,17 +105,11 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
- .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1,
- .init_quirk = xhci_rcar_init_quirk,
- .plat_start = xhci_rcar_start,
- .resume_quirk = xhci_rcar_resume_quirk,
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
- .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
- .init_quirk = xhci_rcar_init_quirk,
- .plat_start = xhci_rcar_start,
- .resume_quirk = xhci_rcar_resume_quirk,
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -307,7 +303,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
ret = usb_phy_init(hcd->usb_phy);
if (ret)
goto put_usb3_hcd;
- hcd->skip_phy_initialization = 1;
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index ae29f22ff5bd..5681723fc9cd 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -12,10 +12,12 @@
struct xhci_plat_priv {
const char *firmware_name;
+ unsigned long long quirks;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*resume_quirk)(struct usb_hcd *);
};
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
+#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
#endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 671bce18782c..c1025d321a41 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -104,16 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
- of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
-}
-
-static int xhci_rcar_is_gen3(struct device *dev)
-{
- struct device_node *node = dev->of_node;
-
- return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
- of_device_is_compatible(node, "renesas,xhci-r8a7796") ||
- of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
+ of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
void xhci_rcar_start(struct usb_hcd *hcd)
@@ -226,27 +217,13 @@ static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
/* This function needs to initialize a "phy" of usb before */
int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
/* If hcd->regs is NULL, we don't just call the following function */
if (!hcd->regs)
return 0;
- /*
- * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
- * to 1. However, these SoCs don't support 64-bit address memory
- * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
- * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
- * xhci_gen_setup().
- */
- if (xhci_rcar_is_gen2(hcd->self.controller) ||
- xhci_rcar_is_gen3(hcd->self.controller))
- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
-
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 804b6ab4246f..012744a63a49 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -31,4 +31,25 @@ static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
return 0;
}
#endif
+
+/*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
+ *
+ * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
+ * and processing slows down when the roothub port enters U3,
+ * a long delay for the STS_HALT handshake is needed in xhci_suspend();
+ * this is handled by the XHCI_SLOW_SUSPEND quirk.
+ */
+#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
+ .firmware_name = firmware, \
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
+ XHCI_SLOW_SUSPEND, \
+ .init_quirk = xhci_rcar_init_quirk, \
+ .plat_start = xhci_rcar_start, \
+ .resume_quirk = xhci_rcar_resume_quirk,
+
#endif /* _XHCI_RCAR_H */
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index dafc65911fc0..2ff7c911fbd0 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra_xusb_config(tegra, regs);
+ /*
+ * The XUSB Falcon microcontroller can only address 40 bits, so set
+ * the DMA mask accordingly.
+ */
+ err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ goto put_rpm;
+ }
+
err = tegra_xusb_load_firmware(tegra);
if (err < 0) {
dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 248cd7a8b163..500865975687 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3089,8 +3089,18 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
return;
udev = (struct usb_device *) host_ep->hcpriv;
vdev = xhci->devs[udev->slot_id];
+
+ /*
+ * vdev may be lost due to xHC restore error and re-initialization
+ * during S3/S4 resume. A new vdev will be allocated later by
+ * xhci_discover_or_reset_device()
+ */
+ if (!udev->slot_id || !vdev)
+ return;
ep_index = xhci_get_endpoint_index(&host_ep->desc);
ep = &vdev->eps[ep_index];
+ if (!ep)
+ return;
/* Bail out if toggle is already being cleared by a endpoint reset */
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
@@ -3804,7 +3814,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
virt_dev->udev = NULL;
ret = xhci_disable_slot(xhci, udev->slot_id);
if (ret)
@@ -3822,6 +3831,8 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
if (!command)
return -ENOMEM;
+ xhci_debugfs_remove_slot(xhci, slot_id);
+
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
@@ -5207,7 +5218,7 @@ static const struct hc_driver xhci_hc_driver = {
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a264962a1a9..f9f88626a57a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
- !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
+ !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
+ !urb->num_sgs)
return true;
return false;
@@ -2336,12 +2337,13 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
break;
case TRB_RESET_EP:
sprintf(str,
- "%s: ctx %08x%08x slot %d ep %d flags %c",
+ "%s: ctx %08x%08x slot %d ep %d flags %c:%c",
xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
/* Macro decrements 1, maybe it shouldn't?!? */
TRB_TO_EP_INDEX(field3) + 1,
+ field3 & TRB_TSP ? 'T' : 't',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STOP_RING:
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index 55b94fd10331..fdeb4cf97cc5 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -120,9 +120,6 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
(!IS_ENABLED(CONFIG_USB_ISP1761_UDC) || udc_disabled))
return -ENODEV;
- /* prevent usb-core allocating DMA pages */
- dev->dma_mask = NULL;
-
isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
if (!isp)
return -ENOMEM;
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 241a00d75027..07cc82ff327c 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -139,7 +139,6 @@ static int isp1761_pci_probe(struct pci_dev *dev,
pci_set_master(dev);
- dev->dev.dma_mask = NULL;
ret = isp1760_register(&dev->resource[3], dev->irq, 0, &dev->dev,
devflags);
if (ret < 0)
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 9d780b77314b..14faec51d7a5 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -183,6 +183,7 @@ static ssize_t port0_show(struct device *dev,
{
return read_port(dev, attr, buf, 0, CYPRESS_READ_PORT_ID0);
}
+static DEVICE_ATTR_RW(port0);
/* attribute callback handler (read) */
static ssize_t port1_show(struct device *dev,
@@ -190,11 +191,14 @@ static ssize_t port1_show(struct device *dev,
{
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
-
-static DEVICE_ATTR_RW(port0);
-
static DEVICE_ATTR_RW(port1);
+static struct attribute *cypress_attrs[] = {
+ &dev_attr_port0.attr,
+ &dev_attr_port1.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cypress);
static int cypress_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -212,26 +216,11 @@ static int cypress_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- /* create device attribute files */
- retval = device_create_file(&interface->dev, &dev_attr_port0);
- if (retval)
- goto error;
- retval = device_create_file(&interface->dev, &dev_attr_port1);
- if (retval)
- goto error;
-
/* let the user know that the device is now attached */
dev_info(&interface->dev,
"Cypress CY7C63xxx device now attached\n");
return 0;
-error:
- device_remove_file(&interface->dev, &dev_attr_port0);
- device_remove_file(&interface->dev, &dev_attr_port1);
- usb_set_intfdata(interface, NULL);
- usb_put_dev(dev->udev);
- kfree(dev);
-
error_mem:
return retval;
}
@@ -242,9 +231,6 @@ static void cypress_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata(interface);
- /* remove device attribute files */
- device_remove_file(&interface->dev, &dev_attr_port0);
- device_remove_file(&interface->dev, &dev_attr_port1);
/* the intfdata can be set to NULL only after the
* device files have been removed */
usb_set_intfdata(interface, NULL);
@@ -262,6 +248,7 @@ static struct usb_driver cypress_driver = {
.probe = cypress_probe,
.disconnect = cypress_disconnect,
.id_table = cypress_table,
+ .dev_groups = cypress_groups,
};
module_usb_driver(cypress_driver);
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 8b15ab5e1450..3e3802aaefa3 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -36,20 +36,6 @@ struct usb_cytherm {
};
-/* local function prototypes */
-static int cytherm_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void cytherm_disconnect(struct usb_interface *interface);
-
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver cytherm_driver = {
- .name = "cytherm",
- .probe = cytherm_probe,
- .disconnect = cytherm_disconnect,
- .id_table = id_table,
-};
-
/* Vendor requests */
/* They all operate on one byte at a time */
#define PING 0x00
@@ -304,6 +290,15 @@ static ssize_t port1_store(struct device *dev, struct device_attribute *attr, co
}
static DEVICE_ATTR_RW(port1);
+static struct attribute *cytherm_attrs[] = {
+ &dev_attr_brightness.attr,
+ &dev_attr_temp.attr,
+ &dev_attr_button.attr,
+ &dev_attr_port0.attr,
+ &dev_attr_port1.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cytherm);
static int cytherm_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -322,34 +317,10 @@ static int cytherm_probe(struct usb_interface *interface,
dev->brightness = 0xFF;
- retval = device_create_file(&interface->dev, &dev_attr_brightness);
- if (retval)
- goto error;
- retval = device_create_file(&interface->dev, &dev_attr_temp);
- if (retval)
- goto error;
- retval = device_create_file(&interface->dev, &dev_attr_button);
- if (retval)
- goto error;
- retval = device_create_file(&interface->dev, &dev_attr_port0);
- if (retval)
- goto error;
- retval = device_create_file(&interface->dev, &dev_attr_port1);
- if (retval)
- goto error;
-
dev_info (&interface->dev,
"Cypress thermometer device now attached\n");
return 0;
-error:
- device_remove_file(&interface->dev, &dev_attr_brightness);
- device_remove_file(&interface->dev, &dev_attr_temp);
- device_remove_file(&interface->dev, &dev_attr_button);
- device_remove_file(&interface->dev, &dev_attr_port0);
- device_remove_file(&interface->dev, &dev_attr_port1);
- usb_set_intfdata (interface, NULL);
- usb_put_dev(dev->udev);
- kfree(dev);
+
error_mem:
return retval;
}
@@ -360,12 +331,6 @@ static void cytherm_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata (interface);
- device_remove_file(&interface->dev, &dev_attr_brightness);
- device_remove_file(&interface->dev, &dev_attr_temp);
- device_remove_file(&interface->dev, &dev_attr_button);
- device_remove_file(&interface->dev, &dev_attr_port0);
- device_remove_file(&interface->dev, &dev_attr_port1);
-
/* first remove the files, then NULL the pointer */
usb_set_intfdata (interface, NULL);
@@ -376,6 +341,15 @@ static void cytherm_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "Cypress thermometer now disconnected\n");
}
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver cytherm_driver = {
+ .name = "cytherm",
+ .probe = cytherm_probe,
+ .disconnect = cytherm_disconnect,
+ .id_table = id_table,
+ .dev_groups = cytherm_groups,
+};
+
module_usb_driver(cytherm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ba05dd80a020..f5bed9f29e56 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata(interface);
mutex_lock(&iowarrior_open_disc_lock);
usb_set_intfdata(interface, NULL);
+ /* prevent device read, write and ioctl */
+ dev->present = 0;
minor = dev->minor;
+ mutex_unlock(&iowarrior_open_disc_lock);
+ /* give back our minor - this will call close(); locks need to be dropped at this point */
- /* give back our minor */
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
/* prevent device read, write and ioctl */
- dev->present = 0;
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_open_disc_lock);
if (dev->opened) {
/* There is a process that holds a filedescriptor to the device ,
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index e5c03c6d16e9..407fe7570f3b 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -310,7 +310,7 @@ static ssize_t enable_compliance_store(struct device *dev,
}
static DEVICE_ATTR_WO(enable_compliance);
-static struct attribute *lvs_attributes[] = {
+static struct attribute *lvs_attrs[] = {
&dev_attr_get_dev_desc.attr,
&dev_attr_u1_timeout.attr,
&dev_attr_u2_timeout.attr,
@@ -321,10 +321,7 @@ static struct attribute *lvs_attributes[] = {
&dev_attr_enable_compliance.attr,
NULL
};
-
-static const struct attribute_group lvs_attr_group = {
- .attrs = lvs_attributes,
-};
+ATTRIBUTE_GROUPS(lvs);
static void lvs_rh_work(struct work_struct *work)
{
@@ -439,12 +436,6 @@ static int lvs_rh_probe(struct usb_interface *intf,
INIT_WORK(&lvs->rh_work, lvs_rh_work);
- ret = sysfs_create_group(&intf->dev.kobj, &lvs_attr_group);
- if (ret < 0) {
- dev_err(&intf->dev, "Failed to create sysfs node %d\n", ret);
- goto free_urb;
- }
-
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
usb_fill_int_urb(lvs->urb, hdev, pipe, &lvs->buffer[0], maxp,
@@ -453,13 +444,11 @@ static int lvs_rh_probe(struct usb_interface *intf,
ret = usb_submit_urb(lvs->urb, GFP_KERNEL);
if (ret < 0) {
dev_err(&intf->dev, "couldn't submit lvs urb %d\n", ret);
- goto sysfs_remove;
+ goto free_urb;
}
return ret;
-sysfs_remove:
- sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
free_urb:
usb_free_urb(lvs->urb);
return ret;
@@ -469,7 +458,6 @@ static void lvs_rh_disconnect(struct usb_interface *intf)
{
struct lvs_rh *lvs = usb_get_intfdata(intf);
- sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
usb_poison_urb(lvs->urb); /* used in scheduled work */
flush_work(&lvs->rh_work);
usb_free_urb(lvs->urb);
@@ -479,6 +467,7 @@ static struct usb_driver lvs_driver = {
.name = "lvs",
.probe = lvs_rh_probe,
.disconnect = lvs_rh_disconnect,
+ .dev_groups = lvs_groups,
};
module_usb_driver(lvs_driver);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 27e9c78a791e..30cae5e1954d 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -454,51 +454,55 @@ static int probe_rio(struct usb_interface *intf,
{
struct usb_device *dev = interface_to_usbdev(intf);
struct rio_usb_data *rio = &rio_instance;
- int retval = 0;
+ int retval = -ENOMEM;
+ char *ibuf, *obuf;
- mutex_lock(&rio500_mutex);
if (rio->present) {
dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
- retval = -EBUSY;
- goto bail_out;
- } else {
- dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
- }
-
- retval = usb_register_dev(intf, &usb_rio_class);
- if (retval) {
- dev_err(&dev->dev,
- "Not able to get a minor for this device.\n");
- retval = -ENOMEM;
- goto bail_out;
+ return -EBUSY;
}
+ dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
- rio->rio_dev = dev;
-
- if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) {
+ obuf = kmalloc(OBUF_SIZE, GFP_KERNEL);
+ if (!obuf) {
dev_err(&dev->dev,
"probe_rio: Not enough memory for the output buffer\n");
- usb_deregister_dev(intf, &usb_rio_class);
- retval = -ENOMEM;
- goto bail_out;
+ goto err_obuf;
}
- dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf);
+ dev_dbg(&intf->dev, "obuf address: %p\n", obuf);
- if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) {
+ ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL);
+ if (!ibuf) {
dev_err(&dev->dev,
"probe_rio: Not enough memory for the input buffer\n");
- usb_deregister_dev(intf, &usb_rio_class);
- kfree(rio->obuf);
- retval = -ENOMEM;
- goto bail_out;
+ goto err_ibuf;
}
- dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
+ dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf);
- usb_set_intfdata (intf, rio);
+ mutex_lock(&rio500_mutex);
+ rio->rio_dev = dev;
+ rio->ibuf = ibuf;
+ rio->obuf = obuf;
rio->present = 1;
-bail_out:
mutex_unlock(&rio500_mutex);
+ retval = usb_register_dev(intf, &usb_rio_class);
+ if (retval) {
+ dev_err(&dev->dev,
+ "Not able to get a minor for this device.\n");
+ goto err_register;
+ }
+
+ usb_set_intfdata(intf, rio);
+ return retval;
+
+ err_register:
+ mutex_lock(&rio500_mutex);
+ rio->present = 0;
+ mutex_unlock(&rio500_mutex);
+ err_ibuf:
+ kfree(obuf);
+ err_obuf:
return retval;
}
@@ -507,10 +511,10 @@ static void disconnect_rio(struct usb_interface *intf)
struct rio_usb_data *rio = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
- mutex_lock(&rio500_mutex);
if (rio) {
usb_deregister_dev(intf, &usb_rio_class);
+ mutex_lock(&rio500_mutex);
if (rio->isopen) {
rio->isopen = 0;
/* better let it finish - the release will do whats needed */
@@ -524,8 +528,8 @@ static void disconnect_rio(struct usb_interface *intf)
dev_info(&intf->dev, "USB Rio disconnected.\n");
rio->present = 0;
+ mutex_unlock(&rio500_mutex);
}
- mutex_unlock(&rio500_mutex);
}
static const struct usb_device_id rio_table[] = {
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index ac357ce2d1a6..a3dfc77578ea 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -71,9 +71,14 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
}
return count;
}
-
static DEVICE_ATTR_RW(speed);
+static struct attribute *tv_attrs[] = {
+ &dev_attr_speed.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(tv);
+
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -89,15 +94,9 @@ static int tv_probe(struct usb_interface *interface,
dev->udev = usb_get_dev(udev);
usb_set_intfdata(interface, dev);
- retval = device_create_file(&interface->dev, &dev_attr_speed);
- if (retval)
- goto error_create_file;
return 0;
-error_create_file:
- usb_put_dev(udev);
- usb_set_intfdata(interface, NULL);
error:
kfree(dev);
return retval;
@@ -108,7 +107,6 @@ static void tv_disconnect(struct usb_interface *interface)
struct trancevibrator *dev;
dev = usb_get_intfdata (interface);
- device_remove_file(&interface->dev, &dev_attr_speed);
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
@@ -120,6 +118,7 @@ static struct usb_driver tv_driver = {
.probe = tv_probe,
.disconnect = tv_disconnect,
.id_table = id_table,
+ .dev_groups = tv_groups,
};
module_usb_driver(tv_driver);
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d6ae3795a88..6ca9111d150a 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,7 +375,8 @@ out_err:
#ifdef CONFIG_OF
static void usb251xb_get_ports_field(struct usb251xb *hub,
- const char *prop_name, u8 port_cnt, u8 *fld)
+ const char *prop_name, u8 port_cnt,
+ bool ds_only, u8 *fld)
{
struct device *dev = hub->dev;
struct property *prop;
@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
u32 port;
of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
- if ((port >= 1) && (port <= port_cnt))
+ if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
*fld |= BIT(port);
else
dev_warn(dev, "port %u doesn't exist\n", port);
@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
- &hub->non_rem_dev);
+ true, &hub->non_rem_dev);
hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
- &hub->port_disable_sp);
+ true, &hub->port_disable_sp);
hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
- &hub->port_disable_bp);
+ true, &hub->port_disable_bp);
hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
if (!of_property_read_u32(np, "sp-max-total-current-microamp",
@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
- &hub->port_swap);
- if (of_get_property(np, "swap-us-lanes", NULL))
- hub->port_swap |= BIT(0);
+ false, &hub->port_swap);
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 1923d5b6d9c9..551074f5b7ad 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -316,7 +316,7 @@ MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_msb, update_display_mode);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_lsb, update_display_mode);
-static struct attribute *dev_attrs[] = {
+static struct attribute *sevseg_attrs[] = {
&dev_attr_powered.attr,
&dev_attr_text.attr,
&dev_attr_textmode.attr,
@@ -325,10 +325,7 @@ static struct attribute *dev_attrs[] = {
&dev_attr_mode_lsb.attr,
NULL
};
-
-static const struct attribute_group dev_attr_grp = {
- .attrs = dev_attrs,
-};
+ATTRIBUTE_GROUPS(sevseg);
static int sevseg_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -354,17 +351,9 @@ static int sevseg_probe(struct usb_interface *interface,
mydev->mode_msb = 0x06; /* 6 characters */
mydev->mode_lsb = 0x3f; /* scanmode for 6 chars */
- rc = sysfs_create_group(&interface->dev.kobj, &dev_attr_grp);
- if (rc)
- goto error;
-
dev_info(&interface->dev, "USB 7 Segment device now attached\n");
return 0;
-error:
- usb_set_intfdata(interface, NULL);
- usb_put_dev(mydev->udev);
- kfree(mydev);
error_mem:
return rc;
}
@@ -374,7 +363,6 @@ static void sevseg_disconnect(struct usb_interface *interface)
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(interface);
- sysfs_remove_group(&interface->dev.kobj, &dev_attr_grp);
usb_set_intfdata(interface, NULL);
usb_put_dev(mydev->udev);
kfree(mydev);
@@ -423,6 +411,7 @@ static struct usb_driver sevseg_driver = {
.resume = sevseg_resume,
.reset_resume = sevseg_reset_resume,
.id_table = id_table,
+ .dev_groups = sevseg_groups,
.supports_autosuspend = 1,
};
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 7b306aa22d25..6715a128e6c8 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
dev_dbg(&dev->interface->dev, "%s\n", __func__);
- usb_put_dev(dev->udev);
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
kfree(dev->cntl_req);
@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
+ usb_put_dev(dev->udev);
kfree(dev);
}
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index 928c2cd6fc00..bf98fd36341d 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -44,6 +44,7 @@ config USB_MTU3_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3))
depends on (EXTCON=y || EXTCON=USB_MTU3)
+ select USB_ROLE_SWITCH
help
This is the default mode of working of MTU3 controller where
both host and gadget features are enabled.
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 76ecf12fdf62..6087be236a35 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -199,6 +199,9 @@ struct mtu3_gpd_ring {
* @id_nb : notifier for iddig(idpin) detection
* @id_work : work of iddig detection notifier
* @id_event : event of iddig detection notifier
+* @role_sw : USB Role Switch used to support dual-role switching; it cannot
+* be used together with extcon, which is deprecated.
+* @role_sw_used : true when the USB Role Switch is used.
* @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
* @manual_drd_enabled: it's true when supports dual-role device by debugfs
* to switch host/device modes depending on user input.
@@ -212,6 +215,8 @@ struct otg_switch_mtk {
struct notifier_block id_nb;
struct work_struct id_work;
unsigned long id_event;
+ struct usb_role_switch *role_sw;
+ bool role_sw_used;
bool is_u3_drd;
bool manual_drd_enabled;
};
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index f8bd1d57e795..c3d5c1206eec 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -835,10 +835,8 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return -ENOMEM;
mtu->irq = platform_get_irq(pdev, 0);
- if (mtu->irq < 0) {
- dev_err(dev, "fail to get irq number\n");
+ if (mtu->irq < 0)
return mtu->irq;
- }
dev_info(dev, "irq %d\n", mtu->irq);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
index 62c57ddc554e..c96e5dab0a48 100644
--- a/drivers/usb/mtu3/mtu3_debugfs.c
+++ b/drivers/usb/mtu3/mtu3_debugfs.c
@@ -453,9 +453,9 @@ static ssize_t ssusb_mode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
- ssusb_mode_manual_switch(ssusb, 1);
+ ssusb_mode_switch(ssusb, 1);
} else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
- ssusb_mode_manual_switch(ssusb, 0);
+ ssusb_mode_switch(ssusb, 0);
} else {
dev_err(ssusb->dev, "wrong or duplicated setting\n");
return -EINVAL;
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 5fcb71af875a..08e18448e8b8 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -7,6 +7,8 @@
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
*/
+#include <linux/usb/role.h>
+
#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"
@@ -280,7 +282,7 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
* This is useful in special cases, such as when a Type-A receptacle is used
* but dual-role mode should still be supported.
*/
-void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
@@ -318,6 +320,47 @@ void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
}
+static int ssusb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ bool to_host = false;
+
+ if (role == USB_ROLE_HOST)
+ to_host = true;
+
+ if (to_host ^ ssusb->is_host)
+ ssusb_mode_switch(ssusb, to_host);
+
+ return 0;
+}
+
+static enum usb_role ssusb_role_sw_get(struct device *dev)
+{
+ struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
+ enum usb_role role;
+
+ role = ssusb->is_host ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+
+ return role;
+}
+
+static int ssusb_role_sw_register(struct otg_switch_mtk *otg_sx)
+{
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ struct ssusb_mtk *ssusb =
+ container_of(otg_sx, struct ssusb_mtk, otg_switch);
+
+ if (!otg_sx->role_sw_used)
+ return 0;
+
+ role_sx_desc.set = ssusb_role_sw_set;
+ role_sx_desc.get = ssusb_role_sw_get;
+ role_sx_desc.fwnode = dev_fwnode(ssusb->dev);
+ otg_sx->role_sw = usb_role_switch_register(ssusb->dev, &role_sx_desc);
+
+ return PTR_ERR_OR_ZERO(otg_sx->role_sw);
+}
+
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
@@ -328,6 +371,8 @@ int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
if (otg_sx->manual_drd_enabled)
ssusb_dr_debugfs_init(ssusb);
+ else if (otg_sx->role_sw_used)
+ ret = ssusb_role_sw_register(otg_sx);
else
ret = ssusb_extcon_register(otg_sx);
@@ -340,4 +385,5 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
cancel_work_sync(&otg_sx->id_work);
cancel_work_sync(&otg_sx->vbus_work);
+ usb_role_switch_unregister(otg_sx->role_sw);
}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index ba6fe357ce29..5e58c4dbd54a 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -71,7 +71,7 @@ static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
-void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host);
+void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host);
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
enum mtu3_dr_force_mode mode);
@@ -86,8 +86,8 @@ static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{}
-static inline void
-ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host) {}
+static inline void ssusb_mode_switch(struct ssusb_mtk *ssusb, int to_host)
+{}
static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
{
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index fd0f6c5dfbc1..9c256ea3cdf5 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -299,8 +299,9 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
otg_sx->manual_drd_enabled =
of_property_read_bool(node, "enable-manual-drd");
+ otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");
- if (of_property_read_bool(node, "extcon")) {
+ if (!otg_sx->role_sw_used && of_property_read_bool(node, "extcon")) {
otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
if (IS_ERR(otg_sx->edev)) {
dev_err(ssusb->dev, "couldn't get extcon device\n");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 9f5a4819a744..bd63450af76a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1721,7 +1721,7 @@ mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
- int ret = -EINVAL;
+ int ret;
spin_lock_irqsave(&musb->lock, flags);
ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
@@ -1829,16 +1829,13 @@ static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(srp);
-static struct attribute *musb_attributes[] = {
+static struct attribute *musb_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_vbus.attr,
&dev_attr_srp.attr,
NULL
};
-
-static const struct attribute_group musb_attr_group = {
- .attrs = musb_attributes,
-};
+ATTRIBUTE_GROUPS(musb);
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
@@ -2038,10 +2035,6 @@ static void musb_free(struct musb *musb)
* cleanup after everything's been de-activated.
*/
-#ifdef CONFIG_SYSFS
- sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
-#endif
-
if (musb->nIrq >= 0) {
if (musb->irq_wake)
disable_irq_wake(musb->nIrq);
@@ -2390,22 +2383,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_init_debugfs(musb);
- status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
- if (status)
- goto fail5;
-
musb->is_initialized = 1;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
-fail5:
- musb_exit_debugfs(musb);
-
- musb_gadget_cleanup(musb);
- musb_host_cleanup(musb);
-
fail3:
cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
@@ -2798,6 +2781,7 @@ static struct platform_driver musb_driver = {
.name = (char *)musb_driver_name,
.bus = &platform_bus_type,
.pm = MUSB_DEV_PM_OPS,
+ .dev_groups = musb_groups,
},
.probe = musb_probe,
.remove = musb_remove,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index eb308ec35c66..5a44b70372d9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2689,7 +2689,7 @@ static const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
.hcd_priv_size = sizeof(struct musb *),
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
* those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index aaf363f19714..4bb4b1d42f32 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -330,6 +330,7 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
switch (lsts) {
case USB_LINK_ACA_RID_B_8505:
event = UX500_MUSB_RIDB;
+ /* Fall through */
case USB_LINK_NOT_CONFIGURED_8505:
case USB_LINK_RESERVED0_8505:
case USB_LINK_RESERVED1_8505:
@@ -350,6 +351,7 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
case USB_LINK_ACA_RID_C_NM_8505:
event = UX500_MUSB_RIDC;
+ /* Fall through */
case USB_LINK_STD_HOST_NC_8505:
case USB_LINK_STD_HOST_C_NS_8505:
case USB_LINK_STD_HOST_C_S_8505:
@@ -368,6 +370,7 @@ static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
case USB_LINK_ACA_RID_A_8505:
case USB_LINK_ACA_DOCK_CHGR_8505:
event = UX500_MUSB_RIDA;
+ /* Fall through */
case USB_LINK_HM_IDGND_8505:
if (ab->mode == USB_IDLE) {
ab->mode = USB_HOST;
@@ -422,6 +425,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
switch (lsts) {
case USB_LINK_ACA_RID_B_8500:
event = UX500_MUSB_RIDB;
+ /* Fall through */
case USB_LINK_NOT_CONFIGURED_8500:
case USB_LINK_NOT_VALID_LINK_8500:
ab->mode = USB_IDLE;
@@ -438,6 +442,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
case USB_LINK_ACA_RID_C_HS_8500:
case USB_LINK_ACA_RID_C_HS_CHIRP_8500:
event = UX500_MUSB_RIDC;
+ /* Fall through */
case USB_LINK_STD_HOST_NC_8500:
case USB_LINK_STD_HOST_C_NS_8500:
case USB_LINK_STD_HOST_C_S_8500:
@@ -457,6 +462,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
case USB_LINK_ACA_RID_A_8500:
event = UX500_MUSB_RIDA;
+ /* Fall through */
case USB_LINK_HM_IDGND_8500:
if (ab->mode == USB_IDLE) {
ab->mode = USB_HOST;
@@ -712,10 +718,8 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
if (ab->flags & AB8500_USB_FLAG_USE_LINK_STATUS_IRQ) {
irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
- if (irq < 0) {
- dev_err(&pdev->dev, "Link status irq not found\n");
+ if (irq < 0)
return irq;
- }
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_link_status_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
@@ -728,10 +732,8 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
if (ab->flags & AB8500_USB_FLAG_USE_ID_WAKEUP_IRQ) {
irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
- if (irq < 0) {
- dev_err(&pdev->dev, "ID fall irq not found\n");
+ if (irq < 0)
return irq;
- }
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
@@ -744,10 +746,8 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
if (ab->flags & AB8500_USB_FLAG_USE_VBUS_DET_IRQ) {
irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
- if (irq < 0) {
- dev_err(&pdev->dev, "VBUS fall irq not found\n");
+ if (irq < 0)
return irq;
- }
err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index f7c96d209eda..b451f4695f3f 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -65,7 +65,7 @@ struct fsl_otg_timer *b_data_pulse_tmr, *b_vbus_pulse_tmr, *b_srp_fail_tmr,
static struct list_head active_timers;
-static struct fsl_otg_config fsl_otg_initdata = {
+static const struct fsl_otg_config fsl_otg_initdata = {
.otg_port = 1,
};
@@ -1043,6 +1043,11 @@ static ssize_t show_fsl_usb2_otg_state(struct device *dev,
static DEVICE_ATTR(fsl_usb2_otg_state, S_IRUGO, show_fsl_usb2_otg_state, NULL);
+static struct attribute *fsl_otg_attrs[] = {
+ &dev_attr_fsl_usb2_otg_state.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fsl_otg);
/* Char driver interface to control some OTG input */
@@ -1132,10 +1137,6 @@ static int fsl_otg_probe(struct platform_device *pdev)
return ret;
}
- ret = device_create_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state);
- if (ret)
- dev_warn(&pdev->dev, "Can't register sysfs attribute\n");
-
return ret;
}
@@ -1152,8 +1153,6 @@ static int fsl_otg_remove(struct platform_device *pdev)
kfree(fsl_otg_dev->phy.otg);
kfree(fsl_otg_dev);
- device_remove_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state);
-
unregister_chrdev(FSL_OTG_MAJOR, FSL_OTG_NAME);
if (pdata->exit)
@@ -1168,6 +1167,7 @@ struct platform_driver fsl_otg_driver = {
.driver = {
.name = driver_name,
.owner = THIS_MODULE,
+ .dev_groups = fsl_otg_groups,
},
};
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index cf7ecdc9a9d4..06b47f1028b3 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -641,12 +641,15 @@ static const struct attribute_group inputs_attr_group = {
.attrs = inputs_attrs,
};
+static const struct attribute_group *mv_otg_groups[] = {
+ &inputs_attr_group,
+ NULL,
+};
+
static int mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
-
if (mvotg->qwork) {
flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
@@ -809,13 +812,6 @@ static int mv_otg_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
- if (retval < 0) {
- dev_dbg(&pdev->dev,
- "Can't register sysfs attr group: %d\n", retval);
- goto err_remove_phy;
- }
-
spin_lock_init(&mvotg->wq_lock);
if (spin_trylock(&mvotg->wq_lock)) {
mv_otg_run_state_machine(mvotg, 2 * HZ);
@@ -828,8 +824,6 @@ static int mv_otg_probe(struct platform_device *pdev)
return 0;
-err_remove_phy:
- usb_remove_phy(&mvotg->phy);
err_disable_clk:
mv_otg_disable_internal(mvotg);
err_destroy_workqueue:
@@ -883,6 +877,7 @@ static struct platform_driver mv_otg_driver = {
.remove = mv_otg_remove,
.driver = {
.name = driver_name,
+ .dev_groups = mv_otg_groups,
},
#ifdef CONFIG_PM
.suspend = mv_otg_suspend,
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 0981abc3d1ad..baebb1f5a973 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -312,15 +312,12 @@ static ssize_t otg_mode_store(struct device *device,
}
static DEVICE_ATTR_RW(otg_mode);
-static struct attribute *tahvo_attributes[] = {
+static struct attribute *tahvo_attrs[] = {
&dev_attr_vbus.attr,
&dev_attr_otg_mode.attr,
NULL
};
-
-static const struct attribute_group tahvo_attr_group = {
- .attrs = tahvo_attributes,
-};
+ATTRIBUTE_GROUPS(tahvo);
static int tahvo_usb_probe(struct platform_device *pdev)
{
@@ -406,17 +403,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
goto err_remove_phy;
}
- /* Attributes */
- ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group);
- if (ret) {
- dev_err(&pdev->dev, "cannot create sysfs group: %d\n", ret);
- goto err_free_irq;
- }
-
return 0;
-err_free_irq:
- free_irq(tu->irq, tu);
err_remove_phy:
usb_remove_phy(&tu->phy);
err_disable_clk:
@@ -430,7 +418,6 @@ static int tahvo_usb_remove(struct platform_device *pdev)
{
struct tahvo_usb *tu = platform_get_drvdata(pdev);
- sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
free_irq(tu->irq, tu);
usb_remove_phy(&tu->phy);
if (!IS_ERR(tu->ick))
@@ -444,6 +431,7 @@ static struct platform_driver tahvo_usb_driver = {
.remove = tahvo_usb_remove,
.driver = {
.name = "tahvo-usb",
+ .dev_groups = tahvo_groups,
},
};
module_platform_driver(tahvo_usb_driver);
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index dade34d70419..bfebf1f2e991 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -196,6 +196,12 @@ static ssize_t vbus_show(struct device *dev,
}
static DEVICE_ATTR_RO(vbus);
+static struct attribute *twl6030_attrs[] = {
+ &dev_attr_vbus.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(twl6030);
+
static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
{
struct twl6030_usb *twl = _twl;
@@ -361,8 +367,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, twl);
- if (device_create_file(&pdev->dev, &dev_attr_vbus))
- dev_warn(&pdev->dev, "could not create sysfs file\n");
INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
@@ -373,7 +377,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq1, status);
- device_remove_file(twl->dev, &dev_attr_vbus);
return status;
}
@@ -384,7 +387,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
twl->irq2, status);
free_irq(twl->irq1, twl);
- device_remove_file(twl->dev, &dev_attr_vbus);
return status;
}
@@ -408,7 +410,6 @@ static int twl6030_usb_remove(struct platform_device *pdev)
free_irq(twl->irq1, twl);
free_irq(twl->irq2, twl);
regulator_put(twl->usb3v3);
- device_remove_file(twl->dev, &dev_attr_vbus);
cancel_work_sync(&twl->set_vbus_work);
return 0;
@@ -426,6 +427,7 @@ static struct platform_driver twl6030_usb_driver = {
.driver = {
.name = "twl6030_usb",
.of_match_table = of_match_ptr(twl6030_usb_id_table),
+ .dev_groups = twl6030_groups,
},
};
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index ddd3be48f948..ae54221011c3 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1283,7 +1283,7 @@ static const struct hc_driver usbhsh_driver = {
/*
* generic hardware linkage
*/
- .flags = HCD_USB2,
+ .flags = HCD_DMA | HCD_USB2,
.start = usbhsh_host_start,
.stop = usbhsh_host_stop,
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 86defca6623e..94b4e7db2b94 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -85,16 +85,6 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
-static int switch_fwnode_match(struct device *dev, const void *fwnode)
-{
- return dev_fwnode(dev) == fwnode;
-}
-
-static int switch_name_match(struct device *dev, const void *name)
-{
- return !strcmp((const char *)name, dev_name(dev));
-}
-
static void *usb_role_switch_match(struct device_connection *con, int ep,
void *data)
{
@@ -104,16 +94,27 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
if (con->id && !fwnode_property_present(con->fwnode, con->id))
return NULL;
- dev = class_find_device(role_class, NULL, con->fwnode,
- switch_fwnode_match);
+ dev = class_find_device_by_fwnode(role_class, con->fwnode);
} else {
- dev = class_find_device(role_class, NULL, con->endpoint[ep],
- switch_name_match);
+ dev = class_find_device_by_name(role_class, con->endpoint[ep]);
}
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
+static struct usb_role_switch *
+usb_role_switch_is_parent(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ struct device *dev;
+
+ if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
+ return NULL;
+
+ dev = class_find_device_by_fwnode(role_class, parent);
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+}
+
/**
* usb_role_switch_get - Find USB role switch linked with the caller
* @dev: The caller device
@@ -125,8 +126,10 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
{
struct usb_role_switch *sw;
- sw = device_connection_find_match(dev, "usb-role-switch", NULL,
- usb_role_switch_match);
+ sw = usb_role_switch_is_parent(dev_fwnode(dev));
+ if (!sw)
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+ usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
@@ -136,6 +139,28 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
EXPORT_SYMBOL_GPL(usb_role_switch_get);
/**
+ * fwnode_usb_role_switch_get - Find USB role switch linked with the caller
+ * @fwnode: The caller device node
+ *
+ * This is similar to usb_role_switch_get() above, but the switch is
+ * looked up by fwnode instead of by device.
+ */
+struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
+{
+ struct usb_role_switch *sw;
+
+ sw = usb_role_switch_is_parent(fwnode);
+ if (!sw)
+ sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
+ NULL, usb_role_switch_match);
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+ return sw;
+}
+EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
+
+/**
* usb_role_switch_put - Release handle to a switch
* @sw: USB Role Switch
*
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 277de96181f9..409851306e99 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/usb/role.h>
/* register definition */
@@ -26,6 +27,12 @@
#define SW_VBUS_VALID BIT(24)
#define SW_IDPIN_EN BIT(21)
#define SW_IDPIN BIT(20)
+#define SW_SWITCH_EN BIT(16)
+
+#define DRD_CONFIG_DYNAMIC 0
+#define DRD_CONFIG_STATIC_HOST 1
+#define DRD_CONFIG_STATIC_DEVICE 2
+#define DRD_CONFIG_MASK 3
#define DUAL_ROLE_CFG1 0x6c
#define HOST_MODE BIT(29)
@@ -37,6 +44,11 @@
struct intel_xhci_usb_data {
struct usb_role_switch *role_sw;
void __iomem *base;
+ bool enable_sw_switch;
+};
+
+static const struct software_node intel_xhci_usb_node = {
+ "intel-xhci-usb-sw",
};
static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
@@ -45,6 +57,7 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
unsigned long timeout;
acpi_status status;
u32 glk, val;
+ u32 drd_config = DRD_CONFIG_DYNAMIC;
/*
* On many CHT devices ACPI event (_AEI) handlers read / modify /
@@ -59,24 +72,35 @@ static int intel_xhci_usb_set_role(struct device *dev, enum usb_role role)
pm_runtime_get_sync(dev);
- /* Set idpin value as requested */
+ /*
+ * Set idpin value as requested.
+ * Some devices rely on firmware leaving the DRD_CONFIG and
+ * SW_SWITCH_EN bits cleared for role switching to work, so do not
+ * set these bits on those devices.
+ */
val = readl(data->base + DUAL_ROLE_CFG0);
switch (role) {
case USB_ROLE_NONE:
val |= SW_IDPIN;
val &= ~SW_VBUS_VALID;
+ drd_config = DRD_CONFIG_DYNAMIC;
break;
case USB_ROLE_HOST:
val &= ~SW_IDPIN;
val &= ~SW_VBUS_VALID;
+ drd_config = DRD_CONFIG_STATIC_HOST;
break;
case USB_ROLE_DEVICE:
val |= SW_IDPIN;
val |= SW_VBUS_VALID;
+ drd_config = DRD_CONFIG_STATIC_DEVICE;
break;
}
val |= SW_IDPIN_EN;
-
+ if (data->enable_sw_switch) {
+ val &= ~DRD_CONFIG_MASK;
+ val |= SW_SWITCH_EN | drd_config;
+ }
writel(val, data->base + DUAL_ROLE_CFG0);
acpi_release_global_lock(glk);
@@ -122,17 +146,13 @@ static enum usb_role intel_xhci_usb_get_role(struct device *dev)
return role;
}
-static const struct usb_role_switch_desc sw_desc = {
- .set = intel_xhci_usb_set_role,
- .get = intel_xhci_usb_get_role,
- .allow_userspace_control = true,
-};
-
static int intel_xhci_usb_probe(struct platform_device *pdev)
{
+ struct usb_role_switch_desc sw_desc = { };
struct device *dev = &pdev->dev;
struct intel_xhci_usb_data *data;
struct resource *res;
+ int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -147,9 +167,23 @@ static int intel_xhci_usb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ ret = software_node_register(&intel_xhci_usb_node);
+ if (ret)
+ return ret;
+
+	sw_desc.set = intel_xhci_usb_set_role;
+	sw_desc.get = intel_xhci_usb_get_role;
+	sw_desc.allow_userspace_control = true;
+ sw_desc.fwnode = software_node_fwnode(&intel_xhci_usb_node);
+
+ data->enable_sw_switch = !device_property_read_bool(dev,
+ "sw_switch_disable");
+
data->role_sw = usb_role_switch_register(dev, &sw_desc);
- if (IS_ERR(data->role_sw))
+ if (IS_ERR(data->role_sw)) {
+ fwnode_handle_put(sw_desc.fwnode);
return PTR_ERR(data->role_sw);
+ }
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -164,6 +198,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
usb_role_switch_unregister(data->role_sw);
+ fwnode_handle_put(software_node_fwnode(&intel_xhci_usb_node));
+
return 0;
}
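The role-set path above composes the DUAL_ROLE_CFG0 value per role and only applies the static DRD configuration when the platform tolerates SW_SWITCH_EN. Below is a standalone sketch of that bit composition, reusing the register bit definitions added by this patch; the starting register value and the printed cases are illustrative assumptions.

/* sketch: how DUAL_ROLE_CFG0 is composed per role */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define SW_VBUS_VALID			BIT(24)
#define SW_IDPIN_EN			BIT(21)
#define SW_IDPIN			BIT(20)
#define SW_SWITCH_EN			BIT(16)
#define DRD_CONFIG_DYNAMIC		0
#define DRD_CONFIG_STATIC_HOST		1
#define DRD_CONFIG_STATIC_DEVICE	2
#define DRD_CONFIG_MASK			3

enum role { ROLE_NONE, ROLE_HOST, ROLE_DEVICE };

static uint32_t compose_cfg0(uint32_t val, enum role role, bool enable_sw_switch)
{
	uint32_t drd_config = DRD_CONFIG_DYNAMIC;

	switch (role) {
	case ROLE_NONE:
		val |= SW_IDPIN;
		val &= ~SW_VBUS_VALID;
		drd_config = DRD_CONFIG_DYNAMIC;
		break;
	case ROLE_HOST:
		val &= ~SW_IDPIN;
		val &= ~SW_VBUS_VALID;
		drd_config = DRD_CONFIG_STATIC_HOST;
		break;
	case ROLE_DEVICE:
		val |= SW_IDPIN;
		val |= SW_VBUS_VALID;
		drd_config = DRD_CONFIG_STATIC_DEVICE;
		break;
	}
	val |= SW_IDPIN_EN;

	/* only set the static DRD config where the firmware allows it */
	if (enable_sw_switch) {
		val &= ~DRD_CONFIG_MASK;
		val |= SW_SWITCH_EN | drd_config;
	}
	return val;
}

int main(void)
{
	printf("host, sw switch on:    0x%08x\n",
	       (unsigned int)compose_cfg0(0, ROLE_HOST, true));
	printf("device, sw switch off: 0x%08x\n",
	       (unsigned int)compose_cfg0(0, ROLE_DEVICE, false));
	return 0;
}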
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4b3a049561f3..f0688c44b04c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2023,6 +2023,46 @@ static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr,
return 0;
}
+static int ftdi_gpio_init_ft232h(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ u16 cbus_config;
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ftdi_read_eeprom(port->serial, buf, 0x1a, 4);
+ if (ret < 0)
+ goto out_free;
+
+ /*
+ * FT232H CBUS Memory Map
+ *
+ * 0x1a: X- (upper nibble -> AC5)
+ * 0x1b: -X (lower nibble -> AC6)
+ * 0x1c: XX (upper nibble -> AC9 | lower nibble -> AC8)
+ */
+ cbus_config = buf[2] << 8 | (buf[1] & 0xf) << 4 | (buf[0] & 0xf0) >> 4;
+
+ priv->gc.ngpio = 4;
+ priv->gpio_altfunc = 0xff;
+
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if ((cbus_config & 0xf) == FTDI_FTX_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ cbus_config >>= 4;
+ }
+
+out_free:
+ kfree(buf);
+
+ return ret;
+}
+
static int ftdi_gpio_init_ft232r(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
@@ -2098,6 +2138,9 @@ static int ftdi_gpio_init(struct usb_serial_port *port)
int result;
switch (priv->chip_type) {
+ case FT232H:
+ result = ftdi_gpio_init_ft232h(port);
+ break;
case FT232RL:
result = ftdi_gpio_init_ft232r(port);
break;
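ftdi_gpio_init_ft232h() above packs the three EEPROM bytes at 0x1a..0x1c into one 16-bit word, one nibble per CBUS pin (AC5, AC6, AC8, AC9), and marks a pin GPIO-capable when its nibble equals the GPIO mux code. A standalone sketch of that decoding follows; the mux value (0x08) and the sample EEPROM bytes are assumptions for illustration.

/* sketch: FT232H CBUS nibble packing and GPIO detection */
#include <stdint.h>
#include <stdio.h>

#define FTDI_FTX_CBUS_MUX_GPIO	0x08	/* assumed value of the GPIO mux code */

int main(void)
{
	/* bytes read from EEPROM offsets 0x1a, 0x1b, 0x1c (sample data) */
	uint8_t buf[3] = { 0x80, 0x08, 0x88 };
	uint16_t cbus_config;
	uint8_t gpio_altfunc = 0xff;	/* set bit => pin NOT usable as GPIO */
	int i;

	/* 0x1a upper nibble -> AC5, 0x1b lower nibble -> AC6,
	 * 0x1c lower nibble -> AC8, 0x1c upper nibble -> AC9 */
	cbus_config = buf[2] << 8 | (buf[1] & 0xf) << 4 | (buf[0] & 0xf0) >> 4;

	for (i = 0; i < 4; i++) {
		if ((cbus_config & 0xf) == FTDI_FTX_CBUS_MUX_GPIO)
			gpio_altfunc &= ~(1u << i);
		cbus_config >>= 4;
	}

	printf("gpio_altfunc = 0x%02x\n", gpio_altfunc);
	return 0;
}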
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c1582fbd1150..38e920ac7f82 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+ /* Motorola devices */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 6d64f342f587..16ce06039a4d 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -29,8 +29,6 @@
#include <linux/kernel.h>
-#define USB_STORAGE "usb-storage: "
-
#ifdef CONFIG_USB_STORAGE_DEBUG
void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb);
void usb_stor_show_sense(const struct us_data *us, unsigned char key,
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 2b474d60b4db..28e1128d53a4 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1511,7 +1511,7 @@ static int isd200_Initialization(struct us_data *us)
static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
{
- int sendToTransport = 1, orig_bufflen;
+ int sendToTransport, orig_bufflen;
union ata_cdb ataCdb;
/* Make sure driver was initialized */
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index cc794e25a0b6..1d9ce9cbc831 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
goto INIT_FAIL;
}
- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
- CHECK_FW_VER(chip, 0x5901))
- SET_AUTO_DELINK(chip);
- if (STATUS_LEN(chip) == 16) {
- if (SUPPORT_AUTO_DELINK(chip))
+ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+ CHECK_PID(chip, 0x0159)) {
+ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+ CHECK_FW_VER(chip, 0x5901))
SET_AUTO_DELINK(chip);
+ if (STATUS_LEN(chip) == 16) {
+ if (SUPPORT_AUTO_DELINK(chip))
+ SET_AUTO_DELINK(chip);
+ }
}
#ifdef CONFIG_REALTEK_AUTOPM
if (ss_en)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 30790240aec6..6737fab94959 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -28,6 +28,8 @@
* status of a command.
*/
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -38,6 +40,7 @@
#include <scsi/scsi_eh.h>
#include "usb.h"
+#include <linux/usb/hcd.h>
#include "scsiglue.h"
#include "debug.h"
#include "transport.h"
@@ -99,6 +102,7 @@ static int slave_alloc (struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
+ struct device *dev = us->pusb_dev->bus->sysdev;
/*
* Many devices have trouble transferring more than 32KB at a time,
@@ -129,12 +133,19 @@ static int slave_configure(struct scsi_device *sdev)
}
/*
+	 * max_hw_sectors should not exceed the maximum size of a DMA mapping
+	 * for the device; otherwise the DMA API may fail in a swiotlb
+	 * environment.
+ */
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT));
+
+ /*
* Some USB host controllers can't do DMA; they have to use PIO.
- * They indicate this by setting their dma_mask to NULL. For
- * such controllers we need to make sure the block layer sets
+ * For such controllers we need to make sure the block layer sets
* up bounce buffers in addressable memory.
*/
- if (!us->pusb_dev->bus->controller->dma_mask)
+ if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)))
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
/*
@@ -368,7 +379,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* check for state-transition errors */
if (us->srb != NULL) {
- printk(KERN_ERR USB_STORAGE "Error in %s: us->srb = %p\n",
+ printk(KERN_ERR "usb-storage: Error in %s: us->srb = %p\n",
__func__, us->srb);
return SCSI_MLQUEUE_HOST_BUSY;
}
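The slave_configure() change above caps max_hw_sectors at the largest DMA mapping the host can provide, converted to 512-byte sectors, so swiotlb-backed transfers never exceed what the DMA API can map. A standalone sketch of the clamp arithmetic follows; the sample queue limit and the 256 KiB mapping size are assumptions for illustration.

/* sketch: clamping max_hw_sectors to the DMA mapping limit */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

static unsigned int clamp_hw_sectors(unsigned int queue_max_hw_sectors,
				     uint64_t dma_max_mapping_size)
{
	uint64_t dma_sectors = dma_max_mapping_size >> SECTOR_SHIFT;

	return queue_max_hw_sectors < dma_sectors ?
	       queue_max_hw_sectors : (unsigned int)dma_sectors;
}

int main(void)
{
	/* e.g. a 1024-sector queue limit against a 256 KiB mapping limit */
	printf("max_hw_sectors = %u\n",
	       clamp_hw_sectors(1024, 256 * 1024));
	return 0;
}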
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ea0d27a94afe..1cd9b6305b06 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
US_FL_IGNORE_RESIDUE ),
/* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 89d9193bd1cf..895e2418de53 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -53,6 +53,7 @@ source "drivers/usb/typec/ucsi/Kconfig"
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
+ select REGMAP_I2C
help
Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
Delivery controller.
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index a18285a990a8..94a3eda62add 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -205,16 +205,6 @@ static void typec_altmode_put_partner(struct altmode *altmode)
put_device(&adev->dev);
}
-static int typec_port_fwnode_match(struct device *dev, const void *fwnode)
-{
- return dev_fwnode(dev) == fwnode;
-}
-
-static int typec_port_name_match(struct device *dev, const void *name)
-{
- return !strcmp((const char *)name, dev_name(dev));
-}
-
static void *typec_port_match(struct device_connection *con, int ep, void *data)
{
struct device *dev;
@@ -224,11 +214,9 @@ static void *typec_port_match(struct device_connection *con, int ep, void *data)
* we need to return ERR_PTR(-PROBE_DEFER) when there is no device.
*/
if (con->fwnode)
- return class_find_device(typec_class, NULL, con->fwnode,
- typec_port_fwnode_match);
+ return class_find_device_by_fwnode(typec_class, con->fwnode);
- dev = class_find_device(typec_class, NULL, con->endpoint[ep],
- typec_port_name_match);
+ dev = class_find_device_by_name(typec_class, con->endpoint[ep]);
return dev ? dev : ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 61b7bc58dd81..57907f26f681 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -215,7 +215,7 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
}
/* Alternate Mode muxes */
- nval = fwnode_property_read_u16_array(con->fwnode, "svid", NULL, 0);
+ nval = fwnode_property_count_u16(con->fwnode, "svid");
if (nval <= 0)
return NULL;
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index c524088246ee..ed8655c6af8c 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/usb.h>
#include <linux/usb/typec.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/pd.h>
@@ -75,7 +76,6 @@ struct fusb302_chip {
struct i2c_client *i2c_client;
struct tcpm_port *tcpm_port;
struct tcpc_dev tcpc_dev;
- struct tcpc_config tcpc_config;
struct regulator *vbus;
@@ -207,23 +207,19 @@ static int fusb302_debug_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(fusb302_debug);
-static struct dentry *rootdir;
-
static void fusb302_debugfs_init(struct fusb302_chip *chip)
{
- mutex_init(&chip->logbuffer_lock);
- if (!rootdir)
- rootdir = debugfs_create_dir("fusb302", NULL);
+ char name[NAME_MAX];
- chip->dentry = debugfs_create_file(dev_name(chip->dev),
- S_IFREG | 0444, rootdir,
+ mutex_init(&chip->logbuffer_lock);
+ snprintf(name, NAME_MAX, "fusb302-%s", dev_name(chip->dev));
+ chip->dentry = debugfs_create_file(name, S_IFREG | 0444, usb_debug_root,
chip, &fusb302_debug_fops);
}
static void fusb302_debugfs_exit(struct fusb302_chip *chip)
{
debugfs_remove(chip->dentry);
- debugfs_remove(rootdir);
}
#else
@@ -1110,23 +1106,6 @@ done:
mutex_unlock(&chip->lock);
}
-#define PDO_FIXED_FLAGS \
- (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
-
-static const u32 src_pdo[] = {
- PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
-};
-
-static const struct tcpc_config fusb302_tcpc_config = {
- .src_pdo = src_pdo,
- .nr_src_pdo = ARRAY_SIZE(src_pdo),
- .operating_snk_mw = 2500,
- .type = TYPEC_PORT_DRP,
- .data = TYPEC_PORT_DRD,
- .default_role = TYPEC_SINK,
- .alt_modes = NULL,
-};
-
static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
{
fusb302_tcpc_dev->init = tcpm_init;
@@ -1670,27 +1649,36 @@ static int init_gpio(struct fusb302_chip *chip)
return 0;
}
-static int fusb302_composite_snk_pdo_array(struct fusb302_chip *chip)
-{
- struct device *dev = chip->dev;
- u32 max_uv, max_ua;
+#define PDO_FIXED_FLAGS \
+ (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
- chip->snk_pdo[0] = PDO_FIXED(5000, 400, PDO_FIXED_FLAGS);
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS)
+};
- /*
- * As max_snk_ma/mv/mw is not needed for tcpc_config,
- * those settings should be passed in via sink PDO, so
- * "fcs, max-sink-*" properties will be deprecated, to
- * perserve compatibility with existing users of them,
- * we read those properties to convert them to be a var
- * PDO.
- */
- if (device_property_read_u32(dev, "fcs,max-sink-microvolt", &max_uv) ||
- device_property_read_u32(dev, "fcs,max-sink-microamp", &max_ua))
- return 1;
+static const u32 snk_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS)
+};
+
+static const struct property_entry port_props[] = {
+ PROPERTY_ENTRY_STRING("data-role", "dual"),
+ PROPERTY_ENTRY_STRING("power-role", "dual"),
+ PROPERTY_ENTRY_STRING("try-power-role", "sink"),
+ PROPERTY_ENTRY_U32_ARRAY("source-pdos", src_pdo),
+ PROPERTY_ENTRY_U32_ARRAY("sink-pdos", snk_pdo),
+ PROPERTY_ENTRY_U32("op-sink-microwatt", 2500),
+ { }
+};
- chip->snk_pdo[1] = PDO_VAR(5000, max_uv / 1000, max_ua / 1000);
- return 2;
+static struct fwnode_handle *fusb302_fwnode_get(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ fwnode = device_get_named_child_node(dev, "connector");
+ if (!fwnode)
+ fwnode = fwnode_create_software_node(port_props, NULL);
+
+ return fwnode;
}
static int fusb302_probe(struct i2c_client *client,
@@ -1701,7 +1689,6 @@ static int fusb302_probe(struct i2c_client *client,
struct device *dev = &client->dev;
const char *name;
int ret = 0;
- u32 v;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
dev_err(&client->dev,
@@ -1714,20 +1701,8 @@ static int fusb302_probe(struct i2c_client *client,
chip->i2c_client = client;
chip->dev = &client->dev;
- chip->tcpc_config = fusb302_tcpc_config;
- chip->tcpc_dev.config = &chip->tcpc_config;
mutex_init(&chip->lock);
- chip->tcpc_dev.fwnode =
- device_get_named_child_node(dev, "connector");
-
- if (!device_property_read_u32(dev, "fcs,operating-sink-microwatt", &v))
- chip->tcpc_config.operating_snk_mw = v / 1000;
-
- /* Composite sink PDO */
- chip->tcpc_config.nr_snk_pdo = fusb302_composite_snk_pdo_array(chip);
- chip->tcpc_config.snk_pdo = chip->snk_pdo;
-
/*
* Devicetree platforms should get extcon via phandle (not yet
* supported). On ACPI platforms, we get the name from a device prop.
@@ -1753,6 +1728,7 @@ static int fusb302_probe(struct i2c_client *client,
INIT_WORK(&chip->irq_work, fusb302_irq_work);
INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
init_tcpc_dev(&chip->tcpc_dev);
+ fusb302_debugfs_init(chip);
if (client->irq) {
chip->gpio_int_n_irq = client->irq;
@@ -1762,8 +1738,15 @@ static int fusb302_probe(struct i2c_client *client,
goto destroy_workqueue;
}
+ chip->tcpc_dev.fwnode = fusb302_fwnode_get(dev);
+ if (IS_ERR(chip->tcpc_dev.fwnode)) {
+ ret = PTR_ERR(chip->tcpc_dev.fwnode);
+ goto destroy_workqueue;
+ }
+
chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
if (IS_ERR(chip->tcpm_port)) {
+ fwnode_handle_put(chip->tcpc_dev.fwnode);
ret = PTR_ERR(chip->tcpm_port);
if (ret != -EPROBE_DEFER)
dev_err(dev, "cannot register tcpm port, ret=%d", ret);
@@ -1778,14 +1761,15 @@ static int fusb302_probe(struct i2c_client *client,
goto tcpm_unregister_port;
}
enable_irq_wake(chip->gpio_int_n_irq);
- fusb302_debugfs_init(chip);
i2c_set_clientdata(client, chip);
return ret;
tcpm_unregister_port:
tcpm_unregister_port(chip->tcpm_port);
+ fwnode_handle_put(chip->tcpc_dev.fwnode);
destroy_workqueue:
+ fusb302_debugfs_exit(chip);
destroy_workqueue(chip->wq);
return ret;
@@ -1800,6 +1784,7 @@ static int fusb302_remove(struct i2c_client *client)
cancel_work_sync(&chip->irq_work);
cancel_delayed_work_sync(&chip->bc_lvl_handler);
tcpm_unregister_port(chip->tcpm_port);
+ fwnode_handle_put(chip->tcpc_dev.fwnode);
destroy_workqueue(chip->wq);
fusb302_debugfs_exit(chip);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index fba32d84e578..96562744101c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/usb.h>
#include <linux/usb/pd.h>
#include <linux/usb/pd_ado.h>
#include <linux/usb/pd_bdo.h>
@@ -379,7 +380,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config->default_role == TYPEC_SINK)
+ else if (port->tcpc->config &&
+ port->tcpc->config->default_role == TYPEC_SINK)
return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
@@ -570,22 +572,27 @@ static int tcpm_debug_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
-static struct dentry *rootdir;
-
static void tcpm_debugfs_init(struct tcpm_port *port)
{
- mutex_init(&port->logbuffer_lock);
- /* /sys/kernel/debug/tcpm/usbcX */
- if (!rootdir)
- rootdir = debugfs_create_dir("tcpm", NULL);
+ char name[NAME_MAX];
- port->dentry = debugfs_create_file(dev_name(port->dev),
- S_IFREG | 0444, rootdir,
+ mutex_init(&port->logbuffer_lock);
+ snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
+ port->dentry = debugfs_create_file(name, S_IFREG | 0444, usb_debug_root,
port, &tcpm_debug_fops);
}
static void tcpm_debugfs_exit(struct tcpm_port *port)
{
+ int i;
+
+ mutex_lock(&port->logbuffer_lock);
+ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
+ kfree(port->logbuffer[i]);
+ port->logbuffer[i] = NULL;
+ }
+ mutex_unlock(&port->logbuffer_lock);
+
debugfs_remove(port->dentry);
}
@@ -1095,7 +1102,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
break;
case CMD_ATTENTION:
/* Attention command does not have response */
- typec_altmode_attention(adev, p[1]);
+ if (adev)
+ typec_altmode_attention(adev, p[1]);
return 0;
default:
break;
@@ -1147,20 +1155,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
}
break;
case CMD_ENTER_MODE:
- typec_altmode_update_active(pdev, true);
-
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- return 1;
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, true);
+
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ response[0] = VDO(adev->svid, 1,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ return 1;
+ }
}
return 0;
case CMD_EXIT_MODE:
- typec_altmode_update_active(pdev, false);
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, false);
- /* Back to USB Operation */
- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
- NULL));
+ /* Back to USB Operation */
+ WARN_ON(typec_altmode_notify(adev,
+ TYPEC_STATE_USB,
+ NULL));
+ }
break;
default:
break;
@@ -1170,8 +1184,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
switch (cmd) {
case CMD_ENTER_MODE:
/* Back to USB Operation */
- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
- NULL));
+ if (adev)
+ WARN_ON(typec_altmode_notify(adev,
+ TYPEC_STATE_USB,
+ NULL));
break;
default:
break;
@@ -1182,7 +1198,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
}
/* Informing the alternate mode drivers about everything */
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ if (adev)
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
return rlen;
}
@@ -1422,7 +1439,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
else if ((pdo_min_voltage(pdo[i]) ==
pdo_min_voltage(pdo[i - 1])) &&
(pdo_max_voltage(pdo[i]) ==
- pdo_min_voltage(pdo[i - 1])))
+ pdo_max_voltage(pdo[i - 1])))
return PDO_ERR_DUPE_PDO;
break;
/*
@@ -4114,7 +4131,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && !tcpc->config->try_role_hw)
+ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4410,8 +4427,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
goto sink;
/* Get source pdos */
- ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
- NULL, 0);
+ ret = fwnode_property_count_u32(fwnode, "source-pdos");
if (ret <= 0)
return -EINVAL;
@@ -4435,8 +4451,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
return -EINVAL;
sink:
/* Get sink pdos */
- ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
- NULL, 0);
+ ret = fwnode_property_count_u32(fwnode, "sink-pdos");
if (ret <= 0)
return -EINVAL;
@@ -4701,7 +4716,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
port->typec_caps.prefer_role = tcfg->default_role;
port->typec_caps.type = tcfg->type;
port->typec_caps.data = tcfg->data;
- port->self_powered = port->tcpc->config->self_powered;
+ port->self_powered = tcfg->self_powered;
return 0;
}
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 6b317c150bdd..edc271da14f4 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -617,10 +617,8 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->regmap = pmic->regmap;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ if (irq < 0)
return irq;
- }
irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
if (irq < 0)
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index f7a79a23ebed..907e20e1a71e 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -1018,7 +1018,7 @@ release_fw:
******************************************************************************/
static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
{
- int err;
+ int err = 0;
while (flash_mode != FLASH_NOT_NEEDED) {
err = do_flash(uc, flash_mode);
@@ -1104,14 +1104,11 @@ static ssize_t do_flash_store(struct device *dev,
static DEVICE_ATTR_WO(do_flash);
-static struct attribute *ucsi_ccg_sysfs_attrs[] = {
+static struct attribute *ucsi_ccg_attrs[] = {
&dev_attr_do_flash.attr,
NULL,
};
-
-static struct attribute_group ucsi_ccg_attr_group = {
- .attrs = ucsi_ccg_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(ucsi_ccg);
static int ucsi_ccg_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -1189,10 +1186,6 @@ static int ucsi_ccg_probe(struct i2c_client *client,
i2c_set_clientdata(client, uc);
- status = sysfs_create_group(&uc->dev->kobj, &ucsi_ccg_attr_group);
- if (status)
- dev_err(uc->dev, "cannot create sysfs group: %d\n", status);
-
pm_runtime_set_active(uc->dev);
pm_runtime_enable(uc->dev);
pm_runtime_idle(uc->dev);
@@ -1209,7 +1202,6 @@ static int ucsi_ccg_remove(struct i2c_client *client)
ucsi_unregister_ppm(uc->ucsi);
pm_runtime_disable(uc->dev);
free_irq(uc->irq, uc);
- sysfs_remove_group(&uc->dev->kobj, &ucsi_ccg_attr_group);
return 0;
}
@@ -1270,6 +1262,7 @@ static struct i2c_driver ucsi_ccg_driver = {
.driver = {
.name = "ucsi_ccg",
.pm = &ucsi_ccg_pm,
+ .dev_groups = ucsi_ccg_groups,
},
.probe = ucsi_ccg_probe,
.remove = ucsi_ccg_remove,
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index f101347e3ea3..c31d17d05810 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -35,9 +35,11 @@ MODULE_DEVICE_TABLE(usb, skel_table);
/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER (PAGE_SIZE - 512)
-/* MAX_TRANSFER is chosen so that the VM is not stressed by
- allocations > PAGE_SIZE and the number of packets in a page
- is an integer 512 is the largest possible packet on EHCI */
+/*
+ * MAX_TRANSFER is chosen so that the VM is not stressed by
+ * allocations > PAGE_SIZE and the number of packets in a page
+ * is an integer; 512 is the largest possible packet on EHCI
+ */
#define WRITES_IN_FLIGHT 8
/* arbitrarily chosen */
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 35618ceb2791..d11270560c24 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -52,7 +52,11 @@ struct stub_priv {
unsigned long seqnum;
struct list_head list;
struct stub_device *sdev;
- struct urb *urb;
+ struct urb **urbs;
+ struct scatterlist *sgl;
+ int num_urbs;
+ int completed_urbs;
+ int urb_status;
int unlinking;
};
@@ -86,6 +90,7 @@ extern struct usb_device_driver stub_driver;
struct bus_id_priv *get_busid_priv(const char *busid);
void put_busid_priv(struct bus_id_priv *bid);
int del_match_busid(char *busid);
+void stub_free_priv_and_urb(struct stub_priv *priv);
void stub_device_cleanup_urbs(struct stub_device *sdev);
/* stub_rx.c */
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 7931e6cecc70..2305d425e6c9 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -106,38 +106,13 @@ err:
}
static DEVICE_ATTR_WO(usbip_sockfd);
-static int stub_add_files(struct device *dev)
-{
- int err = 0;
-
- err = device_create_file(dev, &dev_attr_usbip_status);
- if (err)
- goto err_status;
-
- err = device_create_file(dev, &dev_attr_usbip_sockfd);
- if (err)
- goto err_sockfd;
-
- err = device_create_file(dev, &dev_attr_usbip_debug);
- if (err)
- goto err_debug;
-
- return 0;
-
-err_debug:
- device_remove_file(dev, &dev_attr_usbip_sockfd);
-err_sockfd:
- device_remove_file(dev, &dev_attr_usbip_status);
-err_status:
- return err;
-}
-
-static void stub_remove_files(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_usbip_status);
- device_remove_file(dev, &dev_attr_usbip_sockfd);
- device_remove_file(dev, &dev_attr_usbip_debug);
-}
+static struct attribute *usbip_attrs[] = {
+ &dev_attr_usbip_status.attr,
+ &dev_attr_usbip_sockfd.attr,
+ &dev_attr_usbip_debug.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(usbip);
static void stub_shutdown_connection(struct usbip_device *ud)
{
@@ -379,17 +354,8 @@ static int stub_probe(struct usb_device *udev)
goto err_port;
}
- rc = stub_add_files(&udev->dev);
- if (rc) {
- dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid);
- goto err_files;
- }
-
return 0;
-err_files:
- usb_hub_release_port(udev->parent, udev->portnum,
- (struct usb_dev_state *) udev);
err_port:
dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(udev);
@@ -457,7 +423,6 @@ static void stub_disconnect(struct usb_device *udev)
/*
* NOTE: rx/tx threads are invoked for each usb_device.
*/
- stub_remove_files(&udev->dev);
/* release port */
rc = usb_hub_release_port(udev->parent, udev->portnum,
@@ -526,4 +491,5 @@ struct usb_device_driver stub_driver = {
.resume = stub_resume,
#endif
.supports_autosuspend = 0,
+ .dev_groups = usbip_groups,
};
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 2e4bfccd4bfc..c1c0bbc9f8b1 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
@@ -281,13 +282,49 @@ static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead)
struct stub_priv *priv, *tmp;
list_for_each_entry_safe(priv, tmp, listhead, list) {
- list_del(&priv->list);
+ list_del_init(&priv->list);
return priv;
}
return NULL;
}
+void stub_free_priv_and_urb(struct stub_priv *priv)
+{
+ struct urb *urb;
+ int i;
+
+ for (i = 0; i < priv->num_urbs; i++) {
+ urb = priv->urbs[i];
+
+ if (!urb)
+ return;
+
+ kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
+ if (urb->transfer_buffer && !priv->sgl) {
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+ }
+
+ if (urb->num_sgs) {
+ sgl_free(urb->sg);
+ urb->sg = NULL;
+ urb->num_sgs = 0;
+ }
+
+ usb_free_urb(urb);
+ }
+ if (!list_empty(&priv->list))
+ list_del(&priv->list);
+ if (priv->sgl)
+ sgl_free(priv->sgl);
+ kfree(priv->urbs);
+ kmem_cache_free(stub_priv_cache, priv);
+}
+
static struct stub_priv *stub_priv_pop(struct stub_device *sdev)
{
unsigned long flags;
@@ -314,25 +351,15 @@ done:
void stub_device_cleanup_urbs(struct stub_device *sdev)
{
struct stub_priv *priv;
- struct urb *urb;
+ int i;
dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
while ((priv = stub_priv_pop(sdev))) {
- urb = priv->urb;
- dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
- priv->seqnum);
- usb_kill_urb(urb);
-
- kmem_cache_free(stub_priv_cache, priv);
+ for (i = 0; i < priv->num_urbs; i++)
+ usb_kill_urb(priv->urbs[i]);
- kfree(urb->transfer_buffer);
- urb->transfer_buffer = NULL;
-
- kfree(urb->setup_packet);
- urb->setup_packet = NULL;
-
- usb_free_urb(urb);
+ stub_free_priv_and_urb(priv);
}
}
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index b0a855acafa3..66edfeea68fe 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
@@ -201,7 +202,7 @@ static void tweak_special_requests(struct urb *urb)
static int stub_recv_cmd_unlink(struct stub_device *sdev,
struct usbip_header *pdu)
{
- int ret;
+ int ret, i;
unsigned long flags;
struct stub_priv *priv;
@@ -246,12 +247,14 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
* so a driver in a client host will know the failure
* of the unlink request ?
*/
- ret = usb_unlink_urb(priv->urb);
- if (ret != -EINPROGRESS)
- dev_err(&priv->urb->dev->dev,
- "failed to unlink a urb # %lu, ret %d\n",
- priv->seqnum, ret);
-
+ for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
+ ret = usb_unlink_urb(priv->urbs[i]);
+ if (ret != -EINPROGRESS)
+ dev_err(&priv->urbs[i]->dev->dev,
+ "failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
+ i + 1, priv->num_urbs,
+ priv->seqnum, ret);
+ }
return 0;
}
@@ -433,14 +436,36 @@ static void masking_bogus_flags(struct urb *urb)
urb->transfer_flags &= allowed;
}
+static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->num_urbs; i++) {
+ ret = usbip_recv_xbuff(ud, priv->urbs[i]);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
static void stub_recv_cmd_submit(struct stub_device *sdev,
struct usbip_header *pdu)
{
- int ret;
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
+ struct scatterlist *sgl = NULL, *sg;
+ void *buffer = NULL;
+ unsigned long long buf_len;
+ int nents;
+ int num_urbs = 1;
int pipe = get_pipe(sdev, pdu);
+ int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
+ int support_sg = 1;
+ int np = 0;
+ int ret, i;
if (pipe == -1)
return;
@@ -449,76 +474,139 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
if (!priv)
return;
- /* setup a urb */
- if (usb_pipeisoc(pipe))
- priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
- GFP_KERNEL);
- else
- priv->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
- if (!priv->urb) {
- usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
- return;
+ /* allocate urb transfer buffer, if needed */
+ if (buf_len) {
+ if (use_sg) {
+ sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
+ if (!sgl)
+ goto err_malloc;
+ } else {
+ buffer = kzalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ goto err_malloc;
+ }
}
- /* allocate urb transfer buffer, if needed */
- if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
- priv->urb->transfer_buffer =
- kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
- GFP_KERNEL);
- if (!priv->urb->transfer_buffer) {
+ /* Check if the server's HCD supports SG */
+ if (use_sg && !udev->bus->sg_tablesize) {
+ /*
+ * If the server's HCD doesn't support SG, break a single SG
+ * request into several URBs and map each SG list entry to the
+ * corresponding URB buffer. The previously allocated SG
+ * list is stored in priv->sgl (if the server's HCD supports SG,
+ * the SG list is stored only in urb->sg) and is used as an
+ * indicator that the server split a single SG request into
+ * several URBs. Later, priv->sgl is used by stub_complete() and
+ * stub_send_ret_submit() to reassemble the divided URBs.
+ */
+ support_sg = 0;
+ num_urbs = nents;
+ priv->completed_urbs = 0;
+ pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
+ }
+
+ /* allocate urb array */
+ priv->num_urbs = num_urbs;
+ priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
+ if (!priv->urbs)
+ goto err_urbs;
+
+ /* setup a urb */
+ if (support_sg) {
+ if (usb_pipeisoc(pipe))
+ np = pdu->u.cmd_submit.number_of_packets;
+
+ priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
+ if (!priv->urbs[0])
+ goto err_urb;
+
+ if (buf_len) {
+ if (use_sg) {
+ priv->urbs[0]->sg = sgl;
+ priv->urbs[0]->num_sgs = nents;
+ priv->urbs[0]->transfer_buffer = NULL;
+ } else {
+ priv->urbs[0]->transfer_buffer = buffer;
+ }
+ }
+
+ /* copy urb setup packet */
+ priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
+ 8, GFP_KERNEL);
+ if (!priv->urbs[0]->setup_packet) {
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
- }
- /* copy urb setup packet */
- priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
- GFP_KERNEL);
- if (!priv->urb->setup_packet) {
- dev_err(&udev->dev, "allocate setup_packet\n");
- usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
- return;
+ usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
+ } else {
+ for_each_sg(sgl, sg, nents, i) {
+ priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+ /* The URBs that were previously allocated will be freed
+ * in stub_device_cleanup_urbs() if an error occurs.
+ */
+ if (!priv->urbs[i])
+ goto err_urb;
+
+ usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
+ priv->urbs[i]->transfer_buffer = sg_virt(sg);
+ priv->urbs[i]->transfer_buffer_length = sg->length;
+ }
+ priv->sgl = sgl;
}
- /* set other members from the base header of pdu */
- priv->urb->context = (void *) priv;
- priv->urb->dev = udev;
- priv->urb->pipe = pipe;
- priv->urb->complete = stub_complete;
+ for (i = 0; i < num_urbs; i++) {
+ /* set other members from the base header of pdu */
+ priv->urbs[i]->context = (void *) priv;
+ priv->urbs[i]->dev = udev;
+ priv->urbs[i]->pipe = pipe;
+ priv->urbs[i]->complete = stub_complete;
- usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);
+ /* no need to submit an intercepted request, but harmless? */
+ tweak_special_requests(priv->urbs[i]);
+ masking_bogus_flags(priv->urbs[i]);
+ }
- if (usbip_recv_xbuff(ud, priv->urb) < 0)
+ if (stub_recv_xbuff(ud, priv) < 0)
return;
- if (usbip_recv_iso(ud, priv->urb) < 0)
+ if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
return;
- /* no need to submit an intercepted request, but harmless? */
- tweak_special_requests(priv->urb);
-
- masking_bogus_flags(priv->urb);
/* urb is now ready to submit */
- ret = usb_submit_urb(priv->urb, GFP_KERNEL);
-
- if (ret == 0)
- usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
- pdu->base.seqnum);
- else {
- dev_err(&udev->dev, "submit_urb error, %d\n", ret);
- usbip_dump_header(pdu);
- usbip_dump_urb(priv->urb);
-
- /*
- * Pessimistic.
- * This connection will be discarded.
- */
- usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+ for (i = 0; i < priv->num_urbs; i++) {
+ ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+
+ if (ret == 0)
+ usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+ pdu->base.seqnum);
+ else {
+ dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+ usbip_dump_header(pdu);
+ usbip_dump_urb(priv->urbs[i]);
+
+ /*
+ * Pessimistic.
+ * This connection will be discarded.
+ */
+ usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+ break;
+ }
}
usbip_dbg_stub_rx("Leave\n");
+ return;
+
+err_urb:
+ kfree(priv->urbs);
+err_urbs:
+ kfree(buffer);
+ sgl_free(sgl);
+err_malloc:
+ usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
}
/* recv a pdu */
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index f0ec41a50cbc..36010a82b359 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -5,25 +5,11 @@
#include <linux/kthread.h>
#include <linux/socket.h>
+#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "stub.h"
-static void stub_free_priv_and_urb(struct stub_priv *priv)
-{
- struct urb *urb = priv->urb;
-
- kfree(urb->setup_packet);
- urb->setup_packet = NULL;
-
- kfree(urb->transfer_buffer);
- urb->transfer_buffer = NULL;
-
- list_del(&priv->list);
- kmem_cache_free(stub_priv_cache, priv);
- usb_free_urb(urb);
-}
-
/* be in spin_lock_irqsave(&sdev->priv_lock, flags) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
__u32 status)
@@ -85,6 +71,22 @@ void stub_complete(struct urb *urb)
break;
}
+ /*
+ * If the server breaks a single SG request into several URBs, the
+ * URBs must be reassembled before sending the completed URB to the vhci.
+ * Don't wake up the tx thread until all the URBs are completed.
+ */
+ if (priv->sgl) {
+ priv->completed_urbs++;
+
+ /* Only save the first error status */
+ if (urb->status && !priv->urb_status)
+ priv->urb_status = urb->status;
+
+ if (priv->completed_urbs < priv->num_urbs)
+ return;
+ }
+
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
if (sdev->ud.tcp_socket == NULL) {
@@ -156,18 +158,22 @@ static int stub_send_ret_submit(struct stub_device *sdev)
size_t total_size = 0;
while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
- int ret;
- struct urb *urb = priv->urb;
+ struct urb *urb = priv->urbs[0];
struct usbip_header pdu_header;
struct usbip_iso_packet_descriptor *iso_buffer = NULL;
struct kvec *iov = NULL;
+ struct scatterlist *sg;
+ u32 actual_length = 0;
int iovnum = 0;
+ int ret;
+ int i;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
- if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ if (urb->actual_length > 0 && !urb->transfer_buffer &&
+ !urb->num_sgs) {
dev_err(&sdev->udev->dev,
"urb: actual_length %d transfer_buffer null\n",
urb->actual_length);
@@ -176,6 +182,11 @@ static int stub_send_ret_submit(struct stub_device *sdev)
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
iovnum = 2 + urb->number_of_packets;
+ else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
+ urb->num_sgs)
+ iovnum = 1 + urb->num_sgs;
+ else if (usb_pipein(urb->pipe) && priv->sgl)
+ iovnum = 1 + priv->num_urbs;
else
iovnum = 2;
@@ -192,6 +203,15 @@ static int stub_send_ret_submit(struct stub_device *sdev)
setup_ret_submit_pdu(&pdu_header, urb);
usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
pdu_header.base.seqnum);
+
+ if (priv->sgl) {
+ for (i = 0; i < priv->num_urbs; i++)
+ actual_length += priv->urbs[i]->actual_length;
+
+ pdu_header.u.ret_submit.status = priv->urb_status;
+ pdu_header.u.ret_submit.actual_length = actual_length;
+ }
+
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
@@ -200,12 +220,47 @@ static int stub_send_ret_submit(struct stub_device *sdev)
txsize += sizeof(pdu_header);
/* 2. setup transfer buffer */
- if (usb_pipein(urb->pipe) &&
+ if (usb_pipein(urb->pipe) && priv->sgl) {
+ /* If the server split a single SG request into several
+ * URBs because the server's HCD doesn't support SG,
+ * reassemble the split URB buffers into a single
+ * return command.
+ */
+ for (i = 0; i < priv->num_urbs; i++) {
+ iov[iovnum].iov_base =
+ priv->urbs[i]->transfer_buffer;
+ iov[iovnum].iov_len =
+ priv->urbs[i]->actual_length;
+ iovnum++;
+ }
+ txsize += actual_length;
+ } else if (usb_pipein(urb->pipe) &&
usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
urb->actual_length > 0) {
- iov[iovnum].iov_base = urb->transfer_buffer;
- iov[iovnum].iov_len = urb->actual_length;
- iovnum++;
+ if (urb->num_sgs) {
+ unsigned int copy = urb->actual_length;
+ int size;
+
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ if (copy == 0)
+ break;
+
+ if (copy < sg->length)
+ size = copy;
+ else
+ size = sg->length;
+
+ iov[iovnum].iov_base = sg_virt(sg);
+ iov[iovnum].iov_len = size;
+
+ iovnum++;
+ copy -= size;
+ }
+ } else {
+ iov[iovnum].iov_base = urb->transfer_buffer;
+ iov[iovnum].iov_len = urb->actual_length;
+ iovnum++;
+ }
txsize += urb->actual_length;
} else if (usb_pipein(urb->pipe) &&
usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 45da3e01c7b0..6532d68e8808 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -680,8 +680,12 @@ EXPORT_SYMBOL_GPL(usbip_pad_iso);
/* some members of urb must be substituted before. */
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
{
- int ret;
+ struct scatterlist *sg;
+ int ret = 0;
+ int recv;
int size;
+ int copy;
+ int i;
if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
/* the direction of urb must be OUT. */
@@ -701,29 +705,48 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
- if (size > urb->transfer_buffer_length) {
+ if (size > urb->transfer_buffer_length)
/* should not happen, probably malicious packet */
- if (ud->side == USBIP_STUB) {
- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
- return 0;
- } else {
- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
- return -EPIPE;
- }
- }
+ goto error;
- ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
- if (ret != size) {
- dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
- if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
- } else {
- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
- return -EPIPE;
+ if (urb->num_sgs) {
+ copy = size;
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ int recv_size;
+
+ if (copy < sg->length)
+ recv_size = copy;
+ else
+ recv_size = sg->length;
+
+ recv = usbip_recv(ud->tcp_socket, sg_virt(sg),
+ recv_size);
+
+ if (recv != recv_size)
+ goto error;
+
+ copy -= recv;
+ ret += recv;
}
+
+ if (ret != size)
+ goto error;
+ } else {
+ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+ if (ret != size)
+ goto error;
}
return ret;
+
+error:
+ dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+ else
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+
+ return -EPIPE;
}
EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 000ab7225717..585a84d319bd 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -697,7 +697,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
}
vdev = &vhci_hcd->vdev[portnum-1];
- if (!urb->transfer_buffer && urb->transfer_buffer_length) {
+ if (!urb->transfer_buffer && !urb->num_sgs &&
+ urb->transfer_buffer_length) {
dev_dbg(dev, "Null URB transfer buffer\n");
return -EINVAL;
}
@@ -1143,6 +1144,15 @@ static int vhci_setup(struct usb_hcd *hcd)
hcd->speed = HCD_USB3;
hcd->self.root_hub->speed = USB_SPEED_SUPER;
}
+
+ /*
+ * Support SG.
+ * sg_tablesize is an arbitrary value to alleviate memory pressure
+ * on the host.
+ */
+ hcd->self.sg_tablesize = 32;
+ hcd->self.no_sg_constraint = 1;
+
return 0;
}
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 44cd64518925..33f8972ba842 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -90,6 +90,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
+ if (urb->num_sgs)
+ urb->transfer_flags &= ~URB_DMA_MAP_SG;
+
usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
spin_lock_irqsave(&vhci->lock, flags);
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 2fa26d0578d7..c3803785f6ef 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -5,6 +5,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/scatterlist.h>
#include "usbip_common.h"
#include "vhci.h"
@@ -50,19 +51,23 @@ static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
static int vhci_send_cmd_submit(struct vhci_device *vdev)
{
+ struct usbip_iso_packet_descriptor *iso_buffer = NULL;
struct vhci_priv *priv = NULL;
+ struct scatterlist *sg;
struct msghdr msg;
- struct kvec iov[3];
+ struct kvec *iov;
size_t txsize;
size_t total_size = 0;
+ int iovnum;
+ int err = -ENOMEM;
+ int i;
while ((priv = dequeue_from_priv_tx(vdev)) != NULL) {
int ret;
struct urb *urb = priv->urb;
struct usbip_header pdu_header;
- struct usbip_iso_packet_descriptor *iso_buffer = NULL;
txsize = 0;
memset(&pdu_header, 0, sizeof(pdu_header));
@@ -72,18 +77,45 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
priv->seqnum);
+ if (urb->num_sgs && usb_pipeout(urb->pipe))
+ iovnum = 2 + urb->num_sgs;
+ else
+ iovnum = 3;
+
+ iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
+ if (!iov) {
+ usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC);
+ return -ENOMEM;
+ }
+
+ if (urb->num_sgs)
+ urb->transfer_flags |= URB_DMA_MAP_SG;
+
/* 1. setup usbip_header */
setup_cmd_submit_pdu(&pdu_header, urb);
usbip_header_correct_endian(&pdu_header, 1);
+ iovnum = 0;
- iov[0].iov_base = &pdu_header;
- iov[0].iov_len = sizeof(pdu_header);
+ iov[iovnum].iov_base = &pdu_header;
+ iov[iovnum].iov_len = sizeof(pdu_header);
txsize += sizeof(pdu_header);
+ iovnum++;
/* 2. setup transfer buffer */
if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) {
- iov[1].iov_base = urb->transfer_buffer;
- iov[1].iov_len = urb->transfer_buffer_length;
+ if (urb->num_sgs &&
+ !usb_endpoint_xfer_isoc(&urb->ep->desc)) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ iov[iovnum].iov_base = sg_virt(sg);
+ iov[iovnum].iov_len = sg->length;
+ iovnum++;
+ }
+ } else {
+ iov[iovnum].iov_base = urb->transfer_buffer;
+ iov[iovnum].iov_len =
+ urb->transfer_buffer_length;
+ iovnum++;
+ }
txsize += urb->transfer_buffer_length;
}
@@ -95,23 +127,26 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
if (!iso_buffer) {
usbip_event_add(&vdev->ud,
SDEV_EVENT_ERROR_MALLOC);
- return -1;
+ goto err_iso_buffer;
}
- iov[2].iov_base = iso_buffer;
- iov[2].iov_len = len;
+ iov[iovnum].iov_base = iso_buffer;
+ iov[iovnum].iov_len = len;
+ iovnum++;
txsize += len;
}
- ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, 3, txsize);
+ ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum,
+ txsize);
if (ret != txsize) {
pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
txsize);
- kfree(iso_buffer);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
- return -1;
+ err = -EPIPE;
+ goto err_tx;
}
+ kfree(iov);
kfree(iso_buffer);
usbip_dbg_vhci_tx("send txdata\n");
@@ -119,6 +154,13 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
}
return total_size;
+
+err_tx:
+ kfree(iso_buffer);
+err_iso_buffer:
+ kfree(iov);
+
+ return err;
}
static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
diff --git a/drivers/usb/usbip/vudc.h b/drivers/usb/usbip/vudc.h
index cf968192e59f..1bd4bc005829 100644
--- a/drivers/usb/usbip/vudc.h
+++ b/drivers/usb/usbip/vudc.h
@@ -115,7 +115,7 @@ struct vudc_device {
struct list_head dev_entry;
};
-extern const struct attribute_group vudc_attr_group;
+extern const struct attribute_group *vudc_groups[];
/* visible everywhere */
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index a72c17ff1c6a..c8eeabdd9b56 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -616,18 +616,10 @@ int vudc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_add_udc;
- ret = sysfs_create_group(&pdev->dev.kobj, &vudc_attr_group);
- if (ret) {
- dev_err(&udc->pdev->dev, "create sysfs files\n");
- goto err_sysfs;
- }
-
platform_set_drvdata(pdev, udc);
return ret;
-err_sysfs:
- usb_del_gadget_udc(&udc->gadget);
err_add_udc:
cleanup_vudc_hw(udc);
err_init_vudc_hw:
@@ -640,7 +632,6 @@ int vudc_remove(struct platform_device *pdev)
{
struct vudc *udc = platform_get_drvdata(pdev);
- sysfs_remove_group(&pdev->dev.kobj, &vudc_attr_group);
usb_del_gadget_udc(&udc->gadget);
cleanup_vudc_hw(udc);
kfree(udc);
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 390733e6937e..678faa82598c 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -22,6 +22,7 @@ static struct platform_driver vudc_driver = {
.remove = vudc_remove,
.driver = {
.name = GADGET_NAME,
+ .dev_groups = vudc_groups,
},
};
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 6dcd3ff655c3..100f680c572a 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -215,7 +215,12 @@ static struct bin_attribute *dev_bin_attrs[] = {
NULL,
};
-const struct attribute_group vudc_attr_group = {
+static const struct attribute_group vudc_attr_group = {
.attrs = dev_attrs,
.bin_attrs = dev_bin_attrs,
};
+
+const struct attribute_group *vudc_groups[] = {
+ &vudc_attr_group,
+ NULL,
+};
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 054391f30fa8..ad830abe1021 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -650,12 +650,13 @@ unpin_exit:
}
static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
- struct list_head *regions)
+ struct list_head *regions,
+ struct iommu_iotlb_gather *iotlb_gather)
{
long unlocked = 0;
struct vfio_regions *entry, *next;
- iommu_tlb_sync(domain->domain);
+ iommu_tlb_sync(domain->domain, iotlb_gather);
list_for_each_entry_safe(entry, next, regions, list) {
unlocked += vfio_unpin_pages_remote(dma,
@@ -685,18 +686,19 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
struct vfio_dma *dma, dma_addr_t *iova,
size_t len, phys_addr_t phys, long *unlocked,
struct list_head *unmapped_list,
- int *unmapped_cnt)
+ int *unmapped_cnt,
+ struct iommu_iotlb_gather *iotlb_gather)
{
size_t unmapped = 0;
struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
- unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+ unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+ iotlb_gather);
if (!unmapped) {
kfree(entry);
} else {
- iommu_tlb_range_add(domain->domain, *iova, unmapped);
entry->iova = *iova;
entry->phys = phys;
entry->len = unmapped;
@@ -712,8 +714,8 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
* or in case of errors.
*/
if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
- *unlocked += vfio_sync_unpin(dma, domain,
- unmapped_list);
+ *unlocked += vfio_sync_unpin(dma, domain, unmapped_list,
+ iotlb_gather);
*unmapped_cnt = 0;
}
@@ -744,6 +746,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
LIST_HEAD(unmapped_region_list);
+ struct iommu_iotlb_gather iotlb_gather;
int unmapped_region_cnt = 0;
long unlocked = 0;
@@ -768,6 +771,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
cond_resched();
}
+ iommu_iotlb_gather_init(&iotlb_gather);
while (iova < end) {
size_t unmapped, len;
phys_addr_t phys, next;
@@ -796,7 +800,8 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
*/
unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
&unlocked, &unmapped_region_list,
- &unmapped_region_cnt);
+ &unmapped_region_cnt,
+ &iotlb_gather);
if (!unmapped) {
unmapped = unmap_unpin_slow(domain, dma, &iova, len,
phys, &unlocked);
@@ -807,8 +812,10 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->iommu_mapped = false;
- if (unmapped_region_cnt)
- unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+ if (unmapped_region_cnt) {
+ unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
+ &iotlb_gather);
+ }
if (do_accounting) {
vfio_lock_acct(dma, -unlocked, true);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 9e90e969af55..7804869c6a31 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -22,6 +22,12 @@
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small packets.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
+ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
- }
}
mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0536f8526359..36ca2cf419bf 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
__poll_t mask;
- int ret = 0;
if (poll->wqh)
return 0;
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & EPOLLERR) {
vhost_poll_stop(poll);
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
__vhost_vq_meta_reset(d->vqs[i]);
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_map_unprefetch(struct vhost_map *map)
-{
- kfree(map->pages);
- map->pages = NULL;
- map->npages = 0;
- map->addr = NULL;
-}
-
-static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
-{
- struct vhost_map *map[VHOST_NUM_ADDRS];
- int i;
-
- spin_lock(&vq->mmu_lock);
- for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- map[i] = rcu_dereference_protected(vq->maps[i],
- lockdep_is_held(&vq->mmu_lock));
- if (map[i])
- rcu_assign_pointer(vq->maps[i], NULL);
- }
- spin_unlock(&vq->mmu_lock);
-
- synchronize_rcu();
-
- for (i = 0; i < VHOST_NUM_ADDRS; i++)
- if (map[i])
- vhost_map_unprefetch(map[i]);
-
-}
-
-static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
-{
- int i;
-
- vhost_uninit_vq_maps(vq);
- for (i = 0; i < VHOST_NUM_ADDRS; i++)
- vq->uaddrs[i].size = 0;
-}
-
-static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
- unsigned long start,
- unsigned long end)
-{
- if (unlikely(!uaddr->size))
- return false;
-
- return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
-}
-
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
-{
- struct vhost_uaddr *uaddr = &vq->uaddrs[index];
- struct vhost_map *map;
- int i;
-
- if (!vhost_map_range_overlap(uaddr, start, end))
- return;
-
- spin_lock(&vq->mmu_lock);
- ++vq->invalidate_count;
-
- map = rcu_dereference_protected(vq->maps[index],
- lockdep_is_held(&vq->mmu_lock));
- if (map) {
- if (uaddr->write) {
- for (i = 0; i < map->npages; i++)
- set_page_dirty(map->pages[i]);
- }
- rcu_assign_pointer(vq->maps[index], NULL);
- }
- spin_unlock(&vq->mmu_lock);
-
- if (map) {
- synchronize_rcu();
- vhost_map_unprefetch(map);
- }
-}
-
-static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
-{
- if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
- return;
-
- spin_lock(&vq->mmu_lock);
- --vq->invalidate_count;
- spin_unlock(&vq->mmu_lock);
-}
-
-static int vhost_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct vhost_dev *dev = container_of(mn, struct vhost_dev,
- mmu_notifier);
- int i, j;
-
- if (!mmu_notifier_range_blockable(range))
- return -EAGAIN;
-
- for (i = 0; i < dev->nvqs; i++) {
- struct vhost_virtqueue *vq = dev->vqs[i];
-
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- vhost_invalidate_vq_start(vq, j,
- range->start,
- range->end);
- }
-
- return 0;
-}
-
-static void vhost_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct vhost_dev *dev = container_of(mn, struct vhost_dev,
- mmu_notifier);
- int i, j;
-
- for (i = 0; i < dev->nvqs; i++) {
- struct vhost_virtqueue *vq = dev->vqs[i];
-
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- vhost_invalidate_vq_end(vq, j,
- range->start,
- range->end);
- }
-}
-
-static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
- .invalidate_range_start = vhost_invalidate_range_start,
- .invalidate_range_end = vhost_invalidate_range_end,
-};
-
-static void vhost_init_maps(struct vhost_dev *dev)
-{
- struct vhost_virtqueue *vq;
- int i, j;
-
- dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
-
- for (i = 0; i < dev->nvqs; ++i) {
- vq = dev->vqs[i];
- for (j = 0; j < VHOST_NUM_ADDRS; j++)
- RCU_INIT_POINTER(vq->maps[j], NULL);
- }
-}
-#endif
-
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->busyloop_timeout = 0;
vq->umem = NULL;
vq->iotlb = NULL;
- vq->invalidate_count = 0;
__vhost_vq_meta_reset(vq);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_reset_vq_maps(vq);
-#endif
}
static int vhost_worker(void *data)
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
INIT_LIST_HEAD(&dev->read_list);
INIT_LIST_HEAD(&dev->pending_list);
spin_lock_init(&dev->iotlb_lock);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_init_maps(dev);
-#endif
+
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
vq->heads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
- spin_lock_init(&vq->mmu_lock);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
if (err)
goto err_cgroup;
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
- if (err)
- goto err_mmu_notifier;
-#endif
-
return 0;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-err_mmu_notifier:
- vhost_dev_free_iovecs(dev);
-#endif
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
- int index, unsigned long uaddr,
- size_t size, bool write)
-{
- struct vhost_uaddr *addr = &vq->uaddrs[index];
-
- addr->uaddr = uaddr;
- addr->size = size;
- addr->write = write;
-}
-
-static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
-{
- vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
- (unsigned long)vq->desc,
- vhost_get_desc_size(vq, vq->num),
- false);
- vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
- (unsigned long)vq->avail,
- vhost_get_avail_size(vq, vq->num),
- false);
- vhost_setup_uaddr(vq, VHOST_ADDR_USED,
- (unsigned long)vq->used,
- vhost_get_used_size(vq, vq->num),
- true);
-}
-
-static int vhost_map_prefetch(struct vhost_virtqueue *vq,
- int index)
-{
- struct vhost_map *map;
- struct vhost_uaddr *uaddr = &vq->uaddrs[index];
- struct page **pages;
- int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
- int npinned;
- void *vaddr, *v;
- int err;
- int i;
-
- spin_lock(&vq->mmu_lock);
-
- err = -EFAULT;
- if (vq->invalidate_count)
- goto err;
-
- err = -ENOMEM;
- map = kmalloc(sizeof(*map), GFP_ATOMIC);
- if (!map)
- goto err;
-
- pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
- if (!pages)
- goto err_pages;
-
- err = EFAULT;
- npinned = __get_user_pages_fast(uaddr->uaddr, npages,
- uaddr->write, pages);
- if (npinned > 0)
- release_pages(pages, npinned);
- if (npinned != npages)
- goto err_gup;
-
- for (i = 0; i < npinned; i++)
- if (PageHighMem(pages[i]))
- goto err_gup;
-
- vaddr = v = page_address(pages[0]);
-
- /* For simplicity, fallback to userspace address if VA is not
- * contigious.
- */
- for (i = 1; i < npinned; i++) {
- v += PAGE_SIZE;
- if (v != page_address(pages[i]))
- goto err_gup;
- }
-
- map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
- map->npages = npages;
- map->pages = pages;
-
- rcu_assign_pointer(vq->maps[index], map);
- /* No need for a synchronize_rcu(). This function should be
- * called by dev->worker so we are serialized with all
- * readers.
- */
- spin_unlock(&vq->mmu_lock);
-
- return 0;
-
-err_gup:
- kfree(pages);
-err_pages:
- kfree(map);
-err:
- spin_unlock(&vq->mmu_lock);
- return err;
-}
-#endif
-
void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
kthread_stop(dev->worker);
dev->worker = NULL;
}
- if (dev->mm) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
-#endif
+ if (dev->mm)
mmput(dev->mm);
- }
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- for (i = 0; i < dev->nvqs; i++)
- vhost_uninit_vq_maps(dev->vqs[i]);
-#endif
dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- *((__virtio16 *)&used->ring[vq->num]) =
- cpu_to_vhost16(vq, vq->avail_idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
vhost_avail_event(vq));
}
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
- size_t size;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- size = count * sizeof(*head);
- memcpy(used->ring + idx, head, size);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_copy_to_user(vq, vq->used->ring + idx, head,
count * sizeof(*head));
}
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- used->flags = cpu_to_vhost16(vq, vq->used_flags);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
&vq->used->flags);
}
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
&vq->used->idx);
}
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *idx = avail->idx;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *idx, &vq->avail->idx);
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
__virtio16 *head, int idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *head = avail->ring[idx & (vq->num - 1)];
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *head,
&vq->avail->ring[idx & (vq->num - 1)]);
}
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
__virtio16 *flags)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *flags = avail->flags;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *flags, &vq->avail->flags);
}
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
__virtio16 *event)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_avail *avail;
-
- if (!vq->iotlb) {
- rcu_read_lock();
- map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
- if (likely(map)) {
- avail = map->addr;
- *event = (__virtio16)avail->ring[vq->num];
- rcu_read_unlock();
- return 0;
- }
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_avail(vq, *event, vhost_used_event(vq));
}
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_used *used;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
- if (likely(map)) {
- used = map->addr;
- *idx = used->idx;
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_get_used(vq, *idx, &vq->used->idx);
}
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
struct vring_desc *desc, int idx)
{
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- struct vhost_map *map;
- struct vring_desc *d;
-
- if (!vq->iotlb) {
- rcu_read_lock();
-
- map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
- if (likely(map)) {
- d = map->addr;
- *desc = *(d + idx);
- rcu_read_unlock();
- return 0;
- }
-
- rcu_read_unlock();
- }
-#endif
-
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
return true;
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
-{
- struct vhost_map __rcu *map;
- int i;
-
- for (i = 0; i < VHOST_NUM_ADDRS; i++) {
- rcu_read_lock();
- map = rcu_dereference(vq->maps[i]);
- rcu_read_unlock();
- if (unlikely(!map))
- vhost_map_prefetch(vq, i);
- }
-}
-#endif
-
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
unsigned int num = vq->num;
- if (!vq->iotlb) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_vq_map_prefetch(vq);
-#endif
+ if (!vq->iotlb)
return 1;
- }
return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
mutex_lock(&vq->mutex);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- /* Unregister MMU notifer to allow invalidation callback
- * can access vq->uaddrs[] without holding a lock.
- */
- if (d->mm)
- mmu_notifier_unregister(&d->mmu_notifier, d->mm);
-
- vhost_uninit_vq_maps(vq);
-#endif
-
switch (ioctl) {
case VHOST_SET_VRING_NUM:
r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
BUG();
}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- vhost_setup_vq_uaddr(vq);
-
- if (d->mm)
- mmu_notifier_register(&d->mmu_notifier, d->mm);
-#endif
-
mutex_unlock(&vq->mutex);
return r;
@@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
/* If this is an input descriptor, increment that count. */
if (access == VHOST_ACCESS_WO) {
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
@@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..e9ed2722b633 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,9 +12,6 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-#include <linux/pagemap.h>
-#include <linux/mmu_notifier.h>
-#include <asm/cacheflush.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};
-struct vhost_map {
- int npages;
- void *addr;
- struct page **pages;
-};
-
-struct vhost_uaddr {
- unsigned long uaddr;
- size_t size;
- bool write;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
-#else
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#endif
-
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
- /* Read by memory accessors, modified by meta data
- * prefetching, MMU notifier and vring ioctl().
- * Synchonrized through mmu_lock (writers) and RCU (writers
- * and readers).
- */
- struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
- /* Read by MMU notifier, modified by vring ioctl(),
- * synchronized through MMU notifier
- * registering/unregistering.
- */
- struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
-#endif
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
-
struct file *kick;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
bool user_be;
#endif
u32 busyloop_timeout;
- spinlock_t mmu_lock;
- int invalidate_count;
};
struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {
struct vhost_dev {
struct mm_struct *mm;
-#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier mmu_notifier;
-#endif
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 6b2de93bd302..5f83cd715387 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1924,20 +1924,6 @@ config FB_S3C2410_DEBUG
Turn on debugging messages. Note that you can set/unset at run time
through sysfs
-config FB_NUC900
- tristate "NUC900 LCD framebuffer support"
- depends on FB && ARCH_W90X900
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- ---help---
- Frame buffer driver for the built-in LCD controller in the Nuvoton
- NUC900 processor
-
-config GPM1040A0_320X240
- bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
- depends on FB_NUC900
-
config FB_SM501
tristate "Silicon Motion SM501 framebuffer support"
depends on FB && MFD_SM501
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 7dc4861a93e6..aab7155884ea 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -116,7 +116,6 @@ obj-y += omap2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
-obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
case 'M':
case 'm':
size *= 1024;
+ /* Fall through */
case 'K':
case 'k':
size *= 1024;
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index fc9dfb0a95af..51f5d1c56fd9 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -763,17 +763,17 @@ static void tt_get_par(struct atafb_par *par)
{
unsigned long addr;
par->hw.tt.mode = shifter_tt.tt_shiftmode;
- par->hw.tt.sync = shifter.syncmode;
- addr = ((shifter.bas_hi & 0xff) << 16) |
- ((shifter.bas_md & 0xff) << 8) |
- ((shifter.bas_lo & 0xff));
+ par->hw.tt.sync = shifter_st.syncmode;
+ addr = ((shifter_st.bas_hi & 0xff) << 16) |
+ ((shifter_st.bas_md & 0xff) << 8) |
+ ((shifter_st.bas_lo & 0xff));
par->screen_base = atari_stram_to_virt(addr);
}
static void tt_set_par(struct atafb_par *par)
{
shifter_tt.tt_shiftmode = par->hw.tt.mode;
- shifter.syncmode = par->hw.tt.sync;
+ shifter_st.syncmode = par->hw.tt.sync;
/* only set screen_base if really necessary */
if (current_par.screen_base != par->screen_base)
fbhw->set_screen_base(par->screen_base);
@@ -1543,7 +1543,7 @@ static void falcon_get_par(struct atafb_par *par)
hw->f_shift = videl.f_shift;
hw->vid_control = videl.control;
hw->vid_mode = videl.mode;
- hw->sync = shifter.syncmode & 0x1;
+ hw->sync = shifter_st.syncmode & 0x1;
hw->xoffset = videl.xoffset & 0xf;
hw->hht = videl.hht;
hw->hbb = videl.hbb;
@@ -1558,9 +1558,9 @@ static void falcon_get_par(struct atafb_par *par)
hw->vde = videl.vde;
hw->vss = videl.vss;
- addr = (shifter.bas_hi & 0xff) << 16 |
- (shifter.bas_md & 0xff) << 8 |
- (shifter.bas_lo & 0xff);
+ addr = (shifter_st.bas_hi & 0xff) << 16 |
+ (shifter_st.bas_md & 0xff) << 8 |
+ (shifter_st.bas_lo & 0xff);
par->screen_base = atari_stram_to_virt(addr);
/* derived parameters */
@@ -1605,7 +1605,7 @@ static irqreturn_t falcon_vbl_switcher(int irq, void *dummy)
/* Turn off external clocks. Read sets all output bits to 1. */
*(volatile unsigned short *)0xffff9202;
}
- shifter.syncmode = hw->sync;
+ shifter_st.syncmode = hw->sync;
videl.hht = hw->hht;
videl.hbb = hw->hbb;
@@ -1952,18 +1952,18 @@ static void stste_get_par(struct atafb_par *par)
{
unsigned long addr;
par->hw.st.mode = shifter_tt.st_shiftmode;
- par->hw.st.sync = shifter.syncmode;
- addr = ((shifter.bas_hi & 0xff) << 16) |
- ((shifter.bas_md & 0xff) << 8);
+ par->hw.st.sync = shifter_st.syncmode;
+ addr = ((shifter_st.bas_hi & 0xff) << 16) |
+ ((shifter_st.bas_md & 0xff) << 8);
if (ATARIHW_PRESENT(EXTD_SHIFTER))
- addr |= (shifter.bas_lo & 0xff);
+ addr |= (shifter_st.bas_lo & 0xff);
par->screen_base = atari_stram_to_virt(addr);
}
static void stste_set_par(struct atafb_par *par)
{
shifter_tt.st_shiftmode = par->hw.st.mode;
- shifter.syncmode = par->hw.st.sync;
+ shifter_st.syncmode = par->hw.st.sync;
/* only set screen_base if really necessary */
if (current_par.screen_base != par->screen_base)
fbhw->set_screen_base(par->screen_base);
@@ -2018,10 +2018,10 @@ static void stste_set_screen_base(void *s_base)
unsigned long addr;
addr = atari_stram_to_phys(s_base);
/* Setup Screen Memory */
- shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
- shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
+ shifter_st.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
+ shifter_st.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
if (ATARIHW_PRESENT(EXTD_SHIFTER))
- shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
+ shifter_st.bas_lo = (unsigned char)(addr & 0x0000ff);
}
#endif /* ATAFB_STE */
@@ -2265,9 +2265,9 @@ static void set_screen_base(void *s_base)
addr = atari_stram_to_phys(s_base);
/* Setup Screen Memory */
- shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
- shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
- shifter.bas_lo = (unsigned char)(addr & 0x0000ff);
+ shifter_st.bas_hi = (unsigned char)((addr & 0xff0000) >> 16);
+ shifter_st.bas_md = (unsigned char)((addr & 0x00ff00) >> 8);
+ shifter_st.bas_lo = (unsigned char)(addr & 0x0000ff);
}
static int pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index b1cf248f3291..2d3dcc52fcf3 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/console.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -164,7 +165,7 @@ struct da8xx_fb_par {
struct notifier_block freq_transition;
#endif
unsigned int lcdc_clk_rate;
- void (*panel_power_ctrl)(int);
+ struct regulator *lcd_supply;
u32 pseudo_palette[16];
struct fb_videomode mode;
struct lcd_ctrl_config cfg;
@@ -1066,33 +1067,30 @@ static void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
static int fb_remove(struct platform_device *dev)
{
struct fb_info *info = dev_get_drvdata(&dev->dev);
-
- if (info) {
- struct da8xx_fb_par *par = info->par;
+ struct da8xx_fb_par *par = info->par;
+ int ret;
#ifdef CONFIG_CPU_FREQ
- lcd_da8xx_cpufreq_deregister(par);
+ lcd_da8xx_cpufreq_deregister(par);
#endif
- if (par->panel_power_ctrl)
- par->panel_power_ctrl(0);
+ if (par->lcd_supply) {
+ ret = regulator_disable(par->lcd_supply);
+ if (ret)
+ return ret;
+ }
- lcd_disable_raster(DA8XX_FRAME_WAIT);
- lcdc_write(0, LCD_RASTER_CTRL_REG);
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
+ lcdc_write(0, LCD_RASTER_CTRL_REG);
- /* disable DMA */
- lcdc_write(0, LCD_DMA_CTRL_REG);
+ /* disable DMA */
+ lcdc_write(0, LCD_DMA_CTRL_REG);
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info->cmap);
- dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
- par->p_palette_base);
- dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
- par->vram_phys);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);
- framebuffer_release(info);
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ pm_runtime_put_sync(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ framebuffer_release(info);
- }
return 0;
}
@@ -1179,15 +1177,21 @@ static int cfb_blank(int blank, struct fb_info *info)
case FB_BLANK_UNBLANK:
lcd_enable_raster();
- if (par->panel_power_ctrl)
- par->panel_power_ctrl(1);
+ if (par->lcd_supply) {
+ ret = regulator_enable(par->lcd_supply);
+ if (ret)
+ return ret;
+ }
break;
case FB_BLANK_NORMAL:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
- if (par->panel_power_ctrl)
- par->panel_power_ctrl(0);
+ if (par->lcd_supply) {
+ ret = regulator_disable(par->lcd_supply);
+ if (ret)
+ return ret;
+ }
lcd_disable_raster(DA8XX_FRAME_WAIT);
break;
@@ -1328,7 +1332,6 @@ static int fb_probe(struct platform_device *device)
{
struct da8xx_lcdc_platform_data *fb_pdata =
dev_get_platdata(&device->dev);
- struct resource *lcdc_regs;
struct lcd_ctrl_config *lcd_cfg;
struct fb_videomode *lcdc_info;
struct fb_info *da8xx_fb_info;
@@ -1346,8 +1349,7 @@ static int fb_probe(struct platform_device *device)
if (lcdc_info == NULL)
return -ENODEV;
- lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0);
- da8xx_fb_reg_base = devm_ioremap_resource(&device->dev, lcdc_regs);
+ da8xx_fb_reg_base = devm_platform_ioremap_resource(device, 0);
if (IS_ERR(da8xx_fb_reg_base))
return PTR_ERR(da8xx_fb_reg_base);
@@ -1395,9 +1397,19 @@ static int fb_probe(struct platform_device *device)
par->dev = &device->dev;
par->lcdc_clk = tmp_lcdc_clk;
par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
- if (fb_pdata->panel_power_ctrl) {
- par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
- par->panel_power_ctrl(1);
+
+ par->lcd_supply = devm_regulator_get_optional(&device->dev, "lcd");
+ if (IS_ERR(par->lcd_supply)) {
+ if (PTR_ERR(par->lcd_supply) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_pm_runtime_disable;
+ }
+
+ par->lcd_supply = NULL;
+ } else {
+ ret = regulator_enable(par->lcd_supply);
+ if (ret)
+ goto err_pm_runtime_disable;
}
fb_videomode_to_var(&da8xx_fb_var, lcdc_info);
@@ -1411,10 +1423,10 @@ static int fb_probe(struct platform_device *device)
par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
- par->vram_virt = dma_alloc_coherent(par->dev,
- par->vram_size,
- &par->vram_phys,
- GFP_KERNEL | GFP_DMA);
+ par->vram_virt = dmam_alloc_coherent(par->dev,
+ par->vram_size,
+ &par->vram_phys,
+ GFP_KERNEL | GFP_DMA);
if (!par->vram_virt) {
dev_err(&device->dev,
"GLCD: kmalloc for frame buffer failed\n");
@@ -1432,20 +1444,20 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_alloc_coherent(par->dev, PALETTE_SIZE,
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
+ par->v_palette_base = dmam_alloc_coherent(par->dev, PALETTE_SIZE,
+ &par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
"GLCD: kmalloc for palette buffer failed\n");
ret = -EINVAL;
- goto err_release_fb_mem;
+ goto err_release_fb;
}
par->irq = platform_get_irq(device, 0);
if (par->irq < 0) {
ret = -ENOENT;
- goto err_release_pl_mem;
+ goto err_release_fb;
}
da8xx_fb_var.grayscale =
@@ -1463,7 +1475,7 @@ static int fb_probe(struct platform_device *device)
ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
if (ret)
- goto err_release_pl_mem;
+ goto err_release_fb;
da8xx_fb_info->cmap.len = par->palette_sz;
/* initialize var_screeninfo */
@@ -1517,14 +1529,6 @@ err_cpu_freq:
err_dealloc_cmap:
fb_dealloc_cmap(&da8xx_fb_info->cmap);
-err_release_pl_mem:
- dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
- par->p_palette_base);
-
-err_release_fb_mem:
- dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
- par->vram_phys);
-
err_release_fb:
framebuffer_release(da8xx_fb_info);
@@ -1603,10 +1607,14 @@ static int fb_suspend(struct device *dev)
{
struct fb_info *info = dev_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
+ int ret;
console_lock();
- if (par->panel_power_ctrl)
- par->panel_power_ctrl(0);
+ if (par->lcd_supply) {
+ ret = regulator_disable(par->lcd_supply);
+ if (ret)
+ return ret;
+ }
fb_set_suspend(info, 1);
lcd_disable_raster(DA8XX_FRAME_WAIT);
@@ -1620,6 +1628,7 @@ static int fb_resume(struct device *dev)
{
struct fb_info *info = dev_get_drvdata(dev);
struct da8xx_fb_par *par = info->par;
+ int ret;
console_lock();
pm_runtime_get_sync(dev);
@@ -1627,8 +1636,11 @@ static int fb_resume(struct device *dev)
if (par->blank == FB_BLANK_UNBLANK) {
lcd_enable_raster();
- if (par->panel_power_ctrl)
- par->panel_power_ctrl(1);
+ if (par->lcd_supply) {
+ ret = regulator_enable(par->lcd_supply);
+ if (ret)
+ return ret;
+ }
}
fb_set_suspend(info, 0);
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
deleted file mode 100644
index 4fd851598584..000000000000
--- a/drivers/video/fbdev/nuc900fb.c
+++ /dev/null
@@ -1,760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *
- * Copyright (c) 2009 Nuvoton technology corporation
- * All rights reserved.
- *
- * Description:
- * Nuvoton LCD Controller Driver
- * Author:
- * Wang Qiang (rurality.linux@gmail.com) 2009/12/11
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-#include <linux/pm.h>
-#include <linux/device.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-#include <mach/regs-ldm.h>
-#include <linux/platform_data/video-nuc900fb.h>
-
-#include "nuc900fb.h"
-
-
-/*
- * Initialize the nuc900 video (dual) buffer address
- */
-static void nuc900fb_set_lcdaddr(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
- unsigned long vbaddr1, vbaddr2;
-
- vbaddr1 = info->fix.smem_start;
- vbaddr2 = info->fix.smem_start;
- vbaddr2 += info->fix.line_length * info->var.yres;
-
- /* set frambuffer start phy addr*/
- writel(vbaddr1, regs + REG_LCM_VA_BADDR0);
- writel(vbaddr2, regs + REG_LCM_VA_BADDR1);
-
- writel(fbi->regs.lcd_va_fbctrl, regs + REG_LCM_VA_FBCTRL);
- writel(fbi->regs.lcd_va_scale, regs + REG_LCM_VA_SCALE);
-}
-
-/*
- * calculate divider for lcd div
- */
-static unsigned int nuc900fb_calc_pixclk(struct nuc900fb_info *fbi,
- unsigned long pixclk)
-{
- unsigned long clk = fbi->clk_rate;
- unsigned long long div;
-
- /* pixclk is in picseconds. our clock is in Hz*/
- /* div = (clk * pixclk)/10^12 */
- div = (unsigned long long)clk * pixclk;
- div >>= 12;
- do_div(div, 625 * 625UL * 625);
-
- dev_dbg(fbi->dev, "pixclk %ld, divisor is %lld\n", pixclk, div);
-
- return div;
-}
-
-/*
- * Check the video params of 'var'.
- */
-static int nuc900fb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
- struct nuc900fb_display *display = NULL;
- struct nuc900fb_display *default_display = mach_info->displays +
- mach_info->default_display;
- int i;
-
- dev_dbg(fbi->dev, "check_var(var=%p, info=%p)\n", var, info);
-
- /* validate x/y resolution */
- /* choose default mode if possible */
- if (var->xres == default_display->xres &&
- var->yres == default_display->yres &&
- var->bits_per_pixel == default_display->bpp)
- display = default_display;
- else
- for (i = 0; i < mach_info->num_displays; i++)
- if (var->xres == mach_info->displays[i].xres &&
- var->yres == mach_info->displays[i].yres &&
- var->bits_per_pixel == mach_info->displays[i].bpp) {
- display = mach_info->displays + i;
- break;
- }
-
- if (display == NULL) {
- printk(KERN_ERR "wrong resolution or depth %dx%d at %d bit per pixel\n",
- var->xres, var->yres, var->bits_per_pixel);
- return -EINVAL;
- }
-
- /* it should be the same size as the display */
- var->xres_virtual = display->xres;
- var->yres_virtual = display->yres;
- var->height = display->height;
- var->width = display->width;
-
- /* copy lcd settings */
- var->pixclock = display->pixclock;
- var->left_margin = display->left_margin;
- var->right_margin = display->right_margin;
- var->upper_margin = display->upper_margin;
- var->lower_margin = display->lower_margin;
- var->vsync_len = display->vsync_len;
- var->hsync_len = display->hsync_len;
-
- var->transp.offset = 0;
- var->transp.length = 0;
-
- fbi->regs.lcd_dccs = display->dccs;
- fbi->regs.lcd_device_ctrl = display->devctl;
- fbi->regs.lcd_va_fbctrl = display->fbctrl;
- fbi->regs.lcd_va_scale = display->scale;
-
- /* set R/G/B possions */
- switch (var->bits_per_pixel) {
- case 1:
- case 2:
- case 4:
- case 8:
- default:
- var->red.offset = 0;
- var->red.length = var->bits_per_pixel;
- var->green = var->red;
- var->blue = var->red;
- break;
- case 12:
- var->red.length = 4;
- var->green.length = 4;
- var->blue.length = 4;
- var->red.offset = 8;
- var->green.offset = 4;
- var->blue.offset = 0;
- break;
- case 16:
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- break;
- case 18:
- var->red.length = 6;
- var->green.length = 6;
- var->blue.length = 6;
- var->red.offset = 12;
- var->green.offset = 6;
- var->blue.offset = 0;
- break;
- case 32:
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- break;
- }
-
- return 0;
-}
-
-/*
- * Calculate lcd register values from var setting & save into hw
- */
-static void nuc900fb_calculate_lcd_regs(const struct fb_info *info,
- struct nuc900fb_hw *regs)
-{
- const struct fb_var_screeninfo *var = &info->var;
- int vtt = var->height + var->upper_margin + var->lower_margin;
- int htt = var->width + var->left_margin + var->right_margin;
- int hsync = var->width + var->right_margin;
- int vsync = var->height + var->lower_margin;
-
- regs->lcd_crtc_size = LCM_CRTC_SIZE_VTTVAL(vtt) |
- LCM_CRTC_SIZE_HTTVAL(htt);
- regs->lcd_crtc_dend = LCM_CRTC_DEND_VDENDVAL(var->height) |
- LCM_CRTC_DEND_HDENDVAL(var->width);
- regs->lcd_crtc_hr = LCM_CRTC_HR_EVAL(var->width + 5) |
- LCM_CRTC_HR_SVAL(var->width + 1);
- regs->lcd_crtc_hsync = LCM_CRTC_HSYNC_EVAL(hsync + var->hsync_len) |
- LCM_CRTC_HSYNC_SVAL(hsync);
- regs->lcd_crtc_vr = LCM_CRTC_VR_EVAL(vsync + var->vsync_len) |
- LCM_CRTC_VR_SVAL(vsync);
-
-}
-
-/*
- * Activate (set) the controller from the given framebuffer
- * information
- */
-static void nuc900fb_activate_var(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
- struct fb_var_screeninfo *var = &info->var;
- int clkdiv;
-
- clkdiv = nuc900fb_calc_pixclk(fbi, var->pixclock) - 1;
- if (clkdiv < 0)
- clkdiv = 0;
-
- nuc900fb_calculate_lcd_regs(info, &fbi->regs);
-
- /* set the new lcd registers*/
-
- dev_dbg(fbi->dev, "new lcd register set:\n");
- dev_dbg(fbi->dev, "dccs = 0x%08x\n", fbi->regs.lcd_dccs);
- dev_dbg(fbi->dev, "dev_ctl = 0x%08x\n", fbi->regs.lcd_device_ctrl);
- dev_dbg(fbi->dev, "crtc_size = 0x%08x\n", fbi->regs.lcd_crtc_size);
- dev_dbg(fbi->dev, "crtc_dend = 0x%08x\n", fbi->regs.lcd_crtc_dend);
- dev_dbg(fbi->dev, "crtc_hr = 0x%08x\n", fbi->regs.lcd_crtc_hr);
- dev_dbg(fbi->dev, "crtc_hsync = 0x%08x\n", fbi->regs.lcd_crtc_hsync);
- dev_dbg(fbi->dev, "crtc_vr = 0x%08x\n", fbi->regs.lcd_crtc_vr);
-
- writel(fbi->regs.lcd_device_ctrl, regs + REG_LCM_DEV_CTRL);
- writel(fbi->regs.lcd_crtc_size, regs + REG_LCM_CRTC_SIZE);
- writel(fbi->regs.lcd_crtc_dend, regs + REG_LCM_CRTC_DEND);
- writel(fbi->regs.lcd_crtc_hr, regs + REG_LCM_CRTC_HR);
- writel(fbi->regs.lcd_crtc_hsync, regs + REG_LCM_CRTC_HSYNC);
- writel(fbi->regs.lcd_crtc_vr, regs + REG_LCM_CRTC_VR);
-
- /* set lcd address pointers */
- nuc900fb_set_lcdaddr(info);
-
- writel(fbi->regs.lcd_dccs, regs + REG_LCM_DCCS);
-}
-
-/*
- * Alters the hardware state.
- *
- */
-static int nuc900fb_set_par(struct fb_info *info)
-{
- struct fb_var_screeninfo *var = &info->var;
-
- switch (var->bits_per_pixel) {
- case 32:
- case 24:
- case 18:
- case 16:
- case 12:
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- case 1:
- info->fix.visual = FB_VISUAL_MONO01;
- break;
- default:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- }
-
- info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
-
- /* activate this new configuration */
- nuc900fb_activate_var(info);
- return 0;
-}
-
-static inline unsigned int chan_to_field(unsigned int chan,
- struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
-
-static int nuc900fb_setcolreg(unsigned regno,
- unsigned red, unsigned green, unsigned blue,
- unsigned transp, struct fb_info *info)
-{
- unsigned int val;
-
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- /* true-colour, use pseuo-palette */
- if (regno < 16) {
- u32 *pal = info->pseudo_palette;
-
- val = chan_to_field(red, &info->var.red);
- val |= chan_to_field(green, &info->var.green);
- val |= chan_to_field(blue, &info->var.blue);
- pal[regno] = val;
- }
- break;
-
- default:
- return 1; /* unknown type */
- }
- return 0;
-}
-
-/**
- * nuc900fb_blank
- *
- */
-static int nuc900fb_blank(int blank_mode, struct fb_info *info)
-{
-
- return 0;
-}
-
-static struct fb_ops nuc900fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = nuc900fb_check_var,
- .fb_set_par = nuc900fb_set_par,
- .fb_blank = nuc900fb_blank,
- .fb_setcolreg = nuc900fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-
-static inline void modify_gpio(void __iomem *reg,
- unsigned long set, unsigned long mask)
-{
- unsigned long tmp;
- tmp = readl(reg) & ~mask;
- writel(tmp | set, reg);
-}
-
-/*
- * Initialise LCD-related registers
- */
-static int nuc900fb_init_registers(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
- void __iomem *regs = fbi->io;
-
- /*reset the display engine*/
- writel(0, regs + REG_LCM_DCCS);
- writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_ENG_RST,
- regs + REG_LCM_DCCS);
- ndelay(100);
- writel(readl(regs + REG_LCM_DCCS) & (~LCM_DCCS_ENG_RST),
- regs + REG_LCM_DCCS);
- ndelay(100);
-
- writel(0, regs + REG_LCM_DEV_CTRL);
-
- /* config gpio output */
- modify_gpio(W90X900_VA_GPIO + 0x54, mach_info->gpio_dir,
- mach_info->gpio_dir_mask);
- modify_gpio(W90X900_VA_GPIO + 0x58, mach_info->gpio_data,
- mach_info->gpio_data_mask);
-
- return 0;
-}
-
-
-/*
- * Alloc the SDRAM region of NUC900 for the frame buffer.
- * The buffer should be a non-cached, non-buffered, memory region
- * to allow palette and pixel writes without flushing the cache.
- */
-static int nuc900fb_map_video_memory(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- dma_addr_t map_dma;
- unsigned long map_size = PAGE_ALIGN(info->fix.smem_len);
-
- dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n",
- fbi, map_size);
-
- info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
- GFP_KERNEL);
-
- if (!info->screen_base)
- return -ENOMEM;
-
- memset(info->screen_base, 0x00, map_size);
- info->fix.smem_start = map_dma;
-
- return 0;
-}
-
-static inline void nuc900fb_unmap_video_memory(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
-}
-
-static irqreturn_t nuc900fb_irqhandler(int irq, void *dev_id)
-{
- struct nuc900fb_info *fbi = dev_id;
- void __iomem *regs = fbi->io;
- void __iomem *irq_base = fbi->irq_base;
- unsigned long lcdirq = readl(regs + REG_LCM_INT_CS);
-
- if (lcdirq & LCM_INT_CS_DISP_F_STATUS) {
- writel(readl(irq_base) | 1<<30, irq_base);
-
- /* wait VA_EN low */
- if ((readl(regs + REG_LCM_DCCS) &
- LCM_DCCS_SINGLE) == LCM_DCCS_SINGLE)
- while ((readl(regs + REG_LCM_DCCS) &
- LCM_DCCS_VA_EN) == LCM_DCCS_VA_EN)
- ;
- /* display_out-enable */
- writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_DISP_OUT_EN,
- regs + REG_LCM_DCCS);
- /* va-enable*/
- writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_VA_EN,
- regs + REG_LCM_DCCS);
- } else if (lcdirq & LCM_INT_CS_UNDERRUN_INT) {
- writel(readl(irq_base) | LCM_INT_CS_UNDERRUN_INT, irq_base);
- } else if (lcdirq & LCM_INT_CS_BUS_ERROR_INT) {
- writel(readl(irq_base) | LCM_INT_CS_BUS_ERROR_INT, irq_base);
- }
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_CPU_FREQ
-
-static int nuc900fb_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct nuc900fb_info *info;
- struct fb_info *fbinfo;
- long delta_f;
- info = container_of(nb, struct nuc900fb_info, freq_transition);
- fbinfo = dev_get_drvdata(info->dev);
-
- delta_f = info->clk_rate - clk_get_rate(info->clk);
-
- if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
- (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
- info->clk_rate = clk_get_rate(info->clk);
- nuc900fb_activate_var(fbinfo);
- }
-
- return 0;
-}
-
-static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi)
-{
- fbi->freq_transition.notifier_call = nuc900fb_cpufreq_transition;
- return cpufreq_register_notifier(&fbi->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *fbi)
-{
- cpufreq_unregister_notifier(&fbi->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-#else
-static inline int nuc900fb_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- return 0;
-}
-
-static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi)
-{
- return 0;
-}
-
-static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *info)
-{
-}
-#endif
-
-static char driver_name[] = "nuc900fb";
-
-static int nuc900fb_probe(struct platform_device *pdev)
-{
- struct nuc900fb_info *fbi;
- struct nuc900fb_display *display;
- struct fb_info *fbinfo;
- struct nuc900fb_mach_info *mach_info;
- struct resource *res;
- int ret;
- int irq;
- int i;
- int size;
-
- dev_dbg(&pdev->dev, "devinit\n");
- mach_info = dev_get_platdata(&pdev->dev);
- if (mach_info == NULL) {
- dev_err(&pdev->dev,
- "no platform data for lcd, cannot attach\n");
- return -EINVAL;
- }
-
- if (mach_info->default_display > mach_info->num_displays) {
- dev_err(&pdev->dev,
- "default display No. is %d but only %d displays \n",
- mach_info->default_display, mach_info->num_displays);
- return -EINVAL;
- }
-
-
- display = mach_info->displays + mach_info->default_display;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq for device\n");
- return -ENOENT;
- }
-
- fbinfo = framebuffer_alloc(sizeof(struct nuc900fb_info), &pdev->dev);
- if (!fbinfo)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, fbinfo);
-
- fbi = fbinfo->par;
- fbi->dev = &pdev->dev;
-
-#ifdef CONFIG_CPU_NUC950
- fbi->drv_type = LCDDRV_NUC950;
-#endif
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- size = resource_size(res);
- fbi->mem = request_mem_region(res->start, size, pdev->name);
- if (fbi->mem == NULL) {
- dev_err(&pdev->dev, "failed to alloc memory region\n");
- ret = -ENOENT;
- goto free_fb;
- }
-
- fbi->io = ioremap(res->start, size);
- if (fbi->io == NULL) {
- dev_err(&pdev->dev, "ioremap() of lcd registers failed\n");
- ret = -ENXIO;
- goto release_mem_region;
- }
-
- fbi->irq_base = fbi->io + REG_LCM_INT_CS;
-
-
- /* Stop the LCD */
- writel(0, fbi->io + REG_LCM_DCCS);
-
- /* fill the fbinfo*/
- strcpy(fbinfo->fix.id, driver_name);
- fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
- fbinfo->fix.type_aux = 0;
- fbinfo->fix.xpanstep = 0;
- fbinfo->fix.ypanstep = 0;
- fbinfo->fix.ywrapstep = 0;
- fbinfo->fix.accel = FB_ACCEL_NONE;
- fbinfo->var.nonstd = 0;
- fbinfo->var.activate = FB_ACTIVATE_NOW;
- fbinfo->var.accel_flags = 0;
- fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
- fbinfo->fbops = &nuc900fb_ops;
- fbinfo->flags = FBINFO_FLAG_DEFAULT;
- fbinfo->pseudo_palette = &fbi->pseudo_pal;
-
- ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi);
- if (ret) {
- dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n",
- irq, ret);
- ret = -EBUSY;
- goto release_regs;
- }
-
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n");
- ret = PTR_ERR(fbi->clk);
- goto release_irq;
- }
-
- clk_enable(fbi->clk);
- dev_dbg(&pdev->dev, "got and enabled clock\n");
-
- fbi->clk_rate = clk_get_rate(fbi->clk);
-
- /* calutate the video buffer size */
- for (i = 0; i < mach_info->num_displays; i++) {
- unsigned long smem_len = mach_info->displays[i].xres;
- smem_len *= mach_info->displays[i].yres;
- smem_len *= mach_info->displays[i].bpp;
- smem_len >>= 3;
- if (fbinfo->fix.smem_len < smem_len)
- fbinfo->fix.smem_len = smem_len;
- }
-
- /* Initialize Video Memory */
- ret = nuc900fb_map_video_memory(fbinfo);
- if (ret) {
- printk(KERN_ERR "Failed to allocate video RAM: %x\n", ret);
- goto release_clock;
- }
-
- dev_dbg(&pdev->dev, "got video memory\n");
-
- fbinfo->var.xres = display->xres;
- fbinfo->var.yres = display->yres;
- fbinfo->var.bits_per_pixel = display->bpp;
-
- nuc900fb_init_registers(fbinfo);
-
- nuc900fb_check_var(&fbinfo->var, fbinfo);
-
- ret = nuc900fb_cpufreq_register(fbi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register cpufreq\n");
- goto free_video_memory;
- }
-
- ret = register_framebuffer(fbinfo);
- if (ret) {
- printk(KERN_ERR "failed to register framebuffer device: %d\n",
- ret);
- goto free_cpufreq;
- }
-
- fb_info(fbinfo, "%s frame buffer device\n", fbinfo->fix.id);
-
- return 0;
-
-free_cpufreq:
- nuc900fb_cpufreq_deregister(fbi);
-free_video_memory:
- nuc900fb_unmap_video_memory(fbinfo);
-release_clock:
- clk_disable(fbi->clk);
- clk_put(fbi->clk);
-release_irq:
- free_irq(irq, fbi);
-release_regs:
- iounmap(fbi->io);
-release_mem_region:
- release_mem_region(res->start, size);
-free_fb:
- framebuffer_release(fbinfo);
- return ret;
-}
-
-/*
- * shutdown the lcd controller
- */
-static void nuc900fb_stop_lcd(struct fb_info *info)
-{
- struct nuc900fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
-
- writel((~LCM_DCCS_DISP_INT_EN) | (~LCM_DCCS_VA_EN) | (~LCM_DCCS_OSD_EN),
- regs + REG_LCM_DCCS);
-}
-
-/*
- * Cleanup
- */
-static int nuc900fb_remove(struct platform_device *pdev)
-{
- struct fb_info *fbinfo = platform_get_drvdata(pdev);
- struct nuc900fb_info *fbi = fbinfo->par;
- int irq;
-
- nuc900fb_stop_lcd(fbinfo);
- msleep(1);
-
- unregister_framebuffer(fbinfo);
- nuc900fb_cpufreq_deregister(fbi);
- nuc900fb_unmap_video_memory(fbinfo);
-
- iounmap(fbi->io);
-
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, fbi);
-
- release_resource(fbi->mem);
- kfree(fbi->mem);
-
- framebuffer_release(fbinfo);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-/*
- * suspend and resume support for the lcd controller
- */
-
-static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct fb_info *fbinfo = platform_get_drvdata(dev);
- struct nuc900fb_info *info = fbinfo->par;
-
- nuc900fb_stop_lcd(fbinfo);
- msleep(1);
- clk_disable(info->clk);
- return 0;
-}
-
-static int nuc900fb_resume(struct platform_device *dev)
-{
- struct fb_info *fbinfo = platform_get_drvdata(dev);
- struct nuc900fb_info *fbi = fbinfo->par;
-
- printk(KERN_INFO "nuc900fb resume\n");
-
- clk_enable(fbi->clk);
- msleep(1);
-
- nuc900fb_init_registers(fbinfo);
- nuc900fb_activate_var(fbinfo);
-
- return 0;
-}
-
-#else
-#define nuc900fb_suspend NULL
-#define nuc900fb_resume NULL
-#endif
-
-static struct platform_driver nuc900fb_driver = {
- .probe = nuc900fb_probe,
- .remove = nuc900fb_remove,
- .suspend = nuc900fb_suspend,
- .resume = nuc900fb_resume,
- .driver = {
- .name = "nuc900-lcd",
- },
-};
-
-module_platform_driver(nuc900fb_driver);
-
-MODULE_DESCRIPTION("Framebuffer driver for the NUC900");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/nuc900fb.h b/drivers/video/fbdev/nuc900fb.h
deleted file mode 100644
index 055ae9297931..000000000000
--- a/drivers/video/fbdev/nuc900fb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Copyright (c) 2009 Nuvoton technology corporation
- * All rights reserved.
- *
- * Author:
- * Wang Qiang(rurality.linux@gmail.com) 2009/12/16
- */
-
-#ifndef __NUC900FB_H
-#define __NUC900FB_H
-
-#include <mach/map.h>
-#include <linux/platform_data/video-nuc900fb.h>
-
-enum nuc900_lcddrv_type {
- LCDDRV_NUC910,
- LCDDRV_NUC930,
- LCDDRV_NUC932,
- LCDDRV_NUC950,
- LCDDRV_NUC960,
-};
-
-
-#define PALETTE_BUFFER_SIZE 256
-#define PALETTE_BUFF_CLEAR (0x80000000) /* entry is clear/invalid */
-
-struct nuc900fb_info {
- struct device *dev;
- struct clk *clk;
-
- struct resource *mem;
- void __iomem *io;
- void __iomem *irq_base;
- int drv_type;
- struct nuc900fb_hw regs;
- unsigned long clk_rate;
-
-#ifdef CONFIG_CPU_FREQ
- struct notifier_block freq_transition;
-#endif
-
- /* keep these registers in case we need to re-write palette */
- u32 palette_buffer[PALETTE_BUFFER_SIZE];
- u32 pseudo_pal[16];
-};
-
-int nuc900fb_init(void);
-
-#endif /* __NUC900FB_H */
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 90eca64e3144..702cca59bda1 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -447,6 +447,7 @@ static int set_color_mode(struct omapfb_plane_struct *plane,
return 0;
case 12:
var->bits_per_pixel = 16;
+ /* fall through */
case 16:
if (plane->fbdev->panel->bpp == 12)
plane->color_mode = OMAPFB_COLOR_RGB444;
@@ -1534,20 +1535,27 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
case OMAPFB_ACTIVE:
for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
unregister_framebuffer(fbdev->fb_info[i]);
+ /* fall through */
case 7:
omapfb_unregister_sysfs(fbdev);
+ /* fall through */
case 6:
if (fbdev->panel->disable)
fbdev->panel->disable(fbdev->panel);
+ /* fall through */
case 5:
omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
+ /* fall through */
case 4:
planes_cleanup(fbdev);
+ /* fall through */
case 3:
ctrl_cleanup(fbdev);
+ /* fall through */
case 2:
if (fbdev->panel->cleanup)
fbdev->panel->cleanup(fbdev->panel);
+ /* fall through */
case 1:
dev_set_drvdata(fbdev->dev, NULL);
kfree(fbdev);
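
The case labels in this teardown switch have always fallen through deliberately; the added /* fall through */ comments simply mark that intent so compilers that warn on implicit fall-through (such as GCC with -Wimplicit-fallthrough, which can recognise these comments) do not flag the code. A small stand-alone illustration of the pattern, in which MODE_RESET, MODE_INIT, reset_hw() and init_hw() are hypothetical names made up for the example:

	/* Hypothetical example; none of these names come from the patch. */
	switch (mode) {
	case MODE_RESET:
		reset_hw(dev);		/* a reset also requires re-initialisation */
		/* fall through */
	case MODE_INIT:
		init_hw(dev);
		break;
	default:
		return -EINVAL;
	}

Later kernels also provide a fallthrough; statement macro for the same purpose, but the comment form is what these hunks use.
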
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 4282cb117b92..f70c9f79622e 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1678,24 +1678,6 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
}
return 0;
}
-
-static int
-pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
-{
- struct pxafb_info *fbi = TO_INF(nb, freq_policy);
- struct fb_var_screeninfo *var = &fbi->fb.var;
- struct cpufreq_policy *policy = data;
-
- switch (val) {
- case CPUFREQ_ADJUST:
- pr_debug("min dma period: %d ps, "
- "new clock %d kHz\n", pxafb_display_dma_period(var),
- policy->max);
- /* TODO: fill in min/max values */
- break;
- }
- return 0;
-}
#endif
#ifdef CONFIG_PM
@@ -2400,11 +2382,8 @@ static int pxafb_probe(struct platform_device *dev)
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = pxafb_freq_transition;
- fbi->freq_policy.notifier_call = pxafb_freq_policy;
cpufreq_register_notifier(&fbi->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_register_notifier(&fbi->freq_policy,
- CPUFREQ_POLICY_NOTIFIER);
#endif
/*
diff --git a/drivers/video/fbdev/pxafb.h b/drivers/video/fbdev/pxafb.h
index b641289c8a99..86b1e9ab1a38 100644
--- a/drivers/video/fbdev/pxafb.h
+++ b/drivers/video/fbdev/pxafb.h
@@ -162,7 +162,6 @@ struct pxafb_info {
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
- struct notifier_block freq_policy;
#endif
struct regulator *lcd_supply;
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index f7f8dee044b1..ae2bcfee338a 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1005,31 +1005,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
}
return 0;
}
-
-static int
-sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct sa1100fb_info *fbi = TO_INF(nb, freq_policy);
- struct cpufreq_policy *policy = data;
-
- switch (val) {
- case CPUFREQ_ADJUST:
- dev_dbg(fbi->dev, "min dma period: %d ps, "
- "new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
- policy->max);
- /* todo: fill in min/max values */
- break;
- case CPUFREQ_NOTIFY:
- do {} while(0);
- /* todo: panic if min/max values aren't fulfilled
- * [can't really happen unless there's a bug in the
- * CPU policy verififcation process *
- */
- break;
- }
- return 0;
-}
#endif
#ifdef CONFIG_PM
@@ -1242,9 +1217,7 @@ static int sa1100fb_probe(struct platform_device *pdev)
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
- fbi->freq_policy.notifier_call = sa1100fb_freq_policy;
cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
#endif
/* This driver cannot be unloaded at the moment */
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index 7a1a9ca33cec..d0aa33b0b88a 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -64,7 +64,6 @@ struct sa1100fb_info {
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
- struct notifier_block freq_policy;
#endif
const struct sa1100fb_mach_info *inf;
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 6edb4492e675..3dd1b1d76e98 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1271,6 +1271,14 @@ static ssize_t sm501fb_debug_show_pnl(struct device *dev,
static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL);
+static struct attribute *sm501fb_attrs[] = {
+ &dev_attr_crt_src.attr,
+ &dev_attr_fbregs_pnl.attr,
+ &dev_attr_fbregs_crt.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(sm501fb);
+
/* acceleration operations */
static int sm501fb_sync(struct fb_info *info)
{
@@ -2011,33 +2019,9 @@ static int sm501fb_probe(struct platform_device *pdev)
goto err_started_crt;
}
- /* create device files */
-
- ret = device_create_file(dev, &dev_attr_crt_src);
- if (ret)
- goto err_started_panel;
-
- ret = device_create_file(dev, &dev_attr_fbregs_pnl);
- if (ret)
- goto err_attached_crtsrc_file;
-
- ret = device_create_file(dev, &dev_attr_fbregs_crt);
- if (ret)
- goto err_attached_pnlregs_file;
-
/* we registered, return ok */
return 0;
-err_attached_pnlregs_file:
- device_remove_file(dev, &dev_attr_fbregs_pnl);
-
-err_attached_crtsrc_file:
- device_remove_file(dev, &dev_attr_crt_src);
-
-err_started_panel:
- unregister_framebuffer(info->fb[HEAD_PANEL]);
- sm501_free_init_fb(info, HEAD_PANEL);
-
err_started_crt:
unregister_framebuffer(info->fb[HEAD_CRT]);
sm501_free_init_fb(info, HEAD_CRT);
@@ -2067,10 +2051,6 @@ static int sm501fb_remove(struct platform_device *pdev)
struct fb_info *fbinfo_crt = info->fb[0];
struct fb_info *fbinfo_pnl = info->fb[1];
- device_remove_file(&pdev->dev, &dev_attr_fbregs_crt);
- device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl);
- device_remove_file(&pdev->dev, &dev_attr_crt_src);
-
sm501_free_init_fb(info, HEAD_CRT);
sm501_free_init_fb(info, HEAD_PANEL);
@@ -2234,6 +2214,7 @@ static struct platform_driver sm501fb_driver = {
.resume = sm501fb_resume,
.driver = {
.name = "sm501-fb",
+ .dev_groups = sm501fb_groups,
},
};
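
This hunk (and the w100fb and wm8505fb hunks below) converts hand-rolled device_create_file()/device_remove_file() calls into a static attribute group wired up through the driver's .dev_groups pointer, so the driver core creates and removes the sysfs files itself and several error-unwind labels disappear. The shape of the pattern, condensed into a hypothetical "foofb" driver (the attribute, driver and callback names are made up, and the show/store handlers are omitted):

	static DEVICE_ATTR_RW(contrast);	/* contrast_show()/contrast_store() omitted */

	static struct attribute *foofb_attrs[] = {
		&dev_attr_contrast.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(foofb);		/* generates foofb_groups */

	static struct platform_driver foofb_driver = {
		.probe	= foofb_probe,		/* no device_create_file() calls needed */
		.remove	= foofb_remove,
		.driver	= {
			.name		= "foofb",
			.dev_groups	= foofb_groups,
		},
	};
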
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index 597ffaa13cd2..3be07807edcd 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -164,6 +164,15 @@ static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *att
static DEVICE_ATTR_RW(fastpllclk);
+static struct attribute *w100fb_attrs[] = {
+ &dev_attr_fastpllclk.attr,
+ &dev_attr_reg_read.attr,
+ &dev_attr_reg_write.attr,
+ &dev_attr_flip.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(w100fb);
+
/*
* Some touchscreens need hsync information from the video driver to
* function correctly. We export it here.
@@ -752,14 +761,6 @@ int w100fb_probe(struct platform_device *pdev)
goto out;
}
- err = device_create_file(&pdev->dev, &dev_attr_fastpllclk);
- err |= device_create_file(&pdev->dev, &dev_attr_reg_read);
- err |= device_create_file(&pdev->dev, &dev_attr_reg_write);
- err |= device_create_file(&pdev->dev, &dev_attr_flip);
-
- if (err != 0)
- fb_warn(info, "failed to register attributes (%d)\n", err);
-
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
out:
@@ -784,11 +785,6 @@ static int w100fb_remove(struct platform_device *pdev)
struct fb_info *info = platform_get_drvdata(pdev);
struct w100fb_par *par=info->par;
- device_remove_file(&pdev->dev, &dev_attr_fastpllclk);
- device_remove_file(&pdev->dev, &dev_attr_reg_read);
- device_remove_file(&pdev->dev, &dev_attr_reg_write);
- device_remove_file(&pdev->dev, &dev_attr_flip);
-
unregister_framebuffer(info);
vfree(par->saved_intmem);
@@ -1625,6 +1621,7 @@ static struct platform_driver w100fb_driver = {
.resume = w100fb_resume,
.driver = {
.name = "w100fb",
+ .dev_groups = w100fb_groups,
},
};
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index ff752635a31c..17c780315ca5 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -176,6 +176,12 @@ static ssize_t contrast_store(struct device *dev,
static DEVICE_ATTR_RW(contrast);
+static struct attribute *wm8505fb_attrs[] = {
+ &dev_attr_contrast.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wm8505fb);
+
static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
{
chan &= 0xffff;
@@ -361,10 +367,6 @@ static int wm8505fb_probe(struct platform_device *pdev)
return ret;
}
- ret = device_create_file(&pdev->dev, &dev_attr_contrast);
- if (ret < 0)
- fb_warn(&fbi->fb, "failed to register attributes (%d)\n", ret);
-
fb_info(&fbi->fb, "%s frame buffer at 0x%lx-0x%lx\n",
fbi->fb.fix.id, fbi->fb.fix.smem_start,
fbi->fb.fix.smem_start + fbi->fb.fix.smem_len - 1);
@@ -376,8 +378,6 @@ static int wm8505fb_remove(struct platform_device *pdev)
{
struct wm8505fb_info *fbi = platform_get_drvdata(pdev);
- device_remove_file(&pdev->dev, &dev_attr_contrast);
-
unregister_framebuffer(&fbi->fb);
writel(0, fbi->regbase);
@@ -399,6 +399,7 @@ static struct platform_driver wm8505fb_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = wmt_dt_ids,
+ .dev_groups = wm8505fb_groups,
},
};
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c8be1c4f5b55..bdc08244a648 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unmap_release:
err_idx = i;
- i = head;
+
+ if (indirect)
+ i = 0;
+ else
+ i = head;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
vring_unmap_one_split(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
+ i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
if (indirect)
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 7ae260577901..24b9a8e05f64 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -65,5 +65,14 @@ config HDQ_MASTER_OMAP
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
+config W1_MASTER_SGI
+ tristate "SGI ASIC driver"
+ help
+ Say Y here if you want support for your 1-wire devices using
+ the SGI ASIC 1-Wire interface.
+
+ To compile this support as a module, choose M here: the module
+ will be called sgi_w1.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 18954cae4256..dae629b7ab49 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
+obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index c3b2095ef6a9..1ca880e01476 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -92,7 +92,6 @@ static int mxc_w1_probe(struct platform_device *pdev)
{
struct mxc_w1_device *mdev;
unsigned long clkrate;
- struct resource *res;
unsigned int clkdiv;
int err;
@@ -120,8 +119,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"Incorrect time base frequency %lu Hz\n", clkrate);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ mdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->regs)) {
err = PTR_ERR(mdev->regs);
goto out_disable_clk;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3099052e1243..4164045866b3 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -660,7 +660,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdq_data *hdq_data;
- struct resource *res;
int ret, irq;
u8 rev;
const char *mode;
@@ -674,8 +673,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
hdq_data->dev = dev;
platform_set_drvdata(pdev, hdq_data);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdq_data->hdq_base = devm_ioremap_resource(dev, res);
+ hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdq_data->hdq_base))
return PTR_ERR(hdq_data->hdq_base);
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
new file mode 100644
index 000000000000..1b2d96b945be
--- /dev/null
+++ b/drivers/w1/masters/sgi_w1.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sgi_w1.c - w1 master driver for one wire support in SGI ASICs
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/sgi-w1.h>
+
+#include <linux/w1.h>
+
+#define MCR_RD_DATA BIT(0)
+#define MCR_DONE BIT(1)
+
+#define MCR_PACK(pulse, sample) (((pulse) << 10) | ((sample) << 2))
+
+struct sgi_w1_device {
+ u32 __iomem *mcr;
+ struct w1_bus_master bus_master;
+ char dev_id[64];
+};
+
+static u8 sgi_w1_wait(u32 __iomem *mcr)
+{
+ u32 mcr_val;
+
+ do {
+ mcr_val = readl(mcr);
+ } while (!(mcr_val & MCR_DONE));
+
+ return (mcr_val & MCR_RD_DATA) ? 1 : 0;
+}
+
+/*
+ * this is the low level routine to
+ * reset the device on the One Wire interface
+ * on the hardware
+ */
+static u8 sgi_w1_reset_bus(void *data)
+{
+ struct sgi_w1_device *dev = data;
+ u8 ret;
+
+ writel(MCR_PACK(520, 65), dev->mcr);
+ ret = sgi_w1_wait(dev->mcr);
+ udelay(500); /* recovery time */
+ return ret;
+}
+
+/*
+ * This is the low-level routine to read or write a bit on the One Wire
+ * interface in hardware. It performs a write-0 slot if the bit parameter
+ * is 0, otherwise a write-1/read slot.
+ */
+static u8 sgi_w1_touch_bit(void *data, u8 bit)
+{
+ struct sgi_w1_device *dev = data;
+ u8 ret;
+
+ if (bit)
+ writel(MCR_PACK(6, 13), dev->mcr);
+ else
+ writel(MCR_PACK(80, 30), dev->mcr);
+
+ ret = sgi_w1_wait(dev->mcr);
+ if (bit)
+ udelay(100); /* recovery */
+ return ret;
+}
+
+static int sgi_w1_probe(struct platform_device *pdev)
+{
+ struct sgi_w1_device *sdev;
+ struct sgi_w1_platform_data *pdata;
+ struct resource *res;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
+ GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sdev->mcr))
+ return PTR_ERR(sdev->mcr);
+
+ sdev->bus_master.data = sdev;
+ sdev->bus_master.reset_bus = sgi_w1_reset_bus;
+ sdev->bus_master.touch_bit = sgi_w1_touch_bit;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ strlcpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
+ sdev->bus_master.dev_id = sdev->dev_id;
+ }
+
+ platform_set_drvdata(pdev, sdev);
+
+ return w1_add_master_device(&sdev->bus_master);
+}
+
+/*
+ * disassociate the w1 device from the driver
+ */
+static int sgi_w1_remove(struct platform_device *pdev)
+{
+ struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
+
+ w1_remove_master_device(&sdev->bus_master);
+
+ return 0;
+}
+
+static struct platform_driver sgi_w1_driver = {
+ .driver = {
+ .name = "sgi_w1",
+ },
+ .probe = sgi_w1_probe,
+ .remove = sgi_w1_remove,
+};
+module_platform_driver(sgi_w1_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for One-Wire IP in SGI ASICs");
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 37aaad26b373..ebed495b9e69 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -101,6 +101,12 @@ config W1_SLAVE_DS2438
Say Y here if you want to use a 1-wire
DS2438 Smart Battery Monitor device support
+config W1_SLAVE_DS250X
+ tristate "512b/1kb/16kb EPROM family support"
+ help
+ Say Y here if you want to use a 1-wire
+ 512b/1kb/16kb EPROM family device (DS250x).
+
config W1_SLAVE_DS2780
tristate "Dallas 2780 battery monitor chip"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index eab29f151413..8e9655eaa478 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
+obj-$(CONFIG_W1_SLAVE_DS250X) += w1_ds250x.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
new file mode 100644
index 000000000000..e507117444d8
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds250x.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * w1_ds250x.c - w1 family 09/0b/89/91 (DS250x) driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/crc16.h>
+
+#include <linux/w1.h>
+#include <linux/nvmem-provider.h>
+
+#define W1_DS2501_UNW_FAMILY 0x91
+#define W1_DS2501_SIZE 64
+
+#define W1_DS2502_FAMILY 0x09
+#define W1_DS2502_UNW_FAMILY 0x89
+#define W1_DS2502_SIZE 128
+
+#define W1_DS2505_FAMILY 0x0b
+#define W1_DS2505_SIZE 2048
+
+#define W1_PAGE_SIZE 32
+
+#define W1_EXT_READ_MEMORY 0xA5
+#define W1_READ_DATA_CRC 0xC3
+
+#define OFF2PG(off) ((off) / W1_PAGE_SIZE)
+
+#define CRC16_INIT 0
+#define CRC16_VALID 0xb001
+
+struct w1_eprom_data {
+ size_t size;
+ int (*read)(struct w1_slave *sl, int pageno);
+ u8 eprom[W1_DS2505_SIZE];
+ DECLARE_BITMAP(page_present, W1_DS2505_SIZE / W1_PAGE_SIZE);
+ char nvmem_name[64];
+};
+
+static int w1_ds2502_read_page(struct w1_slave *sl, int pageno)
+{
+ struct w1_eprom_data *data = sl->family_data;
+ int pgoff = pageno * W1_PAGE_SIZE;
+ int ret = -EIO;
+ u8 buf[3];
+ u8 crc8;
+
+ if (test_bit(pageno, data->page_present))
+ return 0; /* page already present */
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_reset_select_slave(sl))
+ goto err;
+
+ buf[0] = W1_READ_DATA_CRC;
+ buf[1] = pgoff & 0xff;
+ buf[2] = pgoff >> 8;
+ w1_write_block(sl->master, buf, 3);
+
+ crc8 = w1_read_8(sl->master);
+ if (w1_calc_crc8(buf, 3) != crc8)
+ goto err;
+
+ w1_read_block(sl->master, &data->eprom[pgoff], W1_PAGE_SIZE);
+
+ crc8 = w1_read_8(sl->master);
+ if (w1_calc_crc8(&data->eprom[pgoff], W1_PAGE_SIZE) != crc8)
+ goto err;
+
+ set_bit(pageno, data->page_present); /* mark page present */
+ ret = 0;
+err:
+ mutex_unlock(&sl->master->bus_mutex);
+ return ret;
+}
+
+static int w1_ds2505_read_page(struct w1_slave *sl, int pageno)
+{
+ struct w1_eprom_data *data = sl->family_data;
+ int redir_retries = 16;
+ int pgoff, epoff;
+ int ret = -EIO;
+ u8 buf[6];
+ u8 redir;
+ u16 crc;
+
+ if (test_bit(pageno, data->page_present))
+ return 0; /* page already present */
+
+ epoff = pgoff = pageno * W1_PAGE_SIZE;
+ mutex_lock(&sl->master->bus_mutex);
+
+retry:
+ if (w1_reset_select_slave(sl))
+ goto err;
+
+ buf[0] = W1_EXT_READ_MEMORY;
+ buf[1] = pgoff & 0xff;
+ buf[2] = pgoff >> 8;
+ w1_write_block(sl->master, buf, 3);
+ w1_read_block(sl->master, buf + 3, 3); /* redir, crc16 */
+ redir = buf[3];
+ crc = crc16(CRC16_INIT, buf, 6);
+
+ if (crc != CRC16_VALID)
+ goto err;
+
+
+ if (redir != 0xff) {
+ redir_retries--;
+ if (redir_retries < 0)
+ goto err;
+
+ pgoff = (redir ^ 0xff) * W1_PAGE_SIZE;
+ goto retry;
+ }
+
+ w1_read_block(sl->master, &data->eprom[epoff], W1_PAGE_SIZE);
+ w1_read_block(sl->master, buf, 2); /* crc16 */
+ crc = crc16(CRC16_INIT, &data->eprom[epoff], W1_PAGE_SIZE);
+ crc = crc16(crc, buf, 2);
+
+ if (crc != CRC16_VALID)
+ goto err;
+
+ set_bit(pageno, data->page_present);
+ ret = 0;
+err:
+ mutex_unlock(&sl->master->bus_mutex);
+ return ret;
+}
+
+static int w1_nvmem_read(void *priv, unsigned int off, void *buf, size_t count)
+{
+ struct w1_slave *sl = priv;
+ struct w1_eprom_data *data = sl->family_data;
+ size_t eprom_size = data->size;
+ int ret;
+ int i;
+
+ if (off > eprom_size)
+ return -EINVAL;
+
+ if ((off + count) > eprom_size)
+ count = eprom_size - off;
+
+ i = OFF2PG(off);
+ do {
+ ret = data->read(sl, i++);
+ if (ret < 0)
+ return ret;
+ } while (i < OFF2PG(off + count));
+
+ memcpy(buf, &data->eprom[off], count);
+ return 0;
+}
+
+static int w1_eprom_add_slave(struct w1_slave *sl)
+{
+ struct w1_eprom_data *data;
+ struct nvmem_device *nvmem;
+ struct nvmem_config nvmem_cfg = {
+ .dev = &sl->dev,
+ .reg_read = w1_nvmem_read,
+ .type = NVMEM_TYPE_OTP,
+ .read_only = true,
+ .word_size = 1,
+ .priv = sl,
+ .id = -1
+ };
+
+ data = devm_kzalloc(&sl->dev, sizeof(struct w1_eprom_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ sl->family_data = data;
+ switch (sl->family->fid) {
+ case W1_DS2501_UNW_FAMILY:
+ data->size = W1_DS2501_SIZE;
+ data->read = w1_ds2502_read_page;
+ break;
+ case W1_DS2502_FAMILY:
+ case W1_DS2502_UNW_FAMILY:
+ data->size = W1_DS2502_SIZE;
+ data->read = w1_ds2502_read_page;
+ break;
+ case W1_DS2505_FAMILY:
+ data->size = W1_DS2505_SIZE;
+ data->read = w1_ds2505_read_page;
+ break;
+ }
+
+ if (sl->master->bus_master->dev_id)
+ snprintf(data->nvmem_name, sizeof(data->nvmem_name),
+ "%s-%02x-%012llx",
+ sl->master->bus_master->dev_id, sl->reg_num.family,
+ (unsigned long long)sl->reg_num.id);
+ else
+ snprintf(data->nvmem_name, sizeof(data->nvmem_name),
+ "%02x-%012llx",
+ sl->reg_num.family,
+ (unsigned long long)sl->reg_num.id);
+
+ nvmem_cfg.name = data->nvmem_name;
+ nvmem_cfg.size = data->size;
+
+ nvmem = devm_nvmem_register(&sl->dev, &nvmem_cfg);
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct w1_family_ops w1_eprom_fops = {
+ .add_slave = w1_eprom_add_slave,
+};
+
+static struct w1_family w1_family_09 = {
+ .fid = W1_DS2502_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_0b = {
+ .fid = W1_DS2505_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_89 = {
+ .fid = W1_DS2502_UNW_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static struct w1_family w1_family_91 = {
+ .fid = W1_DS2501_UNW_FAMILY,
+ .fops = &w1_eprom_fops,
+};
+
+static int __init w1_ds250x_init(void)
+{
+ int err;
+
+ err = w1_register_family(&w1_family_09);
+ if (err)
+ return err;
+
+ err = w1_register_family(&w1_family_0b);
+ if (err)
+ goto err_0b;
+
+ err = w1_register_family(&w1_family_89);
+ if (err)
+ goto err_89;
+
+ err = w1_register_family(&w1_family_91);
+ if (err)
+ goto err_91;
+
+ return 0;
+
+err_91:
+ w1_unregister_family(&w1_family_89);
+err_89:
+ w1_unregister_family(&w1_family_0b);
+err_0b:
+ w1_unregister_family(&w1_family_09);
+ return err;
+}
+
+static void __exit w1_ds250x_exit(void)
+{
+ w1_unregister_family(&w1_family_09);
+ w1_unregister_family(&w1_family_0b);
+ w1_unregister_family(&w1_family_89);
+ w1_unregister_family(&w1_family_91);
+}
+
+module_init(w1_ds250x_init);
+module_exit(w1_ds250x_exit);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfe@suse.de>");
+MODULE_DESCRIPTION("w1 family driver for DS250x Add Only Memory");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2505_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2501_UNW_FAMILY));
+MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_UNW_FAMILY));
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 8188963a405b..a45f9e3e442b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -551,7 +551,7 @@ config OMAP_WATCHDOG
config PNX4008_WATCHDOG
tristate "LPC32XX Watchdog"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b9b2d06b3879..668a1c704f28 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -235,6 +235,7 @@ static long ar7_wdt_ioctl(struct file *file,
ar7_wdt_update_margin(new_margin);
ar7_wdt_kick(1);
spin_unlock(&wdt_lock);
+ /* Fall through */
case WDIOC_GETTIMEOUT:
if (put_user(margin, (int *)arg))
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 1b2cf5b95a89..c3c93e00b320 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
pcwd_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, argp);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 7b446b696f2b..e0ea133c1690 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -30,7 +30,6 @@
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/reboot.h>
-#include <mach/hardware.h>
/* WatchDog Timer - Chapter 23 Page 207 */
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 41a2a11535a6..b35f7be20c00 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
riowd_timeout = (new_margin + 59) / 60;
riowd_writereg(p, riowd_timeout, WDTO_INDEX);
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(riowd_timeout * 60, (int __user *)argp);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 5a6ced7a7e8f..202fc8d8ca5f 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -202,6 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd,
timeout = time;
sbwdog_set(user_dog, timeout);
sbwdog_pet(user_dog);
+ /* Fall through */
case WDIOC_GETTIMEOUT:
/*
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index efd7996694de..46268309ee9b 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -186,6 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
margin = new_margin;
scx200_wdt_update_margin();
scx200_wdt_ping();
+ /* Fall through */
case WDIOC_GETTIMEOUT:
if (put_user(margin, p))
return -EFAULT;
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 0650100fad00..7d278b37e083 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (wdt_set_heartbeat(new_heartbeat))
return -EINVAL;
wdt_ping();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
default:
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
soft_margin = new_margin;
reload = soft_margin * (mem_fclk_21285 / 256);
watchdog_ping();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
ret = put_user(soft_margin, int_arg);
break;
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 567005d7598e..5c52c73e1839 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
wdt977_keepalive();
- /* Fall */
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, uarg.i);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4c339c7e66e5..a446a7221e13 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- err = vm_map_pages(vma, map->pages, map->count);
+ err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
} else {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2f5ce7230a43..c6070e70dd73 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-struct remap_pfn {
- struct mm_struct *mm;
- struct page **pages;
- pgprot_t prot;
- unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
- struct page *page = r->pages[r->i];
- pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
- set_pte_at(r->mm, addr, ptep, pte);
- r->i++;
-
- return 0;
-}
-
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
goto out;
}
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
struct page **pages;
unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (rc)
goto out;
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- struct remap_pfn r = {
- .mm = vma->vm_mm,
- .pages = vma->vm_private_data,
- .prot = vma->vm_page_prot,
- };
-
- rc = apply_to_page_range(r.mm, kdata.addr,
- kdata.num << PAGE_SHIFT,
- remap_pfn_fn, &r);
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
+ rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
} else {
unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..adcabd9473eb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
return xen_phys_to_bus(virt_to_phys(address));
}
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
- unsigned int offset,
- size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
- unsigned long next_bfn;
- int i;
- int nr_pages;
+ unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
next_bfn = pfn_to_bfn(xen_pfn);
- nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
- for (i = 1; i < nr_pages; i++) {
+ for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
- return 0;
- }
- return 1;
-}
+ return 1;
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
- unsigned long xen_pfn = XEN_PFN_DOWN(p);
- unsigned int offset = p & ~XEN_PAGE_MASK;
-
- if (offset + size <= XEN_PAGE_SIZE)
- return 0;
- if (check_pages_physically_contiguous(xen_pfn, offset, size))
- return 0;
- return 1;
+ return 0;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)) &&
+ TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
@@ -400,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
- attrs);
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
+ size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
@@ -411,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* Ensure that the address returned is DMA'ble
*/
if (unlikely(!dma_capable(dev, dev_addr, size))) {
- swiotlb_tbl_unmap_single(dev, map, size, dir,
+ swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
@@ -447,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
- swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e0116..e5694133ebe5 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{
int err;
u16 old_value;
- pci_power_t new_state, old_state;
+ pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index ba883a80b3c0..7b1077f0abcb 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+ struct mm_struct *mm;
+ struct page **pages;
+ pgprot_t prot;
+ unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+ struct page *page = r->pages[r->i];
+ pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+ set_pte_at(r->mm, addr, ptep, pte);
+ r->i++;
+
+ return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .pages = vma->vm_private_data,
+ .prot = vma->vm_page_prot,
+ };
+
+ return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);